text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
import telebot
from telebot.types import LabeledPrice, ShippingOption
# Bot API token and payment-provider token (placeholder/test values here).
token = '1234567890:AAAABBBBCCCCDDDDeeeeFFFFgggGHHHH'
provider_token = '1234567890:TEST:AAAABBBBCCCCDDDD' # @BotFather -> Bot Settings -> Payments
bot = telebot.TeleBot(token)
# More about Payments: https://core.telegram.org/bots/payments
# Amounts are in the smallest units of the currency (5750 = $57.50 for USD).
prices = [LabeledPrice(label='Working Time Machine', amount=5750), LabeledPrice('Gift wrapping', 500)]
# Shipping choices offered when the invoice is flexible (is_flexible=True).
shipping_options = [
    ShippingOption(id='instant', title='WorldWide Teleporter').add_price(LabeledPrice('Teleporter', 1000)),
    ShippingOption(id='pickup', title='Local pickup').add_price(LabeledPrice('Pickup', 300))]
@bot.message_handler(commands=['start'])
def command_start(message):
    """Greet the user and point them at the /buy and /terms commands."""
    greeting = ("Hello, I'm the demo merchant bot."
                " I can sell you a Time Machine."
                " Use /buy to order one, /terms for Terms and Conditions")
    bot.send_message(message.chat.id, greeting)
@bot.message_handler(commands=['terms'])
def command_terms(message):
    """Reply with the (tongue-in-cheek) Terms and Conditions of the demo shop."""
    terms_text = (
        'Thank you for shopping with our demo bot. We hope you like your new time machine!\n'
        '1. If your time machine was not delivered on time, please rethink your concept of time and try again.\n'
        '2. If you find that your time machine is not working, kindly contact our future service workshops on Trappist-1e.'
        ' They will be accessible anywhere between May 2075 and November 4000 C.E.\n'
        '3. If you would like a refund, kindly apply for one yesterday and we will have sent it to you immediately.'
    )
    bot.send_message(message.chat.id, terms_text)
@bot.message_handler(commands=['buy'])
def command_pay(message):
    """Explain the test-card flow, then send the actual demo invoice."""
    bot.send_message(message.chat.id,
                     "Real cards won't work with me, no money will be debited from your account."
                     " Use this test card number to pay for your Time Machine: `4242 4242 4242 4242`"
                     "\n\nThis is your demo invoice:", parse_mode='Markdown')
    # `prices` amounts are in the smallest currency units (see module setup above).
    bot.send_invoice(message.chat.id, title='Working Time Machine',
                     description='Want to visit your great-great-great-grandparents?'
                                 ' Make a fortune at the races?'
                                 ' Shake hands with Hammurabi and take a stroll in the Hanging Gardens?'
                                 ' Order our Working Time Machine today!',
                     provider_token=provider_token,
                     currency='usd',
                     photo_url='http://erkelzaar.tsudao.com/models/perrotta/TIME_MACHINE.jpg',
                     photo_height=512,  # !=0/None or picture won't be shown
                     photo_width=512,
                     photo_size=512,
                     is_flexible=False, # True If you need to set up Shipping Fee
                     prices=prices,
                     start_parameter='time-machine-example',
                     invoice_payload='HAPPY FRIDAYS COUPON')
@bot.shipping_query_handler(func=lambda query: True)
def shipping(shipping_query):
    """Accept every shipping query, offering the preset shipping options."""
    print(shipping_query)
    bot.answer_shipping_query(
        shipping_query.id,
        ok=True,
        shipping_options=shipping_options,
        error_message='Oh, seems like our Dog couriers are having a lunch right now. Try again later!')
@bot.pre_checkout_query_handler(func=lambda query: True)
def checkout(pre_checkout_query):
    """Approve every pre-checkout query (demo bot: always ok)."""
    failure_note = ("Aliens tried to steal your card's CVV, but we successfully protected your credentials,"
                    " try to pay again in a few minutes, we need a small rest.")
    bot.answer_pre_checkout_query(pre_checkout_query.id, ok=True,
                                  error_message=failure_note)
@bot.message_handler(content_types=['successful_payment'])
def got_payment(message):
    """Thank the user once Telegram reports a successful payment."""
    payment = message.successful_payment
    confirmation = (
        'Hoooooray! Thanks for payment! We will proceed your order for `{} {}` as fast as possible! '
        'Stay in touch.\n\nUse /buy again to get a Time Machine for your friend!'
    ).format(payment.total_amount / 100, payment.currency)
    bot.send_message(message.chat.id, confirmation, parse_mode='Markdown')
# Discard updates that arrived while the bot was offline, then poll forever.
bot.skip_pending = True
bot.polling(none_stop=True, interval=0)
|
sgomez/pyTelegramBotAPI
|
examples/payments_example.py
|
Python
|
gpl-2.0
| 4,308
|
[
"VisIt"
] |
2840afd25016346e036ae8f0ebba26a866e851dda999ceb41d784c786f65d2ee
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawPoints(myscreen, clpoints, ccpoints):
    """Add the CL points to the scene as a point cloud colored by their CC points."""
    c=camvtk.PointCloud( pointlist=clpoints, collist=ccpoints)
    c.SetPoints()
    myscreen.addActor(c )
def drawFiber(myscreen, f, fibercolor=camvtk.red):
    """Draw each non-empty interval of fiber f as a line segment,
    with small spheres at the interval endpoints and at the
    corresponding cutter-contact (CC) points."""
    inter = f.getInts()
    for i in inter:
        if not i.empty():
            # end points of the interval along the fiber
            ip1 = f.point( i.lower )
            ip2 = f.point( i.upper )
            myscreen.addActor( camvtk.Line(p1=(ip1.x,ip1.y,ip1.z),p2=(ip2.x,ip2.y,ip2.z), color=fibercolor) )
            # endpoint spheres colored by CC-point type
            myscreen.addActor( camvtk.Sphere(center=(ip1.x,ip1.y,ip1.z),radius=0.005, color=camvtk.clColor( i.lower_cc) ) )
            myscreen.addActor( camvtk.Sphere(center=(ip2.x,ip2.y,ip2.z),radius=0.005, color=camvtk.clColor( i.upper_cc) ) )
            # the CC points themselves, in light green
            cc1 = i.lower_cc
            cc2 = i.upper_cc
            myscreen.addActor( camvtk.Sphere(center=(cc1.x,cc1.y,cc1.z),radius=0.005, color=camvtk.lgreen ) )
            myscreen.addActor( camvtk.Sphere(center=(cc2.x,cc2.y,cc2.z),radius=0.005, color=camvtk.lgreen ) )
            # cutter circle
            #c1 = camvtk.Circle(center=(ip1.x,ip1.y,ip1.z), radius = 0.3/2, color=fibercolor)
            #myscreen.addActor(c1)
            #c2 = camvtk.Circle(center=(ip2.x,ip2.y,ip2.z), radius = 0.3/2, color=fibercolor)
            #myscreen.addActor(c2)
if __name__ == "__main__":
    # NOTE: this script is Python 2 (print statements, xrange).
    myscreen = camvtk.VTKScreen()
    # the three corners of the test triangle
    a = ocl.Point(0,1,0.3)
    myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
    b = ocl.Point(1,0.5,0.3)
    myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
    c = ocl.Point(0,0,0)
    myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
    # the triangle edges
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
    myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
    t = ocl.Triangle(b,c,a)
    # cutter geometry; alternative cutter types left commented for experimenting
    diameter = 0.2
    length = 5
    angle = math.pi/4
    cutter = ocl.CylCutter(diameter, length)
    #cutter = ocl.BallCutter(diameter, length)
    #cutter = ocl.BullCutter(diameter, diameter/5, length)
    #cutter = ocl.ConeCutter(diameter, angle, length)
    print cutter
    # NOTE(review): `range` shadows the builtin of the same name; harmless here
    # but rename if the builtin is ever needed below.
    range=6
    Nmax = 500
    # Nmax+1 evenly spaced sample positions centered on 0, spanning `range`
    yvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
    xvals = [float(n-float(Nmax)/2)/Nmax*range for n in xrange(0,Nmax+1)]
    zh = 0.1071567
    # X-direction fibers at each y, pushed against the triangle and drawn red
    for y in yvals:
        f1 = ocl.Point(-0.5,y,zh) # start point of fiber
        f2 = ocl.Point(1.5,y,zh)  # end point of fiber
        f = ocl.Fiber( f1, f2)
        i = ocl.Interval()
        cutter.pushCutter(f,i,t)
        #cutter.vertexPush(f,i,t)
        #cutter.facetPush(f,i,t)
        #cutter.edgePush(f,i,t)
        f.addInterval(i)
        drawFiber(myscreen, f, camvtk.red)
    # Y-direction fibers at each x, drawn light blue
    for x in xvals:
        f1 = ocl.Point(x,-0.5,zh) # start point of fiber
        f2 = ocl.Point(x,1.5,zh)  # end point of fiber
        f = ocl.Fiber( f1, f2)
        i = ocl.Interval()
        cutter.pushCutter(f,i,t)
        #cutter.vertexPush(f,i,t)
        #cutter.facetPush(f,i,t)
        #cutter.edgePush(f,i,t)
        f.addInterval(i)
        drawFiber(myscreen, f, camvtk.lblue)
    print "done."
    # camera, decorations, and interactive render loop
    myscreen.camera.SetPosition(0.5, 3, 2)
    myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
    camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
    camvtk.drawOCLtext(myscreen)
    myscreen.render()
    # NOTE(review): the PNG writer is wired up but Write() is never called,
    # so no image file is produced — confirm whether that is intended.
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
|
AlanZatarain/opencamlib
|
scripts/fiber_02_onetriangle_drawfibers.py
|
Python
|
gpl-3.0
| 3,692
|
[
"VTK"
] |
f3e696eaedaf1ffaabccd6bb21d5eb920e13b82f66f1d31dec001ad9d01425fe
|
"""
Learning functions for Projections.
For example, CFProjectionLearningFunctions compute a new set of
ConnectionFields when given an input and output pattern and a set of
ConnectionField objects.
$Id$
"""
__version__ = "$Revision$"
from numpy import ones,zeros
import numpy.oldnumeric as Numeric
from numpy.oldnumeric import Float,Float32
import param
from topo.base.cf import CFPLearningFn
from topo.base.sheet import activity_type
from topo.base.functionfamily import Hebbian,LearningFn
# Imported here so that all ProjectionLearningFns will be in the same package
from topo.base.cf import CFPLF_Identity,CFPLF_Plugin
from basic import BCMFixed
class CFPLF_EuclideanHebbian(CFPLearningFn):
    """
    Hebbian CFProjection learning rule based on Euclidean distance.
    Learning is driven by the distance from the input pattern to the
    weights, scaled by the current activity. To implement a Kohonen
    SOM algorithm, the activity should be the neighborhood kernel
    centered around the winning unit, as implemented by KernelMax.
    """
    # CEBERRORALERT: ignoring the sheet mask
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        # This learning function does not need to scale the learning
        # rate like some do, so it does not use constant_sum_connection_rate()
        cfs = iterator.flatcfs
        rows,cols = output_activity.shape
        # NOTE: xrange => this module is Python 2.
        for r in xrange(rows):
            for c in xrange(cols):
                flati = r*cols+c  # flat index of unit (r,c)
                out = output_activity.flat[flati]
                # inactive units do not learn; skip them cheaply
                if out !=0:
                    rate = learning_rate * out
                    cf = cfs[flati]
                    X = cf.get_input_matrix(input_activity)
                    # move weights toward the input, proportionally to activity
                    cf.weights += rate * (X - cf.weights)
                    # CEBHACKALERT: see ConnectionField.__init__()
                    cf.weights *= cf.mask
#### JABHACKALERT: Untested
##class CFPLF_BCM(CFPLearningFn):
## """
## Bienenstock, Cooper, and Munro (1982) learning rule with sliding threshold.
##
## (See Dayan and Abbott, 2001, equation 8.12, 8.13).
##
## Activities change only when there is both pre- and post-synaptic activity.
## Threshold is adjusted based on recent firing rates.
## """
## single_cf_fn = param.ClassSelector(LearningFn,default=BCMFixed())
##
## unit_threshold_0=param.Number(default=0.5,bounds=(0,None),
## doc="Initial value of threshold between LTD and LTP; actual value computed based on recent history.")
## unit_threshold_learning_rate=param.Number(default=0.1,bounds=(0,None),
## doc="Amount by which the unit_threshold is adjusted for each activity calculation.")
##
## def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
## cfs = iterator.proj._cfs
## # Initialize thresholds the first time we learn the size of the output_activity.
## if not hasattr(self,'unit_thresholds'):
## self.unit_thresholds=ones(output_activity.shape,Float32)*self.unit_threshold_0
##
## rows,cols = output_activity.shape
##
## # JABALERT: Is this correct?
## single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
##
## # avoid evaluating these references each time in the loop
## single_cf_fn = self.single_cf_fn
## for r in xrange(rows):
## for c in xrange(cols):
## cf = cfs[r][c]
## input_act = cf.get_input_matrix(input_activity)
## unit_activity = output_activity[r,c]
## threshold=self.unit_thresholds[r,c]
## #print cf.weights, type(cf.weights)
## #print input_act, type(input_act)
## #print single_connection_learning_rate,unit_activity,threshold, (unit_activity-threshold)
## cf.weights += (single_connection_learning_rate * unit_activity * (unit_activity-threshold)) * input_act
## self.unit_thresholds[r,c] += self.unit_threshold_learning_rate*(unit_activity*unit_activity-threshold)
##
## # CEBHACKALERT: see ConnectionField.__init__()
## cf.weights *= cf.mask
class CFPLF_Trace(CFPLearningFn):
    """
    LearningFn that incorporates a trace of recent activity,
    not just the current activity.
    Based on P. Foldiak (1991), "Learning Invariance from
    Transformation Sequences", Neural Computation 3:194-200. Also see
    Sutton and Barto (1981) and Wallis and Rolls (1997).
    Incorporates a decay term to keep the weight vector bounded, and
    so it does not normally require any output_fn normalization for
    stability.
    NOT YET TESTED.
    """
    trace_strength=param.Number(default=0.5,bounds=(0.0,1.0),
        doc="How much the learning is dominated by the activity trace, relative to the current value.")
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually.")
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
        single_cf_fn = self.single_cf_fn
        ##Initialise traces to zero if they don't already exist
        # (lazy init: the output shape is only known at the first call)
        if not hasattr(self,'traces'):
            self.traces=zeros(output_activity.shape,activity_type)
        for cf,i in iterator():
            unit_activity = output_activity.flat[i]
            # exponential blend of current activity with the stored trace
            new_trace = (self.trace_strength*unit_activity)+((1-self.trace_strength)*self.traces.flat[i])
            self.traces.flat[i] = new_trace
            # Hebbian-style move toward the input, driven by the trace
            cf.weights += single_connection_learning_rate * new_trace * \
                          (cf.get_input_matrix(input_activity) - cf.weights)
            #CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask
class CFPLF_OutstarHebbian(CFPLearningFn):
    """
    CFPLearningFunction applying the specified (default is Hebbian)
    single_cf_fn to each CF, where normalization is done in an outstar-manner.
    Presumably does not need a separate output_fn for normalization.
    NOT YET TESTED.
    """
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually.")
    # class-level placeholder for the outstar weight sum
    outstar_wsum = None
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn
        # NOTE(review): this local shadows the class attribute of the same name
        # and the accumulated sum is never used after the loop — confirm whether
        # a normalization step using it is missing.
        outstar_wsum = zeros(input_activity.shape)
        for cf,i in iterator():
            single_cf_fn(cf.get_input_matrix(input_activity),
                output_activity.flat[i], cf.weights, single_connection_learning_rate)
            # Outstar normalization
            wrows,wcols = cf.weights.shape
            for wr in xrange(wrows):  # xrange => Python 2
                for wc in xrange(wcols):
                    outstar_wsum[wr][wc] += cf.weights[wr][wc]
            # CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask
class HomeoSynaptic(CFPLearningFn):
    """
    Learning function using homeostatic synaptic scaling from
    Sullivan & de Sa, "Homeostatic Synaptic Scaling in Self-Organizing Maps",
    Neural Networks (2006), 19(6-7):734-43.
    Does not necessarily require output_fn normalization for stability.
    """
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="LearningFn that will be applied to each CF individually")
    beta_n = param.Number(default=0.01,bounds=(0,None),
        doc="homeostatic learning rate")
    beta_c = param.Number(default=0.005,bounds=(0,None),
        doc="time window over which the neuron's firing rate is averaged")
    activity_target = param.Number(default=0.1,bounds=(0,None),
        doc="Target average activity")
    #debug = param.Boolean(default=False,doc="Print average activity values")
    #beta_n = param.Number(default=0.00033,bounds=(0,None),doc="Homeostatic learning rate") #Too small?
    #beta_c = param.Number(default=0.000033,bounds=(0,None),doc="Time window over which the neuron's firing rate is averaged")
    def __init__(self,**params):
        super(HomeoSynaptic,self).__init__(**params)
        # histories kept for offline analysis only (appended at end of __call__)
        self.temp_hist = []
        self.ave_hist = []
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """
        Update the value of the given weights matrix based on the
        input_activity matrix (of the same size as the weights matrix)
        and the response of this unit (the unit_activity), governed by
        a per-connection learning rate.
        """
        # First call only: seed the running activity averages and
        # L1-normalize every CF's weights to sum to 1.0.
        if not hasattr(self,'averages'):
            self.averages = ones(output_activity.shape,Float) * 0.1
            # normalize initial weights to 1.0
            for cf,i in iterator():
                current_norm_value = 1.0*Numeric.sum(abs(cf.weights.ravel()))
                if current_norm_value != 0:
                    factor = (1.0/current_norm_value)
                    cf.weights *= factor
        # compute recent average of output activity (exponential moving average)
        self.averages = self.beta_c * output_activity + (1.0-self.beta_c) * self.averages
        # per-unit divisor: >1 when the unit is above target, <1 when below
        activity_norm = 1.0 + self.beta_n * \
                        ((self.averages - self.activity_target)/self.activity_target)
        single_connection_learning_rate = self.constant_sum_connection_rate(iterator.proj_n_units,learning_rate)
        # avoid evaluating these references each time in the loop
        single_cf_fn = self.single_cf_fn
        for cf,i in iterator():
            single_cf_fn(cf.get_input_matrix(input_activity),
                output_activity.flat[i], cf.weights, single_connection_learning_rate)
            # homeostatic normalization
            cf.weights /= activity_norm.flat[i]
            # CEBHACKALERT: see ConnectionField.__init__()
            cf.weights *= cf.mask
        # For analysis only; can be removed (in which case also remove the initializations above)
        # CEBALERT: I changed [0][7] to [0]!
        self.ave_hist.append(self.averages.flat[0])
        self.temp_hist.append (Numeric.sum(abs(iterator.flatcfs[0].weights.ravel())))
class CFPLF_PluginScaled(CFPLearningFn):
    """
    CFPLearningFunction applying the specified single_cf_fn to each CF.
    Scales the single-connection learning rate by a scaling factor
    that is different for each individual unit. Thus each individual
    connection field uses a different learning rate.
    """
    single_cf_fn = param.ClassSelector(LearningFn,default=Hebbian(),
        doc="Accepts a LearningFn that will be applied to each CF individually.")
    learning_rate_scaling_factor = param.Parameter(default=None,
        doc="Matrix of scaling factors for scaling the learning rate of each CF individually.")
    def __call__(self, iterator, input_activity, output_activity, learning_rate, **params):
        """Apply the specified single_cf_fn to every CF."""
        # Default to uniform scaling (all ones) until a matrix is supplied.
        if self.learning_rate_scaling_factor is None:
            self.learning_rate_scaling_factor = ones(output_activity.shape)
        # Hoist lookups out of the per-CF loop.
        cf_fn = self.single_cf_fn
        scaling = self.learning_rate_scaling_factor
        base_rate = self.constant_sum_connection_rate(iterator.proj_n_units, learning_rate)
        for cf, i in iterator():
            cf_fn(cf.get_input_matrix(input_activity),
                  output_activity.flat[i], cf.weights,
                  scaling.flat[i] * base_rate)
            # CEBHACKALERT: see ConnectionField.__init__() re. mask & output fn
            cf.weights *= cf.mask
    def update_scaling_factor(self, new_scaling_factor):
        """Update the single-connection learning rate scaling factor."""
        self.learning_rate_scaling_factor = new_scaling_factor
|
jesuscript/topo-mpi
|
topo/learningfn/projfn.py
|
Python
|
bsd-3-clause
| 12,442
|
[
"NEURON"
] |
a21e549c0715817d7669767f5bceb46745daa64f4e6e0239b89d9cd1c8947ffb
|
from __future__ import print_function, absolute_import, division, unicode_literals
"""Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
Modified by JXP
"""
import re
#Define exceptions
# Exception hierarchy, rooted at RomanError so callers can catch all of them.
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass
#Define digit mapping, ordered largest-first so the greedy conversion works
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))
def toRoman(n):
    """convert integer to Roman numeral

    Raises OutOfRangeError unless 0 < n < 5000, and NotIntegerError for
    non-integral values.  Note: the old `raise Exc, "msg"` statement form
    used previously is a SyntaxError on Python 3, which contradicted this
    module's `from __future__ import ...` compatibility imports; the
    call-style raise below behaves identically on Python 2 and 3.
    """
    if not (0 < n < 5000):
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")
    result = ""
    # Greedy: repeatedly subtract the largest numeral value that still fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return str(result) # JXP -- Avoid unicode
#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    """ ,re.VERBOSE)
def fromRoman(s):
    """convert Roman numeral to integer

    Raises InvalidRomanNumeralError for blank or malformed input.
    """
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)
    result = 0
    index = 0
    # Consume the string left-to-right, largest numerals first.
    for numeral, integer in romanNumeralMap:
        while s[index:index+len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
|
profxj/xastropy
|
xastropy/outils/roman.py
|
Python
|
bsd-3-clause
| 2,817
|
[
"VisIt"
] |
0b31dda1994fcb9b56d01dab66c08caec66d053cdbbf0890fd39ae303fc4330d
|
#!/usr/bin/env python
##############################################################################################
#
#
# regrid_emissions_N96e.py
#
#
# Requirements:
# Iris 1.10, time, cf_units, numpy
#
#
# This Python script has been written by N.L. Abraham as part of the UKCA Tutorials:
# http://www.ukca.ac.uk/wiki/index.php/UKCA_Chemistry_and_Aerosol_Tutorials_at_vn10.4
#
# Copyright (C) 2015 University of Cambridge
#
# This is free software: you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# It is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You find a copy of the GNU Lesser General Public License at <http://www.gnu.org/licenses/>.
#
# Written by N. Luke Abraham 2016-10-20 <nla27@cam.ac.uk>
# Modified by Marcus Koehler 2018-01-05 <mok21@cam.ac.uk>
#
#
##############################################################################################
# preamble
import time
import iris
import cf_units
import numpy
# --- CHANGE THINGS BELOW THIS LINE TO WORK WITH YOUR FILES ETC. ---
# name of file containing an ENDGame grid, e.g. your model output
# NOTE: all the fields in the file should be on the same horizontal
#       grid, as the field used MAY NOT be the first in order of STASH
grid_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/um/archer/ag542/apm.pp/ag542a.pm1988dec'
#
# name of emissions file
# NOTE: We use the fluxes from the Gregorian calendar file also for the 360_day emission files
emissions_file='/group_workspaces/jasmin2/ukca/vol1/mkoehler/emissions/OXBUDS/0.5x0.5/v4/combined_sources_iso-pentane_1950-2020_v4.nc'
# --- BELOW THIS LINE, NOTHING SHOULD NEED TO BE CHANGED ---
species_name='i-C5H12'
# this is the grid we want to regrid to, e.g. N96 ENDGame
grd=iris.load(grid_file)[0]
grd.coord(axis='x').guess_bounds()
grd.coord(axis='y').guess_bounds()
# This is the original data
ems=iris.load_cube(emissions_file)
# make intersection between 0 and 360 longitude to ensure that
# the data is regridded correctly
nems = ems.intersection(longitude=(0, 360))
# make sure that we use the same coordinate system, otherwise regrid won't work
nems.coord(axis='x').coord_system=grd.coord_system()
nems.coord(axis='y').coord_system=grd.coord_system()
# now guess the bounds of the new grid prior to regridding
# (AreaWeighted regridding requires bounds on both grids)
nems.coord(axis='x').guess_bounds()
nems.coord(axis='y').guess_bounds()
# now regrid
ocube=nems.regrid(grd,iris.analysis.AreaWeighted())
# now add correct attributes and names to netCDF file
ocube.var_name='emissions_'+str.strip(species_name)
ocube.long_name='iso-pentane surface emissions'
ocube.units=cf_units.Unit('kg m-2 s-1')
ocube.attributes['vertical_scaling']='surface'
ocube.attributes['tracer_name']=str.strip(species_name)
# global attributes, so don't set in local_keys
# NOTE: all these should be strings, including the numbers!
# basic emissions type
ocube.attributes['emission_type']='1' # time series
ocube.attributes['update_type']='1' # same as above
ocube.attributes['update_freq_in_hours']='120' # i.e. 5 days
ocube.attributes['um_version']='10.6' # UM version
ocube.attributes['source']='combined_sources_iso-pentane_1950-2020_v4.nc'
ocube.attributes['title']='Time-varying monthly surface emissions of iso-pentane from 1950 to 2020.'
ocube.attributes['File_version']='v4'
ocube.attributes['File_creation_date']=time.ctime(time.time())
ocube.attributes['grid']='regular 1.875 x 1.25 degree longitude-latitude grid (N96e)'
ocube.attributes['history']=time.ctime(time.time())+': '+__file__+' \n'+ocube.attributes['history']
ocube.attributes['institution']='Centre for Atmospheric Science, Department of Chemistry, University of Cambridge, U.K.'
ocube.attributes['reference']='Granier et al., Clim. Change, 2011; Lamarque et al., Atmos. Chem. Phys., 2010; Helmig et al., Atmos. Environ., 2014.'
# NOTE(review): the two del statements (and the 'history' append above) assume
# these attributes exist on the input file and will raise KeyError otherwise —
# confirm against the emissions netCDF file.
del ocube.attributes['file_creation_date']
del ocube.attributes['description']
# rename and set time coord - mid-month from 1950-Jan to 2020-Dec
# this bit is annoyingly fiddly
ocube.coord(axis='t').var_name='time'
ocube.coord(axis='t').standard_name='time'
ocube.coords(axis='t')[0].units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day')
ocube.coord(axis='t').points=numpy.array([
15, 45, 75, 105, 135, 165, 195, 225, 255, 285, 315, 345, 375, 405,
435, 465, 495, 525, 555, 585, 615, 645, 675, 705, 735, 765, 795, 825,
855, 885, 915, 945, 975, 1005, 1035, 1065, 1095, 1125, 1155, 1185, 1215,
1245, 1275, 1305, 1335, 1365, 1395, 1425, 1455, 1485, 1515, 1545, 1575,
1605, 1635, 1665, 1695, 1725, 1755, 1785, 1815, 1845, 1875, 1905, 1935,
1965, 1995, 2025, 2055, 2085, 2115, 2145, 2175, 2205, 2235, 2265, 2295,
2325, 2355, 2385, 2415, 2445, 2475, 2505, 2535, 2565, 2595, 2625, 2655,
2685, 2715, 2745, 2775, 2805, 2835, 2865, 2895, 2925, 2955, 2985, 3015,
3045, 3075, 3105, 3135, 3165, 3195, 3225, 3255, 3285, 3315, 3345, 3375,
3405, 3435, 3465, 3495, 3525, 3555, 3585, 3615, 3645, 3675, 3705, 3735,
3765, 3795, 3825, 3855, 3885, 3915, 3945, 3975, 4005, 4035, 4065, 4095,
4125, 4155, 4185, 4215, 4245, 4275, 4305, 4335, 4365, 4395, 4425, 4455,
4485, 4515, 4545, 4575, 4605, 4635, 4665, 4695, 4725, 4755, 4785, 4815,
4845, 4875, 4905, 4935, 4965, 4995, 5025, 5055, 5085, 5115, 5145, 5175,
5205, 5235, 5265, 5295, 5325, 5355, 5385, 5415, 5445, 5475, 5505, 5535,
5565, 5595, 5625, 5655, 5685, 5715, 5745, 5775, 5805, 5835, 5865, 5895,
5925, 5955, 5985, 6015, 6045, 6075, 6105, 6135, 6165, 6195, 6225, 6255,
6285, 6315, 6345, 6375, 6405, 6435, 6465, 6495, 6525, 6555, 6585, 6615,
6645, 6675, 6705, 6735, 6765, 6795, 6825, 6855, 6885, 6915, 6945, 6975,
7005, 7035, 7065, 7095, 7125, 7155, 7185, 7215, 7245, 7275, 7305, 7335,
7365, 7395, 7425, 7455, 7485, 7515, 7545, 7575, 7605, 7635, 7665, 7695,
7725, 7755, 7785, 7815, 7845, 7875, 7905, 7935, 7965, 7995, 8025, 8055,
8085, 8115, 8145, 8175, 8205, 8235, 8265, 8295, 8325, 8355, 8385, 8415,
8445, 8475, 8505, 8535, 8565, 8595, 8625, 8655, 8685, 8715, 8745, 8775,
8805, 8835, 8865, 8895, 8925, 8955, 8985, 9015, 9045, 9075, 9105, 9135,
9165, 9195, 9225, 9255, 9285, 9315, 9345, 9375, 9405, 9435, 9465, 9495,
9525, 9555, 9585, 9615, 9645, 9675, 9705, 9735, 9765, 9795, 9825, 9855,
9885, 9915, 9945, 9975, 10005, 10035, 10065, 10095, 10125, 10155, 10185,
10215, 10245, 10275, 10305, 10335, 10365, 10395, 10425, 10455, 10485,
10515, 10545, 10575, 10605, 10635, 10665, 10695, 10725, 10755, 10785,
10815, 10845, 10875, 10905, 10935, 10965, 10995, 11025, 11055, 11085,
11115, 11145, 11175, 11205, 11235, 11265, 11295, 11325, 11355, 11385,
11415, 11445, 11475, 11505, 11535, 11565, 11595, 11625, 11655, 11685,
11715, 11745, 11775, 11805, 11835, 11865, 11895, 11925, 11955, 11985,
12015, 12045, 12075, 12105, 12135, 12165, 12195, 12225, 12255, 12285,
12315, 12345, 12375, 12405, 12435, 12465, 12495, 12525, 12555, 12585,
12615, 12645, 12675, 12705, 12735, 12765, 12795, 12825, 12855, 12885,
12915, 12945, 12975, 13005, 13035, 13065, 13095, 13125, 13155, 13185,
13215, 13245, 13275, 13305, 13335, 13365, 13395, 13425, 13455, 13485,
13515, 13545, 13575, 13605, 13635, 13665, 13695, 13725, 13755, 13785,
13815, 13845, 13875, 13905, 13935, 13965, 13995, 14025, 14055, 14085,
14115, 14145, 14175, 14205, 14235, 14265, 14295, 14325, 14355, 14385,
14415, 14445, 14475, 14505, 14535, 14565, 14595, 14625, 14655, 14685,
14715, 14745, 14775, 14805, 14835, 14865, 14895, 14925, 14955, 14985,
15015, 15045, 15075, 15105, 15135, 15165, 15195, 15225, 15255, 15285,
15315, 15345, 15375, 15405, 15435, 15465, 15495, 15525, 15555, 15585,
15615, 15645, 15675, 15705, 15735, 15765, 15795, 15825, 15855, 15885,
15915, 15945, 15975, 16005, 16035, 16065, 16095, 16125, 16155, 16185,
16215, 16245, 16275, 16305, 16335, 16365, 16395, 16425, 16455, 16485,
16515, 16545, 16575, 16605, 16635, 16665, 16695, 16725, 16755, 16785,
16815, 16845, 16875, 16905, 16935, 16965, 16995, 17025, 17055, 17085,
17115, 17145, 17175, 17205, 17235, 17265, 17295, 17325, 17355, 17385,
17415, 17445, 17475, 17505, 17535, 17565, 17595, 17625, 17655, 17685,
17715, 17745, 17775, 17805, 17835, 17865, 17895, 17925, 17955, 17985,
18015, 18045, 18075, 18105, 18135, 18165, 18195, 18225, 18255, 18285,
18315, 18345, 18375, 18405, 18435, 18465, 18495, 18525, 18555, 18585,
18615, 18645, 18675, 18705, 18735, 18765, 18795, 18825, 18855, 18885,
18915, 18945, 18975, 19005, 19035, 19065, 19095, 19125, 19155, 19185,
19215, 19245, 19275, 19305, 19335, 19365, 19395, 19425, 19455, 19485,
19515, 19545, 19575, 19605, 19635, 19665, 19695, 19725, 19755, 19785,
19815, 19845, 19875, 19905, 19935, 19965, 19995, 20025, 20055, 20085,
20115, 20145, 20175, 20205, 20235, 20265, 20295, 20325, 20355, 20385,
20415, 20445, 20475, 20505, 20535, 20565, 20595, 20625, 20655, 20685,
20715, 20745, 20775, 20805, 20835, 20865, 20895, 20925, 20955, 20985,
21015, 21045, 21075, 21105, 21135, 21165, 21195, 21225, 21255, 21285,
21315, 21345, 21375, 21405, 21435, 21465, 21495, 21525, 21555, 21585,
21615, 21645, 21675, 21705, 21735, 21765, 21795, 21825, 21855, 21885,
21915, 21945, 21975, 22005, 22035, 22065, 22095, 22125, 22155, 22185,
22215, 22245, 22275, 22305, 22335, 22365, 22395, 22425, 22455, 22485,
22515, 22545, 22575, 22605, 22635, 22665, 22695, 22725, 22755, 22785,
22815, 22845, 22875, 22905, 22935, 22965, 22995, 23025, 23055, 23085,
23115, 23145, 23175, 23205, 23235, 23265, 23295, 23325, 23355, 23385,
23415, 23445, 23475, 23505, 23535, 23565, 23595, 23625, 23655, 23685,
23715, 23745, 23775, 23805, 23835, 23865, 23895, 23925, 23955, 23985,
24015, 24045, 24075, 24105, 24135, 24165, 24195, 24225, 24255, 24285,
24315, 24345, 24375, 24405, 24435, 24465, 24495, 24525, 24555, 24585,
24615, 24645, 24675, 24705, 24735, 24765, 24795, 24825, 24855, 24885,
24915, 24945, 24975, 25005, 25035, 25065, 25095, 25125, 25155, 25185,
25215, 25245, 25275, 25305, 25335, 25365, 25395, 25425, 25455, 25485,
25515, 25545 ])
# make z-direction: a single surface level as an auxiliary coordinate
zdims=iris.coords.DimCoord(numpy.array([0]),standard_name = 'model_level_number',
                           units='1',attributes={'positive':'up'})
ocube.add_aux_coord(zdims)
# promote the scalar level coordinate to a real (leading) dimension
ocube=iris.util.new_axis(ocube, zdims)
# now transpose cube to put Z 2nd (swap the first two dimensions)
ocube.transpose([1,0,2,3])
# make coordinates 64-bit
ocube.coord(axis='x').points=ocube.coord(axis='x').points.astype(dtype='float64')
ocube.coord(axis='y').points=ocube.coord(axis='y').points.astype(dtype='float64')
#ocube.coord(axis='z').points=ocube.coord(axis='z').points.astype(dtype='float64') # integer
ocube.coord(axis='t').points=ocube.coord(axis='t').points.astype(dtype='float64')
# for some reason, longitude_bounds are double, but latitude_bounds are float
ocube.coord('latitude').bounds=ocube.coord('latitude').bounds.astype(dtype='float64')
# add forecast_period & forecast_reference_time
# forecast_reference_time
# forecast_reference_time points: day 15, 45, ..., 25545 in a 360-day
# calendar (units are set below as 'days since 1950-01-01', calendar='360_day'),
# i.e. one mid-month value per 30-day month -- 71 years x 12 months = 852 points.
# Generated with arange instead of the previous 80-line hand-typed literal,
# which is error-prone to transcribe and hard to modify.
frt = numpy.arange(15.0, 25546.0, 30.0, dtype='float64')
# Attach forecast_reference_time along the time dimension (data_dims=0),
# using the 360-day calendar, then infer its bounds.
frt_dims=iris.coords.AuxCoord(frt,standard_name = 'forecast_reference_time',
            units=cf_units.Unit('days since 1950-01-01 00:00:00', calendar='360_day'))
ocube.add_aux_coord(frt_dims,data_dims=0)
ocube.coord('forecast_reference_time').guess_bounds()
# forecast_period
# Scalar forecast_period of -360 hours with bounds [-720, 0]
# (i.e. the 30-day/720-hour meaning period ending at the reference time).
fp=numpy.array([-360],dtype='float64')
fp_dims=iris.coords.AuxCoord(fp,standard_name = 'forecast_period',
            units=cf_units.Unit('hours'),bounds=numpy.array([-720,0],dtype='float64'))
ocube.add_aux_coord(fp_dims,data_dims=None)
# add-in cell_methods
ocube.cell_methods = [iris.coords.CellMethod('mean', 'time')]
# set _FillValue
# Wrap the data as a masked array so the saver writes _FillValue=1e20, float32.
fillval=1e+20
ocube.data = numpy.ma.array(data=ocube.data, fill_value=fillval, dtype='float32')
# output file name, based on species
outpath='ukca_emiss_iC5H12.nc'
# don't want time to be cattable, as is a periodic emissions file
iris.FUTURE.netcdf_no_unlimited=True
# annoying hack to set a missing_value attribute as well as a _FillValue attribute
# (direct assignment via ocube.attributes would be rejected/ignored by iris,
# hence the raw dict.__setitem__ call)
dict.__setitem__(ocube.attributes, 'missing_value', fillval)
# now write-out to netCDF
# NETCDF3_CLASSIC format; local_keys keeps these attributes on the variable
# rather than promoting them to global attributes.
saver = iris.fileformats.netcdf.Saver(filename=outpath, netcdf_format='NETCDF3_CLASSIC')
saver.update_global_attributes(Conventions=iris.fileformats.netcdf.CF_CONVENTIONS_VERSION)
saver.write(ocube, local_keys=['vertical_scaling', 'missing_value','um_stash_source','tracer_name'])
# end of script
|
acsis-project/emissions
|
emissions/python/timeseries_1950-2020/regrid_iC5H12_emissions_n96e_360d.py
|
Python
|
gpl-3.0
| 18,872
|
[
"NetCDF"
] |
8b63cdf346b7104badf9e96eb9e012335fa6ff6951a66298c45b02c99636907c
|
#!/usr/bin/env python
#pylint: disable=missing-docstring
#################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
#################################################################
import chigger

# Open the Exodus results file at the first timestep.
exodus_reader = chigger.exodus.ExodusReader('../input/mug_blocks_out.e', timestep=0)

# Render the 'diffused' variable with the colour-range minimum clamped at 1.5.
result = chigger.exodus.ExodusResult(exodus_reader, variable='diffused', min=1.5)
colorbar = chigger.exodus.ExodusColorBar(result, primary={'precision':2, 'num_ticks':3, 'notation':'fixed'})

# 300x300 test window; write one PNG per timestep (min_0.png .. min_4.png).
window = chigger.RenderWindow(result, colorbar, size=[300,300], test=True)
for step in range(5):
    exodus_reader.setOptions(timestep=step)
    window.write('min_' + str(step) + '.png')
window.start()
|
Chuban/moose
|
python/chigger/tests/range/min.py
|
Python
|
lgpl-2.1
| 1,362
|
[
"MOOSE"
] |
d386c1f76931f9e4971ed8098ce4db5d7f694cc403af52bdb6fbe0d779d54c3a
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from django.views import defaults as default_views
from django.contrib.auth import views as auth_views
from django.views import i18n as django_views_i18n
from imagetrac_docker.b5.views import *
from imagetrac_docker.taskmanager.views import *
# Packages whose translatable strings are served by the JavaScript i18n
# catalog view (wired to the ^jsi18n$ URL in this module).
js_info_dict = {
    'packages': ('django.conf',),
}
# URL routing table. Most view callables (postsearch, export_data, HttpResponse,
# etc.) come from the star imports of imagetrac_docker.b5.views and
# imagetrac_docker.taskmanager.views above -- presumably HttpResponse is
# re-exported there; verify against those modules.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    # Django Admin, use {% url 'admin:index' %}
    url(settings.ADMIN_URL, admin.site.urls),
    # User management
    url(r'^users/', include('imagetrac_docker.users.urls', namespace='users')),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
    # url('^', include('django.contrib.auth.urls')),
    # url(r'^accounts/password_change/$', 'django.contrib.auth.views.password_change', {'template_name': 'registration/password_change_form.html'}),
    # url(r'^accounts/', include('django.contrib.auth.urls')),
    # url(r'^api/v1/products/', include('imagetrac_docker.b5.urls', namespace='b5', app_name='b5')),
    # url(r'^$', login_user, name='login_user'),
    # url(r'^user/password/reset/$', auth_views.password_reset, {'post_reset_redirect' : '/user/password/reset/done/'},
    #     name="password_reset"),
    # url(r'^user/password/reset/done/$',auth_views.password_reset_done),
    # url(r'^user/password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$', auth_views.password_reset_confirm, {'post_reset_redirect' : '/user/password/done/'}),
    # url(r'^user/password/done/$', auth_views.password_reset_complete),
    # Bug fix: HttpResponse no longer accepts 'mimetype' (removed in modern
    # Django); 'content_type' is the supported keyword and has the same effect.
    url(r'^robots\.txt$', lambda r: HttpResponse("User-agent: *\nDisallow: /", content_type="text/plain")),
    url(r'^main/', postsearch),
    url(r'^rumbasearch/', rumbasearch),
    url(r'^first/', frsearch),
    url(r'^adscreen/', adscreen),
    url(r'^pickup/', pickupsearch),
    url(r'^success/', success_message),
    url(r'^deployed/', record_deployed),
    url(r'^buyer-class/', buyer_class),
    # NOTE(review): the next two patterns share the name "export"; reversing
    # 'export' resolves to the last one registered -- confirm this is intended.
    url(r'^export/unavailable/', export_data, name="export"),
    url(r'^export/(.*)', export_data, name="export"),
    url(r'^export-daily/(.*)', export_daily, name="export_daily"),
    url(r'^export-mods/(.*)', export_mods, name="export_mods"),
    url(r'^export-pickup/(.*)', export_pickup, name="export_pickup"),
    url(r'^export-required/(.*)', export_required, name="export_required"),
    url(r'^export-studiocheck/', export_studiocheck, name="export_studiocheck"),
    url(r'^export-hotlist/(.*)', export_hotlist, name="export_hotlist"),
    url(r'^import/', import_data, name="import"),
    url(r'^import-fr/', import_first, name="import_first"),
    url(r'^import-check/', import_check, name="import_check"),
    url(r'^adminwatched/(?P<id>\w+)/', display_adminwatchlist, name='display_adminwatchlist'),
    url(r'^hotlist/', display_hotlist, name='display_hotlist'),
    url(r'^mywatched/(?P<id>\w+)/', display_watchlist, name='display_watchlist'),
    url(r'^quickcard/(?P<id>\w+)/', quickcard, name='quickcard'),
    url(r'^check_sequence/', check_sequence, name='check_sequence'),
    # Static documentation pages
    url(r'^docs/adsheet/', docs_adsheet, name='docs_adsheet'),
    url(r'^docs/daily/', docs_daily, name='docs_daily'),
    url(r'^docs/deploy/', docs_deploy, name='docs_deploy'),
    url(r'^docs/importer/', docs_importer, name='docs_importer'),
    url(r'^docs/inventory/', docs_inventory, name='docs_inventory'),
    url(r'^docs/restore/', docs_restore, name='docs_restore'),
    url(r'^docs/system/', docs_system, name='docs_system'),
    url(r'^edit_watcheditem/(?P<id>\w+)/(?P<userid>\w+)/', delete_watchlist_item, name='delete_watchlist_item'),
    url(r'^edit_adminwatched/(?P<id>\w+)/(?P<userid>\w+)/', delete_adminwatchlist_item, name='delete_adminwatchlist_item'),
    url(r'^edit_hotlist/(?P<id>\w+)/', delete_hotlist_item, name='delete_hotlist_item'),
    # url(r'^logout/$', auth_views.logout, {'next_page': '/'}),
    url(r'^logout/$', auth_views.logout, name='logout_page'),
    url(r'^b5/', include('imagetrac_docker.b5.urls', namespace='b5', app_name='b5')),
    url(r'^taskmanager/', include('imagetrac_docker.taskmanager.urls', namespace="taskmanager")),
    url(r'^utilities/', include('imagetrac_docker.utilities.urls', namespace='utilities', app_name='utilities')),
    url(r'^product/(?P<sku>[A-Z0-9- ]+)/(?P<item_no>[A-Z0-9- ]+)', display_record, name="display_record"),
    url(r'^edit/', edit_view),
    url(r'^819/', inventory_819, name="inventory_819"),
    url(r'^studio_check/', studio_check),
    url(r'^exportcsv/', export_csv),
    # url(r'^grappelli/', include('grappelli.urls')),
    url(r'^quicksearch/(?P<searchstring>[A-Za-z0-9 _@./#&+-]+)/', quicksearch, name="quicksearch"),
    url(r'^productsearch/(?P<searchstring>[A-Za-z0-9 _@./#&+-]+)/', productsearch, name="productsearch"),
    # url(r'^admin_tools/', include('admin_tools.urls')),
    url(r'^admin/', include(admin.site.urls)),
    # url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # url(r'^passwords/', include('password_reset.urls')),
    url(r'^jsi18n$', django_views_i18n.javascript_catalog, js_info_dict),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\
# + static(settings.UPLOAD_URL, document_root=settings.UPLOAD_ROOT) + static(settings.PRINTMEDIA_URL, document_root=settings.PRINTMEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),
    ]
    # Mount the debug-toolbar URLs only when the app is installed,
    # so DEBUG mode still works without the package.
    if 'debug_toolbar' in settings.INSTALLED_APPS:
        import debug_toolbar
        urlpatterns += [
            url(r'^__debug__/', include(debug_toolbar.urls)),
        ]
|
arsenalstriker14/imagetraccloud
|
config/urls.py
|
Python
|
mit
| 6,487
|
[
"VisIt"
] |
0878b8d3065a8ca14be00b7ec8594ee761ff4b12a6127d8d6dd6894763b5e829
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: PyZDDEunittest.py
# Purpose: PyZDDE unit test using the python unittest framework
#
# Licence: MIT License
# This file is subject to the terms and conditions of the MIT License.
# For further details, please refer to LICENSE.txt
#-------------------------------------------------------------------------------
from __future__ import division
from __future__ import print_function
import os
import sys
import imp
import unittest
# Put both the "Test" and the "PyZDDE" directory in the python search path.
# testdirectory = .../PyZDDE/Test ; pyzddedirectory = its parent (the repo root),
# so that 'import pyzdde.*' works when running this file directly.
testdirectory = os.path.dirname(os.path.realpath(__file__))
#ind = testdirectory.find('Test')
pyzddedirectory = os.path.split(testdirectory)[0]
if testdirectory not in sys.path:
    sys.path.append(testdirectory)
if pyzddedirectory not in sys.path:
    sys.path.append(pyzddedirectory)
import pyzdde.zdde as pyzdde
import pyzdde.zfileutils as zfile
imp.reload(pyzdde) # In order to ensure that the latest changes in the pyzdde module
imp.reload(zfile)  # are updated here.
# Flag to enable printing of returned values.
PRINT_RETURNED_VALUES = 1 # if test results are not going to be viewed by
                          # humans, turn this off.
class TestPyZDDEFunctions(unittest.TestCase):
pRetVar = PRINT_RETURNED_VALUES
    def setUp(self):
        """Open a DDE link to the ZEMAX server and reset to a new lens.

        Runs before every test; each test therefore starts from a fresh
        connection (self.ln) and an empty lens.
        """
        # Create the DDE channel object
        self.ln = pyzdde.PyZDDE()
        # Initialize the DDE
        # The DDE initialization has be done here, and so cannot be tested
        # otherwise, as no zDDExxx functions can be carried before initialization.
        status = self.ln.zDDEInit()
        # if TestPyZDDEFunctions.pRetVar:
        #    print("Status for link 0:", status)
        self.assertEqual(status,0)
        if status:
            print("Couldn't initialize DDE.")
        # Make sure to reset the lens
        ret = self.ln.zNewLens()
        assert ret==0
    def tearDown(self):
        """Close the DDE link opened in setUp, if it is still alive."""
        # Tear down unit test
        #self.ln.zNewLens()
        if self.ln._connection:
            self.ln.zDDEClose()
            #print("Tearing Down")
        else:
            print("Server was already terminated")
    # zDDEInit/zDDEClose are exercised implicitly by setUp/tearDown, so the
    # explicit tests below are skipped; the remaining methods are placeholders.
    @unittest.skip("Init is now being called in the setup")
    def test_zDDEInit(self):
        # Test initialization
        print("\nTEST: zDDEInit()")
        status = self.ln.zDDEInit()
        print("Status for link 0:", status)
        self.assertEqual(status,0)
        if status ==0:
            TestPyZDDEFunctions.b_zDDEInit_test_done = True
        else:
            print("Couldn't initialize DDE.")
    @unittest.skip("Being called in the teardown")
    def test_zDDEClose(self):
        print("\nTEST: zDDEClose()")
        self.ln.zDDEClose()
    @unittest.skip("To implement test")
    def test_zCloseUDOData(self):
        print("\nTEST: zCloseUDOData()")
        pass
    @unittest.skip("To implement test")
    def test_zDeleteMFO(self):
        print("\nTEST: zDeleteMFO()")
        pass
    @unittest.skip("To implement test")
    def test_zDeleteObject(self):
        print("\nTEST: zDeleteObject()")
        pass
    def test_zDeleteConfig(self):
        """Insert a second configuration, then verify zDeleteConfig removes it,
        restoring the original (1,1,1) configuration state."""
        print("\nTEST: zDeleteConfig()")
        # Load a lens file into the DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        currConfig = self.ln.zGetConfig()
        # Since no configuration is initally present, it should return (1,1,1)
        self.assertTupleEqual(currConfig,(1,1,1))
        # Insert a config
        self.ln.zInsertConfig(currConfig[1]+1)
        # Assert if the number of configurations didn't increase, however the
        # current configuration shouldn't change, and the number of multiple
        # configurations must remain same.
        newConfig = self.ln.zGetConfig()
        self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1]+1,currConfig[2]))
        # Now, finally, call zDeleteConfig() to switch configuration
        configNum = self.ln.zDeleteConfig(2)
        self.assertEqual(configNum,2)
        newConfig = self.ln.zGetConfig()
        self.assertTupleEqual(newConfig,currConfig)
        if TestPyZDDEFunctions.pRetVar:
            print('zDeleteConfig test successful')
    def test_zDeleteMCO(self):
        """Insert a multi-configuration operand (row), then verify zDeleteMCO
        removes it again."""
        print("\nTEST: zDeleteMCO()")
        # Load a lens file into the DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        # Get the current number of configurations (columns and rows)
        currConfig = self.ln.zGetConfig()
        self.assertTupleEqual(currConfig,(1,1,1))
        # Insert a operand (row)
        newOperNumber = self.ln.zInsertMCO(2)
        self.assertEqual(newOperNumber,2)
        newConfig = self.ln.zGetConfig()
        self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1],currConfig[2]+1))
        # Finally delete an MCO
        newOperNumber = self.ln.zDeleteMCO(2)
        self.assertEqual(newOperNumber,1)
        if TestPyZDDEFunctions.pRetVar:
            print('zDeleteMCO test successful')
    # Placeholder tests, skipped until implemented.
    @unittest.skip("To implement test")
    def test_zDeleteSurface(self):
        print("\nTEST: zDeleteSurface()")
        pass
    @unittest.skip("To implement")
    def test_zExecuteZPLMacro(self):
        print("\nTEST: zExecuteZPLMacro()")
    @unittest.skip("To implement test")
    def test_zExportCAD(self):
        print("\nTEST: zExportCAD()")
        pass
    @unittest.skip("To implement test")
    def test_zExportCheck(self):
        print("\nTEST: zExportCheck()")
        pass
    @unittest.skip("To implement")
    def test_zFindLabel(self):
        print("\nTEST: zFindLabel()")
        pass
    @unittest.skip("To implement")
    def test_zGetAddress(self):
        print("\nTEST: zGetAddress()")
        pass
def test_zGetAngularMagnification(self):
print("\nTEST: zGetAngularMagnification()")
# Load a lens file into the DDE server
filename = get_test_file()
self.ln.zLoadFile(filename)
aMag =self.ln.zGetAngularMagnification()
self.assertAlmostEqual(0.977161033, aMag, places=4)
if TestPyZDDEFunctions.pRetVar:
print('zGetAngularMagnification test successful')
    # Placeholder tests, skipped until implemented.
    @unittest.skip("To implement")
    def test_zGetAperture(self):
        print("\nTEST: zGetAperture()")
        pass
    @unittest.skip("To implement")
    def test_zGetApodization(self):
        print("\nTEST: zGetApodization()")
        pass
    @unittest.skip("To implement")
    def test_zGetAspect(self):
        print("\nTEST: zGetAspect()")
        pass
    @unittest.skip("To implement")
    def test_zGetBuffer(self):
        print("\nTEST: zGetBuffer()")
        pass
    @unittest.skip("Not important")
    def test_zGetComment(self):
        print("\nTEST: zGetComment()")
        pass
    def test_zGetConfig(self):
        """Verify zGetConfig() reflects an inserted configuration: the tuple is
        (currentConfig, numberOfConfigs, numberOfMCOperands)."""
        print("\nTEST: zGetConfig()")
        # Load a lens file into the DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        currConfig = self.ln.zGetConfig()
        #Since no configuration is initally present, it should return (1,1,1)
        self.assertTupleEqual(currConfig,(1,1,1))
        #Insert a config
        self.ln.zInsertConfig(currConfig[1]+1)
        #Assert if the number of configurations didn't increase, however the
        #current configuration shouldn't change, and the number of multiple
        #configurations must remain same.
        newConfig = self.ln.zGetConfig()
        self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1]+1,currConfig[2]))
        if TestPyZDDEFunctions.pRetVar:
            print("CONFIG: ", newConfig)
            print('zGetConfig test successful')
def test_zGetDate(self):
print("\nTEST: zGetDate()")
date = self.ln.zGetDate().rstrip()
self.assertIsInstance(date,str)
if TestPyZDDEFunctions.pRetVar:
print("DATE: ", date)
print('zGetDate test successful')
    # Placeholder test, skipped until implemented.
    @unittest.skip("To implement")
    def test_zGetExtra(self):
        print("\nTEST: zGetExtra()")
    def test_zGetField(self):
        """Set three field points via zSetField(), then verify each one is
        round-tripped correctly by zGetField()."""
        print("\nTEST: zGetField()")
        # First set some valid field parameters in the ZEMAX DDE server
        # set field with 4 arguments, n=0, 3 field points
        fieldData = self.ln.zSetField(0,0,3,1)
        # Set the first, second and third field point
        fieldData = self.ln.zSetField(1,0,0)
        fieldData = self.ln.zSetField(2,0,5,2.0,0.5,0.5,0.5,0.5,0.5)
        fieldData = self.ln.zSetField(3,0,10,1.0,0.0,0.0,0.0)
        # Now, verify the set parameters using zGetField()
        fieldData = self.ln.zGetField(0)
        self.assertTupleEqual((0,3),(fieldData[0],fieldData[1]))
        fieldData = self.ln.zGetField(1)
        self.assertTupleEqual(fieldData,(0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        fieldData = self.ln.zGetField(2)
        self.assertTupleEqual(fieldData,(0.0, 5.0, 2.0, 0.5, 0.5, 0.5, 0.5, 0.5))
        fieldData = self.ln.zGetField(3)
        self.assertTupleEqual(fieldData,(0.0, 10.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        if TestPyZDDEFunctions.pRetVar:
            print('zGetField test successful')
    def test_zGetFieldTuple(self):
        """Set fields in bulk with zSetFieldTuple(), then verify the prefix of
        each returned field matches what was set (unset trailing values are
        filled in by the server)."""
        print("\nTEST: zGetFieldTuple()")
        # First set the field using setField tuple data in the ZEMAX server
        iFieldDataTuple = ((0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0), # field1: xf=0.0,yf=0.0,wgt=1.0,
                                                              # vdx=vdy=vcx=vcy=van=0.0
                           (0.0,5.0,1.0),                     # field2: xf=0.0,yf=5.0,wgt=1.0
                           (0.0,10.0))                        # field3: xf=0.0,yf=10.0
        # Set the field data, such that fieldType is angle with rectangular normalization
        self.ln.zSetFieldTuple(0,1,iFieldDataTuple)
        # Now get the field data by callling zGetFieldTuple
        oFieldDataTuple_g = self.ln.zGetFieldTuple()
        if TestPyZDDEFunctions.pRetVar:
            for i in range(len(iFieldDataTuple)):
                print("oFieldDataTuple_g, field {i} : {t}".format(i=i,
                                                      t=oFieldDataTuple_g[i]))
        # Verify
        for i in range(len(iFieldDataTuple)):
            self.assertEqual(oFieldDataTuple_g[i][:len(iFieldDataTuple[i])],
                             iFieldDataTuple[i])
        if TestPyZDDEFunctions.pRetVar:
            print('zGetFieldTuple test successful')
    def test_zGetFile(self):
        """zGetFile() should return the path of the lens file just loaded."""
        print("\nTEST: zGetFile()")
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        reply = self.ln.zGetFile()
        self.assertEqual(reply,filename)
        if TestPyZDDEFunctions.pRetVar:
            print("zGetFile return value: {}".format(reply))
            print('zGetFile test successful')
    def test_zGetFirst(self):
        """zGetFirst() should return five first-order properties; only the
        types of the first four are checked here, not their values."""
        print("\nTEST: zGetFirst()")
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        (focal,pwfn,rwfn,pima,pmag) = self.ln.zGetFirst()
        # Just going to check the validity of the returned data type
        self.assertIsInstance(focal,float)
        self.assertIsInstance(pwfn,float)
        self.assertIsInstance(rwfn,float)
        self.assertIsInstance(pima,float)
        if TestPyZDDEFunctions.pRetVar:
            print(("zGetFirst ret: {:.4f},{:.4f},{:.4f},{:.4f},{:.4f}"
                   .format(focal,pwfn,rwfn,pima,pmag)))
            print('zGetFirst test successful')
@unittest.skip("To implement")
def test_zGetGlass(self):
print("\nTEST: zGetGlass()")
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
assert ret == 0
glass = self.ln.zGetGlass(0)
self.assertEqual(glass, None)
glass = self.ln.zGetGlass(1)
self.assertEqual(len(glass),3)
self.assertEqual(glass[0],'SK16')
#make the first surface gradient, it should return None
self.ln.zSetSurfaceData(1,4,'GRINSUR1')
self.assertEqual(glass, None)
    def test_zGetImageSpaceNA(self):
        """Check the image-space numerical aperture of the reference lens."""
        print("\nTEST: zGetImageSpaceNA()")
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        isna = self.ln.zGetImageSpaceNA()
        self.assertAlmostEqual(isna, 0.09950372, places=5,
                               msg='Expected ISNA to be 0.09950372')
        if TestPyZDDEFunctions.pRetVar:
            print("zGetImageSpaceNA test successful")
    def test_zGetPOP(self):
        """Exercise zGetPOP() against four lens/settings-file pairs covering:
        default settings, no fiber-coupling integral, irradiance data with a
        non-zero surface-to-beam value, and phase data.
        """
        print("\nTest: zGetPOP()")
        def check_popinfo(pidata, length, sfile=None):
            """Helper function to validate popinfo data"""
            self.assertIsInstance(pidata, tuple,
                                  "Expecting popinfo as a tuple")
            self.assertEqual(len(pidata), length,
                             "Expecting {} elements in popinfo tuple".format(length))
            # validate the actual data in the pop info
            # (each branch holds the expected values for one settings file)
            if sfile == 'default':
                expPidata = (4, 10078.0, 1.0, 1.0, 0.999955, 0.999955, 0.0079605,
                             0.0079605, 0.00060786, 0.19908, 256, 256, 0.3201024,
                             0.3201024)
            if sfile == 'nofibint':
                expPidata = (4, 10078.0, 1.0, None, None, None, 0.0079605,
                             0.0079605, 0.00060786, 0.19908, 256, 256, 0.3201024,
                             0.3201024)
            if sfile == 'nzstbirr':
                expPidata = (4, 10058.0, 1.0, 1.0, 0.985784, 0.985784, 0.0079602,
                             0.0077483, -0.044419, 0.18861, 256, 256, 0.320384,
                             0.320384)
            if sfile == 'nzstbpha':
                expPidata = (4, -0.2339, None, 1.0, 0.985784, 0.985784, 0.0079602,
                             0.0077483, -0.044419, 0.18861, 256, 256, 0.320384,
                             0.320384)
            if sfile: # perform test iff there is an sfile
                for x, y in zip(pidata, expPidata):
                    self.assertAlmostEqual(x, y, places=5)
        def check_data(data, dim, dtype=None, sfile=None):
            """Helper function to validate data"""
            self.assertIsInstance(data, list, "Expecting data as a list")
            self.assertEqual(len(data), dim[0],
                "Expecting the first dimension of popinfo to be {}".format(dim[0]))
            self.assertEqual(len(data[0]), dim[1],
                "Expecting the second dimension of popinfo to be {}".format(dim[1]))
        # first file (default settings)
        filename, sfilename = get_test_file('pop', settings=True, sfile='default')
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        #print("Lens file: {}\nSettings file: {}".format(filename, sfilename))
        # zGetPOP() without any arguments ... it is not possible to test
        # most other parameters without settings file. This is because any other
        # .CFG settings file can influence the anlysis
        popinfo = self.ln.zGetPOP()
        self.assertEqual(len(popinfo), 14, 'Expected 14 fields')
        # zGetPOP() with settings file
        popinfo = self.ln.zGetPOP(settingsFile=sfilename)
        check_popinfo(popinfo, 14, sfile='default')
        # second file (no fiber coupling integral)
        filename, sfilename = get_test_file('pop', settings=True, sfile='nofibint')
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        #print("Lens file: {}\nSettings file: {}".format(filename, sfilename))
        # zGetPOP() with the settingfile with no fiber coupling integral
        popinfo, data = self.ln.zGetPOP(settingsFile=sfilename, displayData=True)
        check_popinfo(popinfo, 14, sfile='nofibint')
        check_data(data, (256, 256))
        # third file (irradiance data)
        filename, sfilename = get_test_file('pop', settings=True, sfile='nzstbirr')
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        #print("Lens file: {}\nSettings file: {}".format(filename, sfilename))
        # zGetPOP() with settings to irradiance data with non-zero surf to beam
        # value
        popinfo, data = self.ln.zGetPOP(settingsFile=sfilename, displayData=True)
        check_popinfo(popinfo, 14, sfile='nzstbirr')
        check_data(data, (256, 256))
        # fourth file (phase data)
        filename, sfilename = get_test_file('pop', settings=True, sfile='nzstbpha')
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        #print("Lens file: {}\nSettings file: {}".format(filename, sfilename))
        # zGetPOP() with settings to phase data with non-zero surf to beam value
        popinfo, data = self.ln.zGetPOP(settingsFile=sfilename, displayData=True)
        check_popinfo(popinfo, 14, sfile='nzstbpha')
        check_data(data, (256, 256))
        if TestPyZDDEFunctions.pRetVar:
            print('zGetPop test successful')
def test_zSetPOPSettings(self):
print("\nTEST: zSetPOPSettings()")
filename = get_test_file('pop')
ret = self.ln.zLoadFile(filename)
assert ret == 0
# Set POP settings, without specifying a settings file name.
srcParam = ((1, 2, 7, 8), (2, 2, 0, 0)) # x/y waist = 2mm, TEM00
fibParam = ((1, 2, 7, 8), (0.008, 0.008, 0, 0)) # x/y waist = 0.008 mm, TEM00
sfilename = self.ln.zSetPOPSettings(data=0, startSurf=1, endSurf=4,
field=1, wave=1, beamType=0,
paramN=srcParam, tPow=1, sampx=4,
sampy=4, widex=40, widey=40,
fibComp=1, fibType=0, fparamN=fibParam)
exception = None
try:
self.assertTrue(checkFileExist(sfilename),
"Expected function to create settings file")
dirname, fn = os.path.split(filename)
sdirname, sfn = os.path.split(sfilename)
self.assertEqual(dirname, sdirname,
"Expected settings file to be in same dir as lens file")
self.assertTrue(sfn.endswith('_pyzdde_POP.CFG'),
"Expected file name to end with '_pyzdde_POP.CFG'")
# Get POP info with the above settings
popinfo = self.ln.zGetPOP(sfilename)
self.assertEqual(popinfo.surf, 4, 'Expected surf to be 4')
self.assertEqual(popinfo.totPow, 1.0, 'Expected tot power 1.0')
self.assertIsNot(popinfo.fibEffSys, None, 'Expected non-None')
self.assertEqual(popinfo.gridX, 256, 'Expected grid x be 256')
# Change to phase data, with few different settings but with the
# same settings file name
sfilename_new = self.ln.zSetPOPSettings(data=1,
settingsFile=sfilename, startSurf=1,
endSurf=4, field=1, wave=1, beamType=0,
paramN=srcParam, pIrr=1, sampx=3, sampy=3,
widex=40, widey=40, fibComp=0, fibType=0,
fparamN=fibParam)
self.assertEqual(sfilename, sfilename_new, 'Expecting same filenames')
# Get POP info with the above settings
popinfo = self.ln.zGetPOP(sfilename_new)
self.assertEqual(popinfo.blank, None, 'Expected None for blank phase field')
self.assertEqual(popinfo.fibEffSys, None, 'Expected None for no fiber integral')
self.assertEqual(popinfo.gridX, 128, 'Expected grid x be 128')
except Exception as exception:
pass # nothing to do here, raise it after cleaning up
finally:
# It is important to delete these settings files after the test. If not
# deleted, they WILL interfere with the ohter POP tests
deleteFile(sfilename)
if exception:
raise exception
if TestPyZDDEFunctions.pRetVar:
print('zSetPOPSettings test successful')
def test_zModifyPOPSettings(self):
print("\nTEST: zModifyPOPSettings()")
filename = get_test_file('pop')
ret = self.ln.zLoadFile(filename)
assert ret == 0
# Set POP settings, without specifying a settings file name.
srcParam = ((1, 2, 7, 8), (2, 2, 0, 0)) # x/y waist = 2mm, TEM00
fibParam = ((1, 2, 7, 8), (0.008, 0.008, 0, 0)) # x/y waist = 0.008 mm, TEM00
sfilename = self.ln.zSetPOPSettings(data=0, startSurf=1, endSurf=4,
field=1, wave=1, beamType=0,
paramN=srcParam, tPow=1, sampx=4,
sampy=4, widex=40, widey=40,
fibComp=1, fibType=0, fparamN=fibParam)
exception = None
try:
# Get POP info with the above settings
popinfo = self.ln.zGetPOP(sfilename)
assert popinfo.gridX == 256, 'Expected grid x be 256' #
# Change settings using zModifyPOPSettings
errCode = self.ln.zModifyPOPSettings(settingsFile=sfilename,
endSurf=1, sampx=2, sampy=2,
paramN=((1, 2),(1, 2)), tPow=2)
self.assertIsInstance(errCode, tuple)
self.assertTupleEqual(errCode, (0, (0, 0), 0, 0, 0))
# Get POP info with the above settings
popinfo = self.ln.zGetPOP(sfilename)
print(popinfo)
self.assertEqual(popinfo.totPow, 2.0, 'Expected tot pow 2.0')
self.assertEqual(popinfo.gridX, 64, 'Expected grid x be 64')
except Exception as exception:
pass # nothing to do here, raise it after cleaning up
finally:
# It is important to delete these settings files after the test. If not
# deleted, they WILL interfere with the ohter POP tests
deleteFile(sfilename)
if exception:
raise exception
if TestPyZDDEFunctions.pRetVar:
print('zModifyPOPSettings test successful')
def test_zGlobalMatrix(self):
print("\nTEST: zGetGlobalMatrix()")
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
assert ret == 0
gmd = self.ln.zGetGlobalMatrix(2)
expGmd = (1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 3.25895583)
for x, y in zip(gmd, expGmd):
self.assertAlmostEqual(x, y, places=4)
if TestPyZDDEFunctions.pRetVar:
print("Global Matrix:", gmd)
print('zGetGlobalMatrix test successful')
def test_zGetIndex(self):
print("\nTEST: zGetIndex()")
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
assert ret == 0
index = self.ln.zGetIndex(1)
self.assertIsInstance(index, tuple)
expIndex = (1.628139266, 1.622607688, 1.617521122)
for i, j in zip(index, expIndex):
self.assertAlmostEqual(i, j, places=5)
if TestPyZDDEFunctions.pRetVar:
print('zGetIndex Test successful')
    @unittest.skip("To implement")
    def test_zGetLabel(self):
        # Placeholder: test for zGetLabel() not yet implemented.
        print("\nTEST: zGetLabel()")
        pass
    @unittest.skip("To implement")
    def test_zGetMetaFile(self):
        # Placeholder: test for zGetMetaFile() not yet implemented.
        print("\nTEST: zGetMetaFile()")
        pass
## @unittest.skip("To implement test")
## def test_zGetMode(self):
## print("\nTEST: zGetMode()")
## pass
def test_zGetMulticon(self):
print("***\nTEST: zGetMulticon()***")
print("Lens", self.ln.zGetFile())
# Test zGetMulticon return when the MCE is "empty" (it should return None)
multiConData = self.ln.zGetMulticon(2, 3) # configuration 2, row 3 (both are fictitious)
self.assertEqual(multiConData, None)
# insert an additional configuration (column)
self.ln.zInsertConfig(1)
# insert an additional operand (row)
self.ln.zInsertMCO(2)
# Set the row operands (both to thickness, of surfaces 2, and 4 respectively)
multiConData = self.ln.zSetMulticon(0,1,'THIC',2,0,0)
multiConData = self.ln.zSetMulticon(0,2,'THIC',4,0,0)
# Set configuration 1
multiConData = self.ln.zSetMulticon(1,1,6.0076,0,1,1,1.0,0.0)
multiConData = self.ln.zSetMulticon(1,2,4.7504,0,1,1,1.0,0.0)
# Set configuration 2
multiConData = self.ln.zSetMulticon(2,1,7.0000,0,1,1,1.0,0.0)
multiConData = self.ln.zSetMulticon(2,2,5.0000,0,1,1,1.0,0.0)
# use zGetMulticon() to verify the set values
multiConData = self.ln.zGetMulticon(1,1) # row 1, config 1
self.assertTupleEqual(multiConData,(6.0076, 2, 2, 0, 1, 1, 1.0, 0.0))
multiConData = self.ln.zGetMulticon(2,1) # row 1, config 2
self.assertTupleEqual(multiConData,(7.0, 2, 2, 0, 1, 1, 1.0, 0.0))
multiConData = self.ln.zGetMulticon(1,2) # row 2, config 1
self.assertTupleEqual(multiConData,(4.7504, 2, 2, 0, 1, 1, 1.0, 0.0))
multiConData = self.ln.zGetMulticon(2,2) # row 2, config 2
self.assertTupleEqual(multiConData,(5.0, 2, 2, 0, 1, 1, 1.0, 0.0))
if TestPyZDDEFunctions.pRetVar:
print('zGetMulticon test successful')
    @unittest.skip("To implement test")
    def test_zGetName(self):
        # NOTE(review): body is implemented but the test is still skipped;
        # the expected lens name below presumably matches the default test
        # file -- confirm before enabling.
        print("\nTEST: zGetName()")
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        reply = self.ln.zGetName()
        self.assertEqual(reply,"A SIMPLE COOKE TRIPLET.")
    @unittest.skip("To implement test")
    def test_zGetNSCData(self):
        # Placeholder: test for zGetNSCData() not yet implemented.
        print("\nTEST: zGetNSCData()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCMatrix(self):
        # Placeholder: test for zGetNSCMatrix() not yet implemented.
        print("\nTEST: zGetNSCMatrix()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCObjectData(self):
        # Placeholder: test for zGetNSCObjectData() not yet implemented.
        print("\nTEST: zGetNSCObjectData()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCObjectFaceData(self):
        # Placeholder: test for zGetNSCObjectFaceData() not yet implemented.
        print("\nTEST: zGetNSCObjectFaceData()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCParameter(self):
        # Placeholder: test for zGetNSCParameter() not yet implemented.
        print("\nTEST: zGetNSCParameter()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCPosition(self):
        # Placeholder: test for zGetNSCPosition() not yet implemented.
        print("\nTEST: zGetNSCPosition()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCProperty(self):
        # Placeholder: test for zGetNSCProperty() not yet implemented.
        print("\nTEST: zGetNSCProperty()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCSetting(self):
        # Placeholder: test for zGetNSCSetting() not yet implemented.
        print("\nTEST: zGetNSCSetting()")
        pass
    @unittest.skip("To implement test")
    def test_zGetNSCSolve(self):
        # Placeholder: test for zGetNSCSolve() not yet implemented.
        print("\nTEST: zGetNSCSolve()")
        pass
    @unittest.skip("To implement test")
    def test_zGetOperand(self):
        # Placeholder: test for zGetOperand() not yet implemented.
        print("\nTEST: zGetOperand()")
        pass
def test_zGetPath(self):
print("\nTEST: zGetPath()")
(p2DataFol,p2DefaultFol) = self.ln.zGetPath()
self.assertTrue(os.path.isabs(p2DataFol))
self.assertTrue(os.path.isabs(p2DefaultFol))
if TestPyZDDEFunctions.pRetVar:
print('zGetPath test successful')
def test_zGetPolState(self):
print("\nTEST: zGetPolState()")
# Set polarization of the "new" lens
self.ln.zSetPolState(0,0.5,0.5,10.0,10.0)
polStateData = self.ln.zGetPolState()
self.assertTupleEqual(polStateData,(0,0.5,0.5,10.0,10.0))
if TestPyZDDEFunctions.pRetVar:
print('zGetPolState test successful')
def test_zGetPolTrace(self):
print("\nTEST: zGetPolTrace()")
# Load a lens file into the LDE
filename = get_test_file()
self.ln.zLoadFile(filename)
# Set up the data
waveNum, mode, surf = 1, 0, -1
hx, hy, px, py = 0.0, 0.5, 0.0, 1.0
Ex, Ey, Phax, Phay = 0.7071067, 0.7071067, 0, 0
rayTraceArg = (waveNum, mode, surf, hx, hy, px, py, Ex, Ey, Phax, Phay)
expRayTraceData = (0, 0.94884799, -0.26984638, -0.27515251, 0.02023320,
0.63034490, 0.63278178, -0.04653128)
# test returned tuple
rayTraceData = self.ln.zGetPolTrace(*rayTraceArg)
for i,d in enumerate(expRayTraceData):
self.assertAlmostEqual(rayTraceData[i], d, places=6)
if TestPyZDDEFunctions.pRetVar:
print('zGetPolTrace test successful')
    @unittest.skip("To implement test")
    def test_zGetPolTraceDirect(self):
        # Placeholder: test for zGetPolTraceDirect() not yet implemented.
        print("\nTEST: zGetPolTraceDirect()")
        pass
    def test_zGetPupil(self):
        """Compare the zGetPupil() tuple for the test lens against expected
        values, and optionally print a human-readable pupil report."""
        print("\nTEST: zGetPupil()")
        # Load a lens to the ZEMAX DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        # Get the pupil data
        pupilData = self.ln.zGetPupil()
        # expected fields: type, value, ENPD, ENPP, EXPD, EXPP,
        # apodization type, apodization factor (see lookup tables below)
        expPupilData = (0, 10.0, 10.0, 11.51215705, 10.23372788, -50.96133853, 0, 0.0)
        for i,d in enumerate(expPupilData):
            self.assertAlmostEqual(pupilData[i],d,places=4)
        # Print the pupil data if switch is on.
        if TestPyZDDEFunctions.pRetVar:
            # lookup tables that decode the numeric codes in pupilData
            pupil_data = dict(zip((0,1,2,3,4,5,6,7),('type','value','ENPD','ENPP',
                          'EXPD','EXPP','apodization_type','apodization_factor')))
            pupil_type = dict(zip((0,1,2,3,4,5),
                     ('entrance pupil diameter','image space F/#','object space NA',
                      'float by stop','paraxial working F/#','object cone angle')))
            pupil_value_type = dict(zip((0,1),("stop surface semi-diameter",
                                        "system aperture")))
            apodization_type = dict(zip((0,1,2),('none','Gaussian','Tangential')))
            # Print the pupil data
            print("Pupil data:")
            print("{pT} : {pD}".format(pT=pupil_data[0],pD=pupil_type[pupilData[0]]))
            # the meaning of the 'value' field depends on the aperture type
            print("{pT} : {pD} {pV}".format(pT = pupil_data[1], pD=pupilData[1],
                                            pV = (pupil_value_type[0]
                                            if pupilData[0]==3 else
                                            pupil_value_type[1])))
            for i in range(2,6):
                print("{pd} : {pD:2.4f}".format(pd=pupil_data[i],pD=pupilData[i]))
            print("{pd} : {pD}".format(pd=pupil_data[6],pD=apodization_type[pupilData[6]]))
            print("{pd} : {pD:2.4f}".format(pd=pupil_data[7],pD=pupilData[7]))
        if TestPyZDDEFunctions.pRetVar:
            print('zGetPupil test successful')
    @unittest.skip("To implement")
    def test_zGetRefresh(self):
        """Push a lens into the LDE, then copy it back with zGetRefresh().

        NOTE(review): the body is implemented but the test is still
        skipped -- confirm push-lens permission is available before enabling.
        """
        print("\nTEST: zGetRefresh()")
        # Load & then push a lens file into the LDE
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        ret = self.ln.zPushLens(1)
        # Copy the lens data from the LDE into the stored copy of the ZEMAX server.
        ret = self.ln.zGetRefresh()
        # -998 = timeout, -1 = copy failed, 0 = success
        self.assertIn(ret,(-998, -1, 0))
        if ret == -1:
            print("MSG: ZEMAX couldn't copy the lens data to the LDE")
        if ret == -998:
            print("MSG: zGetRefresh() function timed out")
        # Push new lens to the LDE
        self.ln.zNewLens()
        self.ln.zPushLens(1)
        if TestPyZDDEFunctions.pRetVar:
            print("zGetRefresh return value", ret)
            print('zGetRefresh test successful')
    @unittest.skip("To implement")
    def test_zGetSag(self):
        # Placeholder: test for zGetSag() not yet implemented.
        print("\nTEST: zGetSag()")
        # Load a lens file
    @unittest.skip("To implement")
    def test_zGetSequence(self):
        # Placeholder: test for zGetSequence() not yet implemented.
        print("\nTEST: zGetSequence()")
        # Load a lens file
def test_zGetSerial(self):
print("\nTEST: zGetSerial()")
ser = self.ln.zGetSerial()
self.assertIsInstance(ser,int)
if TestPyZDDEFunctions.pRetVar:
print("SERIAL:", ser)
    @unittest.skip("To implement")
    def test_zGetSettingsData(self):
        # Placeholder: test for zGetSettingsData() not yet implemented.
        print("\nTEST: zGetSettingsData()")
def test_zGetSolve(self):
print("\nTEST: zGetSolve()")
filename = get_test_file()
self.ln.zLoadFile(filename)
# set a solve on the curvature (0) of surface number 6 such that the
# Marginal Ray angle (2) value is 0.1.
solveData_set = self.ln.zSetSolve(6, 0, *(2, 0.1))
solveData_get = self.ln.zGetSolve(6, 0)
self.assertTupleEqual(solveData_set, solveData_get)
if TestPyZDDEFunctions.pRetVar:
print('zGetSolve test successful')
def test_zGetSurfaceData(self):
print("\nTEST: zGetSurfaceData()")
# Load a lens file
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
assert ret == 0
surfName = self.ln.zGetSurfaceData(1,0) # Surface name
self.assertEqual(surfName,'STANDARD')
radius = 1.0/self.ln.zGetSurfaceData(1,2) # curvature
self.assertAlmostEqual(radius,22.01359,places=3)
thick = self.ln.zGetSurfaceData(1,3) # thickness
self.assertAlmostEqual(thick,3.25895583,places=3)
if TestPyZDDEFunctions.pRetVar:
print("surfName :", surfName)
print("radius :", radius)
print("thickness: ", thick)
print('zGetSurfaceData test successful')
#TODO: call zGetSurfaceData() with 3 arguments
    @unittest.skip("To implement")
    def test_zGetSurfaceDLL(self):
        # Placeholder: test for zGetSurfaceDLL() not yet implemented.
        print("\nTEST: zGetSurfaceDLL()")
        # Load a lens file
    @unittest.skip("To implement")
    def test_zGetSurfaceParameter(self):
        """Partial test: print parameters 1 and 3 of surface 1.

        NOTE(review): still skipped and incomplete (see TODO below); the
        print labels look dubious for parameter columns -- verify their
        meaning before enabling.
        """
        print("\nTEST: zGetSurfaceParameter()")
        # Load a lens file
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        surfParam1 = self.ln.zGetSurfaceParameter(1,1)
        print("Surface name: ", surfParam1)
        surfParam3 = self.ln.zGetSurfaceParameter(1,3)
        print("Radius: ", surfParam3)
        #TODO!!! not complete
def test_zGetSystem(self):
print("\nTEST: zGetSystem()")
#Setup the arguments to set a specific system first
unitCode,stopSurf,rayAimingType = 0,4,0 # mm, 4th,off
useEnvData,temp,pressure,globalRefSurf = 0,20,1,1 # off, 20C,1ATM,ref=1st surf
setSystemArg = (unitCode,stopSurf,rayAimingType,useEnvData,
temp,pressure,globalRefSurf)
expSystemData = (2, 0, 2, 0, 0, 0, 20.0, 1, 1, 0)
recSystemData_s = self.ln.zSetSystem(*setSystemArg)
# Now get the system data using zGetSystem(), the returned structure
# should be same as that returned by zSetSystem()
recSystemData_g = self.ln.zGetSystem()
self.assertTupleEqual(recSystemData_s,recSystemData_g)
if TestPyZDDEFunctions.pRetVar:
systemDataPar = ('numberOfSurfaces','lens unit code',
'stop surface-number','non axial flag',
'ray aiming type','adjust index','current temperature',
'pressure','global surface reference') #'need_save' Deprecated.
print("System data:")
for i,elem in enumerate(systemDataPar):
print("{el} : {sd}".format(el=elem,sd=recSystemData_g[i]))
if TestPyZDDEFunctions.pRetVar:
print('zGetSystem test successful')
def test_zGetSystemApr(self):
print("\nTEST: zGetSystemApr()")
# First set the system aperture to known parameters in the ZEMAX server
systemAperData_s = self.ln.zSetSystemAper(0,1,25.5) #sysAper=25mm,EPD
systemAperData_g = self.ln.zGetSystemAper()
self.assertTupleEqual(systemAperData_s,systemAperData_g)
if TestPyZDDEFunctions.pRetVar:
print('zGetSystemApr test successful')
def test_zGetSystemProperty(self):
print("\nTEST: zGetSystemProperty():")
# Set Aperture type as EPD
sysPropData_s = self.ln.zSetSystemProperty(10,0)
sysPropData_g = self.ln.zGetSystemProperty(10)
self.assertEqual(sysPropData_s,sysPropData_g)
# Let lens title
sysPropData_s = self.ln.zSetSystemProperty(16,"My Lens")
sysPropData_g = self.ln.zGetSystemProperty(16)
self.assertEqual(sysPropData_s,sysPropData_g)
# Set glass catalog
sysPropData_s = self.ln.zSetSystemProperty(23,"SCHOTT HOYA OHARA")
sysPropData_g = self.ln.zGetSystemProperty(23)
self.assertEqual(sysPropData_s,sysPropData_g)
if TestPyZDDEFunctions.pRetVar:
print('zGetSystemProperty test successful')
    def test_zGetTextFile(self):
        """Exercise zGetTextFile() for prescription ('Pre') text dumps.

        A target filename that is relative, or lacks an extension, must be
        rejected with -1; a proper full-path name using either default
        settings (flag 0) or an explicit settings file (flag 1) should
        return 0 (or -1 / -998 on failure / timeout).
        """
        print("\nTEST: zGetTextFile()")
        # Load a lens file into the DDE Server (Not required to Push lens)
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        # create text files
        spotDiagFileName = 'SpotDiagram.txt' # Change appropriately
        abberSCFileName = 'SeidelCoefficients.txt' # Change appropriately
        # Request to dump prescription file, without giving fullpath name. It
        # should return -1
        preFileName = 'Prescription_unitTest_00.txt'
        ret = self.ln.zGetTextFile(preFileName,'Pre',"None",0)
        self.assertEqual(ret,-1)
        # filename path is absolute, however, doesn't have extension
        textFileName = testdirectory + '\\' + os.path.splitext(preFileName)[0]
        ret = self.ln.zGetTextFile(textFileName,'Pre',"None",0)
        self.assertEqual(ret,-1)
        # Request to dump prescription file, without providing a valid settings file
        # and flag = 0 ... so that the default settings will be used for the text
        # Create filename with full path
        textFileName = testdirectory + '\\' + preFileName
        ret = self.ln.zGetTextFile(textFileName,'Pre',"None",0)
        self.assertIn(ret,(0,-1,-998)) #ensure that the ret is any valid return
        if ret == -1:
            print("MSG: zGetTextFile failed")
        if ret == -998:
            print("MSG: zGetTextFile() function timed out")
        if TestPyZDDEFunctions.pRetVar:
            print("zGetTextFile return value", ret)
        # Request zemax to dump prescription file, with a settings
        ret = self.ln.zGetRefresh()
        settingsFileName = "Cooke_40_degree_field_PreSettings_OnlyCardinals.CFG"
        preFileName = 'Prescription_unitTest_01.txt'
        textFileName = testdirectory + '\\' + preFileName
        ret = self.ln.zGetTextFile(textFileName,'Pre',settingsFileName,1)
        self.assertIn(ret,(0,-1,-998)) #ensure that the ret is any valid return
        if ret == -1:
            print("MSG: zGetText failed")
        if ret == -998:
            print("MSG: zGetText() function timed out")
        if TestPyZDDEFunctions.pRetVar:
            print("zGetText return value", ret)
            print('zGetText test successful')
        #TODO!!!
        # unit test for (purposeful) fail cases....
        # clean-up the dumped text files.
def test_zGetTol(self):
print("\nTEST: zGetTol()")
# Load a lens file into the DDE server
filename = get_test_file()
self.ln.zLoadFile(filename)
# Try to set a valid tolerance operand
self.ln.zSetTol(1,1,'TCON') # set tol operand of 1st row
self.ln.zSetTol(1,2,1) # set int1 =1
self.ln.zSetTol(1,5,0.25) # set min = 0.25
self.ln.zSetTol(1,6,0.75) # set max = 0.75
tolData = self.ln.zGetTol(1)
self.assertTupleEqual(tolData,('TCON', 1, 0, 0.25, 0.75, 0))
if TestPyZDDEFunctions.pRetVar:
print('zGetTol test successful')
def test_zGetTrace(self):
print("\nTEST: zGetTrace()")
# Load a lens file into the LDE (Not required to Push lens)
filename = get_test_file()
self.ln.zLoadFile(filename)
# Set up the data
waveNum,mode,surf,hx,hy,px,py = 3,0,5,0.0,1.0,0.0,0.0
rayTraceArg = (waveNum,mode,surf,hx,hy,px,py)
expRayTraceData = (0, 0, 0.0, 2.750250805, 0.04747610066, 0.0,
0.2740755916, 0.9617081522, 0.0, 0.03451463936,
-0.9994041923, 1.0)
# test returned tuple
rayTraceData = self.ln.zGetTrace(*rayTraceArg)
for i,d in enumerate(expRayTraceData):
self.assertAlmostEqual(rayTraceData[i],d,places=4)
(errorCode,vigCode,x,y,z,l,m,n,l2,
m2,n2,intensity) = self.ln.zGetTrace(*rayTraceArg)
traceDataTuple = (errorCode,vigCode,x,y,z,l,m,n,l2,m2,n2,intensity)
for i,d in enumerate(expRayTraceData):
self.assertAlmostEqual(traceDataTuple[i],d,places=4)
# Check for individual values ... (not necessary)
self.assertEqual(rayTraceData[0],errorCode)
self.assertEqual(rayTraceData[1],vigCode)
self.assertEqual(rayTraceData[2],x)
self.assertEqual(rayTraceData[3],y)
self.assertEqual(rayTraceData[4],z)
self.assertEqual(rayTraceData[5],l)
self.assertEqual(rayTraceData[6],m)
self.assertEqual(rayTraceData[7],n)
self.assertEqual(rayTraceData[8],l2)
self.assertEqual(rayTraceData[9],m2)
self.assertEqual(rayTraceData[10],n2)
self.assertEqual(rayTraceData[11],intensity)
if TestPyZDDEFunctions.pRetVar:
print("Ray trace", rayTraceData)
print('zGetTrace test successful')
    @unittest.skip("To implement")
    def test_zGetTraceDirect(self):
        # Placeholder: test for zGetTraceDirect() not yet implemented.
        print("\nTEST: zGetTraceDirect()")
        #Load a lens file
    @unittest.skip("To implement")
    def test_zGetUDOSystem(self):
        # Placeholder: test for zGetUDOSystem() not yet implemented.
        print("\nTEST: zGetUDOSystem()")
#@unittest.skip("To implement")
    def test_zGetUpdate(self):
        """Load and push a lens, then force a recompute via zGetUpdate()."""
        print("\nTEST: zGetUpdate()")
        # Load & then push a lens file into the LDE
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        # Push the lens in the Zemax DDE server into the LDE
        ret = self.ln.zPushLens(update=1)
        # Update the lens to recompute
        ret = self.ln.zGetUpdate()
        # -998 = timeout, -1 = update failed, 0 = success
        self.assertIn(ret,(-998, -1, 0))
        if ret == -1:
            print("MSG: ZEMAX couldn't update the lens")
        if ret == -998:
            print("MSG: zGetUpdate() function timed out")
        # Push new lens to the LDE (to keep it clean)
        #self.ln.zNewLens()
        #self.ln.zGetUpdate()
        #self.ln.zPushLens(1)
        loadDefaultZMXfile2LDE(self.ln)
        if TestPyZDDEFunctions.pRetVar:
            print("zGetUpdate return value", ret)
            print('zGetUpdate test successful')
def test_zGetVersion(self):
print("\nTEST: zGetVersion()")
ver = self.ln.zGetVersion()
self.assertIsInstance(ver,int)
if TestPyZDDEFunctions.pRetVar:
print("VERSION: ", ver)
print('GetVersion test successful')
def test_zGetWave(self):
print("\nTEST: zGetWave()")
# First set the waveslength data in the ZEMAX DDE server
wavelength1 = 0.48613270
wavelength2 = 0.58756180
# set the number of wavelengths and primary wavelength
waveData0 = self.ln.zSetWave(0,1,2)
# set the wavelength data
waveData1 = self.ln.zSetWave(1,wavelength1,0.5)
waveData2 = self.ln.zSetWave(2,wavelength2,0.5)
# Get the wavelength data using the zGetWave() function
waveData_g0 = self.ln.zGetWave(0)
waveData_g1 = self.ln.zGetWave(1)
waveData_g2 = self.ln.zGetWave(2)
if TestPyZDDEFunctions.pRetVar:
print("Primary wavelength number = {}".format(waveData_g0[0]))
print("Total number of wavelengths set = {}".format(waveData_g0[1]))
print("Wavelength: {}, weight : {}".format(waveData_g1[0],waveData_g1[1]))
print("Wavelength: {}, weight : {}".format(waveData_g2[0],waveData_g2[1]))
# verify
waveData_s_tuple = (waveData0[0],waveData0[1],waveData1[0],waveData1[1],
waveData2[0],waveData2[1],)
waveData_g_tuple = (waveData_g0[0],waveData_g0[1],waveData_g1[0],waveData_g1[1],
waveData_g2[0],waveData_g2[1],)
self.assertEqual(waveData_s_tuple,waveData_g_tuple)
if TestPyZDDEFunctions.pRetVar:
print('zGetWave test successful')
def test_zGetWaveTuple(self):
print("\nTEST: zGetWaveTuple()")
# First, set the wave fields in the ZEMAX DDE server
# Create the wavelength and weight tuples
wavelengths = (0.48613270,0.58756180,0.65627250)
weights = (1.0,1.0,1.0)
iWaveDataTuple = (wavelengths,weights)
self.ln.zSetWaveTuple(iWaveDataTuple)
# Now, call the zGetWaveTuple() to get teh wave data
oWaveDataTuple_g = self.ln.zGetWaveTuple()
if TestPyZDDEFunctions.pRetVar:
print("Output wave data tuple",oWaveDataTuple_g)
#verify that the returned wavelengths are same
oWavelengths = oWaveDataTuple_g[0]
for i,d in enumerate(oWavelengths):
self.assertAlmostEqual(wavelengths[i],d,places=4)
if TestPyZDDEFunctions.pRetVar:
print('zGetWaveTuple test successful')
    @unittest.skip("To implement")
    def test_zHammer(self):
        # Placeholder: test for zHammer() not yet implemented.
        print("\nTEST: zHammer()")
    @unittest.skip("To implement")
    def test_zImportExtraData(self):
        # Placeholder: test for zImportExtraData() not yet implemented.
        print("\nTEST: zImportExtraData()")
def test_zInsertConfig(self):
print("\nTEST: zInsertConfig()")
# Load a lens file into the DDE server
filename = get_test_file()
self.ln.zLoadFile(filename)
# Get the current number of configurations (columns)
currConfig = self.ln.zGetConfig()
# Insert a config
self.ln.zInsertConfig(currConfig[1]+1)
# Assert if the number of configurations didn't increase, however the
# current configuration shouldn't change, and the number of multiple
# configurations must remain same.
newConfig = self.ln.zGetConfig()
self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1]+1,currConfig[2]))
if TestPyZDDEFunctions.pRetVar:
print('zInsertConfig test successful')
def test_zInsertMCO(self):
print("\nTEST: zInsertMCO()")
# Load a lens file into the DDE server
filename = get_test_file()
self.ln.zLoadFile(filename)
# Get the current number of configurations (columns and rows)
currConfig = self.ln.zGetConfig()
self.assertTupleEqual(currConfig,(1,1,1))
# Insert a operand (row)
newOperNumber = self.ln.zInsertMCO(2)
self.assertEqual(newOperNumber,2)
newConfig = self.ln.zGetConfig()
self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1],currConfig[2]+1))
if TestPyZDDEFunctions.pRetVar:
print('zInsertMCO test successful')
    @unittest.skip("To implement")
    def test_zInsertObject(self):
        # Placeholder: test for zInsertObject() not yet implemented.
        print("\nTEST: zInsertObject()")
def test_zInsertSurface(self):
print("\nTEST: zInsertSurface()")
# Find the current number of surfaces
systemData = self.ln.zGetSystem()
init_surfaceNum = systemData[0]
# call to insert surface
self.ln.zInsertSurface(1)
# verify that we now have the appropriate number of surfaces
systemData = self.ln.zGetSystem()
curr_surfaceNum = systemData[0]
self.assertEqual(curr_surfaceNum,init_surfaceNum+1)
if TestPyZDDEFunctions.pRetVar:
print('zInsertSurface test successful')
    @unittest.skip("To implement")
    def test_zLoadDetector(self):
        # Placeholder: test for zLoadDetector() not yet implemented.
        print("\nTEST: zLoadDetector()")
def test_zLoadFile(self):
print("\nTEST: zLoadFile()")
# Try to load a non existant file
filename = "C:\\nonExistantFile.zmx"
ret = self.ln.zLoadFile(filename)
self.assertEqual(ret,-999)
# Now, try to load a real file
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
self.assertEqual(ret,0)
if TestPyZDDEFunctions.pRetVar:
print("zLoadFile return value:", ret)
print('zLoadFile test successful')
    @unittest.skip("To implement")
    def test_zLoadMerit(self):
        # Placeholder: test for zLoadMerit() not yet implemented.
        print("\nTEST: zLoadMerit()")
    @unittest.skip("To implement")
    def test_zLoadTolerance(self):
        # Placeholder: test for zLoadTolerance() not yet implemented.
        print("\nTEST: zLoadTolerance()")
    @unittest.skip("To implement")
    def test_zMakeGraphicWindow(self):
        # Placeholder: test for zMakeGraphicWindow() not yet implemented.
        print("\nTEST: zMakeGraphicWindow()")
    @unittest.skip("To implement")
    def test_zMakeTextWindow(self):
        # Placeholder: test for zMakeTextWindow() not yet implemented.
        print("\nTEST: zMakeTextWindow()")
    def test_zModifySettings(self):
        """Modify entries in an existing settings (.CFG) file and check the
        return codes for valid and invalid inputs."""
        print("\nTEST: zModifySettings()")
        # Load the ZEMAX DDE server with a lens so that it has something to begin with
        filename, sfilename = get_test_file(fileType='seq', settings=True)
        ret = self.ln.zLoadFile(filename)
        # Pass valid parameters and integer value
        ret = self.ln.zModifySettings(sfilename,'LAY_RAYS', 5)
        self.assertEqual(ret, 0)
        # Send an invalid filename
        ret = self.ln.zModifySettings('invalidFileName.CFG','LAY_RAYS', 5)
        self.assertEqual(ret, -1)
        # Pass valid parameters and string type value
        ret = self.ln.zModifySettings(sfilename,'UN1_OPERAND', 'ZERN')
        self.assertEqual(ret, 0, 'This test fails in OpticStudio16 and above!')
        # this test fails in OpticStudio16 and above, and I don't imagine Zemax
        # fixing any bugs in the DDE interface.
        if TestPyZDDEFunctions.pRetVar:
            print('zModifySettings test successful')
def test_zNewLens(self):
print("\nTEST: zNewLens()")
# Load the ZEMAX DDE server with a lens so that it has something to begin with
filename = get_test_file()
ret = self.ln.zLoadFile(filename)
# Call zNewLens to erase the current lens.
ret = self.ln.zNewLens()
if TestPyZDDEFunctions.pRetVar:
print("zNewLens return val:", ret)
# Call getSystem to see if we really have the "minimum" lens
systemData = self.ln.zGetSystem()
self.assertEqual(systemData[0],2,'numberOfSurfaces')
self.assertEqual(systemData[1],0,'lens unit code')
self.assertEqual(systemData[2],1,'stop surface-number')
self.assertEqual(systemData[3],0,'non axial flag')
self.assertEqual(systemData[4],0,'ray aiming type')
self.assertEqual(systemData[5],0,'adjust index')
self.assertEqual(systemData[6],20.0),'current temperature'
self.assertEqual(systemData[7],1,'pressure')
self.assertEqual(systemData[8],1,'global surface reference')
#self.assertEqual(systemData[9],0,'need_save') #'need_save' deprecated
if TestPyZDDEFunctions.pRetVar:
print('zNewLens test successful')
    @unittest.skip("To implement")
    def test_zNSCCoherentData(self):
        # Placeholder: test for zNSCCoherentData() not yet implemented.
        print("\nTEST: zNSCCoherentData()")
    @unittest.skip("To implement")
    def test_zNSCDetectorData(self):
        # Placeholder: test for zNSCDetectorData() not yet implemented.
        print("\nTEST: zNSCDetectorData()")
    @unittest.skip("To implement")
    def test_zNSCLightingTrace(self):
        # Placeholder: test for zNSCLightingTrace() not yet implemented.
        print("\nTEST: zNSCLightingTrace()")
    @unittest.skip("To implement")
    def test_zNSCTrace(self):
        # Placeholder: test for zNSCTrace() not yet implemented.
        print("\nTEST: zNSCTrace()")
    @unittest.skip("To implement")
    def test_zOpenWindow(self):
        # Placeholder: test for zOpenWindow() not yet implemented.
        print("\nTEST: zOpenWindow()")
    @unittest.skip("To implement")
    def test_zOperandValue(self):
        # Placeholder: test for zOperandValue() not yet implemented.
        print("\nTEST: zOperandValue()")
    @unittest.skip("To implement")
    def test_zOptimize(self):
        # Placeholder: test for zOptimize() not yet implemented.
        print("\nTEST: zOptimize()")
    @unittest.skip("To implement")
    def test_zOptimize2(self):
        # Placeholder: test for zOptimize2() not yet implemented.
        print("\nTEST: zOptimize2()")
#@unittest.skip("No push lens permission")
    def test_zPushLens(self):
        """zPushLens(): an invalid update flag raises ValueError; valid calls
        return 0 (success), -999 (no permission) or -998 (timeout)."""
        print("\nTEST: zPushLens()")
        # push a lens with an invalid flag, should rise ValueError exception
        self.assertRaises(ValueError, self.ln.zPushLens, update=10)
        # push a lens with valid flag.
        ret = self.ln.zPushLens()
        self.assertIn(ret,(0, -999, -998))
        ret = self.ln.zPushLens(update=1)
        self.assertIn(ret,(0,-999,-998))
        # Notify depending on return type
        # Note that the test as such should not "fail" if ZEMAX server returned
        # -999 (lens couldn't be pushed) or the function timed out (-998)
        if ret == -999:
            print("MSG: Lens could not be pushed into the LDE (check PushLensPermission)")
        if ret == -998:
            print("MSG: zPushLens() function timed out")
        # Push new lens to the LDE to keep it clean
        #self.ln.zNewLens()
        #self.ln.zPushLens(1)
        loadDefaultZMXfile2LDE(self.ln)
        if TestPyZDDEFunctions.pRetVar:
            print("zPushLens return value:", ret)
            print('zPushLens test successful')
def test_zPushLensPermission(self):
print("\nTEST: zPushLensPermission()")
status = self.ln.zPushLensPermission()
self.assertIn(status,(0,1))
if status:
print("MSG: Push lens allowed")
else:
print("MSG: Push lens not allowed")
if TestPyZDDEFunctions.pRetVar:
print("zPushLens return status:", status)
print('zPushLensPermission test successful')
    def test_zQuickFocus(self):
        """Build a small system (surfaces, fields, wavelengths) and run
        zQuickFocus() with RMS-spot-size / chief-ray options."""
        print("\nTEST: zQuickFocus()")
        # Setup the system, wavelength, field points
        # Add some surfaces
        self.ln.zInsertSurface(1)
        # System
        unitCode,stopSurf,rayAimingType = 0,2,0 # mm, stop surf 2, ray aiming off
        useEnvData,temp,pressure,globalRefSurf = 0,20,1,1 # off, 20C,1ATM,ref=1st surf
        setSystemArg = (unitCode,stopSurf,rayAimingType,useEnvData,
                        temp,pressure,globalRefSurf)
        expSystemData = (2, 0, 2, 0, 0, 0, 20.0, 1, 1, 0)
        recSystemData = self.ln.zSetSystem(*setSystemArg)
        # Field
        iFieldDataTuple = ((0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0), # field1: xf=0.0,yf=0.0,wgt=1.0,
                                                              # vdx=vdy=vcx=vcy=van=0.0
                           (0.0,5.0,1.0), # field2: xf=0.0,yf=5.0,wgt=1.0
                           (0.0,10.0))    # field3: xf=0.0,yf=10.0
        # Set the field data, such that fieldType is angle with rectangular normalization
        oFieldDataTuple = self.ln.zSetFieldTuple(0,1,iFieldDataTuple)
        # Wavelength
        wavelengths = (0.48613270,0.58756180,0.65627250)
        weights = (1.0,1.0,1.0)
        iWaveDataTuple = (wavelengths,weights)
        oWaveDataTuple = self.ln.zSetWaveTuple(iWaveDataTuple)
        # Now, call the zQuickFocus() function
        ret = self.ln.zQuickFocus(0,0) # RMS spot size, chief ray as reference
        print('Return status from zQuickFocus', ret)
        print('zQuickFocus test successful')
        # I might need to have some surfaces here.
    @unittest.skip("To implement test")
    def test_zReleaseWindow(self):
        # Placeholder: test for zReleaseWindow() not yet implemented.
        print("\nTEST: zReleaseWindow()")
        pass
    @unittest.skip("To implement test")
    def test_zRemoveVariables(self):
        # Placeholder: test for zRemoveVariables() not yet implemented.
        print("\nTEST: zRemoveVariables()")
        pass
    @unittest.skip("To implement test")
    def test_zSaveDetector(self):
        # Placeholder: test for zSaveDetector() not yet implemented.
        print("\nTEST: zSaveDetector()")
        pass
    @unittest.skip("To implement test")
    def test_zSaveFile(self):
        # Placeholder: test for zSaveFile() not yet implemented.
        print("\nTEST: zSaveFile()")
        pass
    @unittest.skip("To implement test")
    def test_zSaveMerit(self):
        # Placeholder: test for zSaveMerit() not yet implemented.
        print("\nTEST: zSaveMerit()")
        pass
    @unittest.skip("To implement test")
    def test_zSaveTolerance(self):
        # Placeholder: test for zSaveTolerance() not yet implemented.
        print("\nTEST: zSaveTolerance()")
        pass
    @unittest.skip("To implement test")
    def test_zSetAperture(self):
        # Placeholder: test for zSetAperture() not yet implemented.
        print("\nTEST: zSetAperture()")
        pass
    @unittest.skip("To implement test")
    def test_zSetBuffer(self):
        # Placeholder: test for zSetBuffer() not yet implemented.
        print("\nTEST: zSetBuffer()")
        pass
def test_zSetConfig(self):
print("\nTEST: zSetConfig()")
# Load a lens file into the DDE server
filename = get_test_file()
self.ln.zLoadFile(filename)
currConfig = self.ln.zGetConfig()
# Since no configuration is initally present, it should return (1,1,1)
self.assertTupleEqual(currConfig,(1,1,1))
# Insert a config
self.ln.zInsertConfig(currConfig[1]+1)
# Assert if the number of configurations didn't increase, however the
# current configuration shouldn't change, and the number of multiple
# configurations must remain same.
newConfig = self.ln.zGetConfig()
self.assertTupleEqual(newConfig,(currConfig[0],currConfig[1]+1,currConfig[2]))
# Now, finally, call zSetConfig() to switch configuration
newConfig = self.ln.zSetConfig(2)
self.assertEqual(newConfig[0],2) # current configuration
self.assertEqual(newConfig[1],2) # number of configurations
self.assertEqual(newConfig[2],0) # error
if TestPyZDDEFunctions.pRetVar:
print("CONFIG: ", newConfig)
print('zSetConfig test successful')
#TODO: Check error/test scenario
    @unittest.skip("To implement")
    def test_zSetExtra(self):
        # Placeholder: test for zSetExtra() not yet implemented.
        print("\nTEST: zSetExtra()")
    def test_zSetField(self):
        """Exercise zSetField() for the header record (n=0) and for
        individual field rows (n>=1) with varying argument counts."""
        print("\nTEST: zSetField()")
        # Set field with only 3 arguments, n=0
        # type = angle; 2 fields; rect normalization (default)
        fieldData = self.ln.zSetField(0,0,2)
        self.assertTupleEqual((0,2),(fieldData[0],fieldData[1]))
        # set field with 4 arguments, n=0
        fieldData = self.ln.zSetField(0,0,3,1)
        self.assertTupleEqual((0,3),(fieldData[0],fieldData[1]))
        #FIXME: zSetField is supposed to return more parameters.
        # is it a version issue?
        # set field with 3 args, n=1
        # 1st field, on-axis x, on-axis y, weight = 1 (default)
        fieldData = self.ln.zSetField(1,0,0)
        self.assertTupleEqual(fieldData,(0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        # Set field with all input arguments, set first field
        fieldData = self.ln.zSetField(2,0,5,2.0,0.5,0.5,0.5,0.5,0.5)
        self.assertTupleEqual(fieldData,(0.0, 5.0, 2.0, 0.5, 0.5, 0.5, 0.5, 0.5))
        fieldData = self.ln.zSetField(3,0,10,1.0,0.0,0.0,0.0)
        self.assertTupleEqual(fieldData,(0.0, 10.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0))
        if TestPyZDDEFunctions.pRetVar:
            print('zSetField test successful')
def test_zSetFieldTuple(self):
print("\nTEST: zSetFieldTuple()")
iFieldDataTuple = ((0.0,0.0,1.0,0.0,0.0,0.0,0.0,0.0), # field1: xf=0.0,yf=0.0,wgt=1.0,
# vdx=vdy=vcx=vcy=van=0.0
(0.0,5.0,1.0), # field2: xf=0.0,yf=5.0,wgt=1.0
(0.0,10.0)) # field3: xf=0.0,yf=10.0
# Set the field data, such that fieldType is angle with rectangular normalization
oFieldDataTuple = self.ln.zSetFieldTuple(0,1,iFieldDataTuple)
if TestPyZDDEFunctions.pRetVar:
for i in range(len(iFieldDataTuple)):
print("oFieldDataTuple, field {} : {}".format(i,oFieldDataTuple[i]))
# Verify
for i in range(len(iFieldDataTuple)):
self.assertEqual(oFieldDataTuple[i][:len(iFieldDataTuple[i])],
iFieldDataTuple[i])
if TestPyZDDEFunctions.pRetVar:
print('zSetFieldTuple test successful')
    @unittest.skip("To implement")
    def test_zSetFloat(self):
        # Placeholder: test for zSetFloat() not yet implemented.
        print("\nTEST: zSetFloat()")
        pass
    @unittest.skip("To implement")
    def test_zSetLabel(self):
        # Placeholder: test for zSetLabel() not yet implemented.
        print("\nTEST: zSetLabel()")
        pass
def test_zSetMulticon(self):
print("\nTEST: zSetMulticon()")
# insert an additional configuration (column)
self.ln.zInsertConfig(1)
# insert an additional operand (row)
self.ln.zInsertMCO(2)
# Try to set invalid row operand at surface 2
try:
multiConData = self.ln.zSetMulticon(0,1,'INVALIDOPERAND',2,0,0)
except ValueError:
print("Expected Value Error raised")
# Set the row operands (both to thickness, of surfaces 2, and 4 respectively)
multiConData = self.ln.zSetMulticon(0,1,'THIC',2,0,0)
self.assertTupleEqual(multiConData,('THIC',2,0,0))
multiConData = self.ln.zSetMulticon(0,2,'THIC',4,0,0)
self.assertTupleEqual(multiConData,('THIC',4,0,0))
# Set configuration 1
multiConData = self.ln.zSetMulticon(1,1,6.0076,0,1,1,1.0,0.0)
self.assertTupleEqual(multiConData,(6.0076, 2, 2, 0, 1, 1, 1.0, 0.0))
multiConData = self.ln.zSetMulticon(1,2,4.7504,0,1,1,1.0,0.0)
self.assertTupleEqual(multiConData,(4.7504, 2, 2, 0, 1, 1, 1.0, 0.0))
# Set configuration 2
multiConData = self.ln.zSetMulticon(2,1,7.0000,0,1,1,1.0,0.0)
self.assertTupleEqual(multiConData,(7.0, 2, 2, 0, 1, 1, 1.0, 0.0))
multiConData = self.ln.zSetMulticon(2,2,5.0000,0,1,1,1.0,0.0)
self.assertTupleEqual(multiConData,(5.0, 2, 2, 0, 1, 1, 1.0, 0.0))
if TestPyZDDEFunctions.pRetVar:
print('zSetMulticon test successful')
    @unittest.skip("To implement")
    def test_zSetNSCObjectData(self):
        # Placeholder: test for zSetNSCObjectData() not yet implemented.
        print("\nTEST: zSetNSCObjectData()")
        pass
    @unittest.skip("To implement")
    def test_zSetNSCObjectFaceData(self):
        # Placeholder: test for zSetNSCObjectFaceData() not yet implemented.
        print("\nTEST: zSetNSCObjectFaceData()")
        pass
@unittest.skip("To implement test")
def test_zSetNSCParameter(self):
print("\nTEST: zSetNSCParameter()")
pass
@unittest.skip("To implement test")
def test_zSetNSCPosition(self):
print("\nTEST: zSetNSCPosition()")
pass
@unittest.skip("To implement test")
def test_zSetNSCPositionTuple(self):
print("\nTEST: zSetNSCPositionTuple()")
pass
@unittest.skip("To implement test")
def test_zSetNSCProperty(self):
print("\nTEST: zSetNSCProperty()")
pass
def test_zSetNSCSetting(self):
print("\nTEST: zSetNSCSetting()")
pass
    @unittest.skip("To implement test")
    def test_zSetNSCSolve(self):
        """Placeholder for zSetNSCSolve() test (not yet implemented)."""
        print("\nTEST: zSetNSCSolve()")
        pass
    @unittest.skip("To implement test")
    def test_zInsertNSCSourceEllipse(self):
        """Placeholder for zInsertNSCSourceEllipse() test."""
        pass
    @unittest.skip("To implement test")
    def test_zInsertNSCSourceRectangle(self):
        """Placeholder for zInsertNSCSourceRectangle() test."""
        pass
    @unittest.skip("To implement test")
    def test_zInsertNSCEllipse(self):
        """Placeholder for zInsertNSCEllipse() test."""
        pass
    @unittest.skip("To implement test")
    def test_zInsertNSCRectangle(self):
        """Placeholder for zInsertNSCRectangle() test."""
        pass
    @unittest.skip("To implement test")
    def test_zInsertNSCDetectorRectangle(self):
        """Placeholder for zInsertNSCDetectorRectangle() test."""
        pass
    @unittest.skip("To implement test")
    def test_zNSCDetectorClear(self):
        """Placeholder for zNSCDetectorClear() test."""
        pass
def test_zSetPolState(self):
print("\nTEST: zSetPolState()")
# Set polarization of the "new" lens
polStateData = self.ln.zSetPolState(0,0.5,0.5,10.0,10.0)
self.assertTupleEqual(polStateData,(0,0.5,0.5,10.0,10.0))
if TestPyZDDEFunctions.pRetVar:
print('zSetPolState test successful')
    def test_zSetPrimaryWave(self):
        """zSetPrimaryWave() should switch the primary wavelength and
        return (new primary number, total wavelength count)."""
        print("\nTEST: zSetPrimaryWave()")
        # first set 3 wavelength fields using zSetWaveTuple()
        wavelengths = (0.48613270,0.58756180,0.65627250)
        weights = (1.0,1.0,1.0)
        iWaveDataTuple = (wavelengths,weights)
        WaveDataTuple = self.ln.zSetWaveTuple(iWaveDataTuple)
        # right now, the first wavefield is the primary (0.48613270)
        # make the second wavelength field as the primary
        previousPrimary = self.ln.zGetWave(0)[0]
        primaryWaveNumber = 2
        oWaveData = self.ln.zSetPrimaryWave(primaryWaveNumber)
        if TestPyZDDEFunctions.pRetVar:
            print("Previous primary wavelength number =", previousPrimary)
            print("Current primary wavelength number =", oWaveData[0])
            print("Total number of wavelengths =", oWaveData[1])
        # verify
        self.assertEqual(primaryWaveNumber,oWaveData[0])
        self.assertEqual(len(wavelengths),oWaveData[1])
        if TestPyZDDEFunctions.pRetVar:
            print('zSetPrimaryWave test successful')
    def test_zSetOperand(self):
        """Exercise zSetOperand(): invalid operands and not-yet-inserted
        rows return -1; valid sets echo the value written."""
        print("\nTEST: zSetOperand()")
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        # Try to set an invalid operand
        operandData = self.ln.zSetOperand(1,1,'INVALID')
        self.assertEqual(operandData,-1)
        # Now try to set an operand in the first row
        operandData = self.ln.zSetOperand(1,1,'EFFL')
        self.assertEqual(operandData,'EFFL')
        operandData = self.ln.zSetOperand(1,3,1) # Set wave to 1
        self.assertEqual(operandData,1)
        operandData = self.ln.zSetOperand(1,8,55.5) # Set Target to 55.5 mm
        self.assertAlmostEqual(operandData,55.5,places=4)
        # Now try to set an operand into a row, that is not yet inserted in the MFE
        operandData = self.ln.zSetOperand(2,1,'CVLT')
        self.assertEqual(operandData,-1)
        # Insert a multi-function operand row
        ret = self.ln.zInsertMFO(2)
        self.assertEqual(ret,2)
        operandData = self.ln.zSetOperand(2,1,'CVLT')
        self.assertEqual(operandData,'CVLT')
        self.ln.zInsertMFO(3)
        operandData = self.ln.zSetOperand(3,1,'CVGT')
        self.assertEqual(operandData,'CVGT')
        operandData = self.ln.zSetOperand(3,2,1) # surface = 1
        self.assertEqual(operandData,1)
        operandData = self.ln.zSetOperand(3,9,0.5) # weight = 0.5
        self.assertEqual(operandData,0.5)
        if TestPyZDDEFunctions.pRetVar:
            print('zSetOperand test successful')
    @unittest.skip("To implement")
    def test_zSetSettingsData(self):
        """Placeholder for zSetSettingsData() test (not yet implemented)."""
        print("\nTEST: zSetSettingsData()")
        pass
    def test_zSetSolve(self):
        """Verify zSetSolve() accepts positional, unpacked, and named-constant
        argument forms, all producing the same solve data."""
        print("\nTEST: zSetSolve()")
        filename = get_test_file()
        ret = self.ln.zLoadFile(filename)
        assert ret == 0
        # set a solve on the curvature (0) of surface number 6 such that the
        # Marginal Ray angle (2) value is 0.1. The following 4 methods are
        # equivalent and should produce the same output
        solveData = self.ln.zSetSolve(6, 0, *(2, 0.1))
        self.assertTupleEqual(solveData, (2, 0.1, 0.0, 0))
        solveData = self.ln.zSetSolve(6, 0, *[2, 0.1])
        self.assertTupleEqual(solveData, (2, 0.1, 0.0, 0))
        solveData = self.ln.zSetSolve(6, 0, 2, 0.1)
        self.assertTupleEqual(solveData, (2, 0.1, 0.0, 0))
        solveData = self.ln.zSetSolve(6, self.ln.SOLVE_SPAR_CURV,
                                      self.ln.SOLVE_CURV_MR_ANG, 0.1)
        self.assertTupleEqual(solveData, (2, 0.1, 0.0, 0))
        # thickness solve on surface 5
        solveData = self.ln.zSetSolve(5, self.ln.SOLVE_SPAR_THICK,
                                      self.ln.SOLVE_THICK_PICKUP, 1, -1, 0.3, 0)
        self.assertTupleEqual(solveData, (5, 1.0, -1.0, 0.3, 0))
        if TestPyZDDEFunctions.pRetVar:
            print('zSetSolve test successful')
    @unittest.skip("To implement")
    def test_zSetSurfaceData(self):
        """Placeholder for zSetSurfaceData() test (not yet implemented)."""
        print("\nTEST: zSetSurfaceData()")
        # Insert some surfaces
    @unittest.skip("To implement")
    def test_zSetSurfaceParameter(self):
        """Placeholder for zSetSurfaceParameter() test (not yet implemented)."""
        print("\nTEST: zSetSurfaceParameter()")
        ## filename = get_test_file()
        ## ret = self.ln.zLoadFile(filename)
        ## assert ret == 0
        ## surfParam1 = self.ln.zGetSurfaceParameter(1,1)
        ## print "Surface name: ", surfParam1
        ## surfParam3 = self.ln.zGetSurfaceParameter(1,3)
        ## print "Radius: ", surfParam3
def test_zSetSystem(self):
print("\nTEST: zSetSystem()")
#Setup the arguments
unitCode,stopSurf,rayAimingType = 0,4,0 # mm, 4th,off
useEnvData,temp,pressure,globalRefSurf = 0,20,1,1 # off, 20C,1ATM,ref=1st surf
setSystemArg = (unitCode,stopSurf,rayAimingType,useEnvData,
temp,pressure,globalRefSurf)
expSystemData = (2, 0, 2, 0, 0, 0, 20.0, 1, 1)
recSystemData = self.ln.zSetSystem(*setSystemArg)
self.assertTupleEqual(expSystemData,recSystemData)
if TestPyZDDEFunctions.pRetVar:
systemDataPar = ('numberOfSurfaces','lens unit code',
'stop surface-number','non axial flag',
'ray aiming type','adjust index','current temperature',
'pressure','global surface reference') #'need_save' deprecated
print("System data:")
for i,elem in enumerate(systemDataPar):
print("{} : {}".format(elem,recSystemData[i]))
if TestPyZDDEFunctions.pRetVar:
print('zSetSystem test successful')
def test_zSetSystemAper(self):
print("\nTEST: zSetSystemAper():")
systemAperData_s = self.ln.zSetSystemAper(0,1,25.5) #sysAper=25.5mm,EPD
self.assertEqual(systemAperData_s[0], 0, 'aperType = EPD')
self.assertEqual(systemAperData_s[1], 1, 'stop surface number')
self.assertEqual(systemAperData_s[2],25.5,'EPD value = 25.5 mm')
if TestPyZDDEFunctions.pRetVar:
print('zSetSystemAper test successful')
    def test_zSetSystemProperty(self):
        """zSetSystemProperty() should echo back each value it sets."""
        print("\nTEST: zSetSystemProperty():")
        # Set Aperture type as EPD
        sysPropData = self.ln.zSetSystemProperty(10,0)
        self.assertEqual(sysPropData,0)
        # Set lens title
        sysPropData = self.ln.zSetSystemProperty(16,"My Lens")
        self.assertEqual(sysPropData,"My Lens")
        # Set glass catalog
        sysPropData = self.ln.zSetSystemProperty(23,"SCHOTT HOYA OHARA")
        self.assertEqual(sysPropData,"SCHOTT HOYA OHARA")
        if TestPyZDDEFunctions.pRetVar:
            print('zSetSystemProperty test successful')
    def test_zSetTol(self):
        """zSetTol() returns -1 for an invalid operand, else the row data."""
        print("\nTEST: zSetTol()")
        # Load a lens file into the DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        # Try to set a wrong tolerance operand
        tolData = self.ln.zSetTol(1,1,'INVALIDOPERAND') # set tol operand of 1st row
        self.assertEqual(tolData,-1)
        # Try to set a valid tolerance operand
        tolData = self.ln.zSetTol(1,1,'TCON') # set tol operand of 1st row
        self.assertTupleEqual(tolData,('TCON', 0, 0, 0.0, 0.0, 0))
        if TestPyZDDEFunctions.pRetVar:
            print('zSetTol test successful')
    def test_zSetTolRow(self):
        """zSetTolRow() returns -1 for an invalid operand, else the row data."""
        print("\nTEST: zSetTolRow()")
        # Load a lens file into the DDE server
        filename = get_test_file()
        self.ln.zLoadFile(filename)
        # Try to set a wrong tolerance operand
        tolData = self.ln.zSetTolRow(1,'INVALIDOPERAND',1,0,0,0.25,0.75)
        self.assertEqual(tolData,-1)
        # Try to set a valid tolerance row data
        tolData = self.ln.zSetTolRow(1,'TRAD',1,0,0,0.25,0.75)
        self.assertTupleEqual(tolData,('TRAD', 1, 0, 0.25, 0.75, 0))
        if TestPyZDDEFunctions.pRetVar:
            print('zSetTolRow test successful')
    @unittest.skip("To implement")
    def test_zSetUDOItem(self):
        """Placeholder for zSetUDOItem() test (not yet implemented)."""
        print("\nTEST: zSetUDOItem()")
def test_zSetWave(self):
print("\nTEST: zSetWave()")
wavelength1 = 0.48613270
wavelength2 = 0.58756180
# Call the zSetWave() function to set the primary wavelength & number
# of wavelengths to set
waveData = self.ln.zSetWave(0,1,2)
if TestPyZDDEFunctions.pRetVar:
print("Primary wavelength number = ", waveData[0])
print("Total number of wavelengths set = ",waveData[1])
# Verify
self.assertEqual(waveData[0],1)
self.assertEqual(waveData[1],2)
# Set the first and second wavelength
waveData1 = self.ln.zSetWave(1,wavelength1,0.5)
waveData2 = self.ln.zSetWave(2,wavelength2,0.5)
if TestPyZDDEFunctions.pRetVar:
print("Wavelength: {}, weight: {}".format(waveData1[0],waveData1[1]))
print("Wavelength: {}, weight: {}".format(waveData2[0],waveData2[1]))
# Verify
self.assertEqual(waveData1[0],wavelength1)
self.assertEqual(waveData1[1],0.5)
self.assertEqual(waveData2[0],wavelength2)
self.assertEqual(waveData2[1],0.5)
if TestPyZDDEFunctions.pRetVar:
print('zSetWave test successful')
def test_zSetVig(self):
print("\nTEST: zSetVig()")
retVal = self.ln.zSetVig()
self.assertEqual(retVal,0)
if TestPyZDDEFunctions.pRetVar:
print('zSetVig test successful')
    def test_zSetWaveTuple(self):
        """zSetWaveTuple() should set all wavelengths at once and echo them."""
        print("\nTEST: zSetWaveTuple()")
        # Create the wavelength and weight tuples
        wavelengths = (0.48613270,0.58756180,0.65627250)
        weights = (1.0,1.0,1.0)
        iWaveDataTuple = (wavelengths,weights)
        oWaveDataTuple = self.ln.zSetWaveTuple(iWaveDataTuple)
        if TestPyZDDEFunctions.pRetVar:
            print("Output wave data tuple",oWaveDataTuple)
        # verify that the returned wavelengths are same
        oWavelengths = oWaveDataTuple[0]
        for i,d in enumerate(oWavelengths):
            self.assertAlmostEqual(wavelengths[i],d,places=4)
        if TestPyZDDEFunctions.pRetVar:
            print('zSetWaveTuple test successful')
    @unittest.skip("Not necessary!")
    def test_zWindowMaximize(self):
        """Window management call; deliberately not tested."""
        pass
    @unittest.skip("Not necessary!")
    def test_zWindowMinimize(self):
        """Window management call; deliberately not tested."""
        pass
    @unittest.skip("Not necessary!")
    def test_zWindowRestore(self):
        """Window management call; deliberately not tested."""
        pass
    @unittest.skip("Function not yet implemented")
    def test_zSetTimeout(self):
        """Placeholder for zSetTimeout() test."""
        print("\nTEST: zSetTimeout()")
        self.ln.zSetTimeout(3)
    def test_zSetSemiDiameter(self):
        """zSetSemiDiameter() should set and echo the surface semi-diameter."""
        print("\nTEST: zSetSemiDiameter()")
        # Load a lens file into the DDE server
        ln = self.ln
        filename = get_test_file()
        ln.zLoadFile(filename)
        ln.zInsertSurface(surfNum=3) # semi-dia will be non-zero for this surface
        retVal = ln.zSetSemiDiameter(surfNum=3, value=0)
        self.assertEqual(retVal, 0)
        #ln.zGetSolve(surfNum=3, code=ln.SOLVE_SPAR_SEMIDIA)
        # read the value back independently via zGetSurfaceData
        retVal = ln.zGetSurfaceData(surfNum=3, code=ln.SDAT_SEMIDIA)
        self.assertEqual(retVal, 0)
        retVal = ln.zSetSemiDiameter(surfNum=3, value=10.25)
        self.assertEqual(retVal, 10.25)
        if TestPyZDDEFunctions.pRetVar:
            print('zSetSemiDiameter test successful')
    def test_zInsertDummySurface(self):
        """zInsertDummySurface() should grow the surface count each call and
        honor the thick/semidia keyword arguments."""
        print("\nTEST: zInsertDummySurface()")
        # Load a lens file into the DDE server
        ln = self.ln
        filename = get_test_file()
        ln.zLoadFile(filename)
        numSurf = ln.zGetNumSurf()
        self.assertEqual(ln.zInsertDummySurface(surfNum=1), numSurf+1)
        self.assertEqual(ln.zInsertDummySurface(surfNum=1, thick=10), numSurf+2)
        self.assertEqual(ln.zInsertDummySurface(surfNum=1, semidia=5.0), numSurf+3)
        self.assertEqual(ln.zGetSurfaceData(surfNum=1, code=ln.SDAT_SEMIDIA), 5.0)
        if TestPyZDDEFunctions.pRetVar:
            print('zInsertDummySurface test successful')
def test_zInsertCoordinateBreak(self):
print("\nTEST: zInsertCoordinateBreak()")
# Load a lens file into the DDE server
ln = self.ln
filename = get_test_file()
ln.zLoadFile(filename)
xdec, ydec, xtilt, ytilt, ztilt, order, thick = 1.5, 2.5, 5, 10, 15, 1, 10
comment = 'tilt'
params = [xdec, ydec, xtilt, ytilt, ztilt, order]
surf = 1
retVal = ln.zInsertCoordinateBreak(surf, xdec, ydec, xtilt, ytilt, ztilt, order, thick, comment)
self.assertEqual(retVal, 0)
for para, value in enumerate(params, 1):
self.assertAlmostEqual(value, ln.zGetSurfaceParameter(surf, para))
self.assertAlmostEqual(thick, ln.zGetSurfaceData(surf, code=ln.SDAT_THICK))
if TestPyZDDEFunctions.pRetVar:
print('zInsertDummySurface test successful')
    def test_zTiltDecenterElements(self):
        """Exercise zTiltDecenterElements(): default behavior vs Zemax's
        Tilt/Decenter tool, alternate order flags, dummy-surface semi-diameter
        zeroing, and accuracy of the restoring (second) coordinate break."""
        print("\nTEST: zTiltDecenterElements()")
        # Load a lens file into the DDE server
        ln = self.ln
        filename = get_test_file()
        # Test to match Zemax's Tilt/Decenter Tool's default behaviour
        ln.zLoadFile(filename)
        firstSurf, lastSurf = 3, 4
        cb1, cb2, dummy = ln.zTiltDecenterElements(firstSurf, lastSurf)
        # check surface numbers
        self.assertSequenceEqual(seq1=(cb1, cb2, dummy), seq2=(3, 6, 7))
        # check order parameters (param 6 of a coordinate break)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb1, param=6), 0)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=6), 1)
        # check Thickness solves
        posSlvOnThick, pickSlvThick = 7, 5
        slvType, fromSurf, length, _, _ = ln.zGetSolve(surfNum=cb2-1, code=ln.SOLVE_SPAR_THICK)
        self.assertSequenceEqual(seq1=(slvType, fromSurf, length),
                                 seq2=(posSlvOnThick, cb1, 0) )
        slvType, param1, param2, param3, param4 = ln.zGetSolve(surfNum=cb2, code=ln.SOLVE_SPAR_THICK)
        scale, offset, currCol = -1, 0, 0
        self.assertSequenceEqual(seq1=(slvType, param1, param2, param3, param4),
                                 seq2=(pickSlvThick, cb2-1, scale, offset, currCol))
        # Test alternate order parameters setting
        ln.zLoadFile(filename)
        firstSurf, lastSurf = 3, 4
        cb1, cb2, dummy = ln.zTiltDecenterElements(firstSurf, lastSurf, order=1)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb1, param=6), 1)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=6), 0)
        # Test dummy surface semi-diameter setting
        ln.zLoadFile(filename)
        firstSurf, lastSurf = 3, 4
        cb1, cb2, dummy = ln.zTiltDecenterElements(firstSurf, lastSurf, dummySemiDiaToZero=True)
        self.assertEqual(ln.zGetSurfaceData(surfNum=dummy, code=ln.SDAT_SEMIDIA), 0)
        # Test the accuracy of values of tilts and restorations
        # load file again
        ln.zLoadFile(filename)
        numSurfBefore = ln.zGetNumSurf()
        firstSurf, lastSurf = 5, 6
        xdec, ydec, xtilt, ytilt, ztilt = 0.25, 0.5, 5.0, 10.0, -15.0
        # get the Thickness and solve on the thickness (if any) on the
        # last surface to include in tilt-decenter group
        thick = ln.zGetSurfaceData(surfNum=lastSurf, code=ln.SDAT_THICK)
        solve = ln.zGetSolve(surfNum=lastSurf, code=ln.SOLVE_SPAR_THICK)
        ret = ln.zTiltDecenterElements(firstSurf, lastSurf, xdec, ydec, xtilt, ytilt, ztilt)
        cb1, cb2, dummy = ret
        # Test number of surfaces
        numSurfAfter = ln.zGetNumSurf()
        self.assertEqual(numSurfAfter, numSurfBefore + 3)
        # Test the restoration of axis in the second CB (negated values)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=1), -xdec)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=2), -ydec)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=3), -xtilt)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=4), -ytilt)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=5), -ztilt)
        # Test the value of the thickness and solve on the dummy surface
        thickDummy = ln.zGetSurfaceData(surfNum=dummy, code=ln.SDAT_THICK)
        solveDummy = ln.zGetSolve(surfNum=dummy, code=ln.SOLVE_SPAR_THICK)
        self.assertEqual(thickDummy, thick)
        self.assertEqual(solveDummy, solve)
        # Test the order flags which should be 0 and 1 (default case)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb1, param=6), 0)
        self.assertEqual(ln.zGetSurfaceParameter(surfNum=cb2, param=6), 1)
        if TestPyZDDEFunctions.pRetVar:
            print('zTiltDecenterElements test successful')
@unittest.skip("To implement test")
def test_readZRD(self):
print("\nTEST: readZRD()")
try:
zfile.readZRD('..\ZMXFILES\TESTRAYS.ZRD','uncompressed')
print('readZRD test successful')
except:
print('readZRD test failed')
    @unittest.skip("To implement test")
    def test_writeZRD(self):
        """Build a two-segment zemax_ray by hand and smoke-test writeZRD()
        in both uncompressed and compressed modes."""
        print("\nTEST: writeZRD()")
        # Populate every per-segment field with two entries (one per segment)
        a = zfile.zemax_ray()
        a.filetype = 'uncompressed'
        a.status = [0, 1]
        a.level = [0, 1]
        a.hit_object = [1, 0]
        a.hit_face = [0, 0]
        a.unused = [0, 0]
        a.in_object = [0, 0]
        a.parent = [0, 0]
        a.storage = [1, 0]
        a.xybin = [0, 0]
        a.lmbin = [0, 0]
        a.index = [1.0, 1.0]
        a.starting_phase = [0.0, 0.0]
        a.x = [0.0, -1.2185866220459416]
        a.y = [0.0, 1.492338206172348e-16]
        a.z = [0.0, 4.849231551964771]
        a.l = [-0.24371732440918834, -0.24371732440918834]
        a.m = [2.984676412344696e-17, 2.984676412344696e-17]
        a.n = [0.9698463103929542, 0.9698463103929542]
        a.nx = [0.55, 0.0]
        a.ny = [0.0, 0.0]
        a.nz = [0.0, 0.0]
        a.path_to = [0.0, 0.0]
        a.intensity = [0.0001, 0.0001]
        a.phase_of = [0.0, 0.0]
        a.phase_at = [0.0, 0.0]
        a.exr = [0.0, 0.0]
        a.exi = [0.0, 0.0]
        a.eyr = [0.0, 0.0]
        a.eyi = [0.0, 0.0]
        a.ezr = [0.0, 0.0]
        a.ezi = [0.0, 0.0]
        # NOTE(review): the bare excepts below swallow all errors; narrow
        # them to `except Exception` when this test is implemented.
        try:
            zfile.writeZRD(a, 'TESTRAYS_uncompressed.ZRD','uncompressed')
            print('\nWrite to uncompressed file successful')
        except:
            print('\nWrite to uncompressed file failed')
        try:
            zfile.writeZRD(a, 'TESTRAYS_compressed.ZRD','compressed')
            print('\nWrite to compressed zrd file successful')
        except:
            print('\nWrite to compressed zrd file failed')
#%% Helper functions
def get_test_file(fileType='seq', settings=False, **kwargs):
    """helper function to get test lens file(s) for each unit test function

    Parameters
    ----------
    fileType : string, optional
        3-character code for loading different (pre-specified) lens files:
        "seq" = file for sequential ray tracing function tests;
        "pop" = file for physical optics propagation tests;
    settings : bool, optional
        if ``True``, a tuple is returned with the second element being the name
        of the settings file associated with the lens file.
    kwargs : keyword arguments
        sfile : string (for POP settings)
            "default" = use default settings file associated with the lens file;
            "nofibint" = settings with fiber integral calculation disabled;
            "nzstbirr" = non-zero surface to beam setting, irradiance data;
            "nzstbpha" = non-zero surface to beam setting, phase data;
        loadfile : string (for loading a particular lens file)
            "LENS.ZMX" = the default lens, LENS.ZMX is loaded in to the LDE
            This is really a hack. Use the exact name including the exact upper/
            lower case letters in the name, else it will not be found.

    Returns
    -------
    file : string/ tuple
        filenames are complete paths
    """
    zmxfp = os.path.join(pyzddedirectory, 'ZMXFILES')
    lensFile = ["Cooke_40_degree_field.zmx",
                "Double_Gauss_5_degree_field.ZMX",
                "LENS.ZMX",]
    settingsFile = ["Cooke_40_degree_field_unittest.CFG", ]
    popFiles = ["Fiber_Coupling.ZMX", ]
    popSettingsFile = ["Fiber_Coupling_POPunittest.CFG",
                       "Fiber_Coupling_POPunittest_Irradiance.CFG",
                       "Fiber_Coupling_POPunittest_Phase.CFG",
                       "Fiber_Coupling_POPunittest_NoFiberCompute.CFG", ]
    lenFileIndex = 0
    setFileIndex = 0
    if 'loadfile' in kwargs:
        try:
            lenFileIndex = lensFile.index(kwargs['loadfile'])
        except ValueError:
            print("Couldn't find the specified lens file. Loading default file")
    files = []
    if fileType == 'seq':
        files.append(lensFile[lenFileIndex])
        if settings:
            files.append(settingsFile[setFileIndex])
    elif fileType == 'pop':
        if settings:
            # kwargs.get avoids the KeyError the old code raised when other
            # keyword arguments (e.g. loadfile) were passed without 'sfile'
            sfile = kwargs.get('sfile')
            if sfile == 'nofibint':
                lenFileIndex, setFileIndex = 0, 3
            elif sfile == 'nzstbirr':
                lenFileIndex, setFileIndex = 0, 1
            elif sfile == 'nzstbpha':
                lenFileIndex, setFileIndex = 0, 2
            else:  # 'default', missing, or unrecognized sfile
                lenFileIndex, setFileIndex = 0, 0
            # add the appropriate files
            files.append(popFiles[lenFileIndex])
            files.append(popSettingsFile[setFileIndex])
        else:  # if settings == False
            files.append(popFiles[lenFileIndex])
    files = [os.path.join(zmxfp, f) for f in files]
    if len(files) > 1:
        return tuple(files)
    else:
        return files[0]
def deleteFile(fileName):
    """delete a file using zdde's internal delete function"""
    # second argument (5) is presumably a retry/timeout count -- confirm in pyzdde
    return pyzdde._deleteFile(fileName, 5)
def checkFileExist(fileName):
    """check if a file exist, using zdde's internal function"""
    return pyzdde._checkFileExist(fileName)
def loadDefaultZMXfile2LDE(ln):
    """loads the default lens file LENS.ZMX into the LDE
    """
    lensfile = os.path.join(ln.zGetPath()[1], 'LENS.ZMX')
    if not os.path.exists(lensfile):
        # fall back to the copy bundled with the test suite
        lensfile = get_test_file(loadfile='LENS.ZMX')
    ln.zLoadFile(lensfile)
    ln.zPushLens(1)
# Run the full unittest suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
indranilsinharoy/PyZDDE
|
Test/pyZDDEunittest.py
|
Python
|
mit
| 86,617
|
[
"Gaussian"
] |
a675cb3c1d1696f21d96daf99bb3fdf6906589185f0c01277105e20e698e5c7f
|
# This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2010 Karen Rustad
# Copyright (C) 2011 Jack Grigg
# Copyright (C) 2009, 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import importlib
from django.db import models
from django.core.files.base import ContentFile
from django.core.files.images import get_image_dimensions
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.core.cache import cache
from django.conf import settings
import datetime
import StringIO
import uuid
import urllib
from urlparse import urljoin
import random
from django.db.models import Q
import mysite.customs
import mysite.base.unicode_sanity
from django.core.urlresolvers import reverse
import voting
import hashlib
import celery.decorators
import mysite.customs.ohloh
# Prefer the legacy top-level Image module; fall back to the PIL package
# layout. Catch only ImportError -- the old bare `except:` hid real errors.
try:
    import Image
except ImportError:
    from PIL import Image
class OpenHatchModel(models.Model):
    """Abstract base model adding created/modified timestamps."""
    # set once on insert
    created_date = models.DateTimeField(null=True, auto_now_add=True)
    # refreshed on every save
    modified_date = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
def get_image_data_scaled(image_data, width):
    """Return *image_data* (raw image bytes) rescaled to *width* pixels wide,
    preserving the aspect ratio, re-encoded as PNG bytes."""
    image_fd = StringIO.StringIO(image_data)
    im = Image.open(image_fd)
    image_fd.seek(0)
    w, h = get_image_dimensions(image_fd)
    # PIL requires integer pixel dimensions; the old code passed the raw
    # float height, which modern PIL/Pillow rejects.
    new_w = int(width)
    new_h = int(round((h * 1.0 / w) * width))
    smaller = im.resize((new_w, new_h),
                        Image.ANTIALIAS)
    # "Save" it to an in-memory buffer
    new_image_fd = StringIO.StringIO()
    smaller.save(new_image_fd, format='PNG')
    new_image_fd.seek(0)
    # pull data out
    image_data = new_image_fd.getvalue()
    return image_data
class Project(OpenHatchModel):
    """A software project people can contribute to via OpenHatch.

    Holds the project's identity (name/display_name), several scaled icon
    variants, and cached statistics such as the contributor count. Icons
    are fetched from Ohloh by asynchronous tasks hooked to post_save.
    """

    def save(self, *args, **kwargs):
        """Persist the project, defaulting display_name to name."""
        if not self.display_name:
            self.display_name = self.name
        super(Project, self).save(*args, **kwargs)

    def get_corresponding_bug_trackers(self):
        '''This method returns all the bug trackers that should appear in the
        project's +projedit page.
        This is probably pretty inefficient, but it's not called very often.'''
        # Grab all the bug trackers that bugs refer to
        all_corresponding_bug_trackers = set(
            [b.tracker for b in self.bug_set.all() if b.tracker])
        # Grab all the bug trackers that refer to the project
        for tracker in mysite.customs.models.TrackerModel.objects.filter(
                created_for_project=self).select_subclasses():
            all_corresponding_bug_trackers.add(tracker)
        return all_corresponding_bug_trackers

    @staticmethod
    def generate_random_icon_path(instance, filename):
        """Return a fresh, unique upload path for a project icon.

        MEDIA_ROOT is prefixed automatically."""
        return 'images/icons/projects/%s.png' % uuid.uuid4().hex

    def name_with_quotes_if_necessary(self):
        """Return the name quoted for use in a search query: quoted when it
        contains a space; names containing '"' cannot be quoted safely."""
        if '"' in self.name:
            # GIVE UP NOW, it will not tokenize properly
            return self.name
        elif ' ' in self.name:
            return '"%s"' % self.name
        return self.name

    @mysite.base.decorators.cached_property
    def potential_mentors(self):
        """Return the union of the people who can mentor in this project,
        or who can mentor in the project's language."""
        import mysite.profile.controllers
        mentor_set = set(mysite.profile.controllers.people_matching(
            'can_mentor', self.name))
        mentor_set.update(mysite.profile.controllers.people_matching(
            'can_mentor', self.language))
        return mentor_set

    @staticmethod
    def create_dummy(**kwargs):
        """Create and save a throw-away Project (random name, generic icon)
        for tests; kwargs override any default field value."""
        # (removed an unused `now = datetime.datetime.utcnow()` local)
        data = dict(name=uuid.uuid4().hex,
                    icon_raw='/static/no-project-icon.png',
                    language='C')
        data.update(kwargs)
        ret = Project(**data)
        ret.save()
        return ret

    @staticmethod
    def create_dummy_no_icon(**kwargs):
        """Like create_dummy(), but with an empty icon_raw."""
        data = dict(name=uuid.uuid4().hex,
                    icon_raw='',
                    language='C')
        data.update(kwargs)
        ret = Project(**data)
        ret.save()
        return ret

    name = models.CharField(max_length=200, unique=True,
        help_text='<span class="example">This is the name that will uniquely identify this project (e.g. in URLs), and this box is fixing capitalization mistakes. To change the name of this project, email <a style="color: #666;" href="mailto:%s">%s</a>.</span>' % (('hello@openhatch.org',)*2))
    # BUG FIX: display_name used to be declared twice; the second, bare
    # declaration silently overrode this one and dropped its help_text.
    display_name = models.CharField(max_length=200, default='',
        help_text='<span class="example">This is the name that will be displayed for this project, and is freely editable.</span>')
    homepage = models.URLField(max_length=200, blank=True, default='',
        verbose_name='Project homepage URL')
    language = models.CharField(max_length=200, blank=True, default='',
        verbose_name='Primary programming language')

    def invalidate_all_icons(self):
        """Drop every cached icon variant (caller is responsible for save())."""
        self.icon_raw = None
        self.icon_url = u''
        self.icon_for_profile = None
        self.icon_smaller_for_badge = None
        self.icon_for_search_result = None

    def get_random_pfentry_that_has_a_project_description(self):
        """Return a random PortfolioEntry with a non-blank description,
        or None when there is none."""
        pfentries = self.portfolioentry_set.exclude(project_description='')
        only_good_pfentries = lambda pfe: pfe.project_description.strip()
        pfentries = filter(only_good_pfentries, pfentries)
        if pfentries:
            return random.choice(pfentries)
        else:
            return None

    # FIXME: Remove this field and update fixtures.
    icon_url = models.URLField(max_length=200)
    icon_raw = models.ImageField(
        upload_to=lambda a, b: Project.generate_random_icon_path(a, b),
        null=True,
        default=None,
        blank=True,
        verbose_name='Icon',
    )
    date_icon_was_fetched_from_ohloh = models.DateTimeField(null=True, default=None)
    icon_for_profile = models.ImageField(
        upload_to=lambda a, b: Project.generate_random_icon_path(a, b),
        null=True,
        default=None)
    icon_smaller_for_badge = models.ImageField(
        upload_to=lambda a, b: Project.generate_random_icon_path(a, b),
        null=True,
        default=None)
    icon_for_search_result = models.ImageField(
        upload_to=lambda a, b: Project.generate_random_icon_path(a, b),
        null=True,
        default=None)
    logo_contains_name = models.BooleanField(default=False)
    people_who_wanna_help = models.ManyToManyField('profile.Person',
        related_name='projects_i_wanna_help')
    # Cache the number of OpenHatch members who have contributed to this project.
    cached_contributor_count = models.IntegerField(default=0, null=True)

    def populate_icon_from_ohloh(self):
        """Fetch this project's icon from Ohloh and store all variants.

        Returns True on success; False when Ohloh has no icon (the fetch
        date is stamped either way so we do not retry immediately)."""
        oh = mysite.customs.ohloh.get_ohloh()
        try:
            icon_data = oh.get_icon_for_project(self.name)
            self.date_icon_was_fetched_from_ohloh = datetime.datetime.utcnow()
        except ValueError:
            self.date_icon_was_fetched_from_ohloh = datetime.datetime.utcnow()
            return False
        # if you want to scale, use get_image_data_scaled(icon_data)
        self.icon_raw.save('', ContentFile(icon_data))
        # Since we are saving an icon, also update our scaled-down versions.
        self.update_scaled_icons_from_self_icon()
        return True

    def get_url_of_icon_or_generic(self):
        """URL of the profile-size icon, or a generic placeholder."""
        if self.icon_for_profile:
            return self.icon_for_profile.url
        else:
            return settings.MEDIA_URL + 'no-project-icon.png'

    def get_url_of_badge_size_icon_or_generic(self):
        """URL of the badge-size (40px) icon, or a generic placeholder."""
        if self.icon_smaller_for_badge:
            return self.icon_smaller_for_badge.url
        else:
            return settings.MEDIA_URL + 'no-project-icon-w=40.png'

    def get_url_of_search_result_icon_or_generic(self):
        """URL of the search-result (20px) icon, or a generic placeholder."""
        if self.icon_for_search_result:
            return self.icon_for_search_result.url
        else:
            return settings.MEDIA_URL + 'no-project-icon-w=20.png'

    def update_scaled_icons_from_self_icon(self):
        '''This method should be called when you update the Project.icon_raw attribute.
        Side-effect: saves scaled-down versions of that icon in the
        icon_for_profile (64px), icon_smaller_for_badge (40px) and
        icon_for_search_result (20px) fields.'''
        # Do nothing if self.icon_raw is a false value.
        if not self.icon_raw:
            return
        raw_icon_data = self.icon_raw.file.read()
        # Scale raw icon to a size for the profile.
        profile_icon_data = get_image_data_scaled(raw_icon_data, 64)
        self.icon_for_profile.save('', ContentFile(profile_icon_data))
        # Scale it down to badge size, which happens to be width=40
        badge_icon_data = get_image_data_scaled(raw_icon_data, 40)
        self.icon_smaller_for_badge.save('', ContentFile(badge_icon_data))
        # Scale down to a size that fits in the search results--20px by 20px
        search_result_icon_data = get_image_data_scaled(raw_icon_data, 20)
        self.icon_for_search_result.save('', ContentFile(search_result_icon_data))

    def get_contributors(self):
        """Return a queryset of distinct Persons with a published,
        non-deleted PortfolioEntry for this Project."""
        from mysite.profile.models import Person
        return Person.objects.filter(
            portfolioentry__project=self,
            portfolioentry__is_deleted=False,
            portfolioentry__is_published=True
        ).distinct()

    def update_cached_contributor_count_and_save(self):
        """Recompute cached_contributor_count and persist the project."""
        contributors = self.get_contributors()
        self.cached_contributor_count = len(contributors)
        self.save()

    def get_n_other_contributors_than(self, n, person):
        """Return up to n contributors other than *person*, shuffled, with
        people who have photos listed first."""
        # FIXME: Use the method above.
        from mysite.profile.models import PortfolioEntry
        pf_entries = list(PortfolioEntry.published_ones.filter(
            project=self).exclude(person=person))
        random.shuffle(pf_entries)
        # `p` instead of `person` avoids shadowing the parameter
        other_contributors = [p.person for p in pf_entries]
        photod_people = [p for p in other_contributors if p.photo]
        unphotod_people = [p for p in other_contributors if not p.photo]
        ret = []
        ret.extend(photod_people)
        ret.extend(unphotod_people)
        return ret[:n]

    def __unicode__(self):
        return "name='%s' display_name='%s' language='%s'" % (
            self.name, self.display_name, self.language)

    def get_url(self):
        """URL of the project's public page."""
        import mysite.project.views
        return reverse(mysite.project.views.project,
                       kwargs={'project__name': mysite.base.unicode_sanity.quote(self.name)})

    def get_edit_page_url(self):
        """URL of the project's edit page."""
        import mysite.project.views
        return reverse(mysite.project.views.edit_project,
                       kwargs={'project__name': mysite.base.unicode_sanity.quote(self.name)})

    @mysite.base.decorators.cached_property
    def get_mentors_search_url(self):
        """URL of a people search for mentors of this project (falling back
        to its language when the project has none); '' if neither applies."""
        import mysite.profile.controllers
        mentor_count = len(set(mysite.profile.controllers.people_matching(
            'can_mentor', self.name)))
        if mentor_count > 0 or self.language:
            query_var = self.name
            if mentor_count == 0:
                query_var = self.language
            query_string = mysite.base.unicode_sanity.urlencode(
                {u'q': u'can_mentor:"%s"' % query_var})
            # NOTE(review): relies on mysite.profile.views being importable as
            # a side effect of another import -- confirm.
            return reverse(mysite.profile.views.people) + '?' + query_string
        else:
            return ""

    def get_open_bugs(self):
        return Bug.open_ones.filter(project=self)

    def get_open_bugs_randomly_ordered(self):
        return self.get_open_bugs().order_by('?')

    def get_pfentries_with_descriptions(self, listen_to_the_community=False, **kwargs):
        """Return PortfolioEntries whose project_description is non-blank.

        When listen_to_the_community is True, exclude entries that were
        unchecked on the project edit page's descriptions section."""
        pfentries = self.portfolioentry_set.exclude(project_description='').filter(**kwargs)
        if listen_to_the_community:
            pfentries = pfentries.filter(use_my_description=True, )
        has_a_description = lambda pfe: pfe.project_description.strip()
        return filter(has_a_description, pfentries)

    def get_pfentries_with_usable_descriptions(self):
        return self.get_pfentries_with_descriptions(listen_to_the_community=True)

    def get_random_description(self):
        """Return a random community-approved PortfolioEntry, or None."""
        pfentries = self.get_pfentries_with_usable_descriptions()
        if pfentries:
            return random.choice(pfentries)
        else:
            return None
def populate_icon_on_project_creation(instance, raw, created, *args, **kwargs):
    """post_save handler: queue an asynchronous Ohloh icon fetch for a
    freshly created Project that has no icon. Skipped for raw (fixture)
    saves."""
    if raw:
        return
    import mysite.search.tasks
    if not created or instance.icon_raw:
        return
    mysite.search.tasks.PopulateProjectIconFromOhloh().delay(project_id=instance.id)
def grab_project_language_from_ohloh(instance, raw, created, *args,
                                     **kwargs):
    """post_save handler: queue an asynchronous Ohloh lookup to fill in the
    language of a freshly created Project. Skipped for raw (fixture) saves."""
    if raw:
        return
    import mysite.search.tasks
    if not created or instance.language:
        return
    mysite.search.tasks.PopulateProjectLanguageFromOhloh().delay(project_id=instance.id)
# Hook the Ohloh icon/language population handlers up to Project saves.
models.signals.post_save.connect(populate_icon_on_project_creation, Project)
models.signals.post_save.connect(grab_project_language_from_ohloh, Project)
class WrongIcon(OpenHatchModel):
    """Archived copy of a Project's icon data.

    Presumably used to preserve an icon that was flagged as wrong before
    it gets replaced -- confirm with callers of spawn_from_project.  The
    fields below mirror the icon-related fields on Project.
    """

    @staticmethod
    def spawn_from_project(project):
        """Copy all icon fields from `project` into a saved WrongIcon row."""
        kwargs = {
            'project': project,
            'icon_url': project.icon_url,
            'icon_raw': project.icon_raw,
            'date_icon_was_fetched_from_ohloh': project.date_icon_was_fetched_from_ohloh,
            'icon_for_profile': project.icon_for_profile,
            'icon_smaller_for_badge': project.icon_smaller_for_badge,
            'icon_for_search_result': project.icon_for_search_result,
            'logo_contains_name': project.logo_contains_name,
        }
        wrong_icon_obj = WrongIcon(**kwargs)
        wrong_icon_obj.save()
        return wrong_icon_obj

    # The project whose icon this snapshot came from.
    project = models.ForeignKey(Project)
    icon_url = models.URLField(max_length=200)
    icon_raw = models.ImageField(
        upload_to=lambda a,b: Project.generate_random_icon_path(a, b),
        null=True,
        default=None)
    date_icon_was_fetched_from_ohloh = models.DateTimeField(null=True, default=None)
    icon_for_profile = models.ImageField(
        upload_to=lambda a,b: Project.generate_random_icon_path(a,b),
        null=True,
        default=None)
    icon_smaller_for_badge = models.ImageField(
        upload_to=lambda a,b: Project.generate_random_icon_path(a,b),
        null=True,
        default=None)
    icon_for_search_result = models.ImageField(
        upload_to=lambda a,b: Project.generate_random_icon_path(a,b),
        null=True,
        default=None)
    logo_contains_name = models.BooleanField(default=False)
class Buildhelper(OpenHatchModel):
    '''Model where all the steps in the buildhelper live.

    A Buildhelper belongs to one Project and owns an ordered collection of
    BuildhelperStep rows (see BuildhelperStep.buildhelper).
    '''
    project = models.ForeignKey(Project)
    default_frustration_handler = models.URLField(max_length=200, default='')

    def addStep(self, name, time, is_prerequisite=False, description='', command='', hint='', frustration_handler=None):
        '''Create and save a BuildhelperStep attached to this Buildhelper.

        When no frustration_handler is given, this Buildhelper's
        default_frustration_handler is used.
        '''
        if frustration_handler is None:
            # Fixed: removed a leftover `import pdb; pdb.set_trace()`
            # debugger breakpoint that would halt the process here.
            frustration_handler = self.default_frustration_handler
        s = BuildhelperStep(buildhelper=self, is_prerequisite=is_prerequisite,
                            name=name, description=description,
                            command=command, time=time, hint=hint,
                            frustration_handler=frustration_handler)
        s.save()

    def __unicode__(self):
        return self.project.display_name + "'s Buildhelper"
class BuildhelperStep(OpenHatchModel):
    '''A single step in the buildhelper'''
    buildhelper = models.ForeignKey(Buildhelper)
    # Whether this step must be completed before the others.
    is_prerequisite = models.BooleanField(default=False)
    is_checked = models.BooleanField(default=False)
    name = models.CharField(max_length=255)
    description = models.TextField(default='',blank=True)
    command = models.TextField(default='',blank=True)
    # Estimated time for the step; units not stated here -- presumably
    # minutes, confirm with the UI that renders it.
    time = models.IntegerField(default=0)
    hint = models.URLField(max_length=200, default='http://cuteoverload.com',blank=True)
    frustration_handler = models.URLField(max_length=200, blank=True)
    def __unicode__(self):
        return "Buildhelper step for project " + self.buildhelper.project.display_name + ": " + self.name
class ProjectInvolvementQuestion(OpenHatchModel):
    """A question inviting Answers about getting involved in a project."""
    # Stable identifier used in code (see Answer.get_question_text).
    key_string = models.CharField(max_length=255)
    text = models.TextField()
    is_bug_style = models.BooleanField(default=False)

    def get_answers_for_project(self, a_project):
        """Return this question's answers for `a_project`, highest-voted first."""
        def get_score(obj):
            # Negate so that an ascending sort puts the highest score first.
            return (-1)* voting.models.Vote.objects.get_score(obj)['score']
        the_answers = list(self.answers.filter(project=a_project))
        # TODO: sort them
        the_answers.sort(key=get_score)
        return the_answers

    @staticmethod
    def create_dummy(**kwargs):
        """Create and save a question for tests; kwargs override the defaults."""
        data = {'text': 'how are you doing?'}
        data.update(kwargs)
        ret = ProjectInvolvementQuestion(**data)
        ret.save()
        return ret
class OwnedAnswersManager(models.Manager):
    """Default Answer manager: only answers that still have an author."""
    def get_query_set(self):
        return super(OwnedAnswersManager, self).get_query_set().filter(
            author__isnull=False)
class Answer(OpenHatchModel):
    """A user's answer to a ProjectInvolvementQuestion about a Project."""
    title = models.CharField(null=True, max_length=255)
    text = models.TextField(blank=False)
    # Nullable: an answer may lose its author (see all_even_unowned below).
    author = models.ForeignKey(User, null=True)
    question = models.ForeignKey(ProjectInvolvementQuestion, related_name='answers')
    project = models.ForeignKey(Project)
    # Default manager hides author-less answers; use all_even_unowned to
    # see every row.
    objects = OwnedAnswersManager()
    all_even_unowned = models.Manager()

    def get_question_text(self, mention_project_name=True):
        """Render the question as user-facing prose, keyed on key_string.

        When mention_project_name is True the project's display name is
        spliced into the sentence.
        """
        if self.question.key_string == 'where_to_start':
            retval = "I'd like to participate%s. How do I begin?" % (
                " in %s" % self.project.display_name if mention_project_name else "")
        elif self.question.key_string == 'stress':
            retval = "What is a bug or issue%s that you've been putting off, neglecting or just plain avoiding?" % (
                " with %s" % self.project.display_name if mention_project_name else "")
        elif self.question.key_string == 'newcomers':
            retval = "What's a good bug%s for a newcomer to tackle?" % (
                " in %s" % self.project.display_name if mention_project_name else "")
        elif self.question.key_string == 'non_code_participation':
            retval = "Other than writing code, how can I contribute%s?" % (
                " to %s" % self.project.display_name if mention_project_name else "")
        else: # Shouldn't get here.
            retval = ""
        return retval

    @property
    def template_for_feed(self):
        # Template used when this answer appears in the activity feed.
        return 'base/answer-in-feed.html'

    def get_title_for_atom(self):
        """Title line for this answer's Atom feed entry."""
        return "%s added an answer for %s" % (
            self.author.get_profile().get_full_name_and_username(),
            self.project.display_name)

    def get_description_for_atom(self):
        """Body line for this answer's Atom feed entry."""
        return "%s added an answer to the question \"%s\"" % (
            self.author.get_profile().get_full_name_and_username(),
            self.get_question_text())

    def get_absolute_url(self):
        """Project-page URL with a fragment pointing at this answer."""
        return urljoin(reverse('mysite.project.views.project', args=[self.project.name]), "#answer_whose_pk_is_%d" % self.pk)

    @staticmethod
    def create_dummy(**kwargs):
        """Create and save an Answer for tests; kwargs override the defaults."""
        data = {
            'text': 'i am doing well',
            'author': User.objects.get_or_create(username='yooz0r')[0],
            'question': ProjectInvolvementQuestion.objects.get_or_create(
                key_string='where_to_start', is_bug_style=False)[0],
            'project': Project.create_dummy()
        }
        data.update(kwargs)
        ret = Answer(**data)
        ret.save()
        return ret
class BugTracker(OpenHatchModel):
    """Serializable pointer to a bug-tracker implementation (see comments)."""
    # The purpose of this BugTracker model is to permit Bug objects to
    # find and instantiate their bug tracker, whatever type of bug tracker
    # it is.
    #
    # For now, it can only represent static classes, like the various Bugzilla
    # bug trackers.
    #
    # It is a Model so that Bug objects can simply use a ForeignKey to point at a
    # BugTracker object.
    #
    # I do realize there is some duplication between this and the various bug tracker
    # classes in customs. This model should probably be renamed.
    #
    # We could maybe use the Content Types framework in Django instead of writing
    # our own wrapper: http://docs.djangoproject.com/en/dev/ref/contrib/contenttypes/
    # See the (very exciting) "Generic relations" section of that page.
    #
    # However, right now, some of our bug tracker classes are not models at all, so
    # we can't access them through the Django content types framework. Once we convert
    # those classes to be model instances, then we can ditch this BugTracker class.
    #
    # EDIT: The Content Types framework is used for all trackers (including hard-coded
    # special cases) handled by the asynchronous bug importer. Once all existing bug
    # importers have been migrated over, this BugTracker class can be removed.

    # If it is a hard-coded class, use these
    module_name = models.CharField(max_length=500, blank=True)
    class_name = models.CharField(max_length=500, blank=True)
    # If it is a bug tracker that we create from the database, use these
    bug_tracker_model_module = models.CharField(max_length=500, blank=True)
    bug_tracker_model_class_name = models.CharField(max_length=500, blank=True)
    bug_tracker_model_pk = models.IntegerField(default=0)

    @staticmethod
    def _instance2module_and_class(instance):
        """Return (module name, class name) for an instance or a class."""
        module_name = instance.__module__
        try:
            # Works when `instance` is actually a class object.
            class_name = instance.__name__
        except AttributeError:
            class_name = instance.__class__.__name__
        return module_name, class_name

    @staticmethod
    def get_or_create_from_bug_tracker_instance(bug_tracker_instance):
        """Persist a pointer to `bug_tracker_instance` and return the row."""
        # First, make sure that the bug tracker is actually capable of refreshing this bug,
        # if we asked it.
        assert hasattr(bug_tracker_instance.__class__, 'refresh_one_bug')
        # Okay, so it provides the necessary method to be something worth serializing.
        # Next question: Is it autogenerated from the database, or is it a hard-coded class
        # that lives in the database?
        if hasattr(bug_tracker_instance, 'provide_hints_for_how_to_recreate_self'):
            # Okay, so it's autogenerated.
            hints = bug_tracker_instance.provide_hints_for_how_to_recreate_self()
            (bug_tracker_model_module,
             bug_tracker_model_class_name) = BugTracker._instance2module_and_class(hints['bug_tracker_class'])
            bug_tracker_model_pk = hints['corresponding_pk']
            obj, was_created = BugTracker.objects.get_or_create(
                bug_tracker_model_module=bug_tracker_model_module,
                bug_tracker_model_class_name=bug_tracker_model_class_name,
                bug_tracker_model_pk=bug_tracker_model_pk)
            return obj
        # If we get this far, the instance does not provide a provide_hints_for_how_to_recreate_self
        # method, so we assume that it's an importable class that lives in the database.
        module_name, class_name = BugTracker._instance2module_and_class(bug_tracker_instance)
        obj, was_created = BugTracker.objects.get_or_create(module_name=module_name,
                                                            class_name=class_name)
        return obj

    def make_instance(self):
        """Reconstitute and return the live bug-tracker object this row points to."""
        if self.bug_tracker_model_class_name:
            # Then try to reconstitute the instance through the class
            module = importlib.import_module(self.bug_tracker_model_module)
            cls = getattr(module, self.bug_tracker_model_class_name)
            return cls.all_trackers.get(pk=self.bug_tracker_model_pk).create_class_that_can_actually_crawl_bugs()
        # Otherwise, it's a raw, hard-coded class that we can import.
        module = importlib.import_module(self.module_name)
        cls = getattr(module, self.class_name)
        return cls()
class OpenBugsManager(models.Manager):
    """Manager restricted to bugs that do not look closed."""
    def get_query_set(self):
        return super(OpenBugsManager, self).get_query_set().filter(
            looks_closed=False)
class Bug(OpenHatchModel):
    """A bug imported from an external tracker, attached to a Project."""
    project = models.ForeignKey(Project)
    title = models.CharField(max_length=500)
    description = models.TextField()
    status = models.CharField(max_length=200)
    importance = models.CharField(max_length=200)
    people_involved = models.IntegerField(null=True)
    date_reported = models.DateTimeField()
    last_touched = models.DateTimeField()
    # Epoch default marks "never polled" (see data_is_more_fresh_than_one_day).
    last_polled = models.DateTimeField(default=datetime.datetime(1970, 1, 1))
    submitter_username = models.CharField(max_length=200)
    submitter_realname = models.CharField(max_length=200, null=True)
    # Unique link back to the bug in its source tracker.
    canonical_bug_link = models.URLField(max_length=200, unique=True,
                                         blank=False, null=False)
    good_for_newcomers = models.BooleanField(default=False)
    looks_closed = models.BooleanField(default=False)
    # sic: presumably "bite_size_tag_name"; renaming needs a migration.
    bize_size_tag_name = models.CharField(max_length=50)
    concerns_just_documentation = models.BooleanField(default=False)
    as_appears_in_distribution = models.CharField(max_length=200, default='')

    # Generic FK to the new-style tracker model (Content Types framework).
    tracker_type = models.ForeignKey(ContentType, null=True)
    tracker_id = models.PositiveIntegerField(null=True)
    tracker = generic.GenericForeignKey('tracker_type', 'tracker_id')
    # Legacy pointer to the old BugTracker wrapper model.
    bug_tracker = models.ForeignKey(BugTracker, null=True)

    all_bugs = models.Manager()
    open_ones = OpenBugsManager()

    def data_is_more_fresh_than_one_day(self):
        """True when this bug was polled within roughly the last day.

        Uses a 20-hour cutoff, presumably to leave slack for daily crawl
        scheduling -- confirm.
        """
        age = datetime.datetime.now() - self.last_polled
        seems_really_fresh = age < datetime.timedelta(hours=20)
        return seems_really_fresh

    def __unicode__(self):
        return "title='%s' project='%s' project__language='%s' description='%s...'" % (self.title, self.project.name, self.project.language, self.description[:50])

    def set_bug_tracker_class_from_instance(self, instance):
        """Point this bug at the BugTracker row matching `instance`."""
        self.bug_tracker = BugTracker.get_or_create_from_bug_tracker_instance(instance)

    @staticmethod
    def create_dummy(**kwargs):
        """Create and save a Bug for tests; kwargs override the defaults."""
        now = datetime.datetime.utcnow()
        n = str(Bug.all_bugs.count())
        # FIXME (?) Project.objects.all()[0] call below makes an out-of-bounds error in testing...
        data = dict(title=n, project=Project.objects.all()[0],
                    date_reported=now,
                    last_touched=now,
                    last_polled=now,
                    canonical_bug_link="http://asdf.com/" + uuid.uuid4().hex,
                    submitter_username='dude',
                    description='')
        data.update(kwargs)
        ret = Bug(**data)
        ret.save()
        return ret

    @staticmethod
    def create_dummy_with_project(**kwargs):
        """Like create_dummy, but with a freshly created dummy Project."""
        kwargs['project'] = Project.create_dummy()
        return Bug.create_dummy(**kwargs)
class BugAlert(OpenHatchModel):
    """A request to be notified about a bug search.

    NOTE(review): purpose inferred from field names (a saved search query
    plus an email address) -- confirm with the view that creates these.
    """
    # Nullable so anonymous visitors can register alerts too, presumably.
    user = models.ForeignKey(User, null=True)
    query_string = models.CharField(max_length=255)
    how_many_bugs_at_time_of_request = models.IntegerField()
    email = models.EmailField(max_length=255)
class WannaHelperNote(OpenHatchModel):
    """Marks that a person offered to help a project (at most one per pair)."""
    class Meta:
        unique_together = [('project', 'person')]
    person = models.ForeignKey('profile.Person')
    project = models.ForeignKey(Project)

    @staticmethod
    def add_person_project(person, project):
        """Idempotently record that `person` wants to help `project`."""
        note, _ = WannaHelperNote.objects.get_or_create(
            person=person, project=project)
        return note

    @staticmethod
    def remove_person_project(person, project):
        """Delete the note for (person, project); no-op if it doesn't exist."""
        try:
            note = WannaHelperNote.objects.get(person=person, project=project)
            note.delete()
        except WannaHelperNote.DoesNotExist:
            pass

    @property
    def template_for_feed(self):
        # Template used when this note appears in the activity feed.
        return 'base/wannahelp-in-feed.html'

    def get_title_for_atom(self):
        """Title line for this note's Atom feed entry."""
        return "%s is willing to help %s" % (
            self.person.get_full_name_and_username(), self.project.display_name)

    def get_description_for_atom(self):
        """Body line for the Atom feed entry (same as the title)."""
        return self.get_title_for_atom()

    def get_absolute_url(self):
        """Project-page URL with a fragment pointing at this person."""
        return urljoin(reverse('mysite.project.views.project', args=[self.project.name]), "#person_summary_%d" % self.person.pk)
def post_bug_save_delete_increment_hit_count_cache_timestamp(sender, instance, **kwargs):
    """Signal handler: bump the hit-count-cache timestamp on Bug save/delete.

    Bumping the timestamp invalidates cache keys derived from it (see the
    comments near the connect() calls below).
    """
    # always bump it
    import mysite.base.models
    # Fixed: removed a stray trailing comma that turned this call into a
    # discarded 1-tuple expression (harmless but a leftover typo).
    mysite.base.models.Timestamp.update_timestamp_for_string('hit_count_cache_timestamp')
def post_bug_save_increment_bug_model_timestamp(sender, instance, **kwargs):
    """Signal handler: invalidate bug caches when a bug gets closed.

    NOTE(review): defined here but never connect()ed in this file --
    confirm whether it is hooked up elsewhere or dead code.
    """
    if instance.looks_closed:
        # bump it
        import mysite.base.models
        mysite.base.models.Timestamp.update_timestamp_for_string(str(sender))
        # and clear the search cache
        import mysite.search.tasks
        mysite.search.tasks.clear_search_cache.delay()
def post_bug_delete_increment_bug_model_timestamp(sender, instance, **kwargs):
    """Signal handler: bump the Bug-model timestamp on every delete.

    NOTE(review): defined here but never connect()ed in this file --
    confirm whether it is hooked up elsewhere or dead code.
    """
    # always bump it
    import mysite.base.models
    mysite.base.models.Timestamp.update_timestamp_for_string(str(sender))
# Clear the hit count cache whenever Bugs are added or removed. This is
# simply done by bumping the Timestamp used to generate the cache keys.
# The hit count cache is used in get_or_create_cached_hit_count() in
# mysite.search.controllers.Query.
# Clear all people's recommended bug cache when a bug is deleted
# (or when it has been modified to say it looks_closed)
# NOTE(review): only the hit-count handler is connected here; the
# *_increment_bug_model_timestamp handlers defined above are not connected
# in this file -- confirm that is intentional.
models.signals.post_save.connect(
    post_bug_save_delete_increment_hit_count_cache_timestamp,
    Bug)
models.signals.post_delete.connect(
    post_bug_save_delete_increment_hit_count_cache_timestamp
    ,Bug)
# Re-index the person when he says he likes a new project
def update_the_person_index_from_project(sender, instance, **kwargs):
    """post_save handler: asynchronously re-index everyone helping this Project."""
    import mysite.profile.tasks
    for person in instance.people_who_wanna_help.all():
        task = mysite.profile.tasks.ReindexPerson()
        task.delay(person.id)
models.signals.post_save.connect(update_the_person_index_from_project, sender=Project)
# vim: set ai ts=4 nu:
|
jledbetter/openhatch
|
mysite/search/models.py
|
Python
|
agpl-3.0
| 31,607
|
[
"exciting"
] |
dfa917e480db7be793c99a4abd009e02cc1ffd6d45985a5230a07b46666126e2
|
"""
Test basic molecular features.
"""
import numpy as np
import unittest
from rdkit import Chem
from deepchem.feat.basic import MolecularWeight, RDKitDescriptors
class TestMolecularWeight(unittest.TestCase):
    """Unit tests for the MolecularWeight featurizer."""

    def setUp(self):
        """Build an aspirin molecule and a fresh featurizer."""
        aspirin = 'CC(=O)OC1=CC=CC=C1C(=O)O'
        self.mol = Chem.MolFromSmiles(aspirin)
        self.engine = MolecularWeight()

    def testMW(self):
        """Aspirin weighs ~180 Da; the featurizer should agree."""
        weights = self.engine([self.mol])
        assert np.allclose(weights, 180, atol=0.1)
class TestRDKitDescriptors(unittest.TestCase):
    """Unit tests for the RDKitDescriptors featurizer."""

    def setUp(self):
        """Build an aspirin molecule and a fresh featurizer."""
        aspirin = 'CC(=O)OC1=CC=CC=C1C(=O)O'
        self.mol = Chem.MolFromSmiles(aspirin)
        self.engine = RDKitDescriptors()

    def testRDKitDescriptors(self):
        """The ExactMolWt descriptor for aspirin should be ~180 Da."""
        feats = self.engine([self.mol])
        mw_index = self.engine.descriptors.index('ExactMolWt')
        assert np.allclose(feats[0, mw_index], 180, atol=0.1)
|
joegomes/deepchem
|
deepchem/feat/tests/test_basic.py
|
Python
|
mit
| 1,025
|
[
"RDKit"
] |
cef7b9c9e0ad487e99db265fab901c6f85b0d0c3aeab9497a99378c3d91510ba
|
#!/usr/bin/env python2.7
"""
Simple sequencing file statistics.
Gather the following numbers:
* Percentages of bases with quality at least Q40, Q30, and Q20 from FASTQ
files.
* Percentages of reads whose average quality is at least Q40, Q30, and Q20.
Requirements:
* Python == 2.7.x
* Biopython >= 1.60
Copyright (c) 2013 Wibowo Arindrarto <w.arindrarto@lumc.nl>
Copyright (c) 2013 LUMC Sequencing Analysis Support Core <sasc@lumc.nl>
MIT License <http://opensource.org/licenses/MIT>
"""
RELEASE = False
__version_info__ = ('0', '1',)
__version__ = '.'.join(__version_info__)
__version__ += '-dev' if not RELEASE else ''
import argparse
import json
import os
import sys
from Bio import SeqIO
# quality points we want to measure
QVALS = range(0, 60, 10)
def dict2json(d_input, f_out):
    """Dump the given dictionary as a JSON file.

    `f_out` may be a path (string), in which case the file is opened and
    closed here, or an open writable handle, which is left open for the
    caller to manage.

    Fixed: the original unconditionally closed the handle, which would
    close caller-owned handles such as the sys.stdout default used by the
    command-line interface.
    """
    if isinstance(f_out, str):
        with open(f_out, 'w') as target:
            json.dump(d_input, target, sort_keys=True, indent=4,
                      separators=(',', ': '))
    else:
        json.dump(d_input, f_out, sort_keys=True, indent=4,
                  separators=(',', ': '))
def gather_stat(in_fastq, out_json, fmt):
    """Compute base- and read-level quality percentages for a FASTQ file.

    Writes four "Qxx(bases|reads):pct" lines to `out_json`, which may be a
    path (string) or an open writable handle.

    Raises:
        ValueError: if the input contains no reads (the original code
            crashed with ZeroDivisionError in that case).
    """
    total_bases, total_reads = 0, 0
    bcntd = dict.fromkeys(QVALS, 0)  # bases at or above each threshold
    rcntd = dict.fromkeys(QVALS, 0)  # reads whose average is at or above each threshold
    for rec in SeqIO.parse(in_fastq, fmt):
        read_quals = rec.letter_annotations['phred_quality']
        read_len = len(read_quals)
        # NOTE: integer division under the script's target Python 2.7 --
        # the average is truncated before comparison, as originally written.
        avg_qual = sum(read_quals) / len(read_quals)
        for qval in QVALS:
            bcntd[qval] += len([q for q in read_quals if q >= qval])
            if avg_qual >= qval:
                rcntd[qval] += 1
        total_bases += read_len
        total_reads += 1
    if not total_reads:
        raise ValueError("no reads found in %r" % (in_fastq,))
    pctd = {
        'filename': os.path.abspath(in_fastq),
        'stats': {
            'bases': {},
            'reads': {},
        },
    }
    for qval in QVALS:
        key = 'Q' + str(qval)
        pctd['stats']['bases'][key] = 100.0 * bcntd[qval] / total_bases
        pctd['stats']['reads'][key] = 100.0 * rcntd[qval] / total_reads
    # dict2json(pctd, out_json)
    lines = [
        "Q20(bases):" + str(round(pctd['stats']['bases']['Q20'], 2)) + '\n',
        "Q30(bases):" + str(round(pctd['stats']['bases']['Q30'], 2)) + '\n',
        "Q20(reads):" + str(round(pctd['stats']['reads']['Q20'], 2)) + '\n',
        "Q30(reads):" + str(round(pctd['stats']['reads']['Q30'], 2)) + '\n',
    ]
    # Fixed: close the output file (the original leaked the handle), and
    # accept an already-open handle to match the argparse sys.stdout default.
    if isinstance(out_json, str):
        with open(out_json, 'w') as target:
            target.writelines(lines)
    else:
        out_json.writelines(lines)
if __name__ == '__main__':
    # The module docstring doubles as CLI help: text before the first
    # triple-newline is the description, the rest the epilog.
    usage = __doc__.split('\n\n\n')
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=usage[0], epilog=usage[1])
    parser.add_argument('--input', type=str, help='Path to input FASTQ file')
    # NOTE(review): the default is the sys.stdout handle while type=str
    # expects a path -- confirm the no--o invocation works end to end.
    parser.add_argument('-o', '--output', type=str, default=sys.stdout,
                        help='Path to output JSON file')
    parser.add_argument('--fmt', type=str, choices=['sanger', 'illumina',
                        'solexa'], default='illumina', help='FASTQ quality encoding')
    parser.add_argument('--version', action='version', version='%(prog)s ' +
                        __version__)
    args = parser.parse_args()
    # adjust format name to Biopython-compatible name
    fmt = 'fastq-' + args.fmt
    gather_stat(args.input, args.output, fmt)
|
ablifedev/ABLIRC
|
ABLIRC/bin/public/seq_quality_stat.py
|
Python
|
mit
| 3,339
|
[
"Biopython"
] |
e47222303e661fd7891a29492ce0fc8b403e44a0b54dba91131d8aff41005ccc
|
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SearchIO parser for HMMER domain table output format."""
from itertools import chain
from Bio.Alphabet import generic_protein
from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment
from .hmmer3_tab import Hmmer3TabParser, Hmmer3TabIndexer
class Hmmer3DomtabParser(Hmmer3TabParser):
    """Base hmmer3-domtab iterator.

    Parses HMMER3's 23-column, whitespace-separated domain table
    (``--domtblout``) into QueryResult/Hit/HSP/HSPFragment objects.
    Subclasses set ``hmm_as_hit`` to choose whether the HMM-profile
    coordinates are treated as hit or query coordinates.
    """

    def _parse_row(self):
        """Returns a dictionary of parsed row values."""
        assert self.line
        cols = [x for x in self.line.strip().split(' ') if x]
        # if len(cols) > 23, we have extra description columns
        # combine them all into one string in the 19th column
        if len(cols) > 23:
            cols[22] = ' '.join(cols[22:])
        elif len(cols) < 23:
            # missing description column: pad with an empty string
            cols.append('')
        assert len(cols) == 23
        # assign parsed column data into qresult, hit, and hsp dicts
        qresult = {}
        qresult['id'] = cols[3]                    # query name
        qresult['accession'] = cols[4]             # query accession
        qresult['seq_len'] = int(cols[5])          # qlen
        hit = {}
        hit['id'] = cols[0]                        # target name
        hit['accession'] = cols[1]                 # target accession
        hit['seq_len'] = int(cols[2])              # tlen
        hit['evalue'] = float(cols[6])             # evalue
        hit['bitscore'] = float(cols[7])           # score
        hit['bias'] = float(cols[8])               # bias
        hit['description'] = cols[22]              # description of target
        hsp = {}
        hsp['domain_index'] = int(cols[9])         # # (domain number)
        # not parsing cols[10] since it's basically len(hit)
        hsp['evalue_cond'] = float(cols[11])       # c-evalue
        hsp['evalue'] = float(cols[12])            # i-evalue
        hsp['bitscore'] = float(cols[13])          # score
        hsp['bias'] = float(cols[14])              # bias
        # start coordinates are converted from 1-based to 0-based
        hsp['env_start'] = int(cols[19]) - 1       # env from
        hsp['env_end'] = int(cols[20])             # env to
        hsp['acc_avg'] = float(cols[21])           # acc
        frag = {}
        # strand is always 0, since HMMER now only handles protein
        frag['hit_strand'] = frag['query_strand'] = 0
        frag['hit_start'] = int(cols[15]) - 1      # hmm from
        frag['hit_end'] = int(cols[16])            # hmm to
        frag['query_start'] = int(cols[17]) - 1    # ali from
        frag['query_end'] = int(cols[18])          # ali to
        # HMMER alphabets are always protein
        frag['alphabet'] = generic_protein
        # switch hmm<-->ali coordinates if hmm is not hit
        if not self.hmm_as_hit:
            frag['hit_end'], frag['query_end'] = \
                    frag['query_end'], frag['hit_end']
            frag['hit_start'], frag['query_start'] = \
                    frag['query_start'], frag['hit_start']
        return {'qresult': qresult, 'hit': hit, 'hsp': hsp, 'frag': frag}

    def _parse_qresult(self):
        """Generator function that returns QueryResult objects.

        Implemented as a small state machine: each table row becomes one
        HSP; consecutive rows with the same hit id are grouped into a Hit,
        and consecutive rows with the same query id into a QueryResult,
        which is yielded whenever the query id changes or EOF is reached.
        """
        # state values, determines what to do for each line
        state_EOF = 0
        state_QRES_NEW = 1
        state_QRES_SAME = 3
        state_HIT_NEW = 2
        state_HIT_SAME = 4
        # dummies for initial states
        qres_state = None
        hit_state = None
        file_state = None
        # dummies for initial id caches
        prev_qid = None
        prev_hid = None
        # dummies for initial parsed value containers
        cur, prev = None, None
        hit_list, hsp_list = [], []
        while True:
            # store previous line's parsed values, for every line after the 1st
            if cur is not None:
                prev = cur
                prev_qid = cur_qid
                prev_hid = cur_hid
            # only parse the line if it's not EOF
            if self.line and not self.line.startswith('#'):
                cur = self._parse_row()
                cur_qid = cur['qresult']['id']
                cur_hid = cur['hit']['id']
            else:
                file_state = state_EOF
                # mock ID values since the line is empty
                cur_qid, cur_hid = None, None
            # get the state of hit and qresult
            if prev_qid != cur_qid:
                qres_state = state_QRES_NEW
            else:
                qres_state = state_QRES_SAME
            # new hits are hits with different ids or hits in a new qresult
            if prev_hid != cur_hid or qres_state == state_QRES_NEW:
                hit_state = state_HIT_NEW
            else:
                hit_state = state_HIT_SAME
            # start creating objects after the first line (i.e. prev is filled)
            if prev is not None:
                # each line is basically an HSP with one HSPFragment
                frag = HSPFragment(prev_hid, prev_qid)
                for attr, value in prev['frag'].items():
                    setattr(frag, attr, value)
                hsp = HSP([frag])
                for attr, value in prev['hsp'].items():
                    setattr(hsp, attr, value)
                hsp_list.append(hsp)
                # create hit object when we've finished parsing all its hsps
                # i.e. when hit state is state_HIT_NEW
                if hit_state == state_HIT_NEW:
                    hit = Hit(hsp_list)
                    for attr, value in prev['hit'].items():
                        setattr(hit, attr, value)
                    hit_list.append(hit)
                    hsp_list = []
                # create qresult and yield if we're at a new qresult or EOF
                if qres_state == state_QRES_NEW or file_state == state_EOF:
                    qresult = QueryResult(hit_list, prev_qid)
                    for attr, value in prev['qresult'].items():
                        setattr(qresult, attr, value)
                    yield qresult
                    # if current line is EOF, break
                    if file_state == state_EOF:
                        break
                    hit_list = []
            self.line = self.handle.readline()
class Hmmer3DomtabHmmhitParser(Hmmer3DomtabParser):
    """Parser for the HMMER domain table format that assumes HMM profile
    coordinates are hit coordinates."""
    # Controls the coordinate swap in Hmmer3DomtabParser._parse_row.
    hmm_as_hit = True
class Hmmer3DomtabHmmqueryParser(Hmmer3DomtabParser):
    """Parser for the HMMER domain table format that assumes HMM profile
    coordinates are query coordinates."""
    # Controls the coordinate swap in Hmmer3DomtabParser._parse_row.
    hmm_as_hit = False
class Hmmer3DomtabHmmhitIndexer(Hmmer3TabIndexer):
    """Indexer class for HMMER domain table output that assumes HMM profile
    coordinates are hit coordinates."""
    _parser = Hmmer3DomtabHmmhitParser
    # Query names live in column 3 of the domain table.
    _query_id_idx = 3
class Hmmer3DomtabHmmqueryIndexer(Hmmer3TabIndexer):
    """Indexer class for HMMER domain table output that assumes HMM profile
    coordinates are query coordinates."""
    _parser = Hmmer3DomtabHmmqueryParser
    # Query names live in column 3 of the domain table.
    _query_id_idx = 3
class Hmmer3DomtabHmmhitWriter(object):
    """Writer for hmmer3-domtab output format which writes hit coordinates
    as HMM profile coordinates.

    Reproduces HMMER's own ``--domtblout`` layout, including its
    column-width computation (see the p7_tophits.c references below).
    """
    hmm_as_hit = True

    def __init__(self, handle):
        # `handle` is an open, writable file-like object.
        self.handle = handle

    def write_file(self, qresults):
        """Writes to the handle.
        Returns a tuple of how many QueryResult, Hit, and HSP objects were written.
        """
        handle = self.handle
        qresult_counter, hit_counter, hsp_counter, frag_counter = 0, 0, 0, 0
        try:
            first_qresult = next(qresults)
        except StopIteration:
            # empty input: emit a header with default column widths only
            handle.write(self._build_header())
        else:
            # write header
            handle.write(self._build_header(first_qresult))
            # and then the qresults
            for qresult in chain([first_qresult], qresults):
                if qresult:
                    handle.write(self._build_row(qresult))
                    qresult_counter += 1
                    hit_counter += len(qresult)
                    hsp_counter += sum(len(hit) for hit in qresult)
                    frag_counter += sum(len(hit.fragments) for hit in qresult)
        return qresult_counter, hit_counter, hsp_counter, frag_counter

    def _build_header(self, first_qresult=None):
        """Returns the header string of a domain HMMER table output."""
        # calculate whitespace required
        # adapted from HMMER's source: src/p7_tophits.c#L1157
        if first_qresult:
            # qnamew = max(20, len(first_qresult.id))
            qnamew = 20
            tnamew = max(20, len(first_qresult[0].id))
            try:
                qaccw = max(10, len(first_qresult.acc))
                taccw = max(10, len(first_qresult[0].acc))
            except AttributeError:
                qaccw, taccw = 10, 10
        else:
            qnamew, tnamew, qaccw, taccw = 20, 20, 10, 10
        header = "#%*s %22s %40s %11s %11s %11s\n" % \
                (tnamew + qnamew - 1 + 15 + taccw + qaccw, "", "--- full sequence ---",
                "-------------- this domain -------------", "hmm coord",
                "ali coord", "env coord")
        header += "#%-*s %-*s %5s %-*s %-*s %5s %9s %6s %5s %3s %3s %9s " \
                "%9s %6s %5s %5s %5s %5s %5s %5s %5s %4s %s\n" % (tnamew - 1,
                " target name", taccw, "accession", "tlen", qnamew,
                "query name", qaccw, "accession", "qlen", "E-value", "score",
                "bias", "#", "of", "c-Evalue", "i-Evalue", "score", "bias",
                "from", "to", "from", "to", "from", "to", "acc",
                "description of target")
        header += "#%*s %*s %5s %*s %*s %5s %9s %6s %5s %3s %3s %9s %9s " \
                "%6s %5s %5s %5s %5s %5s %5s %5s %4s %s\n" % (tnamew - 1,
                "-------------------", taccw, "----------", "-----",
                qnamew, "--------------------", qaccw, "----------",
                "-----", "---------", "------", "-----", "---", "---",
                "---------", "---------", "------", "-----", "-----", "-----",
                "-----", "-----", "-----", "-----", "----",
                "---------------------")
        return header

    def _build_row(self, qresult):
        """Returns a string or one row or more of the QueryResult object."""
        rows = ''
        # calculate whitespace required
        # adapted from HMMER's source: src/p7_tophits.c#L1083
        qnamew = max(20, len(qresult.id))
        tnamew = max(20, len(qresult[0].id))
        try:
            qaccw = max(10, len(qresult.accession))
            taccw = max(10, len(qresult[0].accession))
            qresult_acc = qresult.accession
        except AttributeError:
            qaccw, taccw = 10, 10
            qresult_acc = '-'
        for hit in qresult:
            # try to get hit accession
            try:
                hit_acc = hit.accession
            except AttributeError:
                hit_acc = '-'
            for hsp in hit.hsps:
                # emit coordinates in the orientation this writer promises;
                # starts go back to 1-based for output
                if self.hmm_as_hit:
                    hmm_to = hsp.hit_end
                    hmm_from = hsp.hit_start + 1
                    ali_to = hsp.query_end
                    ali_from = hsp.query_start + 1
                else:
                    hmm_to = hsp.query_end
                    hmm_from = hsp.query_start + 1
                    ali_to = hsp.hit_end
                    ali_from = hsp.hit_start + 1
                rows += "%-*s %-*s %5d %-*s %-*s %5d %9.2g %6.1f %5.1f %3d %3d" \
                " %9.2g %9.2g %6.1f %5.1f %5d %5d %5ld %5ld %5d %5d %4.2f %s\n" % \
                (tnamew, hit.id, taccw, hit_acc, hit.seq_len, qnamew, qresult.id,
                qaccw, qresult_acc, qresult.seq_len, hit.evalue, hit.bitscore,
                hit.bias, hsp.domain_index, len(hit.hsps), hsp.evalue_cond, hsp.evalue,
                hsp.bitscore, hsp.bias, hmm_from, hmm_to, ali_from, ali_to,
                hsp.env_start + 1, hsp.env_end, hsp.acc_avg, hit.description)
        return rows
class Hmmer3DomtabHmmqueryWriter(Hmmer3DomtabHmmhitWriter):
    """Writer for hmmer3-domtab output format which writes query coordinates
    as HMM profile coordinates."""
    # Flips the coordinate orientation used in _build_row.
    hmm_as_hit = False
# if not used as a module, run the doctest
if __name__ == "__main__":
    # run_doctest executes this module's embedded doctests via Bio's helper.
    from Bio._utils import run_doctest
    run_doctest()
|
zjuchenyuan/BioWeb
|
Lib/Bio/SearchIO/HmmerIO/hmmer3_domtab.py
|
Python
|
mit
| 12,530
|
[
"Biopython"
] |
bfe81c72a6aabd0f49f98b85e4165a71881824505249216353d66dff8e6ea0cc
|
"""
Module defining halo model components for halo exclusion.
"""
import numpy as np
from hmf import Component
from cached_property import cached_property
from scipy import integrate as intg
import warnings
from hmf._internals import pluggable
try:
from numba import jit
USE_NUMBA = True
except ImportError: # pragma: no cover
USE_NUMBA = False
warnings.warn(
"Warning: Some Halo-Exclusion models have significant speedup when using Numba"
)
# ===============================================================================
# UTILITIES
# ===============================================================================
def outer(a, b):
    r"""
    Calculate the outer product of two vectors.

    The result has shape ``a.shape + b.shape`` (unlike ``np.outer``, which
    always returns a 2-D array).
    """
    return np.multiply.outer(a, b)
def dbltrapz(X, dx, dy=None):
    """
    Double-integral over the last two dimensions of X using trapezoidal rule.

    Parameters
    ----------
    X : np.ndarray
        Integrand sampled on a regular grid; integration runs over the
        last two axes.
    dx : float
        Grid spacing along the second-to-last axis.
    dy : float, optional
        Grid spacing along the last axis; defaults to ``dx``.
    """
    # Fixed: the original used `dy = dy or dx`, which silently replaced a
    # legitimate dy == 0 with dx; only None means "use dx".
    dy = dx if dy is None else dy
    out = X.copy()
    # Trapezoidal weights: interior samples count double along each axis.
    out[..., 1:-1, :] *= 2
    out[..., :, 1:-1] *= 2
    return dx * dy * np.sum(out, axis=(-2, -1)) / 4.0
def makeW(nx, ny):
    r"""
    Return a window matrix for double-intergral.

    The matrix is the outer product of the 1-D composite-Simpson weight
    patterns (1, 4, 2, 4, ..., 1) along each axis.
    """
    weights_x = np.ones(nx)
    weights_x[1:nx - 1:2] = 4
    weights_x[2:nx - 1:2] = 2
    weights_y = np.ones(ny)
    weights_y[1:ny - 1:2] = 4
    weights_y[2:ny - 1:2] = 2
    return np.outer(weights_x, weights_y)
if USE_NUMBA:
    # Numba-compiled variants of the double-integral helpers.  They exploit
    # symmetry (X[..., i, j] == X[..., j, i]) by summing only the upper
    # triangle and doubling off-diagonal terms.

    @jit(nopython=True)
    def dblsimps_(X, dx, dy):  # pragma: no cover
        """
        Double-integral of X **FOR SYMMETRIC FUNCTIONS**.
        """
        nx = X.shape[-2]
        ny = X.shape[-1]
        W = makeW_(nx, ny)  # only upper
        tot = np.zeros_like(X[..., 0, 0])
        for ix in range(nx):
            tot += W[ix, ix] * X[..., ix, ix]
            for iy in range(ix + 1, ny):
                # off-diagonal entries appear twice by symmetry
                tot += 2 * W[ix, iy] * X[..., ix, iy]
        return dx * dy * tot / 9.0

    @jit(nopython=True)
    def makeW_(nx, ny):  # pragma: no cover
        r"""
        Return a window matrix for symmetric double-intergral.
        """
        W = np.ones((nx, ny))
        if nx % 2 == 0:
            # Even sample count: blend a trapezoidal end-correction into the
            # Simpson weights (the 2.5/1.5 factors on the last two slices).
            for ix in range(1, nx - 2, 2):
                W[ix, -1] *= 4
                W[-1, ix] *= 4
                for iy in range(ny - 1):
                    W[ix, iy] *= 4
                    W[iy, ix] *= 4
            for ix in range(2, nx - 2, 2):
                W[ix, -1] *= 2
                W[-1, ix] *= 2
                for iy in range(ny - 1):
                    W[ix, iy] *= 2
                    W[iy, ix] *= 2
            for ix in range(nx):
                W[ix, -2] *= 2.5
                W[ix, -1] *= 1.5
                W[-2, ix] *= 2.5
                W[-1, ix] *= 1.5
        else:
            # Odd sample count: plain composite-Simpson 4/2 pattern.
            for ix in range(1, nx - 1, 2):
                for iy in range(ny):
                    W[ix, iy] *= 4
                    W[iy, ix] *= 4
            for ix in range(2, nx - 1, 2):
                for iy in range(ny):
                    W[ix, iy] *= 2
                    W[iy, ix] *= 2
        return W

    @jit(nopython=True)
    def makeH_(nx, ny):  # pragma: no cover
        """Return the window matrix for trapezoidal intergral."""
        H = np.ones((nx, ny))
        for ix in range(1, nx - 1):
            for iy in range(ny):
                H[ix, iy] *= 2
                H[iy, ix] *= 2
        return H

    @jit(nopython=True)
    def dbltrapz_(X, dx, dy):  # pragma: no cover
        """Double-integral of X for the trapezoidal method.

        NOTE(review): unlike dblsimps_, the accumulation indexes X[ix, iy]
        rather than X[..., ix, iy], so this presumably expects a strictly
        2-D X -- confirm with callers.
        """
        nx = X.shape[-2]
        ny = X.shape[-1]
        H = makeH_(nx, ny)
        tot = np.zeros_like(X[..., 0, 0])
        for ix in range(nx):
            tot += H[ix, ix] * X[ix, ix]
            for iy in range(ix + 1, ny):
                tot += 2 * H[ix, iy] * X[ix, iy]
        return dx * dy * tot / 4.0
# ===============================================================================
# Halo-Exclusion Models
# ===============================================================================
@pluggable
class Exclusion(Component):
    """
    Base class for exclusion models.
    All models will need to perform single or double integrals over
    arrays that may have an extra two dimensions. The maximum possible
    size is k*r*m*m, which for normal values of the vectors equates to
    ~ 1000*50*500*500 = 12,500,000,000 values, which in 64-bit reals is
    1e11 bytes = 100GB. We thus limit this to a maximum of either k*r*m
    or r*m*m, both of which should be less than a GB of memory.
    It is possibly better to limit it to k*r or m*m, which should be quite
    memory efficient, but then without accelerators (ie. Numba), these
    will be very slow.
    """

    def __init__(self, m, density, Ifunc, bias, r, delta_halo, mean_density):
        # Shapes below are the original author's annotations -- TODO confirm.
        self.density = density  # 1d, (m)
        self.m = m  # 1d, (m)
        self.Ifunc = Ifunc  # 2d, (k,m)
        self.bias = bias  # 1d (m) or 2d (r,m)
        self.r = r  # 1d (r)
        self.mean_density = mean_density
        self.delta_halo = delta_halo
        # Log-spacing of the mass grid; assumes m is uniformly log-spaced
        # -- confirm.
        self.dlnx = np.log(m[1] / m[0])

    def raw_integrand(self) -> np.ndarray:
        """
        Return either a 2d (k,m) or 3d (r,k,m) array with the general integrand.
        """
        if self.bias.ndim == 1:
            return self.Ifunc * self.bias * self.m  # *m since integrating in logspace
        else:
            # scale-dependent bias: broadcast (k,m) * (r,m) -> (r,k,m)
            return np.einsum("ij,kj->kij", self.Ifunc * self.m, self.bias)

    def integrate(self):
        """
        Integrate the :meth:`raw_integrand` over mass.
        This should pass back whatever is multiplied by P_m(k) to get the two-halo
        term. Often this will be a square of an integral, sometimes a Double-integral.
        """
        # Abstract: concrete exclusion models must override this.
        pass
class NoExclusion(Exclusion):
    r"""A halo-exclusion model in which no exclusion at all is applied."""

    def integrate(self):
        """Integrate the :meth:`raw_integrand` over mass and square it."""
        integrand = self.raw_integrand()
        result = intg.simps(integrand, dx=self.dlnx)
        return result ** 2
class Sphere(Exclusion):
    r"""Spherical halo exclusion model.

    Only halo pairs where the virial radius of
    either halo is smaller than half of the separation, i.e.:

    .. math:: R_{\rm vir} \le r/2

    will be accounted for.
    """

    def raw_integrand(self):
        """
        Return either a 2d (k,m) or 3d (r,k,m) array with the general integrand.
        """
        if self.bias.ndim == 1:
            # *m since integrating in logspace; replicate over r so the
            # per-r mask can be applied.
            return outer(np.ones_like(self.r), self.Ifunc * self.bias * self.m)
        else:
            return np.einsum("ij,kj->kij", self.Ifunc * self.m, self.bias)

    @cached_property
    def density_mod(self):
        """The modified density, under new limits."""
        # Zero the contribution of masses above the per-r limit, then
        # integrate over log-mass.
        density = np.outer(np.ones_like(self.r), self.density * self.m)
        density[self.mask] = 0
        return intg.simps(density, dx=self.dlnx, even="first")

    @cached_property
    def mask(self):
        """Boolean (r, m) array of elements that should be set to zero."""
        return (np.outer(self.m, np.ones_like(self.r)) > self.mlim).T

    @property
    def mlim(self):
        """The mass threshold for the mask: mass of a halo whose virial radius is r/2."""
        return 4 * np.pi * (self.r / 2) ** 3 * self.mean_density * self.delta_halo / 3

    def integrate(self):
        """
        Integrate the :meth:`raw_integrand` over mass.
        """
        integ = self.raw_integrand()  # r,k,m
        # View as (k, r, m) so the 2d (r, m) mask applies to every k-slice.
        integ.transpose((1, 0, 2))[:, self.mask] = 0
        return intg.simps(integ, dx=self.dlnx, even="first") ** 2
class DblSphere(Sphere):
    r"""Double Sphere model of halo exclusion.

    Only halo pairs for which the sum of virial radii
    is smaller than the separation, i.e.:

    .. math:: R_{\rm vir,1}+R_{\rm vir,2} \le r

    will be accounted for.
    """

    @property
    def r_halo(self):
        """The virial radius of each halo (inverts M = 4/3 pi R^3 delta rho)."""
        return (3 * self.m / (4 * np.pi * self.delta_halo * self.mean_density)) ** (
            1.0 / 3.0
        )

    @cached_property
    def mask(self):
        """Boolean (r,m,m) array of elements that should be set to zero."""
        rvir = self.r_halo
        # Pairwise sums of virial radii, compared against each separation r.
        return (outer(np.add.outer(rvir, rvir), np.ones_like(self.r)) > self.r).T

    @cached_property
    def density_mod(self):
        """The modified density, under new limits."""
        out = np.zeros_like(self.r)
        for i, r in enumerate(self.r):
            # (m, m) matrix of paired densities with excluded pairs zeroed,
            # double-integrated over log-mass at this separation.
            integrand = np.outer(self.density * self.m, np.ones_like(self.density))
            integrand[self.mask[i]] = 0
            out[i] = intg.simps(
                intg.simps(integrand, dx=self.dlnx, even="first"),
                dx=self.dlnx,
                even="first",
            )
        return np.sqrt(out)

    def integrate(self):
        """
        Integrate the :meth:`raw_integrand` over mass (a double integral here).
        """
        integ = self.raw_integrand()  # (r,k,m)
        return integrate_dblsphere(integ, self.mask, self.dlnx)
def integrate_dblsphere(integ, mask, dx):
    """
    Integration function for the double-sphere model.

    Parameters
    ----------
    integ : np.ndarray
        The (r, k, m) integrand.
    mask : np.ndarray
        Boolean (r, m, m) array; True entries mark excluded halo pairs.
    dx : float
        The log-mass grid spacing.

    Returns
    -------
    np.ndarray
        The (r, k) double integral over both mass axes.
    """
    out = np.zeros_like(integ[:, :, 0])
    integrand = np.zeros_like(mask, dtype=float)
    for ik in range(integ.shape[1]):
        # Outer product forms the (m, m) pair integrand at fixed r and k.
        for ir in range(mask.shape[0]):
            integrand[ir] = np.outer(integ[ir, ik, :], integ[ir, ik, :])
        integrand[mask] = 0
        out[:, ik] = intg.simps(
            intg.simps(integrand, dx=dx, even="first"), dx=dx, even="first"
        )
    return out
if USE_NUMBA:

    @jit(nopython=True)
    def integrate_dblsphere_(integ, mask, dx):  # pragma: no cover
        r"""
        The same as :func:`integrate_dblsphere`, but uses NUMBA to speed it up.
        """
        nr = integ.shape[0]
        nk = integ.shape[1]
        nm = mask.shape[1]
        out = np.zeros((nr, nk))
        integrand = np.zeros((nm, nm))
        for ir in range(nr):
            for ik in range(nk):
                # Fill only the upper triangle; dblsimps_ assumes symmetry
                # and never reads the lower triangle.
                for im in range(nm):
                    for jm in range(im, nm):
                        if mask[ir, im, jm]:
                            integrand[im, jm] = 0
                        else:
                            integrand[im, jm] = integ[ir, ik, im] * integ[ir, ik, jm]
                out[ir, ik] = dblsimps_(integrand, dx, dx)
        return out

    class DblSphere_(DblSphere):  # pragma: no cover
        r"""
        The same as :class:`DblSphere`, but uses NUMBA to speed up the integration.
        """

        def integrate(self):
            """Integrate the :meth:`raw_integrand` over mass."""
            integ = self.raw_integrand()  # (r,k,m)
            return integrate_dblsphere_(integ, self.mask, self.dlnx)
class DblEllipsoid(DblSphere):
    r"""
    Double Ellipsoid model of halo exclusion.

    Assuming a lognormal distribution
    of ellipticities for halos, the probability of halo pairs **not** excluded
    is:

    .. math:: P(y) = 3 y^2 - 2 y^3 ,\; y = (x-0.8)/0.29,\; x = r/(R_{\rm vir,1}+R_{\rm vir,2})

    taken from [1]_.

    References
    ----------
    .. [1] Tinker, J. et al., " On the Mass-to-Light Ratio of Large-Scale Structure",
       https://ui.adsabs.harvard.edu/abs/2005ApJ...631...41T.
    """

    @cached_property
    def mask(self):
        "Unnecessary for this approach (the probability weighting replaces it)."
        return None

    @cached_property
    def prob(self):
        """
        The probability distribution used in calculating the double integral.
        """
        rvir = self.r_halo
        # x = r / (rvir_1 + rvir_2), with shape (r, m, m).
        x = outer(self.r, 1 / np.add.outer(rvir, rvir))
        x = (x - 0.8) / 0.29  # this is y but we re-use the memory
        # Clip in place: P = 0 below y=0 and P = 1 above y=1.
        np.clip(x, 0, 1, x)
        return 3 * x ** 2 - 2 * x ** 3

    @cached_property
    def density_mod(self):
        """The modified density, under new limits."""
        # Pairwise density products weighted by the non-exclusion probability.
        integrand = self.prob * outer(
            np.ones_like(self.r), np.outer(self.density * self.m, self.density * self.m)
        )
        return np.sqrt(dbltrapz(integrand, self.dlnx))

    def integrate(self):
        """
        Integrate the :meth:`raw_integrand` over mass.
        """
        integ = self.raw_integrand()  # (r,k,m)
        out = np.zeros_like(integ[:, :, 0])
        integrand = np.zeros_like(self.prob)
        for ik in range(integ.shape[1]):
            for ir in range(len(self.r)):
                # Weight each mass-pair product by its survival probability.
                integrand[ir] = self.prob[ir] * np.outer(
                    integ[ir, ik, :], integ[ir, ik, :]
                )
            out[:, ik] = intg.simps(intg.simps(integrand, dx=self.dlnx), dx=self.dlnx)
        return out
if USE_NUMBA:

    class DblEllipsoid_(DblEllipsoid):  # pragma: no cover
        r"""
        The same as :class:`DblEllipsoid`, but uses NUMBA to speed up the integration.
        """

        @cached_property
        def density_mod(self):  # pragma: no cover
            """The modified density, under new limits."""
            return density_mod_(
                self.r,
                self.r_halo,
                np.outer(self.density * self.m, self.density * self.m),
                self.dlnx,
            )

        @cached_property
        def prob(self):  # pragma: no cover
            """
            The probability distribution used in calculating the double integral.
            """
            return prob_inner_(self.r, self.r_halo)

        def integrate(self):  # pragma: no cover
            """
            Integrate the :meth:`raw_integrand` over mass.
            """
            return integrate_dblell(self.raw_integrand(), self.prob, self.dlnx)

    @jit(nopython=True)
    def integrate_dblell(integ, prob, dx):  # pragma: no cover
        r"""Double integration via the trapezoidal method, using NUMBA."""
        nr = integ.shape[0]
        nk = integ.shape[1]
        nm = prob.shape[1]
        out = np.zeros((nr, nk))
        integrand = np.zeros((nm, nm))
        for ir in range(nr):
            for ik in range(nk):
                # Upper triangle only; dbltrapz_ assumes symmetry.
                for im in range(nm):
                    for jm in range(im, nm):
                        integrand[im, jm] = (
                            integ[ir, ik, im] * integ[ir, ik, jm] * prob[ir, im, jm]
                        )
                out[ir, ik] = dbltrapz_(integrand, dx, dx)
        return out

    @jit(nopython=True)
    def density_mod_(r, rvir, densitymat, dx):  # pragma: no cover
        """The modified density, under new limits (NUMBA version)."""
        d = np.zeros(len(r))
        for ir, rr in enumerate(r):
            integrand = prob_inner_r_(rr, rvir) * densitymat
            d[ir] = dbltrapz_(integrand, dx, dx)
        return np.sqrt(d)

    @jit(nopython=True)
    def prob_inner_(r, rvir):  # pragma: no cover
        """
        Jit-compiled version of calculating prob, taking advantage of symmetry.

        NOTE(review): only the upper triangle of each (m, m) slice is
        filled; the lower triangle of the np.empty output is uninitialized
        and must never be read (integrate_dblell only reads jrv >= irv).
        """
        nrv = len(rvir)
        out = np.empty((len(r), nrv, nrv))
        for ir, rr in enumerate(r):
            for irv, rv1 in enumerate(rvir):
                for jrv in range(irv, nrv):
                    rv2 = rvir[jrv]
                    # y = (x - 0.8) / 0.29 with x = r/(rv1+rv2), clamped to [0, 1].
                    x = (rr / (rv1 + rv2) - 0.8) / 0.29
                    if x <= 0:
                        out[ir, irv, jrv] = 0
                    elif x >= 1:
                        out[ir, irv, jrv] = 1
                    else:
                        out[ir, irv, jrv] = 3 * x ** 2 - 2 * x ** 3
        return out

    @jit(nopython=True)
    def prob_inner_r_(r, rvir):  # pragma: no cover
        """
        Jit-compiled version of calculating prob along one r,
        taking advantage of symmetry (upper triangle only, as above).
        """
        nrv = len(rvir)
        out = np.empty((nrv, nrv))
        for irv, rv1 in enumerate(rvir):
            for jrv in range(irv, nrv):
                rv2 = rvir[jrv]
                x = (r / (rv1 + rv2) - 0.8) / 0.29
                if x <= 0:
                    out[irv, jrv] = 0
                elif x >= 1:
                    out[irv, jrv] = 1
                else:
                    out[irv, jrv] = 3 * x ** 2 - 2 * x ** 3
        return out
class NgMatched(DblEllipsoid):
    r"""
    A model for double-ellipsoid halo exclusion, where a mask is
    defined so that the number density of galaxies is matched.
    """

    @cached_property
    def mask(self):
        """Boolean (r, m) mask chosen so the cumulative density matches density_mod."""
        integrand = self.density * self.m
        # cumint = cumsimps(integrand,dx = self.dlnx)
        cumint = intg.cumtrapz(integrand, dx=self.dlnx, initial=0)  # len m
        cumint = np.outer(np.ones_like(self.r), cumint)  # r,m
        # Mask masses beyond the point where the cumulative density reaches
        # the modified density (the 1.0001 factor allows for round-off).
        return np.where(
            cumint > 1.0001 * np.outer(self.density_mod, np.ones_like(self.m)),
            np.ones_like(cumint, dtype=bool),
            np.zeros_like(cumint, dtype=bool),
        )

    def integrate(self):
        """
        Integrate the :meth:`raw_integrand` over mass.
        """
        integ = self.raw_integrand()  # r,k,m
        # Apply the (r, m) mask to every k-slice at once.
        integ.transpose((1, 0, 2))[:, self.mask] = 0
        return intg.simps(integ, dx=self.dlnx) ** 2
if USE_NUMBA:

    class NgMatched_(DblEllipsoid_):  # pragma: no cover
        r"""
        The same as :class:`NgMatched`, but uses NUMBA to speed up the integration.
        """

        @cached_property
        def mask(self):
            """Boolean (r, m) mask chosen so the cumulative density matches density_mod."""
            integrand = self.density * self.m
            # cumint = cumsimps(integrand,dx = self.dlnx)
            cumint = intg.cumtrapz(integrand, dx=self.dlnx, initial=0)  # len m
            cumint = np.outer(np.ones_like(self.r), cumint)  # r,m
            # Mask masses beyond the point where the cumulative density
            # reaches the modified density (1.0001 allows for round-off).
            return np.where(
                cumint > 1.0001 * np.outer(self.density_mod, np.ones_like(self.m)),
                np.ones_like(cumint, dtype=bool),
                np.zeros_like(cumint, dtype=bool),
            )

        def integrate(self):
            """
            Integrate the :meth:`raw_integrand` over mass.
            """
            integ = self.raw_integrand()  # r,k,m
            integ.transpose((1, 0, 2))[:, self.mask] = 0
            return intg.simps(integ, dx=self.dlnx) ** 2
def cumsimps(func, dx):
    """
    A very simplistic cumulative Simpson's-rule integrator.

    Parameters
    ----------
    func : np.ndarray
        Function values sampled on an equally-spaced grid.
    dx : float
        The (equal) spacing between samples.

    Returns
    -------
    np.ndarray
        Cumulative integral of ``func``, same length as ``func``.  It is
        somewhat inaccurate in the first few bins, since the integral is
        simply truncated, regardless of whether the number of points so
        far is odd or even.

    Examples
    --------
    >>> x = np.linspace(0, 1, 1001)
    >>> y = np.sin(x)
    >>> ratio = cumsimps(y, 0.001) / (1 - np.cos(x))
    """
    # Cumulative Simpson weights 1,4,2,4,...: every interior point gets a
    # factor 2, and every odd interior point a further factor 2.
    f1 = func.copy()
    f1[1:-1] *= 2
    f1[1:-1:2] *= 2
    # Each partial sum over-counts its own endpoint's weight, so subtract
    # the correction (3*f_i at odd indices, f_i elsewhere), leaving the
    # endpoint with the Simpson boundary weight of 1.
    rm = func.copy()
    rm[1:-1:2] *= 3
    cs = np.cumsum(f1)
    cs -= rm
    return cs * dx / 3
|
steven-murray/halomod
|
src/halomod/halo_exclusion.py
|
Python
|
mit
| 18,292
|
[
"TINKER"
] |
2bddf09dd9a63d90c3b0f56989a198d492d47394e808bbdb3c7928560e513116
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Nodes for PPAPI IDL AST."""
from idl_namespace import IDLNamespace
from idl_node import IDLAttribute, IDLFile, IDLNode
from idl_option import GetOption
from idl_visitor import IDLVisitor
from idl_release import IDLReleaseList, IDLReleaseMap
#
# IDL Predefined types
#
# Type names built into the PPAPI IDL language itself; these never need
# to be resolved against a namespace.
BuiltIn = set(['int8_t', 'int16_t', 'int32_t', 'int64_t', 'uint8_t',
               'uint16_t', 'uint32_t', 'uint64_t', 'double_t', 'float_t',
               'handle_t', 'interface_t', 'char', 'mem_t', 'str_t', 'void'])
#
# IDLNamespaceLabelResolver
#
# Once the AST is build, we need to resolve the namespace and version
# information.
#
class IDLNamespaceLabelResolver(IDLVisitor):
  """Visitor that assigns namespaces and release ranges to AST nodes."""
  # Node classes that introduce a new namespace scope.
  NamespaceSet = set(['AST', 'Callspec', 'Interface', 'Member', 'Struct'])

  #
  # When we arrive at a node we must assign it a namespace and if the
  # node is named, then place it in the appropriate namespace.
  #
  def Arrive(self, node, parent_namespace):
    """Assign a namespace to |node| and register named nodes with the parent.

    Returns the namespace that should be passed down to the node's children.
    """
    # If we are entering a new file, clear the file-local release map.
    if node.IsA('File'): self.release_map = None
    # If this object is not a namespace aware object, use the parent's one
    if node.cls not in self.NamespaceSet:
      node.namespace = parent_namespace
    else:
      # otherwise create one.
      node.namespace = IDLNamespace(parent_namespace)
      node.namespace.name = node.GetName()
    # If this node is named, place it in its parent's namespace
    if parent_namespace and node.cls in IDLNode.NamedSet:
      # Set version min and max based on properties
      if self.release_map:
        vmin = node.GetProperty('version')
        vmax = node.GetProperty('deprecate')
        rmin = self.release_map.GetRelease(vmin)
        rmax = self.release_map.GetRelease(vmax)
        node.SetReleaseRange(rmin, rmax)
      parent_namespace.AddNode(node)
    # Pass this namespace to each child in case they inherit it
    return node.namespace

  #
  # As we return from a node, if the node is a LabelItem we pass back
  # the key=value pair representing the mapping of release to version.
  # If the node is a Label take the lists of mapping and generate a
  # version map which is assigned to the Labels parent as a property.
  #
  def Depart(self, node, data, childdata):
    """Collect label data on the way back up the tree (see comment above)."""
    if node.IsA('LabelItem'):
      return (node.GetName(), node.GetProperty('VALUE'))
    if node.IsA('Label') and node.GetName() == GetOption('label'):
      try:
        self.release_map = IDLReleaseMap(childdata)
        node.parent.release_map = self.release_map
      except Exception as err:
        node.Error('Unable to build release map: %s' % str(err))
    return None
class IDLFileTypeResolver(IDLVisitor):
  """Visitor that resolves each node's TYPEREF property against its namespace."""

  def VisitFilter(self, node, data):
    # Comment and copyright nodes carry no type information to resolve.
    return not node.IsA('Comment', 'Copyright')

  def Arrive(self, node, filenode):
    """Resolve the node's TYPEREF, storing the result on node.typelist."""
    # Track the file node to update errors
    if node.IsA('File'):
      node.SetProperty('FILE', node)
    # If this node has a TYPEREF, resolve it to a version list
    typeref = node.property_node.GetPropertyLocal('TYPEREF')
    if typeref:
      node.typelist = node.parent.namespace.FindList(typeref)
      if not node.typelist:
        node.Error('Could not resolve %s.' % typeref)
    else:
      node.typelist = None
    return filenode
#
# IDLAst
#
# A specialized version of the IDLNode for containing the whole of the
# AST. The specialized BuildTree function pulls the per file namespaces
# into the global AST namespace and checks for collisions.
#
class IDLAst(IDLNode):
  """Root node containing the whole AST; resolves namespaces on construction."""

  def __init__(self, children):
    IDLNode.__init__(self, 'AST', 'BuiltIn', 1, 0, children)
    self.Resolve()

  def Resolve(self):
    """Resolve namespaces, types and the global release list for all files."""
    self.namespace = IDLNamespace(None)
    self.namespace.name = 'AST'
    IDLNamespaceLabelResolver().Visit(self, self.namespace)
    IDLFileTypeResolver().Visit(self, None)
    # Build an ordered list of all releases
    self.releases = set()
    for filenode in self.GetListOf('File'):
      self.releases |= set(filenode.release_map.GetReleases())
    self.releases = sorted(self.releases)

  def SetTypeInfo(self, name, properties):
    """Copy the given properties onto the node registered under |name|."""
    node = self.namespace[name]
    for prop in properties:
      node.properties[prop] = properties[prop]
|
aYukiSekiguchi/ACCESS-Chromium
|
ppapi/generators/idl_ast.py
|
Python
|
bsd-3-clause
| 4,288
|
[
"VisIt"
] |
bfccc155a18da08a3892281038224503b4e4aeb0f1fa0f3837303a562520b2dd
|
#!/usr/bin/python
## Global modules
import os
from os.path import join
import logging
import shutil
import pysam
## Local modules
import mglobals
import helpers
import snp2gene
import snps_combine
log = logging.getLogger('pipeline')
def trim():
    """Quality/adapter-trim every sample's paired-end reads with Trimmomatic.

    Runs from mglobals.samples_path.  Samples that already have trimmed
    output are symlinked in from the trimmed directory instead of being
    re-trimmed.
    """
    log.info('Beginning trim')
    os.chdir(mglobals.samples_path)

    # NOTE(review): @helpers.multiprocess presumably fans the call out over
    # every sample in parallel -- confirm in helpers.
    @helpers.multiprocess(mglobals.samples_list)
    def trim_call(sample):
        log.info('Trimming sample {}'.format(sample))
        trimmed_path = join(mglobals.trimmed_path, sample)
        # Paired (p_) and unpaired (u_) outputs for each read direction.
        p_trim_R1 = trimmed_path + '_trim_R1.fastq'
        u_trim_R1 = trimmed_path + '_u_trim_R1.fastq'
        p_trim_R2 = trimmed_path + '_trim_R2.fastq'
        u_trim_R2 = trimmed_path + '_u_trim_R2.fastq'
        if os.path.exists(p_trim_R1) and os.path.exists(p_trim_R2):
            log.info('Sample already trimmed, linking from trimmed_path')
            # Need the try block because will fail if link already exists
            try:
                os.symlink(p_trim_R1, os.path.basename(p_trim_R1))
                os.symlink(p_trim_R2, os.path.basename(p_trim_R2))
            except OSError:
                pass
        else:
            threads = str(mglobals.cpu_count//10)  # This optimal for ~40 samples but would
                                                   # probably crash with a higher number.
            log.info('Sample not already trimmed, trimming now')
            trim_params = (['nice', '-n', '5',
                            'java', '-jar', mglobals.trimmomatic_path,
                            'PE',
                            '-threads', threads,
                            '-phred33',
                            sample + '_R1.fastq',
                            sample + '_R2.fastq',
                            p_trim_R1,
                            u_trim_R1,
                            p_trim_R2,
                            u_trim_R2,
                            'SLIDINGWINDOW:4:20',
                            'TRAILING:20',
                            'MINLEN:50'])
            helpers.sub_call(trim_params)
            log.info('Finished trimming sample {}, linking in.'.format(sample))
            os.symlink(p_trim_R1, os.path.basename(p_trim_R1))
            os.symlink(p_trim_R2, os.path.basename(p_trim_R2))
    trim_call()
    log.info('Finished trim')
@helpers.log_func
def my_tophat():
    """Align each sample's trimmed paired-end reads with TopHat.

    Aligns against the original reference fastas when mglobals.original is
    True, otherwise against each sample's alternate (SNP-substituted) fasta.
    """
    if mglobals.original:
        log.info('Aligning reads to the original reference fasta')
        fastas = mglobals.original_fastas
    else:
        log.info('Aligning reads to alternate reference fastas')
        fastas = mglobals.alternate_fastas

    @helpers.multiprocess(zip(mglobals.samples_list, fastas))
    def tophat_call(sample, ref_fasta):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        ref_fasta_base = ref_fasta.split('.')[0]
        mismatches = '5'
        # Mate inner distance appears to be encoded as the third
        # '_'-separated field of the sample name -- confirm naming scheme.
        mate_inner = sample.split('_')[2]
        number_of_samples = len(mglobals.samples_list)
        threads_per_sample = mglobals.cpu_count//number_of_samples
        threads = str(threads_per_sample)
        log.info('threads per sample ' + threads)
        log.info('tophat: aligning sample {} with ref fasta {}'.format(sample, ref_fasta))
        tophat_params = ['nice', '-n', '5',
                         'tophat',
                         '-p', threads,
                         '-G', mglobals.dros_gtf,
                         '--transcriptome-index=../transcriptome_data/known',
                         '-N', mismatches,
                         '--b2-L', '20',
                         '--b2-N', '1',
                         '--read-edit-dist', mismatches,
                         '-o', (sample + '_thout'),
                         '--no-novel-juncs',
                         '--mate-inner-dist', mate_inner,
                         ref_fasta_base,
                         join(mglobals.samples_path, (sample + '_trim_R1.fastq')),
                         join(mglobals.samples_path, (sample + '_trim_R2.fastq'))]
        helpers.sub_call(tophat_params)
        log.info('tophat: finished analyzing sample: {} with ref fasta: {}'.format(sample, ref_fasta))
    # Copy transcriptome index to original_path and alternate_path
    for path in [mglobals.original_path, mglobals.alternate_path]:
        if not os.path.exists(join(path, 'transcriptome_data')):
            log.info('Linking transcriptome data to ' + path)
            os.symlink(mglobals.dros_gtf_index, join(path, 'transcriptome_data'))
    tophat_call()
@helpers.log_func
def my_alignment_filter():
    """Keep only uniquely-mapped reads (NH tag == 1) from each TopHat BAM."""

    @helpers.multiprocess(mglobals.samples_list)
    def filter_call(sample):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample, (sample + '_thout')))
        else:
            os.chdir(join(mglobals.alternate_path, sample, (sample + '_thout')))
        log.info('Filtering aligned reads for: ' + sample)
        # Index each bamfile
        if not os.path.exists('accepted_hits.bam.bai'):
            pysam.index('accepted_hits.bam')
        # Filter on the NH (number of hits) tag: keep only reads that
        # aligned to exactly one location.
        raw_reads = pysam.Samfile('accepted_hits.bam', 'rb')
        filter_reads = pysam.Samfile('filter.bam', 'wb', template=raw_reads)
        for read in raw_reads.fetch():
            if ('NH', 1) in read.tags:
                filter_reads.write(read)
        raw_reads.close()
        filter_reads.close()
        pysam.index('filter.bam')
    filter_call()
@helpers.log_func
def my_pileup(out_file_extension='.mpileup'):
    """Generate a samtools mpileup file for each sample's filtered alignment."""
    if mglobals.original:
        fastas = mglobals.original_fastas
    else:
        fastas = mglobals.alternate_fastas

    @helpers.multiprocess(zip(mglobals.samples_list, fastas))
    def pileup_call(sample, ref_fasta):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        log.info('mpileup: creating .mpileup file for {} with ref fasta: {}'.format(sample, ref_fasta))
        # -B disables BAQ computation; -d raises the per-position depth cap.
        pileup_command = ['nice', '-n', '5',
                          'samtools', 'mpileup',
                          '-B',
                          '-d10000000',
                          '-f', ref_fasta,
                          join((sample + '_thout'), 'filter.bam')]
        output_file = sample + out_file_extension
        # Note: the with-statement rebinds output_file from str to file handle.
        with open(output_file, 'w') as output_file:
            helpers.sub_call(pileup_command, stdout=output_file)
        log.info('mpileup: finished for {} with ref fasta: {}'.format(sample, ref_fasta))
    pileup_call()
@helpers.log_func
def my_variant_calls(in_file_extension='.mpileup', out_file_extension='.vcf'):
    '''
    Call SNPs from each sample's mpileup with VarScan (mpileup2snp).

    Note, build alternate fastas depends on the out_file_extension being '.vcf'.
    '''
    @helpers.multiprocess(mglobals.samples_list)
    def variant_calls_call(sample):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        log.info('Varscan: creating csv for: ' + sample)
        # Deliberately permissive thresholds (p-value 1, tiny min-var-freq):
        # keep essentially every candidate and let later stages filter.
        varscan_command = ['nice', '-n', '5',
                          'java', '-jar', mglobals.varscan_path,
                          'mpileup2snp',
                          (sample + in_file_extension),
                          '--min-coverage', '2',
                          '--min-avg-qual', '20',
                          '--strand-filter', '0',
                          '--p-value', '1',
                          '--min-var-freq', '1e-10',
                          '--output-vcf', '1',
                          ]
        output_file = sample + out_file_extension
        with open(output_file, 'w') as out:
            helpers.sub_call(varscan_command, stdout=out)
        log.info('varscan finished for: ' + sample)
    variant_calls_call()
@helpers.log_func
def cov_and_dgrp_filter(in_file_extension='.vcf', out_file_extension='_freeze2.vcf'):
    """Filter each sample's VCF by coverage cutoffs, then intersect with the known-SNP file."""

    @helpers.multiprocess(mglobals.samples_list)
    def filter_call(sample):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        log.info('Filtering {0} by coverage'.format(sample))
        helpers.filter_vcf_by_coverage_cutoffs(vcf=(sample + in_file_extension),
                                               cutoff_table=mglobals.coverage_cutoffs)
        log.info('Filtering {0} according to SNP file: {1}'.format(sample, mglobals.current_snp_file))
        # -wa keeps the original VCF record for every overlap.
        dgrp_intersect_command = ['nice', '-n', '5',
                                  'intersectBed',
                                  '-a', (sample + '_covfil.vcf'),  # the output of the helper
                                                                   # function above.
                                  '-b', mglobals.current_snp_file,
                                  '-wa'
                                  ]
        sample_dgrp_intersect = sample + out_file_extension
        with open(sample_dgrp_intersect, 'w') as out:
            helpers.sub_call(dgrp_intersect_command, stdout=out)
    filter_call()
@helpers.log_func
def annotate_vcf(in_file_extension='_freeze2.vcf', out_file_extension='_geneannot.vcf'):
    """Annotate each sample's filtered VCF with gene features via intersectBed."""

    @helpers.multiprocess(mglobals.samples_list)
    def annotate_call(sample):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        log.info('Annotating ' + sample + in_file_extension + ' with ' + mglobals.current_genes_file)
        # -wa -wb writes both the SNP record and the overlapping gene record.
        gtf_intersect_command = ['nice', '-n', '5',
                                 'intersectBed',
                                 '-a', (sample + in_file_extension),
                                 '-b', mglobals.current_genes_file,
                                 '-wa',
                                 '-wb'
                                 ]
        sample_gtf_intersect = sample + out_file_extension
        with open(sample_gtf_intersect, 'w') as out:
            helpers.sub_call(gtf_intersect_command, stdout=out)
    annotate_call()
@helpers.log_func
def build_alternate_fastas(in_file_extension='_geneannot.vcf'):
    """Build a per-sample alternate reference fasta from the called SNPs.

    Only run after the original-reference pass; the fixed fasta and its
    bowtie2 index end up in the sample's alternate directory.
    """
    # If we are doing the original alignment, we can now build the alternate
    # reference fastas for each sample
    @helpers.multiprocess(mglobals.samples_list)
    def build_fastas_call(sample):
        os.chdir(join(mglobals.original_path, sample))
        log.info('Beginning to build alternate fasta for: ' + sample)
        fixed_vcf = sample + '_fix.vcf'
        log.info('Removing duplicated annotations (per transcript annotations)')
        helpers.remove_dups(input_f=(sample + in_file_extension),
                            output_f=(sample + '.temp'))
        log.info('Removing duplicate alleles and adding header')
        # The fact that the original vcf was named sample.vcf is hardcoded
        # here. Be careful.
        helpers.vcf_fix(template_f=(sample + '.vcf'),
                        input_f=(sample + '.temp'),
                        output_f=fixed_vcf)
        # Delete temporary file
        os.remove(sample + '.temp')
        log.info('Creating alternate fasta')
        new_fasta = sample + '_unfixed.fa'
        # GATK substitutes the called variants into the reference genome.
        helpers.sub_call(['nice', '-n', '5',
                          'java', '-Xmx2g', '-jar',
                          mglobals.gatk_path,
                          '-R', 'genome.fa',
                          '-T', 'FastaAlternateReferenceMaker',
                          '-o', new_fasta,
                          '--variant', fixed_vcf])
        # Fix the fasta
        log.info('Fixing gatk fasta')
        # If you change this name, you need to change the alternate fastas list as well.
        final_fasta = sample + '.fa'
        helpers.fasta_fix(input_f=new_fasta, output_f=final_fasta)
        # Delete the unfixed version
        os.remove(new_fasta)
        log.info('Moving new fasta to: ' + join(mglobals.alternate_path, sample))
        shutil.move(final_fasta, join(mglobals.alternate_path, sample))
        log.info('Indexing new fasta')
        # chdir into the destination so the relative fasta name resolves.
        os.chdir(join(mglobals.alternate_path, sample))
        helpers.sub_call(['bowtie2-build',
                          '-f', final_fasta,
                          sample])
    build_fastas_call()
@helpers.log_func
def vcf_to_csv(in_file_extension='_geneannot.vcf',
               out_file_extension='_INTER_py.csv'):
    """Convert each sample's annotated VCF into the intermediate CSV format."""

    @helpers.multiprocess(mglobals.samples_list)
    def vcf_to_csv_call(sample):
        if mglobals.original:
            os.chdir(join(mglobals.original_path, sample))
        else:
            os.chdir(join(mglobals.alternate_path, sample))
        log.info('Converting vcf to csv for: ' + sample)
        snp2gene.converter(input_f=(sample + in_file_extension),
                           output_f=(sample + out_file_extension))
    vcf_to_csv_call()
@helpers.log_func
def combine_snps(in_file_extension='_INTER_py.csv'):
    """Merge per-SNP results from the original and alternate alignment passes.

    Writes <sample>_snps.csv into the samples directory and logs the mean
    proportion of reference reads for each sample.
    """
    os.chdir(mglobals.samples_path)

    @helpers.multiprocess(mglobals.samples_list)
    def combine_snps_call(sample):
        log.info('Combining SNPs for: ' + sample)
        snps_combine.combine_SNPs(orig_f=join(mglobals.original_path, sample,
                                              (sample + in_file_extension)),
                                  new_f=join(mglobals.alternate_path, sample,
                                             (sample + in_file_extension)),
                                  orig_bam=join(mglobals.original_path, sample,
                                                (sample + '_thout'), 'filter.bam'),
                                  new_bam=join(mglobals.alternate_path, sample,
                                               (sample + '_thout'), 'filter.bam'),
                                  ref_vcf=join(mglobals.current_snp_file),
                                  output_f=(sample + '_snps.csv'),
                                  cutoff_table=mglobals.coverage_cutoffs)
        mean_propR, snp_count = snps_combine.quick_mean_propR(sample + '_snps.csv')
        log.info('Mean proportion reference for {} = {}'.format(sample, mean_propR))
        log.info('\tNumber of snps = {}'.format(snp_count))
    combine_snps_call()
@helpers.log_func
def csv_recalibrate(in_file_extension='_INTER_py.csv', out_file_extension='_genes.csv'):
    """Combine per-SNP data into per-gene estimates across both alignment passes."""
    os.chdir(mglobals.samples_path)

    @helpers.multiprocess(mglobals.samples_list)
    def csv_recalibrate_call(sample):
        log.info('Combining genes for: ' + sample)
        snp2gene.snp2gene(input_orig=join(mglobals.original_path, sample,
                                          (sample + in_file_extension)),
                          input_new=join(mglobals.alternate_path, sample,
                                         (sample + in_file_extension)),
                          output_f=(sample + out_file_extension),
                          orig_bam=join(mglobals.original_path, sample,
                                        (sample + '_thout'), 'filter.bam'),
                          new_bam=join(mglobals.alternate_path, sample,
                                       (sample + '_thout'), 'filter.bam'),
                          ref_vcf=mglobals.current_snp_file,
                          snp_stats_f=(sample + '_snp_stats.csv'),
                          cutoff_table=mglobals.coverage_cutoffs)
    csv_recalibrate_call()
def main():
    """Run the full pipeline: trim, then two alignment passes.

    Pass 1 aligns against the original reference and builds the per-sample
    alternate fastas; pass 2 re-aligns against those alternate fastas;
    finally the results of both passes are combined.
    """
    trim()
    # Pass 1: original reference.
    mglobals.original = True
    my_tophat()
    my_alignment_filter()
    my_pileup()
    my_variant_calls()
    cov_and_dgrp_filter()
    annotate_vcf()
    build_alternate_fastas()
    vcf_to_csv()
    # Pass 2: per-sample alternate references (no fasta rebuild needed).
    mglobals.original = False
    my_tophat()
    my_alignment_filter()
    my_pileup()
    my_variant_calls()
    cov_and_dgrp_filter()
    annotate_vcf()
    vcf_to_csv()
    # Combine the two passes into final per-SNP and per-gene outputs.
    combine_snps()
    csv_recalibrate()
    log.info('Pipeline completed successfully!')

if __name__ == '__main__':
    main()
|
d-quinn/bio_quinn2013
|
gene_estimates/pipeline-pairedend_altfastas.py
|
Python
|
mit
| 16,026
|
[
"pysam"
] |
3d540f2c1f536bfa61a4ca809c5724cec74483e8aad5d036988b5030e34c7ee1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TAMkin is a post-processing toolkit for normal mode analysis, thermochemistry
# and reaction kinetics.
# Copyright (C) 2008-2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>, An Ghysels
# <An.Ghysels@UGent.be> and Matthias Vandichel <Matthias.Vandichel@UGent.be>
# Center for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all
# rights reserved unless otherwise stated.
#
# This file is part of TAMkin.
#
# TAMkin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "TAMkin: A Versatile Package for Vibrational Analysis and Chemical Kinetics",
# An Ghysels, Toon Verstraelen, Karen Hemelsoet, Michel Waroquier and Veronique
# Van Speybroeck, Journal of Chemical Information and Modeling, 2010, 50,
# 1736-1750W
# http://dx.doi.org/10.1021/ci100099g
#
# TAMkin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
# Import the tamkin library.
from __future__ import print_function
from tamkin import *
# Import units
from molmod.units import kjmol
# import matplotlib.pyplot as pt for plotting
import matplotlib.pyplot as pt
# Import standard python libraries
import os, sys
def load_rotor(mol, filename, rotsym, even, expansion=5):
    """Load a Gaussian03 rotational scan and construct a Rotor for *mol*.

    ``rotsym`` is the rotational symmetry number and ``even`` flags an even
    potential.

    NOTE(review): the ``expansion`` argument is accepted but never used --
    it is not forwarded to ``Rotor``; confirm whether it should map to one
    of Rotor's options or be removed.
    """
    rot_scan = load_rotscan_g03log(filename)
    rotor = Rotor(rot_scan, mol, rotsym=rotsym, even=even)
    return rotor
def load_cps_barrier(fn_template):
    """Loads the counterpoise corrected barrier from five sp calculations"""
    from molmod.io import FCHKFile

    def total_energy(fn_fchk):
        # Read only the "Total Energy" field from the formatted checkpoint.
        fchk = FCHKFile(fn_fchk, field_labels=["Total Energy"])
        return fchk.fields["Total Energy"]

    # Counterpoise correction: the full-complex energy plus, for each
    # fragment, (monomer alone) minus (monomer with ghost functions).
    e_full = total_energy(fn_template % "full_0")
    cp_frag1 = total_energy(fn_template % "sole_1") - total_energy(fn_template % "full_1")
    cp_frag2 = total_energy(fn_template % "sole_2") - total_energy(fn_template % "full_2")
    return e_full + cp_frag1 + cp_frag2
def run(do_rotor, do_counterpoise, load_sp):
prefix = {True: "ir", False: "ho"}[do_rotor]
prefix += {True: "_cps", False: "_bss"}[do_counterpoise]
if load_sp:
mol_ethyl = load_molecule_g03fchk("ethyl__freq/gaussian.fchk", "ethyl__sp/gaussian.fchk")
mol_ethene = load_molecule_g03fchk("ethene__freq/gaussian.fchk", "ethene__sp/gaussian.fchk")
else:
mol_ethyl = load_molecule_g03fchk("ethyl__freq/gaussian.fchk")
mol_ethene = load_molecule_g03fchk("ethene__freq/gaussian.fchk")
if do_counterpoise:
try:
mol_ts_gauche = load_molecule_g03fchk("ts_ad1_gauche__freq/gaussian.fchk", "ts_ad1_gauche__bsse/gaussian.fchk")
except (IOError, KeyError):
cps_barrier_gauche = load_cps_barrier("ts_ad1_gauche__cps_%s/gaussian.fchk")
mol_ts_gauche = load_molecule_g03fchk("ts_ad1_gauche__freq/gaussian.fchk", energy=cps_barrier_gauche)
try:
mol_ts_trans = load_molecule_g03fchk("ts_ad1_trans__freq/gaussian.fchk", "ts_ad1_trans__bsse/gaussian.fchk")
except (IOError, KeyError):
cps_barrier_trans = load_cps_barrier("ts_ad1_trans__cps_%s/gaussian.fchk")
mol_ts_trans = load_molecule_g03fchk("ts_ad1_trans__freq/gaussian.fchk", energy=cps_barrier_trans)
else:
if load_sp:
mol_ts_gauche = load_molecule_g03fchk("ts_ad1_gauche__freq/gaussian.fchk", "ts_ad1_gauche__sp/gaussian.fchk")
mol_ts_trans = load_molecule_g03fchk("ts_ad1_trans__freq/gaussian.fchk", "ts_ad1_gauche__sp/gaussian.fchk")
else:
mol_ts_gauche = load_molecule_g03fchk("ts_ad1_gauche__freq/gaussian.fchk")
mol_ts_trans = load_molecule_g03fchk("ts_ad1_trans__freq/gaussian.fchk")
# Perform normal mode analysis on the molecules
nma_ethyl = NMA(mol_ethyl, ConstrainExt(gradient_threshold=1e-3))
nma_ethene = NMA(mol_ethene, ConstrainExt(gradient_threshold=1e-3))
nma_ts_gauche = NMA(mol_ts_gauche, ConstrainExt(gradient_threshold=1e-3))
nma_ts_trans = NMA(mol_ts_trans, ConstrainExt(gradient_threshold=1e-3))
if do_rotor:
# Construct the rotors
rotor_ethyl = load_rotor(mol_ethyl, "ethyl__scan_methyl/gaussian.log", 6, True, 1)
rotor1_ts_gauche = load_rotor(mol_ts_gauche, "ts_ad1_gauche__scan_methyl/gaussian.log", 3, False)
rotor2_ts_gauche = load_rotor(mol_ts_gauche, "ts_ad1_trans__scan_forming_bond/gaussian.log", 1, True)
rotor1_ts_trans = load_rotor(mol_ts_trans, "ts_ad1_trans__scan_methyl/gaussian.log", 3, False)
rotor2_ts_trans = load_rotor(mol_ts_trans, "ts_ad1_trans__scan_forming_bond/gaussian.log", 1, True)
# Construct the partition functions.
pf_ethyl = PartFun(nma_ethyl, [
ExtTrans(), ExtRot(), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
rotor_ethyl,
])
pf_ethene = PartFun(nma_ethene, [
ExtTrans(), ExtRot(),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
])
pf_ts_gauche = PartFun(nma_ts_gauche, [
ExtTrans(), ExtRot(), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
rotor1_ts_gauche, rotor2_ts_gauche,
])
pf_ts_trans = PartFun(nma_ts_trans, [
ExtTrans(), ExtRot(), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
rotor1_ts_trans, rotor2_ts_trans,
])
else:
# Construct the partition functions.
pf_ethyl = PartFun(nma_ethyl, [
ExtTrans(), ExtRot(1), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
])
pf_ethene = PartFun(nma_ethene, [
ExtTrans(), ExtRot(4),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
])
pf_ts_gauche = PartFun(nma_ts_gauche, [
ExtTrans(), ExtRot(1), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
])
pf_ts_trans = PartFun(nma_ts_trans, [
ExtTrans(), ExtRot(1), Electronic(2),
#Vibrations(freq_scaling=0.9614, zp_scaling=0.9806),
])
if do_rotor:
# Plot the energy levels and the potential of the hindered rotor. The
# temperature argument is used to indicate the population of each level in the
# plot.
rotor_ethyl.plot_levels("rotor_ethyl_energy_levels.png", 300)
rotor1_ts_gauche.plot_levels("rotor1_ts_gauche_energy_levels.png", 300)
rotor2_ts_gauche.plot_levels("rotor2_ts_gauche_energy_levels.png", 300)
rotor1_ts_trans.plot_levels("rotor1_ts_trans_energy_levels.png", 300)
rotor2_ts_trans.plot_levels("rotor2_ts_trans_energy_levels.png", 300)
# Analyse the partition functions in detail
ta_ethyl = ThermoAnalysis(pf_ethyl, [300,400,500,600])
ta_ethyl.write_to_file("%s_thermo_ethyl.csv" % prefix)
ta_ethene = ThermoAnalysis(pf_ethene, [300,400,500,600])
ta_ethene.write_to_file("%s_thermo_ethene.csv" % prefix)
ta_ts_gauche = ThermoAnalysis(pf_ts_gauche, [300,400,500,600])
ta_ts_gauche.write_to_file("%s_thermo_ts_gauche.csv" % prefix)
ta_ts_trans = ThermoAnalysis(pf_ts_trans, [300,400,500,600])
ta_ts_trans.write_to_file("%s_thermo_ts_trans.csv" % prefix)
# Define kinetic models for the chemical reaction. These are the mandatory arguments:
# 1) a list of reactant partition functions
# (one for unimolecular, two for bimolecular, ...)
# 2) the transition state partition function
# There are two more optional arguments
# 3) cp: model at constant pressure, default=True
# 4) tunneling: a model for the tunelling correction
km_gauche = KineticModel([pf_ethyl, pf_ethene], pf_ts_gauche)
km_trans = KineticModel([pf_ethyl, pf_ethene], pf_ts_trans)
# Analyze the chemical reactions. These are the arguments:
# 1) A kinetic model
# 2) the starting temperature for the fit
# 3) the final temperature for the fit
# The following argument is optional:
# 4) temp_step: The interval on the temperature grid in Kelvin, 10 is default
ra_gauche = ReactionAnalysis(km_gauche, 300, 600)
ra_trans = ReactionAnalysis(km_trans, 300, 600)
# make the Arrhenius plots
pt.clf()
ra_gauche.plot_arrhenius(label="gauche", color="red")
ra_trans.plot_arrhenius(label="trans", color="blue")
pt.legend(loc=0)
pt.savefig("%s_arrhenius.png" % prefix)
# Estimate the error on the kinetic parameters due to level of theory artifacts
# with Monte Carlo sampling. The monte_carlo method takes three optional
# arguments:
# 1) freq_error: the absolute stochastic error on the frequencies (default=1*invcm)
# 2) energy_error: the absolute error on the energy (default=0.0)
# 3) num_iter: the number of monte carlo samples (default=100)
ra_gauche.monte_carlo(num_iter=100)
ra_trans.monte_carlo(num_iter=100)
# plot the parameters, this includes the monte carlo results
pt.clf()
ra_gauche.plot_parameters(label="gauche", color="red")
ra_trans.plot_parameters(label="trans", color="blue")
pt.legend(loc=0)
pt.savefig("%s_parameters.png" % prefix)
# write all results to a file.
ra_gauche.write_to_file("%s_reaction_gauche.txt" % prefix)
ra_trans.write_to_file("%s_reaction_trans.txt" % prefix)
def write_ra_summary(fn, ra):
with open(fn, "w") as f:
print("% 10.5e % 10.5e % 10.5e % 10.5e %10.5e %10.2e %10.2e %10.2e" % (
ra.compute_rate_coeff(300)/ra.unit,
ra.compute_rate_coeff(400)/ra.unit,
ra.compute_rate_coeff(500)/ra.unit,
ra.compute_rate_coeff(600)/ra.unit,
ra.A/ra.unit,
ra.Ea/kjmol,
ra.compute_delta_G(0.0)/kjmol,
ra.compute_delta_E()/kjmol,
), file=f)
write_ra_summary("%s_summary_gauche.txt" % prefix, ra_gauche)
write_ra_summary("%s_summary_trans.txt" % prefix, ra_trans)
# Help text shown by optparse; typo fixed: "gauce" -> "gauche".
usage = """USAGE: ./reaction.py dirname

Analyzes the kinetics of the addition of ethene to ethyl with and without
hindered rotors. It looks at both the trans and gauche pathways.
"""
def main():
    """Parse the command line and run the kinetics analysis in the given directory.

    Expects exactly one positional argument: the directory with the Gaussian
    frequency/scan outputs. The analysis is repeated for every combination of
    (hindered rotors on/off, counterpoise correction on/off).
    """
    from optparse import OptionParser
    parser = OptionParser(usage)
    options, args = parser.parse_args()
    if len(args) != 1:
        # Typo fixed: "on" -> "one".
        parser.error("Expecting exactly one argument")
    print("Processing %s" % args[0])
    os.chdir(args[0])
    # Directories whose name starts with "GEO" provide separate single-point
    # energy computations.
    load_sp = args[0].startswith("GEO")
    for do_rotor in True, False:
        for do_counterpoise in True, False:
            #try:
            run(do_rotor, do_counterpoise, load_sp)
            print(" OK: do_rotor=%i, do_counterpoise=%i, load_sp=%i" % (do_rotor, do_counterpoise, load_sp))
            #except (IOError, KeyError) as e:
            #    print(" Failed: do_rotor=%i, do_counterpoise=%i, load_sp=%i" % (do_rotor, do_counterpoise, load_sp))
            #    print(e)


if __name__ == "__main__":
    main()
|
molmod/tamkin
|
tamkin/examples/011_ethyl_ethene_lot/reaction.py
|
Python
|
gpl-3.0
| 11,623
|
[
"Gaussian"
] |
305fe2a8d60cefa6734a242f0b0606f261a158662b18d3baf96ad2323b9e0f5a
|
from __future__ import division, print_function, absolute_import
from scipy import stats
import numpy as np
from numpy.testing import assert_almost_equal, assert_, assert_raises, \
assert_array_almost_equal, assert_array_almost_equal_nulp, run_module_suite
def test_kde_1d():
    """Smoke-test gaussian_kde on a seeded 1-d normal sample against the
    exact normal pdf fitted to the same sample."""
    np.random.seed(8765678)
    sample = np.random.randn(500)
    mu = sample.mean()
    sigma = sample.std(ddof=1)
    kde = stats.gaussian_kde(sample)
    grid = np.linspace(-7, 7, 501)
    dx = grid[1] - grid[0]
    est = kde.evaluate(grid)
    ref = stats.norm.pdf(grid, loc=mu, scale=sigma)
    # Integrated squared error against the fitted normal must be small.
    assert_(np.sum((est - ref)**2) * dx < 0.01)
    # Half the probability mass lies on each side of the sample mean.
    upper = kde.integrate_box_1d(mu, np.inf)
    lower = kde.integrate_box_1d(-np.inf, mu)
    assert_almost_equal(upper, 0.5, decimal=1)
    assert_almost_equal(lower, 0.5, decimal=1)
    # The n-d integrator must agree with the 1-d one.
    assert_almost_equal(kde.integrate_box(mu, np.inf), upper, decimal=13)
    assert_almost_equal(kde.integrate_box(-np.inf, mu), lower, decimal=13)
    # integrate_kde / integrate_gaussian vs. explicit Riemann sums.
    assert_almost_equal(kde.integrate_kde(kde),
                        (est**2).sum() * dx, decimal=2)
    assert_almost_equal(kde.integrate_gaussian(mu, sigma**2),
                        (est * ref).sum() * dx, decimal=2)
def test_kde_bandwidth_method():
    """The default, callable and scalar bandwidth specifications must agree,
    and an unknown string must be rejected."""
    def my_scotts(kde_obj):
        # Re-implementation of the default Scott factor.
        return np.power(kde_obj.n, -1./(kde_obj.d + 4))

    np.random.seed(8765678)
    data = np.random.randn(50)
    kde_default = stats.gaussian_kde(data)
    kde_callable = stats.gaussian_kde(data, bw_method=my_scotts)
    kde_scalar = stats.gaussian_kde(data, bw_method=kde_default.factor)
    pts = np.linspace(-7, 7, 51)
    ref = kde_default.evaluate(pts)
    assert_almost_equal(ref, kde_callable.evaluate(pts))
    assert_almost_equal(ref, kde_scalar.evaluate(pts))
    assert_raises(ValueError, stats.gaussian_kde, data, bw_method='wrongstring')
# Subclasses that should stay working (extracted from various sources).
# Unfortunately the earlier design of gaussian_kde made it necessary for users
# to create these kinds of subclasses, or call _compute_covariance() directly.
class _kde_subclass1(stats.gaussian_kde):
    # Historic subclassing pattern: bypass gaussian_kde.__init__ entirely,
    # set the attributes by hand and call the private _compute_covariance()
    # hook directly.
    # NOTE(review): depends on scipy-internal attribute/method names
    # (dataset, d, n, covariance_factor, _compute_covariance) -- verify
    # against the installed scipy version.
    def __init__(self, dataset):
        self.dataset = np.atleast_2d(dataset)
        self.d, self.n = self.dataset.shape
        self.covariance_factor = self.scotts_factor
        self._compute_covariance()
class _kde_subclass2(stats.gaussian_kde):
def __init__(self, dataset):
self.covariance_factor = self.scotts_factor
super(_kde_subclass2, self).__init__(dataset)
class _kde_subclass3(stats.gaussian_kde):
    # Subclass with a fixed, user-supplied covariance matrix: overrides the
    # private _compute_covariance() hook to derive the inverse covariance and
    # normalization factor from it.
    # NOTE(review): writes scipy-internal attributes (inv_cov, _norm_factor);
    # those names are version-dependent -- verify against installed scipy.
    def __init__(self, dataset, covariance):
        self.covariance = covariance
        stats.gaussian_kde.__init__(self, dataset)

    def _compute_covariance(self):
        self.inv_cov = np.linalg.inv(self.covariance)
        self._norm_factor = np.sqrt(np.linalg.det(2*np.pi * self.covariance)) \
                                   * self.n
class _kde_subclass4(stats.gaussian_kde):
def covariance_factor(self):
return 0.5 * self.silverman_factor()
def test_gaussian_kde_subclassing():
    # Verify that the historic subclassing patterns above still produce the
    # same density estimate as a plain gaussian_kde.
    x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
    xs = np.linspace(-10, 10, num=50)

    # gaussian_kde itself: the reference curve.
    kde = stats.gaussian_kde(x1)
    ys = kde(xs)

    # subclass 1: manual __init__ + direct _compute_covariance() call.
    kde1 = _kde_subclass1(x1)
    y1 = kde1(xs)
    assert_array_almost_equal_nulp(ys, y1, nulp=10)

    # subclass 2: covariance_factor set before delegating to the base class.
    kde2 = _kde_subclass2(x1)
    y2 = kde2(xs)
    assert_array_almost_equal_nulp(ys, y2, nulp=10)

    # subclass 3: fixed covariance (taken from the reference kde).
    kde3 = _kde_subclass3(x1, kde.covariance)
    y3 = kde3(xs)
    assert_array_almost_equal_nulp(ys, y3, nulp=10)

    # subclass 4: halved bandwidth -- compared against frozen reference
    # values (not externally validated).
    kde4 = _kde_subclass4(x1)
    y4 = kde4(x1)
    y_expected = [0.06292987, 0.06346938, 0.05860291, 0.08657652, 0.07904017]
    assert_array_almost_equal(y_expected, y4, decimal=6)

    # Not a subclass, but check for use of _compute_covariance() after
    # monkeypatching covariance_factor.
    kde5 = kde
    kde5.covariance_factor = lambda: kde.factor
    kde5._compute_covariance()
    y5 = kde5(xs)
    assert_array_almost_equal_nulp(ys, y5, nulp=10)
def test_gaussian_kde_covariance_caching():
    """Changing the bandwidth and then restoring the default must reproduce
    the default result exactly (no stale cached state)."""
    data = np.array([-7, -5, 1, 4, 5], dtype=float)
    pts = np.linspace(-10, 10, num=5)
    # Reference values from scipy 0.10, before some changes to gaussian_kde.
    # They were not compared with any external reference.
    expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754, 0.01664475]

    kde = stats.gaussian_kde(data)
    kde.set_bandwidth(bw_method=0.5)
    kde.set_bandwidth(bw_method='scott')
    assert_array_almost_equal(expected, kde(pts), decimal=7)
def test_gaussian_kde_monkeypatch():
    """Ugly, but people may rely on this. See scipy pull request 123,
    specifically the linked ML thread "Width of the Gaussian in stats.kde".
    If it is necessary to break this later on, that is to be discussed on ML.
    """
    data = np.array([-7, -5, 1, 4, 5], dtype=float)
    pts = np.linspace(-10, 10, num=50)
    # Historic idiom: monkeypatch the factor, then poke the private hook.
    patched = stats.gaussian_kde(data)
    patched.covariance_factor = patched.silverman_factor
    patched._compute_covariance()
    # Supported idiom: ask for Silverman's rule up front.
    direct = stats.gaussian_kde(data, bw_method='silverman')
    # Both routes must yield the same density.
    assert_array_almost_equal_nulp(patched(pts), direct(pts), nulp=10)
def test_kde_integer_input():
    """Regression test for #1181: integer datasets must not break the KDE."""
    data = np.arange(5)
    kde = stats.gaussian_kde(data)
    expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869, 0.13480721]
    assert_array_almost_equal(kde(data), expected, decimal=6)
def test_pdf_logpdf():
    """pdf() must match evaluate(), and logpdf() must match its logarithm."""
    np.random.seed(1)
    sample = np.random.randn(50)
    kde = stats.gaussian_kde(sample)
    pts = np.linspace(-15, 12, 25)
    direct = kde.evaluate(pts)
    assert_almost_equal(direct, kde.pdf(pts), decimal=12)
    assert_almost_equal(np.log(direct), kde.logpdf(pts), decimal=12)
if __name__ == "__main__":
    # Allow running this test module directly via numpy's legacy test runner.
    run_module_suite()
|
ales-erjavec/scipy
|
scipy/stats/tests/test_kdeoth.py
|
Python
|
bsd-3-clause
| 6,412
|
[
"Gaussian"
] |
a11d9c10dfa4f42b80546a1a7d9493f69492d1184dc8ea72340c7b84cd2e2c2f
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
from .mpl import plot_coord
from .regex import yes, no, parse_dertype
|
ashutoshvt/psi4
|
psi4/driver/qcdb/util/__init__.py
|
Python
|
lgpl-3.0
| 984
|
[
"Psi4"
] |
716c287c4009a3820c0f569d6db4c494879dddfc4854e0980debeba81c740017
|
#!/usr/bin/env python
import pybedtools
import pysam
import vcf
import re
import vcf
import numpy as np
from scipy.stats import mannwhitneyu
import filt_cnvs
def create_bedTools(cnv_file):
    """Read a CNV interval file and return it as a pybedtools.BedTool.

    Every input line gets a "chr" prefix so the coordinates match the
    UCSC-style annotation tracks used elsewhere in this module.
    """
    prefixed = []
    with open(cnv_file, 'r') as handle:
        for raw in handle:
            clean = raw.replace("\n", "").replace("\r", "")
            prefixed.append("chr" + clean + "\n")
    return pybedtools.BedTool("".join(prefixed), from_string=True)
def gencode_annotate(a_cnv, b_gencode, cnv_anno):
    """Annotate CNV intervals with overlapping GENCODE features.

    Information is accumulated in ``cnv_anno``, keyed by "chrom:start:end"
    (without the "chr" prefix):

    * gene_name/gene_type/gene_id -- per gene; coverage flag "F" (fully
      inside the CNV) or "P" (partial)
    * UTR        -- UTR coordinates, kept only for partially covered genes
    * exon       -- per-transcript count of overlapped exons
    * transcript -- transcript ids, kept only for partially covered genes
    * exon_count -- exon counts for the transcripts of partial genes

    NOTE: Python 2 code -- ``filter(...)[0]`` relies on filter returning a
    list.
    """
    utr_dict = {}
    transcript_dict = {}
    # NOTE(review): cnv_anno_list is populated but never used afterwards;
    # kept for backward compatibility.
    cnv_anno_list = []
    for line, feature in enumerate(a_cnv):
        f_id = ":".join(feature[0:3])
        cnv_anno_list.append(f_id)
    a_and_b = a_cnv.intersect(b_gencode, wa=True, wb=True)
    for line, feature in enumerate(a_and_b):
        feature_id = ":".join(feature[0:3])[3:]
        if not cnv_anno.get(feature_id):
            cnv_anno[feature_id] = {}
        feature[-7] = feature[-7].replace('"', '')
        if feature[-7] == 'gene':
            # Record the gene plus whether the CNV covers it fully/partially.
            g_name = filter(lambda x: 'gene_name' in x, feature[-1].split(";"))[0].split(" ")[2].replace("\"", "")
            g_type = filter(lambda x: 'gene_type' in x, feature[-1].split(";"))[0].split(" ")[2].replace("\"", "")
            g_id = filter(lambda x: 'gene_id' in x, feature[-1].split(";"))[0].split(" ")[1].replace("\"", "")
            # NOTE(review): coordinates compared as strings, as in the
            # original -- presumably positions come back as text; confirm.
            if feature[-6] >= feature[1] and feature[-5] <= feature[2]:
                g_cov = "F"
            else:
                g_cov = "P"
            if not cnv_anno[feature_id].get('gene_name'):
                cnv_anno[feature_id]['gene_name'] = {}
                cnv_anno[feature_id]['gene_type'] = {}
                cnv_anno[feature_id]['gene_id'] = {}
            cnv_anno[feature_id]['gene_name'][g_name] = g_cov
            cnv_anno[feature_id]['gene_type'][g_type] = 1
            cnv_anno[feature_id]['gene_id'][g_id] = 1
        elif feature[-7] == 'UTR':
            # Collect UTR coordinates per gene; merged into cnv_anno below.
            utr_gene = filter(lambda x: 'gene_name' in x, feature[-1].split(";"))[0].split(" ")[2].replace("\"", "")
            if not utr_dict.get(feature_id):
                utr_dict[feature_id] = {}
            if utr_dict[feature_id].get(utr_gene):
                utr_dict[feature_id][utr_gene] = ";".join([utr_dict[feature_id][utr_gene], "-".join([feature[0], feature[-6], feature[-5]])])
            else:
                utr_dict[feature_id][utr_gene] = "-".join([feature[0], feature[-6], feature[-5]])
        elif feature[-7] == 'exon':
            # Count the overlapped exons per transcript id.
            feature[-1] = feature[-1].replace('"', '')
            if not cnv_anno[feature_id].get('exon'):
                cnv_anno[feature_id]['exon'] = {}
            trans_key = filter(lambda x: 'transcript_id' in x, feature[-1].split(";"))[0].split(" ")[2]
            if cnv_anno[feature_id]['exon'].get(trans_key):
                cnv_anno[feature_id]['exon'][trans_key] = cnv_anno[feature_id]['exon'][trans_key] + 1
            else:
                cnv_anno[feature_id]['exon'][trans_key] = 1
        elif feature[-7] == 'transcript':
            # Remember transcript ids per gene; merged into cnv_anno below.
            feature[-1] = feature[-1].replace('"', '')
            transcript_gene = filter(lambda x: 'gene_name' in x, feature[-1].split(";"))[0].split(" ")[2].replace("\"", "")
            if not transcript_dict.get(feature_id):
                transcript_dict[feature_id] = {}
            if transcript_dict[feature_id].get(transcript_gene):
                transcript_dict[feature_id][transcript_gene] = ";".join([transcript_dict[feature_id][transcript_gene], filter(lambda x: 'transcript_id' in x, feature[-1].split(";"))[0].split(" ")[2]])
            else:
                transcript_dict[feature_id][transcript_gene] = filter(lambda x: 'transcript_id' in x, feature[-1].split(";"))[0].split(" ")[2]
    # Keep UTR details only for genes that are partially covered by the CNV.
    for k in cnv_anno.keys():
        if utr_dict.get(k):
            for g in utr_dict[k].keys():
                if cnv_anno[k]['gene_name'].get(g):
                    if cnv_anno[k]['gene_name'][g] == "P":
                        if cnv_anno[k].get('UTR'):
                            cnv_anno[k]['UTR'] = "|".join([cnv_anno[k]['UTR'], utr_dict[k][g]])
                        else:
                            cnv_anno[k]['UTR'] = utr_dict[k][g]
    # Same for transcripts; additionally copy the per-transcript exon counts.
    for k in cnv_anno.keys():
        if transcript_dict.get(k):
            for g in transcript_dict[k].keys():
                if cnv_anno[k]['gene_name'].get(g):
                    if cnv_anno[k]['gene_name'][g] == "P":
                        if cnv_anno[k].get('transcript'):
                            cnv_anno[k]['transcript'] = "|".join([cnv_anno[k]['transcript'], transcript_dict[k][g]])
                        else:
                            cnv_anno[k]['transcript'] = transcript_dict[k][g]
                        if cnv_anno[k].get('exon'):
                            # BUGFIX: the original referenced trans_id before
                            # assignment when 'exon_count' already existed
                            # (NameError / stale id from a previous gene);
                            # always iterate this gene's transcript ids.
                            if not cnv_anno[k].get('exon_count'):
                                cnv_anno[k]['exon_count'] = {}
                            for trans_id in transcript_dict[k][g].split(";"):
                                if cnv_anno[k]['exon'].get(trans_id):
                                    cnv_anno[k]['exon_count'][trans_id] = cnv_anno[k]['exon'][trans_id]
    return cnv_anno
def sanger_annotate(a_cnv, c_conradCNV, cnv_anno):
    """Count, per CNV interval, the overlapping Sanger (Conrad) high-resolution
    CNVs; the count is stored under 'Sanger_HiRes_CNV'."""
    hits = a_cnv.intersect(c_conradCNV, wa=True, wb=True)
    for feature in hits:
        # Key without the "chr" prefix, matching the rest of the module.
        key = ":".join(feature[0:3])[3:]
        entry = cnv_anno.setdefault(key, {})
        entry['Sanger_HiRes_CNV'] = entry.get('Sanger_HiRes_CNV', 0) + 1
    return cnv_anno
def dgv_annotate(a_cnv, d_dgvCNV, cnv_anno):
    """Annotate CNV intervals with Database of Genomic Variants (DGV) overlaps.

    Adds, per interval: the number of overlapping DGV records ('DGV_CNV') and
    the de-duplicated variant types, sub-types and PubMed ids (";"-joined).

    NOTE(review): Python 2 only -- the ``if filter(...)`` truthiness tests
    below require filter() to return a list; under Python 3 a filter object
    is always truthy, which would break the de-duplication.
    """
    a_and_e = a_cnv.intersect(d_dgvCNV, wa=True, wb=True)
    for line, feature in enumerate(a_and_e):
        # Interval key without the "chr" prefix.
        feature_id = ":".join(feature[0:3])[3:]
        if cnv_anno.get(feature_id):
            pass
        else:
            cnv_anno[feature_id] = {}
        # Running count of overlapping DGV records.
        if cnv_anno[feature_id].get('DGV_CNV'):
            cnv_anno[feature_id]['DGV_CNV'] = cnv_anno[feature_id]['DGV_CNV'] + 1
        else:
            cnv_anno[feature_id]['DGV_CNV'] = 1
        # Variant type (3rd-from-last column), appended only if not already a
        # substring of a recorded type.
        if cnv_anno[feature_id].get('DGV_VarType'):
            if filter(lambda x: feature[-3] in x, cnv_anno[feature_id]['DGV_VarType'].split(";")):
                pass
            else:
                cnv_anno[feature_id]['DGV_VarType'] = ";".join([ cnv_anno[feature_id]['DGV_VarType'], feature[-3] ])
        else:
            cnv_anno[feature_id]['DGV_VarType'] = feature[-3]
        # Variant sub-type (2nd-from-last column), same de-duplication rule.
        if cnv_anno[feature_id].get('DGV_VarSubType'):
            if filter(lambda x: feature[-2] in x, cnv_anno[feature_id]['DGV_VarSubType'].split(";")):
                pass
            else:
                cnv_anno[feature_id]['DGV_VarSubType'] = ";".join([ cnv_anno[feature_id]['DGV_VarSubType'], feature[-2] ])
        else:
            cnv_anno[feature_id]['DGV_VarSubType'] = feature[-2]
        # PubMed id (last column), same de-duplication rule.
        if cnv_anno[feature_id].get('DGV_PUBMEDID'):
            if filter(lambda x: feature[-1] in x, cnv_anno[feature_id]['DGV_PUBMEDID'].split(";")):
                pass
            else:
                cnv_anno[feature_id]['DGV_PUBMEDID'] = ";".join([ cnv_anno[feature_id]['DGV_PUBMEDID'], feature[-1] ])
        else:
            cnv_anno[feature_id]['DGV_PUBMEDID'] = feature[-1]
    return cnv_anno
def dgvFilt_annotate(d_dgvFiltsCNV_l, cnv_anno, level):
    """Annotate each interval with overlap count and population frequencies
    from a tabix-indexed, frequency-filtered DGV track.

    `level` labels the stringency and becomes part of the output keys:
    "<level>_count" and "<level>_popFreq".
    """
    count_key = level + "_count"
    freq_key = level + "_popFreq"
    for key in cnv_anno.keys():
        parts = key.split(":")
        hits = 0
        freqs = []
        try:
            for row in d_dgvFiltsCNV_l.fetch(parts[0], int(parts[1]), int(parts[2])):
                hits += 1
                # 5th column carries the population frequency.
                freqs.append(row.split("\t")[4])
            if freqs:
                cnv_anno[key][count_key] = hits
                cnv_anno[key][freq_key] = "|".join(freqs)
            else:
                cnv_anno[key][count_key] = "NA"
                cnv_anno[key][freq_key] = "NA"
        except ValueError:
            # Unparseable coordinates / contig unknown to the index: leave
            # the interval unannotated (best-effort, as before).
            pass
    return cnv_anno
def phastCon_annotate(e_phastCon, cnv_anno):
    """Annotate intervals with conserved-element (phastCons) overlaps: the
    element count and the "min:max" LOD score of the overlapping elements."""
    for key in cnv_anno.keys():
        parts = key.split(":")
        # The phastCons track uses UCSC-style ("chr"-prefixed) contig names.
        chrom = 'chr' + parts[0]
        hits = 0
        lod_scores = []
        for row in e_phastCon.fetch(chrom, int(parts[1]), int(parts[2])):
            hits += 1
            # 4th column looks like "lod=123" -> keep the numeric part.
            lod_scores.append(int(row.split("\t")[3][4:]))
        if lod_scores:
            cnv_anno[key]['phastCon_count'] = hits
            cnv_anno[key]['phastCon_min_max'] = ":".join([str(min(lod_scores)), str(max(lod_scores))])
        else:
            cnv_anno[key]['phastCon_count'] = "NA"
            cnv_anno[key]['phastCon_min_max'] = "NA"
    return cnv_anno
def geneticIntolarance_annotate(i_genIntol_file, cnv_anno):
    """Attach genic-intolerance scores to every interval that has gene names.

    The score file has two tab-separated columns: gene symbol, score.
    Scored genes are "|"-joined; a gene without a score only contributes
    "NA" when no score was recorded yet (mirrors the original rule).
    """
    scores = {}
    with open(i_genIntol_file, 'r') as handle:
        for raw in handle:
            cols = raw.replace('\n', '').replace('\r', '').split("\t")
            scores[cols[0]] = cols[1]
    for key in cnv_anno.keys():
        if cnv_anno[key].get('gene_name'):
            for gene in cnv_anno[key]['gene_name'].keys():
                if scores.get(gene):
                    if cnv_anno[key].get('GenInTolScore'):
                        cnv_anno[key]['GenInTolScore'] = "|".join([cnv_anno[key]['GenInTolScore'], scores[gene]])
                    else:
                        cnv_anno[key]['GenInTolScore'] = scores[gene]
                elif not cnv_anno[key].get('GenInTolScore'):
                    cnv_anno[key]['GenInTolScore'] = "NA"
        else:
            cnv_anno[key]['GenInTolScore'] = "NA"
    return cnv_anno
def haploIdx_annotate(f_haploIdx, cnv_anno):
    """Annotate intervals with haploinsufficiency-index track overlaps.

    Stores the overlap count and a combined score string of the form
    "pct1:pct2|score1:score2" (last character of the percentage column is
    dropped -- presumably a trailing '%').
    """
    for key in cnv_anno.keys():
        parts = key.split(":")
        chrom = 'chr' + parts[0]
        hits = 0
        percentages = []
        scores = []
        for row in f_haploIdx.fetch(chrom, int(parts[1]), int(parts[2])):
            cols = row.split("\t")
            hits += 1
            percentages.append(cols[4][:-1])
            scores.append(cols[5])
        if percentages:
            cnv_anno[key]['haploIdx_count'] = hits
            cnv_anno[key]['haploIdx_score'] = "|".join([":".join(percentages), ":".join(scores)])
        else:
            cnv_anno[key]['haploIdx_count'] = "NA"
            cnv_anno[key]['haploIdx_score'] = "NA"
    return cnv_anno
def del1000g_annotate(g_del1000g_delFile, cnv_anno):
    """Count 1000 Genomes deletions overlapping each interval.

    The deletion track uses contig names without the "chr" prefix, so the
    prefix is stripped from the interval key before fetching.
    """
    for key in cnv_anno.keys():
        parts = key.split(":")
        overlaps = sum(1 for _ in g_del1000g_delFile.fetch(parts[0][3:], int(parts[1]), int(parts[2])))
        cnv_anno[key]['1000G_Del_count'] = overlaps if overlaps > 0 else "NA"
    return cnv_anno
def dup1000g_annotate(h_dup1000g_delFile, cnv_anno):
    """Count 1000 Genomes duplications overlapping each interval.

    Y-chromosome intervals are never queried (presumably the duplication
    track lacks Y entries -- TODO confirm) and therefore come out as "NA".
    """
    for key in cnv_anno.keys():
        parts = key.split(":")
        overlaps = 0
        if not re.search(r'Y', parts[0]):
            for _ in h_dup1000g_delFile.fetch(parts[0][3:], int(parts[1]), int(parts[2])):
                overlaps += 1
        cnv_anno[key]['1000G_Dup_count'] = overlaps if overlaps > 0 else "NA"
    return cnv_anno
def clinVar_annotate(i_clinVar_reader, cnv_anno):
    """Annotate intervals with ClinVar disease names (CLNDBN) and HGVS names
    (CLNHGVS) of overlapping, clinically significant structural records.

    i_clinVar_reader -- a tabix-backed VCF reader; interval keys look like
    "chrN:start:end", so the "chr" prefix is stripped before fetch().
    """
    for k in cnv_anno.keys():
        line = k.split(":")
        clindbn = {}
        clinhgvs = {}
        for record in i_clinVar_reader.fetch(line[0][3:], int(line[1]), int(line[2])):
            # Keep only non-SNV records (REF or ALT longer than one base) ...
            if len(record.REF) > 1 or len(record.ALT[0]) > 1:
                # ... whose clinical-significance code contains 4, 5, 6 or 7.
                # NOTE(review): the regex matches those digits anywhere in the
                # CLNSIG string, also inside multi-digit codes -- confirm
                # this is intended.
                if re.search(r'4|5|6|7', record.INFO['CLNSIG'][0]):
                    # De-duplicate names via dict keys.
                    for clin_disease in record.INFO['CLNDBN']:
                        for clin_disease_split in clin_disease.split("|"):
                            if clin_disease_split != 'not_provided':
                                clindbn[clin_disease_split] = 1
                    for clinhgvs_name in record.INFO['CLNHGVS']:
                        clinhgvs[clinhgvs_name] = 1
        if len(clindbn.keys()) > 0:
            cnv_anno[k]['clindbn'] = "|".join(clindbn.keys())
        else:
            cnv_anno[k]['clindbn'] = "NA"
        if len(clinhgvs.keys()) > 0:
            cnv_anno[k]['clinhgvs'] = "|".join(clinhgvs.keys())
        else:
            cnv_anno[k]['clinhgvs'] = "NA"
    return cnv_anno
def omim_annotate(j_omim_file, cnv_anno):
    """Attach OMIM morbid-map phenotype entries to intervals with gene names.

    Input file: two tab-separated columns (gene symbol, phenotype string).
    Matching phenotypes are "|"-joined; intervals without annotated genes,
    or whose genes all lack entries, get "NA".
    """
    morbid = {}
    with open(j_omim_file, 'r') as handle:
        for raw in handle:
            cols = raw.replace('\n', '').split("\t")
            morbid[cols[0]] = cols[1]
    for key in cnv_anno.keys():
        if cnv_anno[key].get('gene_name'):
            for gene in cnv_anno[key]['gene_name'].keys():
                if morbid.get(gene):
                    if cnv_anno[key].get('OMIM'):
                        cnv_anno[key]['OMIM'] = "|".join([cnv_anno[key]['OMIM'], morbid[gene]])
                    else:
                        cnv_anno[key]['OMIM'] = morbid[gene]
                elif not cnv_anno[key].get('OMIM'):
                    cnv_anno[key]['OMIM'] = "NA"
        else:
            cnv_anno[key]['OMIM'] = "NA"
    return cnv_anno
def devDisorder_annotate(h_devDis_file, cnv_anno):
    """Annotate intervals with developmental-disorder gene information.

    Input file (presumably DDG2P-style -- confirm): per line,
    gene<TAB>consequence|disease_name|pubmed_id. For each interval with
    annotated genes the distinct consequences, disease names and PubMed ids
    are ";"-joined; everything else gets "NA".
    """
    dd_table = {}
    with open(h_devDis_file, 'r') as handle:
        for raw in handle:
            cols = raw.replace('\n', '').split("\t")
            dd_table[cols[0]] = cols[1].split("|")
    for key in cnv_anno.keys():
        if not cnv_anno[key].get('gene_name'):
            cnv_anno[key]['devDis_mutConseq'] = "NA"
            cnv_anno[key]['devDis_disName'] = "NA"
            cnv_anno[key]['devDis_pubmedID'] = "NA"
            continue
        # Dicts used as ordered de-duplicating sets.
        conseqs = {}
        diseases = {}
        pubmed = {}
        for gene in cnv_anno[key]['gene_name'].keys():
            if dd_table.get(gene):
                conseqs[dd_table[gene][0]] = 1
                diseases[dd_table[gene][1]] = 1
                pubmed[dd_table[gene][2]] = 1
        cnv_anno[key]['devDis_mutConseq'] = ";".join(conseqs.keys()) if conseqs else "NA"
        cnv_anno[key]['devDis_disName'] = ";".join(diseases.keys()) if diseases else "NA"
        cnv_anno[key]['devDis_pubmedID'] = ";".join(pubmed.keys()) if pubmed else "NA"
    return cnv_anno
|
huguesfontenelle/cnvScan
|
src/annotate.py
|
Python
|
gpl-3.0
| 16,482
|
[
"pysam"
] |
80620fdc11f4d1618418e72a80dcb9cf0668c0b4c0ddc67c7c813559d42d9374
|
#!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
import sys, os, string, urllib2, getopt
import pprint
false = 0
true = 1
usage=\
"""
Startup:
-V, --version display the version of Wget and exit.
-h, --help print this help.
Logging and input file:
-o, --output-file=FILE log messages to FILE.
-d, --debug print lots of debugging information.
-v, --verbose be verbose (this is the default).
-x, --force-directories force creation of directories.
"""
def wget( opts ):
    """Minimal wget clone: fetch one URL and write it to stdout, to a named
    file (-o), or into a recreated directory hierarchy (-x).

    opts -- the raw argv tail; see the module-level `usage` string.
    Python 2 only (urllib2, print statements, old except syntax).
    """
    version = 0.1
    output_file = sys.stdout
    debug = false
    verbose = false
    force_directories = false
    has_output_file = false
    output_file_name = "stdout"
    # Parse the command line; bail out with the usage text on bad options.
    try:
        optlist, args = getopt.getopt( opts, 'Vhadqvixo:', ["version", "help", "output-file", "debug", "verbose", "force-directories"] )
    except getopt.GetoptError, e:
        print "Error: " + str(e)
        print usage
        return
    for o,a in optlist:
        if o in ['-V', "--version"]:
            print "wget.py version %s" % version
            return
        elif o in [ '-h', "--help" ]:
            print usage
            return
        elif o in [ '-d', "--debug" ]:
            debug=true
        elif o in [ '-v', "--verbose" ]:
            # NOTE(review): verbose is accepted but never acted upon below.
            verbose=true
        elif o in [ '-x', "--force-directories" ]:
            # -x and -o are mutually exclusive.
            if has_output_file:
                print "Select either -o OR -x"
                print usage
                return
            force_directories=true
        elif o in [ '-o', "--output-file" ]:
            if force_directories:
                print "Select either -o OR -x"
                print usage
                return
            has_output_file = true;
            if a == '':
                print "Please specify an output file\n\n" + usage
                return
            try:
                output_file_name = a
                output_file = open( a, "w" )
            except:
                print "Couldn't open output_file " + a
                return
    # end command line handling
    url = args[0]
    # begin getting the url
    if debug:
        print "Opening URL %s" % url
    try:
        handle = urllib2.urlopen( url )
    except:
        print "Problems opening %s" % url
        return
    if debug:
        print "Retreived %s" % url
    if force_directories:
        # Mirror the URL path: create ./host/dir1/.../ locally and save the
        # document under the URL's basename.
        urlparts = string.split( url, "/" )
        dirparts = urlparts[2:-1]
        fname = urlparts[-1]
        if debug:
            print "Directories: " + pprint.pformat(dirparts)
            print "Filename: " + fname
        curdir='./'
        nextdir=''
        for dirname in dirparts:
            # Extend the relative path one component at a time, creating
            # each missing directory as it is encountered.
            if nextdir == '':
                nextdir = dirname
            else:
                nextdir=nextdir+os.sep+dirname
            if dirname not in os.listdir(curdir):
                if debug:
                    print "trying to make " + nextdir
                try:
                    os.mkdir(nextdir)
                except:
                    print "Couldn't make directory %s" % nextdir
                    return
            curdir = nextdir
        try:
            output_file_name = curdir + os.sep + fname
            output_file = open( output_file_name, "w" )
        except:
            print "Couldn't open file %s" % output_file_name
    if debug:
        print "Writing to file %s" % output_file_name
    # Single read: the whole document is buffered in memory before writing.
    output_file.write( handle.read() )
# end wget function
if __name__=="__main__":
    # Forward all CLI arguments (minus the program name) to wget().
    wget( sys.argv[1:] )
|
flailingsquirrel/asciimapper
|
Wget.py
|
Python
|
bsd-3-clause
| 5,007
|
[
"Brian"
] |
97ca5d2d710183640ef91354c6ec5867acc46678851417a19a5335652c30368d
|
"""The Mayavi Envisage application.
"""
# Author: Prabhu Ramachandran <prabhu@enthought.com>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import sys
import os.path
import logging
# Enthought library imports.
from apptools.logger.api import LogFileHandler, FORMATTER
from traits.etsconfig.api import ETSConfig
from traits.api import (HasTraits, Instance, Int,
on_trait_change, Bool)
# Local imports.
from .mayavi_workbench_application import MayaviWorkbenchApplication
from mayavi.preferences.api import preference_manager
from mayavi.core.customize import get_custom_plugins
# GLOBALS
logger = logging.getLogger()
######################################################################
# Useful functions.
######################################################################
def setup_logger(logger, fname, stream=True, mode=logging.ERROR):
    """Setup a log file and the logger. If the given file name is not
    absolute, put the log file in `ETSConfig.application_home`, if not
    it will create it where desired.

    Parameters:
    -----------

    fname -- file name the logger should use. If this is an absolute
    path it will create the log file as specified, if not it will put it
    in `ETSConfig.application_home`.

    stream -- Add a stream handler.

    mode -- the logging mode of the stream handler.
    """
    if os.path.isabs(fname):
        path = fname
    else:
        path = os.path.join(ETSConfig.application_home, fname)

    # Guard against configuring twice (can happen when the app is started
    # multiple times from ipython): if our file handler already points at
    # this path, do nothing.
    existing = logger.handlers
    if len(existing) > 1:
        first = existing[0]
        if isinstance(first, LogFileHandler) and first.baseFilename == path:
            logger.info('Logging handlers already set! Not duplicating.')
            return

    logger.setLevel(logging.DEBUG)
    file_handler = LogFileHandler(path)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    if stream:
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(FORMATTER)
        stream_handler.setLevel(mode)
        logger.addHandler(stream_handler)
    banner = "*" * 80
    logger.info(banner)
    logger.info("logfile is: '%s'", os.path.abspath(path))
    logger.info(banner)
def get_non_gui_plugin_classes():
    """Get list of basic mayavi plugin classes that do not add any views or
    actions."""
    # Imported lazily so merely loading this module does not pull in the
    # whole Envisage/TVTK stack.
    from envisage.core_plugin import CorePlugin
    from envisage.ui.workbench.workbench_plugin import WorkbenchPlugin
    from tvtk.plugins.scene.scene_plugin import ScenePlugin
    from mayavi.plugins.mayavi_plugin import MayaviPlugin
    return [CorePlugin, WorkbenchPlugin, MayaviPlugin, ScenePlugin]
def get_non_gui_plugins():
    """Get list of basic mayavi plugins that do not add any views or
    actions."""
    plugins = []
    for plugin_class in get_non_gui_plugin_classes():
        plugins.append(plugin_class())
    return plugins
def get_plugin_classes():
    """Get list of default plugin classes to use for Mayavi.

    Returns the non-GUI plugin classes plus the UI plugins (logger,
    Mayavi UI, scene UI, a Python/IPython shell and a text editor).
    The IPython shell is only selected when running on wx, the user
    preference asks for it, and the required packages import cleanly.
    """
    # Force the selection of a toolkit:
    from traitsui.api import toolkit
    toolkit()
    from traits.etsconfig.api import ETSConfig

    try_use_ipython = preference_manager.root.use_ipython
    use_ipython = False
    if ETSConfig.toolkit == 'wx' and try_use_ipython:
        try:
            # If the right versions of IPython, EnvisagePlugins and
            # Pyface are not installed, this import will fail.
            from envisage.plugins.ipython_shell.view.ipython_shell_view \
                 import IPythonShellView
            use_ipython = True
        except Exception:
            # BUG FIX: was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.  Any ordinary failure simply
            # means we fall back to the plain Python shell.
            pass

    if use_ipython:
        from envisage.plugins.ipython_shell.ipython_shell_plugin import \
             IPythonShellPlugin
        PythonShellPlugin = IPythonShellPlugin
    else:
        from envisage.plugins.python_shell.python_shell_plugin import PythonShellPlugin

    from envisage.plugins.text_editor.text_editor_plugin import TextEditorPlugin
    from apptools.logger.plugin.logger_plugin import LoggerPlugin
    from tvtk.plugins.scene.ui.scene_ui_plugin import SceneUIPlugin
    from mayavi.plugins.mayavi_ui_plugin import MayaviUIPlugin

    plugins = get_non_gui_plugin_classes()
    plugins.extend([
        LoggerPlugin,
        MayaviUIPlugin,
        SceneUIPlugin,
        PythonShellPlugin,
        TextEditorPlugin,
    ])
    return plugins
def get_plugins():
    """Return instantiated default Mayavi plugins."""
    instances = []
    for plugin_class in get_plugin_classes():
        instances.append(plugin_class())
    return instances
###########################################################################
# `Mayavi` class.
###########################################################################
class Mayavi(HasTraits):
    """The Mayavi application class.

    This class may be easily subclassed to do something different.
    For example, one way to script MayaVi (as a standalone application
    and not interactively) is to subclass this and do the needful.
    """

    # The main envisage application.
    application = Instance('envisage.ui.workbench.api.WorkbenchApplication')

    # Turn this off if you don't want the workbench to start the GUI
    # event loop.
    start_gui_event_loop = Bool(True, desc='start a GUI event loop')

    # The MayaVi Script instance.
    script = Instance('mayavi.plugins.script.Script')

    # The logging mode (a `logging` level constant).
    log_mode = Int(logging.ERROR, desc='the logging mode to use')

    def main(self, argv=None, plugins=None):
        """The main application is created and launched here.

        Parameters
        ----------
        argv : list of strings
            The list of command line arguments.  The default is `None`
            where no command line arguments are parsed.  To support
            command line arguments you can pass `sys.argv[1:]`.
        plugins : list of Plugin objects
            List of plugins to start.  If none is provided it defaults
            to something meaningful.
        """
        # Parse any cmd line args.
        if argv is None:
            argv = []
        self.parse_command_line(argv)

        if plugins is None:
            plugins = get_plugins()
        # Custom plugins are appended after the defaults.
        plugins += get_custom_plugins()

        # Create the application.
        prefs = preference_manager.preferences
        app = MayaviWorkbenchApplication(plugins=plugins,
                                         preferences=prefs,
                                         start_gui_event_loop=self.start_gui_event_loop)
        self.application = app

        # Setup the logger (must happen before the app runs so startup
        # messages are captured).
        self.setup_logger()
        # Start the application.
        app.run()

    def setup_logger(self):
        """Setup logging for the application (log file 'mayavi.log')."""
        setup_logger(logger, 'mayavi.log', mode=self.log_mode)

    def parse_command_line(self, argv):
        """Parse command line options.

        Parameters
        ----------
        argv : `list` of `strings`
            The list of command line arguments.
        """
        from optparse import OptionParser
        usage = "usage: %prog [options]"
        parser = OptionParser(usage)
        # No options are defined yet; this still provides --help and
        # rejects unknown options.
        (options, args) = parser.parse_args(argv)

    def run(self):
        """This function is called after the GUI has started.
        Override this to do whatever you want to do as a MayaVi
        script.  If this is not overridden then an empty MayaVi
        application will be started.

        *Make sure all other MayaVi specific imports are made here!*
        If you import MayaVi related code earlier you will run into
        difficulties.  Use 'self.script' to script the mayavi engine.
        """
        pass

    ######################################################################
    # Non-public interface.
    ######################################################################
    @on_trait_change('application.gui:started')
    def _on_application_gui_started(self, obj, trait_name, old, new):
        """This is called as soon as the Envisage GUI starts up.  The
        method is responsible for setting our script instance.
        """
        # Only react to the 'started' trait becoming true.
        if trait_name != 'started' or not new:
            return
        app = self.application
        from mayavi.plugins.script import Script
        window = app.workbench.active_window
        # Set our script instance.
        self.script = window.get_service(Script)
        # Call self.run from the GUI thread.
        app.gui.invoke_later(self.run)
def main(argv=None):
    """Start up the mayavi application and return the running
    application instance."""
    app = Mayavi()
    app.main(argv)
    return app
if __name__ == '__main__':
    # Run Mayavi with any command line arguments passed through.
    main(sys.argv[1:])
|
dmsurti/mayavi
|
mayavi/plugins/app.py
|
Python
|
bsd-3-clause
| 8,778
|
[
"Mayavi"
] |
98643644e89482f4b8fdc1df35d33f0f50420939904597396e0e696262b8fff4
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import mock
import unittest
import logging
import collections
from MooseDocs.test import MooseDocsTestCase
from MooseDocs.extensions import core, floats, heading, command, katex, include, config
from MooseDocs import base, common
from MooseDocs.tree import pages, tokens
logging.basicConfig()
class TestTokenizeEquation(MooseDocsTestCase):
    """Tokenization tests for the block '!equation' and inline '[!eq]'
    commands of the katex extension."""
    EXTENSIONS = [core, command, floats, heading, include, katex, config]

    def testBlockNoNumber(self):
        # Without an 'id' the block equation has no label and no number.
        ast = self.tokenize('!equation\ny=2x')
        self.assertSize(ast, 1)
        self.assertToken(ast(0), 'Equation', tex='y=2x', inline=False, label=None, number=None)
        self.assertIn('moose-equation-', ast(0)['bookmark'])

    def testBlockWithNumber(self):
        # With an 'id' the equation is labeled and numbered.
        ast = self.tokenize('!equation id=foo\ny=2x')
        self.assertSize(ast, 2) # shortcut still added, this will go away eventually
        self.assertToken(ast(0), 'Equation', tex='y=2x', inline=False, label='foo', number=1)
        self.assertIn('moose-equation-', ast(0)['bookmark'])

    def testInline(self):
        # Inline equations are wrapped in a Paragraph token.
        ast = self.tokenize('[!eq](y=2x)')
        self.assertSize(ast, 1)
        self.assertToken(ast(0), 'Paragraph', size=1)
        self.assertToken(ast(0,0), 'Equation', tex='y=2x', inline=True, label=None, number=None)
        self.assertIn('moose-equation-', ast(0,0)['bookmark'])

    def testExceptions(self):
        # Block command used inline must log an error.
        with self.assertLogs(level=logging.ERROR) as cm:
            ast = self.tokenize('[!equation](y=2x)')
        self.assertEqual(len(cm.output), 1)
        self.assertIn(r"The '!equation' command is a block level command", cm.output[0])

        # Inline command used as a block must log an error.
        with self.assertLogs(level=logging.ERROR) as cm:
            ast = self.tokenize('!eq\ny=2x')
        self.assertEqual(len(cm.output), 1)
        self.assertIn(r"The '!eq' command is an inline level command", cm.output[0])

        # Inline equations may not carry an 'id'.
        with self.assertLogs(level=logging.ERROR) as cm:
            ast = self.tokenize('[!eq id=foo](y=2x)')
        self.assertEqual(len(cm.output), 1)
        self.assertIn(r"The 'id' setting is not allowed", cm.output[0])
class TestTokenizeEquationReference(MooseDocsTestCase):
    """Tokenization tests for the inline '[!eqref]' command."""
    EXTENSIONS = TestTokenizeEquation.EXTENSIONS

    def testLocalEqRef(self):
        # Reference within the same page: no filename component.
        ast = self.tokenize('[!eqref](second_law)')
        self.assertSize(ast, 1)
        self.assertToken(ast(0), 'Paragraph', size=1)
        self.assertToken(ast(0,0), 'EquationReference', size=0, label='second_law', filename=None)

    def testNonLocalEqRef(self):
        # Reference of the form 'page.md#label' carries the filename.
        ast = self.tokenize('[!eqref](katex_include.md#second_law)')
        self.assertSize(ast, 1)
        self.assertToken(ast(0), 'Paragraph', size=1)
        self.assertToken(ast(0,0), 'EquationReference', size=0, label='second_law', filename='katex_include.md')

    def testExceptions(self):
        # '!eqref' is inline-only; block usage must log an error.
        with self.assertLogs(level=logging.ERROR) as cm:
            ast = self.tokenize('!eqref')
        self.assertEqual(len(cm.output), 1)
        self.assertIn(r"The '!eqref' command is an inline level command", cm.output[0])
class TestRenderEquation(MooseDocsTestCase):
    """HTML rendering tests for the Equation token."""
    EXTENSIONS = TestTokenizeEquation.EXTENSIONS

    def testRenderBlockEquation(self):
        # Unlabeled block equation: display span + katex.render script.
        ast = katex.Equation(None, tex=r'y=x')
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'span', size=2, class_='moose-katex-block-equation')
        self.assertHTMLTag(res(0,0), 'span', size=0, class_='moose-katex-equation table-cell')
        self.assertHTMLTag(res(0,1), 'script', size=1, string='var element = document.getElementById("None");katex.render("y=x", element, {displayMode:true,throwOnError:false});')

    def testRenderBlockEquationWithLabel(self):
        # Labeled equation adds an equation-number span.
        ast = katex.Equation(None, tex=r'y=x', label='foo')
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'span', size=3, class_='moose-katex-block-equation')
        self.assertHTMLTag(res(0,0), 'span', size=0, class_='moose-katex-equation table-cell')
        self.assertHTMLTag(res(0,1), 'span', size=1, class_='moose-katex-equation-number', string='(None)')
        self.assertHTMLTag(res(0,2), 'script', size=1, string='var element = document.getElementById("None");katex.render("y=x", element, {displayMode:true,throwOnError:false});')

    def testRenderBlockEquationWithLabelAndNumber(self):
        # An explicit number shows up in the number span.
        ast = katex.Equation(None, tex=r'y=x', label='foo', number=1980)
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'span', size=3, class_='moose-katex-block-equation')
        self.assertHTMLTag(res(0,0), 'span', size=0, class_='moose-katex-equation table-cell')
        self.assertHTMLTag(res(0,1), 'span', size=1, class_='moose-katex-equation-number', string='(1980)')
        self.assertHTMLTag(res(0,2), 'script', size=1, string='var element = document.getElementById("None");katex.render("y=x", element, {displayMode:true,throwOnError:false});')

    def testRenderInlineEquation(self):
        # Inline equations render with displayMode:false.
        ast = katex.Equation(None, tex=r'y=x', inline=True)
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'span', size=1, class_='moose-katex-inline-equation')
        self.assertHTMLTag(res(0,0), 'script', size=1, string='var element = document.getElementById("None");katex.render("y=x", element, {displayMode:false,throwOnError:false});')
class TestRenderEquationReference(MooseDocsTestCase):
    """Rendering tests for the EquationReference token (HTML and LaTeX)."""
    EXTENSIONS = TestTokenizeEquation.EXTENSIONS

    def setupContent(self):
        # NOTE: the local name 'config' shadows the imported 'config'
        # extension module within this method only.
        config = [dict(root_dir='python/MooseDocs/test/content',
                       content=['extensions/katex.md',
                                'extensions/katex_include.md',
                                'extensions/katex_include2.md'])]
        return common.get_content(config, '.md')

    def testLocalEqRef(self):
        # Local reference renders as a simple anchor.
        ast = katex.EquationReference(None, label='second_law')
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'a', size=1, class_='moose-equation-reference', href='#None', string='Eq. (None)')

    def testNonLocalEqRefNoHeading(self):
        # Cross-page reference without a page heading shows the filename.
        ast = katex.EquationReference(None, label='second_law', filename='katex_include.md')
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'a', size=2, class_='moose-equation-reference')
        self.assertIn('extensions/katex_include.html#moose-equation-', res(0)['href'])
        self.assertIn('katex_include.md, ', res(0,0)['content'])
        self.assertIn('Eq. (2)', res(0,1)['content'])

    def testNonLocalEqRefWithHeading(self):
        # Cross-page reference with a heading shows the heading text.
        ast = katex.EquationReference(None, label='second_law', filename='katex_include2.md')
        res = self.render(ast, renderer=base.HTMLRenderer())
        self.assertHTMLTag(res, 'body', size=1)
        self.assertHTMLTag(res(0), 'a', size=9, class_='moose-equation-reference')
        self.assertIn('extensions/katex_include2.html#moose-equation-', res(0)['href'])
        self.assertIn('Equations', res(0,0)['content'])
        self.assertIn('Eq. (2)', res(0,8)['content'])

    def testLocalEqRefLatex(self):
        # BUG FIX: this method was previously also named 'testLocalEqRef',
        # which shadowed the HTML test above so it never ran.
        ast = katex.EquationReference(None, label='second_law')
        res = self.render(ast, renderer=base.LatexRenderer())
        self.assertLatexString(res(0), content='Eq.~')
        self.assertLatexCommand(res(1), 'eqref', string='second_law')
if __name__ == '__main__':
    # buffer=False keeps test stdout/stderr (and logging) visible.
    unittest.main(verbosity=2, buffer=False)
|
harterj/moose
|
python/MooseDocs/test/extensions/test_katex.py
|
Python
|
lgpl-2.1
| 8,041
|
[
"MOOSE"
] |
62a32a125c5b80c8969caa954768ceaad6cb674dc1b0aea4793f8682c58aff68
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simulate the motion of a spherical red blood cell-like particle advected
in a planar Poiseuille flow, with or without volume conservation. For more
details, see :ref:`Immersed Boundary Method for soft elastic objects`.
"""
import os
import argparse
import writeVTK
import espressomd
import espressomd.lb
import espressomd.shapes
import espressomd.lbboundaries
import espressomd.virtual_sites
# Features the espressomd build must provide for this sample to run.
required_features = ["LB_BOUNDARIES", "VIRTUAL_SITES_INERTIALESS_TRACERS",
                     "EXPERIMENTAL_FEATURES"]
espressomd.assert_features(required_features)

# Command-line flags to disable volume conservation and/or bending.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--no-volcons", action="store_const", dest="volcons",
    const=False, help="Disable volume conservation", default=True)
parser.add_argument(
    "--no-bending", action="store_const", dest="bending",
    const=False, help="Disable bending", default=True)
args = parser.parse_args()

# Volume conservation is only applied on top of bending forces.
if args.volcons and not args.bending:
    print('Note: removing bending will also remove volume conservation')
    args.volcons = False

# System setup
boxZ = 20
system = espressomd.System(box_l=(20, 20, boxZ))
system.time_step = 1 / 6.
system.cell_system.skin = 0.1
system.virtual_sites = espressomd.virtual_sites.VirtualSitesInertialessTracers()
print(f"Parallelization: {system.cell_system.node_grid}")

# Constant body-force density driving the flow along x.
force = 0.001
from addSoft import AddSoft
k1 = 0.1
k2 = 1
# Place the soft cell at the center of the box.
AddSoft(system, 10, 10, 10, k1, k2)

# case without bending and volCons
outputDir = "outputPure"

# case with bending
if args.bending:
    from addBending import AddBending
    kb = 1
    AddBending(system, kb)
    outputDir = "outputBendPara"

# case with bending and volCons
if args.volcons:
    from addVolCons import AddVolCons
    kV = 10
    AddVolCons(system, kV)
    outputDir = "outputVolParaCUDA"

# Add LB Fluid
lbf = espressomd.lb.LBFluid(agrid=1, dens=1, visc=1, tau=system.time_step,
                            ext_force_density=[force, 0, 0])
system.actors.add(lbf)
# Tracers are advected, not thermalized (act_on_virtual=False).
system.thermostat.set_lb(LB_fluid=lbf, gamma=1.0, act_on_virtual=False)

# Setup boundaries: two walls closing the box in z (planar channel).
walls = [espressomd.lbboundaries.LBBoundary() for k in range(2)]
walls[0].set_params(shape=espressomd.shapes.Wall(normal=[0, 0, 1], dist=0.5))
walls[1].set_params(shape=espressomd.shapes.Wall(
    normal=[0, 0, -1], dist=-boxZ + 0.5))
for wall in walls:
    system.lbboundaries.add(wall)

# make directory
# NOTE(review): os.makedirs raises FileExistsError if the output
# directory already exists (e.g. on a re-run) -- confirm this is the
# intended guard against overwriting previous results.
os.makedirs(outputDir)
print('Saving data to ' + outputDir)

# Perform integration, writing a VTK snapshot of the cell mesh after
# every stepSize LB steps.
writeVTK.WriteVTK(system, os.path.join(outputDir, f"cell_{0}.vtk"))

stepSize = 1000
numSteps = 20
for i in range(numSteps):
    system.integrator.run(stepSize)
    writeVTK.WriteVTK(system, os.path.join(outputDir, f"cell_{i + 1}.vtk"))
    print(f"Done {i + 1} out of {numSteps} steps.")
|
espressomd/espresso
|
samples/immersed_boundary/sampleImmersedBoundary.py
|
Python
|
gpl-3.0
| 3,438
|
[
"ESPResSo",
"VTK"
] |
31d0222756d85352221a9218d309d5a022bfb93307498f4d6f71b28298400baa
|
#!/usr/bin/env python
import os,sys
from os.path import join,abspath,split
import inspect
from collections import OrderedDict as odict
import numpy as np
from numpy.lib.recfunctions import stack_arrays
import fitsio
import ugali.utils.projector
from ugali.utils.projector import gal2cel, cel2gal
import ugali.utils.idl
from ugali.utils.healpix import ang2pix
from ugali.utils.shell import get_ugali_dir, get_cat_dir
from ugali.utils.logger import logger
#class Catalog(np.recarray):
#
# DATADIR=os.path.join(os.path.split(os.path.abspath(__file__))[0],"../data/catalogs/")
#
# def __new__(cls,filename=None):
# # Need to do it this way so that array can be resized...
# dtype=[('name',object),
# ('ra',float),
# ('dec',float),
# ('glon',float),
# ('glat',float)]
# self = np.recarray(0,dtype=dtype).view(cls)
# self._load(filename)
# return self
#
# def __add__(self, other):
# return np.concatenate([self,other])
#
# def __getitem__(self, key):
# """
# Support indexing, slicing and direct access.
# """
# try:
# return np.recarray.__getitem__(key)
# except ValueError, message:
# if key in self.name:
# idx = (self.name == key)
# return np.recarray.__getitem__(idx)
# else:
# raise ValueError(message)
#
# def _load(self,filename):
# pass
#
# def match(self,lon,lat,tol=0.1,coord='gal'):
# if coord.lower == 'cel':
# glon, glat = ugali.utils.projector.celToGal(lon,lat)
# else:
# glon,glat = lon, lat
# return ugali.utils.projector.match(glon,glat,self.data['glon'],self.data['glat'],tol)
class SourceCatalog(object):
    """Base class for astronomical source catalogs.

    Data is held in a numpy recarray with 'name', 'ra', 'dec', 'glon'
    and 'glat' fields; subclasses populate it by overriding `_load`.
    """
    #join(split(abspath(__file__))[0],"../data/catalogs/")
    DATADIR=get_cat_dir()

    def __init__(self, filename=None):
        """Create the empty recarray and delegate loading to `_load`."""
        dtype = [('name', object),
                 ('ra', float),
                 ('dec', float),
                 ('glon', float),
                 ('glat', float)]
        self.data = np.recarray(0, dtype=dtype)
        self._load(filename)
        # Loaded coordinates must all be finite numbers.
        if np.isnan([self.data['glon'], self.data['glat']]).any():
            raise ValueError("Incompatible values")

    def __getitem__(self, key):
        """
        Support indexing, slicing and direct access by source name.
        """
        try:
            return self.data[key]
        except ValueError as message:
            # Fall back to a lookup by source name.
            names = self.data['name']
            if key in names:
                return self.data[names == key]
            raise ValueError(message)

    def __add__(self, other):
        """Concatenate two catalogs into a new one."""
        combined = SourceCatalog()
        combined.data = np.concatenate([self.data, other.data])
        return combined

    def __len__(self):
        """Return the number of sources in the catalog."""
        return len(self.data)

    def _load(self, filename):
        # Subclasses override this to fill self.data.
        pass

    def match(self, lon, lat, coord='gal', tol=0.1, nnearest=1):
        """Match input coordinates against the catalog (Galactic degrees
        internally; 'cel' inputs are converted first)."""
        if coord.lower() == 'cel':
            glon, glat = cel2gal(lon, lat)
        else:
            glon, glat = lon, lat
        return ugali.utils.projector.match(glon, glat,
                                           self['glon'], self['glat'],
                                           tol, nnearest)
class McConnachie12(SourceCatalog):
    """
    Catalog of nearby dwarf spheroidal galaxies.
    http://arxiv.org/abs/1204.1562
    https://www.astrosci.ca/users/alan/Nearby_Dwarfs_Database_files/NearbyGalaxies.dat
    """
    def _load(self,filename):
        # Default to the packaged 2012 version of the catalog.
        if filename is None:
            filename = os.path.join(self.DATADIR,"J_AJ_144_4/NearbyGalaxies2012.dat")
        self.filename = filename
        # Fixed-width parse: 19-char name + six sexagesimal coordinate fields.
        raw = np.genfromtxt(filename,delimiter=[19,3,3,5,3,3,3],usecols=range(7),dtype=['|S19']+6*[float],skip_header=36)
        self.data.resize(len(raw))
        self.data['name'] = np.char.strip(raw['f0'])
        # View the (h,m,s)/(d,m,s) triplets as (N, 3) float arrays.
        ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
        dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
        self.data['ra'] = ugali.utils.projector.hms2dec(ra)
        self.data['dec'] = ugali.utils.projector.dms2dec(dec)
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class McConnachie15(SourceCatalog):
    """
    Catalog of nearby dwarf spheroidal galaxies. Updated September 2015.
    http://arxiv.org/abs/1204.1562
    http://www.astro.uvic.ca/~alan/Nearby_Dwarf_Database_files/NearbyGalaxies.dat
    """
    def _load(self,filename):
        if filename is None:
            filename = os.path.join(self.DATADIR,"J_AJ_144_4/NearbyGalaxies.dat")
        self.filename = filename
        raw = np.genfromtxt(filename,delimiter=[19,3,3,5,3,3,3],usecols=list(range(7)),dtype=['|S19']+6*[float],skip_header=36)
        self.data.resize(len(raw))
        # Strip whitespace and leading '*' markers from the names.
        self.data['name'] = np.char.lstrip(np.char.strip(raw['f0']),'*')
        # Sexagesimal (h,m,s)/(d,m,s) -> decimal degrees.
        ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
        dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
        self.data['ra'] = ugali.utils.projector.hms2dec(ra)
        self.data['dec'] = ugali.utils.projector.dms2dec(dec)
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class Rykoff14(SourceCatalog):
    """
    Catalog of red-sequence galaxy clusters.
    http://arxiv.org/abs/1303.3562
    """
    def _load(self, filename):
        if filename is None:
            filename = os.path.join(self.DATADIR,"redmapper/dr8_run_redmapper_v5.10_lgt20_catalog.fit")
        self.filename = filename
        # FITS table; lower=True lower-cases the column names.
        raw = fitsio.read(filename,lower=True)
        self.data.resize(len(raw))
        # Synthesize human-readable names from the match identifiers.
        self.data['name'] = np.char.mod("RedMaPPer %d",raw['mem_match_id'])
        self.data['ra'] = raw['ra']
        self.data['dec'] = raw['dec']
        glon,glat = cel2gal(raw['ra'],raw['dec'])
        self.data['glon'],self.data['glat'] = glon, glat
class Harris96(SourceCatalog):
    """
    Catalog of Milky Way globular clusters.
    Harris, W.E. 1996, AJ, 112, 1487
    http://physwww.physics.mcmaster.ca/~harris/mwgc.dat

    NOTE: There is some inconsistency between Equatorial and
    Galactic coordinates in the catalog. Equatorial seems more
    reliable.
    """
    def _load(self,filename):
        if filename is None:
            filename = os.path.join(self.DATADIR,"VII_202/mwgc.dat")
        self.filename = filename
        # Fixed-width parse; header and footer rows of the flat file
        # are skipped.
        kwargs = dict(delimiter=[12,12,3,3,6,5,3,6,8,8,6],dtype=2*['S12']+7*[float],skip_header=72,skip_footer=363)
        raw = np.genfromtxt(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = np.char.strip(raw['f0'])
        # Sexagesimal RA/Dec columns -> decimal degrees.
        ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
        dec = raw[['f5','f6','f7']].view(float).reshape(len(raw),-1)
        self.data['ra'] = ugali.utils.projector.hms2dec(ra)
        self.data['dec'] = ugali.utils.projector.dms2dec(dec)
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class Corwen04(SourceCatalog):
    """
    Modern compilation of the New General Catalogue and IC
    """
    def _load(self,filename):
        kwargs = dict(delimiter=[1,1,4,15,3,3,8,3,3,7],usecols=[1,2]+list(range(4,10)),dtype=['S1']+[int]+6*[float])
        if filename is None:
            # No file given: read and concatenate both NGC and IC tables.
            raw = []
            for basename in ['VII_239A/ngcpos.dat','VII_239A/icpos.dat']:
                filename = os.path.join(self.DATADIR,basename)
                raw.append(np.genfromtxt(filename,**kwargs))
            raw = np.concatenate(raw)
        else:
            raw = np.genfromtxt(filename,**kwargs)
        self.filename = filename
        # Some entries are missing...
        raw['f4'] = np.where(np.isnan(raw['f4']),0,raw['f4'])
        raw['f7'] = np.where(np.isnan(raw['f7']),0,raw['f7'])
        self.data.resize(len(raw))
        # NOTE(review): with dtype 'S1', raw['f0'] holds *bytes*, so the
        # comparison with the str 'N' may never match under Python 3 --
        # verify against the data file.
        names = np.where(raw['f0'] == 'N', 'NGC %04i', 'IC %04i')
        self.data['name'] = np.char.mod(names,raw['f1'])
        # Sexagesimal coordinates -> decimal degrees.
        ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
        dec = raw[['f5','f6','f7']].view(float).reshape(len(raw),-1)
        self.data['ra'] = ugali.utils.projector.hms2dec(ra)
        self.data['dec'] = ugali.utils.projector.dms2dec(dec)
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
#class Steinicke10(SourceCatalog):
# """
# Another modern compilation of the New General Catalogue
# (people still don't agree on the composition of NGC...)
# """
# def _load(self,filename):
# if filename is None:
# filename = os.path.join(self.DATADIR,"NI2013.csv")
#
# raw = np.genfromtxt(filename,delimiter=',',usecols=[5,6]+range(13,20),dtype=['S1',int]+3*[float]+['S1']+3*[float])
#
# self.data.resize(len(raw))
# names = np.where(raw['f0'] == 'N', 'NGC %04i', 'IC %04i')
# self.data['name'] = np.char.mod(names,raw['f1'])
#
# sign = np.where(raw['f5'] == '-',-1,1)
# ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
# dec = raw[['f6','f7','f8']].view(float).reshape(len(raw),-1)
# dec[:,0] = np.copysign(dec[:,0], sign)
#
# self.data['ra'] = ugali.utils.projector.hms2dec(ra)
# self.data['dec'] = ugali.utils.projector.dms2dec(dec)
#
# glon,glat = ugali.utils.projector.celToGal(self.data['ra'],self.data['dec'])
# self.data['glon'],self.data['glat'] = glon,glat
class Nilson73(SourceCatalog):
    """
    Modern compilation of the Uppsala General Catalog
    http://vizier.cfa.harvard.edu/viz-bin/Cat?VII/26D
    """
    def _load(self,filename):
        if filename is None:
            filename = os.path.join(self.DATADIR,"VII_26D/catalog.dat")
        self.filename = filename
        raw = np.genfromtxt(filename,delimiter=[3,7,2,4,3,2],dtype=['S3']+['S7']+4*[float])
        self.data.resize(len(raw))
        self.data['name'] = np.char.mod('UGC %s',np.char.strip(raw['f1']))
        # Catalog gives only hours/minutes (RA) and degrees/arcmin (Dec);
        # pad the missing seconds column with zeros before converting.
        ra = raw[['f2','f3']].view(float).reshape(len(raw),-1)
        ra = np.vstack([ra.T,np.zeros(len(raw))]).T
        dec = raw[['f4','f5']].view(float).reshape(len(raw),-1)
        dec = np.vstack([dec.T,np.zeros(len(raw))]).T
        # Coordinates are B1950 (see jprecess below); precess to J2000.
        ra1950 = ugali.utils.projector.hms2dec(ra)
        dec1950 = ugali.utils.projector.dms2dec(dec)
        ra2000,dec2000 = ugali.utils.idl.jprecess(ra1950,dec1950)
        self.data['ra'] = ra2000
        self.data['dec'] = dec2000
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class Webbink85(SourceCatalog):
    """
    Structure parameters of Galactic globular clusters
    http://vizier.cfa.harvard.edu/viz-bin/Cat?VII/151

    NOTE: Includes Reticulum and some open clusters
    http://spider.seds.org/spider/MWGC/mwgc.html
    """
    def _load(self,filename):
        kwargs = dict(delimiter=[8,15,9,4,3,3,5,5],usecols=[1]+list(range(3,8)),dtype=['S13']+5*[float])
        if filename is None:
            # No file given: read and concatenate both catalog tables.
            raw = []
            for basename in ['VII_151/table1a.dat','VII_151/table1c.dat']:
                filename = os.path.join(self.DATADIR,basename)
                raw.append(np.genfromtxt(filename,**kwargs))
            raw = np.concatenate(raw)
        else:
            raw = np.genfromtxt(filename,**kwargs)
        self.filename = filename
        self.data.resize(len(raw))
        # Collapse internal whitespace in the names to single spaces.
        #self.data['name'] = np.char.strip(raw['f0'])
        self.data['name'] = np.char.join(' ',np.char.split(raw['f0']))
        ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
        # Dec has no seconds column; pad with zeros.
        dec = raw[['f4','f5']].view(float).reshape(len(raw),-1)
        dec = np.vstack([dec.T,np.zeros(len(raw))]).T
        # B1950 coordinates -> J2000 via jprecess.
        ra1950 = ugali.utils.projector.hms2dec(ra)
        dec1950 = ugali.utils.projector.dms2dec(dec)
        ra2000,dec2000 = ugali.utils.idl.jprecess(ra1950,dec1950)
        self.data['ra'] = ra2000
        self.data['dec'] = dec2000
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class Kharchenko13(SourceCatalog):
    """
    Global survey of star clusters in the Milky Way
    http://vizier.cfa.harvard.edu/viz-bin/Cat?J/A%2bA/558/A53

    NOTE: CEL and GAL coordinates are consistent to < 0.01 deg.
    """
    def _load(self,filename):
        kwargs = dict(delimiter=[4,18,20,8,8],usecols=[1,3,4],dtype=['S18',float,float])
        if filename is None:
            filename = os.path.join(self.DATADIR,"J_AA_558_A53/catalog.dat")
        self.filename = filename
        raw = np.genfromtxt(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = np.char.strip(raw['f0'])
        # This catalog provides Galactic coordinates; derive Equatorial.
        self.data['glon'] = raw['f1']
        self.data['glat'] = raw['f2']
        ra,dec = gal2cel(self.data['glon'],self.data['glat'])
        self.data['ra'],self.data['dec'] = ra,dec
class Bica08(SourceCatalog):
    """
    LMC star clusters
    http://cdsarc.u-strasbg.fr/viz-bin/Cat?J/MNRAS/389/678

    NOTE: CEL and GAL coordinates are consistent to < 0.01 deg.
    """
    def _load(self,filename):
        kwargs = dict(delimiter=[32,2,3,3,5,3,3],dtype=['S32']+6*[float])
        if filename is None:
            filename = os.path.join(self.DATADIR,"J_MNRAS_389_678/table3.dat")
        self.filename = filename
        raw = np.genfromtxt(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = np.char.strip(raw['f0'])
        # Sexagesimal coordinates -> decimal degrees.
        ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
        dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
        self.data['ra'] = ugali.utils.projector.hms2dec(ra)
        self.data['dec'] = ugali.utils.projector.dms2dec(dec)
        glon,glat = cel2gal(self.data['ra'],self.data['dec'])
        self.data['glon'],self.data['glat'] = glon,glat
class WEBDA14(SourceCatalog):
    """
    Open cluster database.
    http://www.univie.ac.at/webda/cgi-bin/selname.cgi?auth=
    """
    def _load(self,filename):
        # Tab-separated export with name, glon, glat columns.
        kwargs = dict(delimiter='\t',usecols=[0,1,2],dtype=['S18',float,float])
        if filename is None:
            filename = os.path.join(self.DATADIR,"WEBDA/webda.tsv")
        self.filename = filename
        raw = np.genfromtxt(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = np.char.strip(raw['f0'])
        # Galactic coordinates from the table; derive Equatorial.
        self.data['glon'] = raw['f1']
        self.data['glat'] = raw['f2']
        ra,dec = gal2cel(self.data['glon'],self.data['glat'])
        self.data['ra'],self.data['dec'] = ra,dec
class ExtraDwarfs(SourceCatalog):
    """
    Collection of dwarf galaxy candidates discovered in 2015
    """
    def _load(self,filename):
        kwargs = dict(delimiter=',')
        if filename is None:
            filename = os.path.join(self.DATADIR,"extras/extra_dwarfs.csv")
        self.filename = filename
        # CSV with 'name', 'ra', 'dec' columns (recfromcsv lower-cases
        # the header names).
        raw = np.recfromcsv(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = raw['name']
        self.data['ra'] = raw['ra']
        self.data['dec'] = raw['dec']
        self.data['glon'],self.data['glat'] = cel2gal(raw['ra'],raw['dec'])
class ExtraClusters(SourceCatalog):
    """
    Collection of recently discovered star clusters
    """
    def _load(self,filename):
        kwargs = dict(delimiter=',')
        if filename is None:
            filename = os.path.join(self.DATADIR,"extras/extra_clusters.csv")
        self.filename = filename
        # CSV with 'name', 'ra', 'dec' columns (recfromcsv lower-cases
        # the header names).
        raw = np.recfromcsv(filename,**kwargs)
        self.data.resize(len(raw))
        self.data['name'] = raw['name']
        self.data['ra'] = raw['ra']
        self.data['dec'] = raw['dec']
        self.data['glon'],self.data['glat'] = cel2gal(raw['ra'],raw['dec'])
def catalogFactory(name, **kwargs):
    """
    Factory for instantiating a catalog class defined in this module.

    Parameters
    ----------
    name : str
        Name of one of the SourceCatalog subclasses in this module.
    **kwargs :
        Passed through to the catalog constructor.

    Returns
    -------
    An instance of the requested catalog class.

    Raises
    ------
    Exception
        If `name` does not match any class in this module.
    """
    fn = lambda member: inspect.isclass(member) and member.__module__==__name__
    catalogs = odict(inspect.getmembers(sys.modules[__name__], fn))

    if name not in catalogs:
        # BUG FIX: the message previously referenced an undefined name
        # `kernels`, so a bad catalog name raised a NameError instead of
        # the intended diagnostic.
        msg = "%s not found in catalogs:\n %s"%(name,list(catalogs.keys()))
        logger.error(msg)
        msg = "Unrecognized catalog: %s"%name
        raise Exception(msg)

    return catalogs[name](**kwargs)
if __name__ == "__main__":
import argparse
description = "python script"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('args',nargs=argparse.REMAINDER)
opts = parser.parse_args(); args = opts.args
|
kadrlica/ugali
|
ugali/candidate/associate.py
|
Python
|
mit
| 16,624
|
[
"Galaxy"
] |
689dd7c854558f8fd0ebe8f6d2b40997424c2e85e5db77cd816172759cbc0794
|
# $HeadURL$
__RCSID__ = "$Id$"
import os
import os.path
try:
import hashlib as md5
except:
import md5
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.ConfigurationSystem.Client.PathFinder import getServiceSection
from DIRAC.FrameworkSystem.private.monitoring.ColorGenerator import ColorGenerator
from DIRAC.Core.Utilities import Subprocess, Time
class RRDManager:
  # Graph sizes (width, height) available for rendering, small to large.
  __sizesList = [ [ 200, 50 ], [ 400, 100 ], [ 600, 150 ], [ 800, 200 ] ]
  # When True, __exec appends every rrdtool command and its outcome to a
  # per-rrd "<file>.log".
  __logRRDCommands = False
  def __init__( self, rrdLocation, graphLocation ):
    """
    Initialize RRDManager

    :param rrdLocation: directory where rrd databases are stored
    :param graphLocation: directory where generated graphs are stored
    """
    self.rrdLocation = rrdLocation
    self.graphLocation = graphLocation
    self.log = gLogger.getSubLogger( "RRDManager" )
    # Path of the rrdtool executable; configurable via the Monitoring
    # service section, defaults to "rrdtool" found on the PATH.
    self.rrdExec = gConfig.getValue( "%s/RRDExec" % getServiceSection( "Framework/Monitoring" ), "rrdtool" )
    for path in ( self.rrdLocation, self.graphLocation ):
      try:
        os.makedirs( path )
      except:
        # Best-effort creation; the directory may already exist.
        pass
def existsRRDFile( self, rrdFile ):
rrdFilePath = "%s/%s" % ( self.rrdLocation, rrdFile )
return os.path.isfile( rrdFilePath )
  def getGraphLocation( self ):
    """
    Return the directory where graph files are stored.
    """
    return self.graphLocation
  def __exec( self, cmd, rrdFile = None ):
    """
    Execute a system command

    :param cmd: full rrdtool command line to run
    :param rrdFile: if given and command logging is enabled, the command
                    and its outcome are appended to "<rrdFile>.log"
    :return: S_OK wrapping the Subprocess result tuple, or S_ERROR when
             rrdtool exits with a non-zero status
    """
    self.log.debug( "RRD command: %s" % cmd )
    retVal = Subprocess.shellCall( 0, cmd )
    if self.__logRRDCommands and rrdFile:
      try:
        logFile = "%s.log" % rrdFile
        # NOTE: Python 2 built-in `file`; this module predates Python 3.
        fd = file( logFile, "a" )
        if not retVal[ 'OK' ] or retVal[ 'Value' ][0]:
          fd.write( "ERROR %s\n" % cmd )
        else:
          fd.write( "OK %s\n" % cmd )
        fd.close()
      except Exception, e:
        # Logging is best-effort; a failure to log must not fail the call.
        self.log.warn( "Cannot write log %s: %s" % ( logFile, str( e ) ) )
    if not retVal[ 'OK' ]:
      return retVal
    retTuple = retVal[ 'Value' ]
    if retTuple[0]:
      # Non-zero exit status; retTuple[2] carries the error output.
      return S_ERROR( "Failed to execute rrdtool: %s" % ( retTuple[2] ) )
    return retVal
  def getCurrentBucketTime( self, bucketLength ):
    """
    Get current time "bucketized"

    :param bucketLength: bucket length in seconds
    :return: current epoch time rounded down to the bucket start
    """
    return self.bucketize( Time.toEpoch(), bucketLength )
def bucketize( self, secs, bucketLength ):
"""
Bucketize a time (in secs)
"""
secs = int( secs )
return secs - secs % bucketLength
def create( self, type, rrdFile, bucketLength ):
"""
Create an rrd file
"""
rrdFilePath = "%s/%s" % ( self.rrdLocation, rrdFile )
if os.path.isfile( rrdFilePath ):
return S_OK()
try:
os.makedirs( os.path.dirname( rrdFilePath ) )
except:
pass
self.log.info( "Creating rrd file %s" % rrdFile )
cmd = "%s create '%s'" % ( self.rrdExec, rrdFilePath )
#Start GMT(now) - 1h
cmd += " --start %s" % ( self.getCurrentBucketTime( bucketLength ) - 86400 )
cmd += " --step %s" % bucketLength
if type in ( 'mean' ):
dst = "GAUGE"
cf = "AVERAGE"
elif type in ( 'sum', 'acum', 'rate' ):
dst = "ABSOLUTE"
cf = "AVERAGE"
cmd += " DS:value:%s:%s:U:U" % ( dst, bucketLength * 10 )
# 1m res for 1 month
#cmd += " RRA:%s:0.9:1:43200" % cf
# 1m red for 1 year
cmd += " RRA:%s:0.999:1:%s" % ( cf, 31536000 / bucketLength )
return self.__exec( cmd, rrdFilePath )
def __getLastUpdateTime( self, rrdFile ):
"""
Get last update time from an rrd
"""
cmd = "%s last %s" % ( self.rrdExec, rrdFile )
retVal = Subprocess.shellCall( 0, cmd )
if not retVal[ 'OK' ]:
return retVal
retTuple = retVal[ 'Value' ]
if retTuple[0]:
return S_ERROR( "Failed to fetch last update %s : %s" % ( rrdFile, retTuple[2] ) )
return S_OK( int( retTuple[1].strip() ) )
def __fillWithZeros( self, lastUpdateTime, bucketLength, valuesList ):
filledList = []
expectedUpdateTime = lastUpdateTime + bucketLength
for valueTuple in valuesList:
while expectedUpdateTime < valueTuple[0]:
filledList.append( ( expectedUpdateTime, 0 ) )
expectedUpdateTime += bucketLength
filledList.append( valueTuple )
expectedUpdateTime = valueTuple[0] + bucketLength
return filledList
def update( self, type, rrdFile, bucketLength, valuesList, lastUpdate = 0 ):
"""
Add marks to an rrd
"""
rrdFilePath = "%s/%s" % ( self.rrdLocation, rrdFile )
self.log.info( "Updating rrd file", rrdFilePath )
if lastUpdate == 0:
retVal = self.__getLastUpdateTime( rrdFilePath )
if retVal[ 'OK' ]:
lastUpdateTime = retVal[ 'Value' ]
self.log.verbose( "Last update time is %s" % lastUpdateTime )
else:
lastUpdateTime = lastUpdate
cmd = "%s update %s" % ( self.rrdExec, rrdFilePath )
#we have to fill with 0 the db to ensure the mean is valid
valuesList = self.__fillWithZeros( lastUpdateTime, bucketLength, valuesList )
rrdUpdates = []
for entry in valuesList:
rrdUpdates.append( "%s:%s" % entry )
maxRRDArgs = 50
for i in range( 0, len( rrdUpdates ), maxRRDArgs ):
finalCmd = "%s %s" % ( cmd, " ".join( rrdUpdates[ i: i + maxRRDArgs ] ) )
retVal = self.__exec( finalCmd, rrdFilePath )
if not retVal[ 'OK' ]:
self.log.warn( "Error updating rrd file", "%s rrd: %s" % ( rrdFile, retVal[ 'Message' ] ) )
return S_OK( valuesList[-1][0] )
def __generateName( self, *args, **kwargs ):
"""
Generate a random name
"""
m = md5.md5()
m.update( str( args ) )
m.update( str( kwargs ) )
return m.hexdigest()
def __generateRRDGraphVar( self, entryName, activity, timeSpan, plotWidth ):
"""
Calculate the graph query in rrd lingo for an activity
"""
rrdFile = activity.getFile()
rrdType = activity.getType()
bucketLength = activity.getBucketLength()
yScaleFactor = self.__getYScalingFactor( timeSpan, bucketLength, plotWidth )
activity.setBucketScaleFactor( yScaleFactor )
varStr = "'DEF:ac%sRAW=%s/%s:value:AVERAGE'" % ( entryName, self.rrdLocation, rrdFile )
if rrdType in ( "mean", "rate" ):
varStr += " 'CDEF:%s=ac%sRAW,UN,0,ac%sRAW,IF'" % ( entryName, entryName, entryName )
elif rrdType == "sum":
scale = yScaleFactor * bucketLength
varStr += " 'CDEF:%s=ac%sRAW,UN,0,ac%sRAW,%s,*,IF'" % ( entryName, entryName, entryName, scale )
elif rrdType == "acum":
scale = yScaleFactor * bucketLength
varStr += " 'CDEF:ac%sNOTUN=ac%sRAW,UN,0,ac%sRAW,%s,*,IF'" % ( entryName, entryName, entryName, scale )
varStr += " 'CDEF:%s=PREV,UN,ac%sNOTUN,PREV,ac%sNOTUN,+,IF'" % ( entryName, entryName, entryName )
return varStr
def __graphTimeComment( self, fromEpoch, toEpoch ):
comStr = " 'COMMENT:Generated on %s UTC'" % Time.toString().replace( ":", "\:" ).split( "." )[0]
comStr += " 'COMMENT:%s'" % ( "From %s to %s" % ( Time.fromEpoch( fromEpoch ), Time.fromEpoch( toEpoch ) ) ).replace( ":", "\:" )
return comStr
def __getYScalingFactor( self, timeSpan, bucketLength, plotWidth ):
expectedTimeSpan = plotWidth * bucketLength
if timeSpan < expectedTimeSpan:
return 1
else:
return float( timeSpan ) / expectedTimeSpan
def groupPlot( self, fromSecs, toSecs, activitiesList, stackActivities, size, graphFilename = "" ):
"""
Generate a group plot
"""
plotTimeSpan = toSecs - fromSecs
if not graphFilename:
graphFilename = "%s.png" % self.__generateName( fromSecs,
toSecs,
activitiesList,
stackActivities
)
rrdCmd = "%s graph %s/%s" % ( self.rrdExec, self.graphLocation, graphFilename )
rrdCmd += " -s %s" % fromSecs
rrdCmd += " -e %s" % toSecs
rrdCmd += " -w %s" % self.__sizesList[ size ][0]
rrdCmd += " -h %s" % self.__sizesList[ size ][1]
rrdCmd += " --title '%s'" % activitiesList[ 0 ].getGroupLabel()
colorGen = ColorGenerator()
activitiesList.sort()
for idActivity in range( len( activitiesList ) ):
activity = activitiesList[ idActivity ]
rrdCmd += " %s" % self.__generateRRDGraphVar( idActivity, activity, plotTimeSpan, self.__sizesList[ size ][0] )
if stackActivities:
rrdCmd += " 'AREA:%s#%s:%s:STACK'" % ( idActivity, colorGen.getHexColor(), activity.getLabel().replace( ":", "\:" ) )
else:
rrdCmd += " 'LINE1:%s#%s:%s'" % ( idActivity, colorGen.getHexColor(), activity.getLabel().replace( ":", "\:" ) )
rrdCmd += self.__graphTimeComment( fromSecs, toSecs )
retVal = self.__exec( rrdCmd )
if not retVal[ 'OK' ]:
return retVal
return S_OK( graphFilename )
def plot( self, fromSecs, toSecs, activity, stackActivities , size, graphFilename = "" ):
"""
Generate a non grouped plot
"""
plotTimeSpan = toSecs - fromSecs
if not graphFilename:
graphFilename = "%s.png" % self.__generateName( fromSecs,
toSecs,
activity,
stackActivities
)
graphVar = self.__generateRRDGraphVar( 0, activity, plotTimeSpan, self.__sizesList[ size ][0] )
rrdCmd = "%s graph %s/%s" % ( self.rrdExec, self.graphLocation, graphFilename )
rrdCmd += " -s %s" % fromSecs
rrdCmd += " -e %s" % toSecs
rrdCmd += " -w %s" % self.__sizesList[ size ][0]
rrdCmd += " -h %s" % self.__sizesList[ size ][1]
rrdCmd += " --title '%s'" % activity.getLabel()
rrdCmd += " --vertical-label '%s'" % activity.getUnit()
rrdCmd += " %s" % graphVar
if stackActivities:
rrdCmd += " 'AREA:0#0000FF::STACK'"
else:
rrdCmd += " 'LINE1:0#0000FF'"
rrdCmd += self.__graphTimeComment( fromSecs, toSecs )
retVal = self.__exec( rrdCmd )
if not retVal[ 'OK' ]:
return retVal
return S_OK( graphFilename )
def deleteRRD( self, rrdFile ):
try:
os.unlink( "%s/%s" % ( self.rrdLocation, rrdFile ) )
except Exception, e:
self.log.error( "Could not delete rrd file", "%s: %s" % ( rrdFile, str( e ) ) )
|
Sbalbp/DIRAC
|
FrameworkSystem/private/monitoring/RRDManager.py
|
Python
|
gpl-3.0
| 10,184
|
[
"DIRAC"
] |
a345d889f9f51bbb2e587d99820dce53059b2a9451c14ffe3da0d6312a477e26
|
#! /usr/bin/env python3
import sys

from SWEET import *

# Job generation configuration for the Galewsky SWE-on-the-sphere benchmark.
# Everything below module level only sets parameters; job scripts are only
# written in the __main__ section further down.
p = JobGeneration()
p.compilecommand_in_jobscript = False

#
# Run simulation on plane or sphere
#
p.compile.program = 'swe_sphere'

p.compile.plane_or_sphere = 'sphere'
p.compile.plane_spectral_space = 'disable'
p.compile.plane_spectral_dealiasing = 'disable'
p.compile.sphere_spectral_space = 'enable'
p.compile.sphere_spectral_dealiasing = 'enable'

# Target platform for job script generation
p.platform_id_override = 'cheyenne'
#p.compile.compiler = 'intel'

#
# Use Intel MPI Compilers
#
#p.compile.compiler_c_exec = 'mpicc'
#p.compile.compiler_cpp_exec = 'mpicxx'
#p.compile.compiler_fortran_exec = 'mpif90'

#
# Activate Fortran source
#
p.compile.fortran_source = 'enable'

#
# MPI?
#
#p.compile.sweet_mpi = 'enable'

# Verbosity mode
p.runtime.verbosity = 2

#
# Mode and Physical resolution
#
p.runtime.space_res_spectral = 128
p.runtime.space_res_physical = -1

#
# Benchmark ID
# 4: Gaussian breaking dam
# 100: Galewski
#
p.runtime.bench_id = 100

#
# Compute error
#
p.runtime.compute_error = 0

#
# Preallocate the REXI matrices
#
p.runtime.rexi_sphere_preallocation = 1

#
# Deactivate stability checks
#
p.stability_checks = 0

#
# Threading accross all REXI terms
if True:
    p.compile.threading = 'off'
    #p.compile.rexi_thread_parallel_sum = 'disable'
    p.compile.rexi_thread_parallel_sum = 'enable'
else:
    #
    # WARNING: rexi_thread_par does not work yet!!!
    # MPI Ranks are clashing onthe same node with OpenMP Threads!
    #rexi_thread_par = True
    rexi_thread_par = False
    if rexi_thread_par:
        # OMP parallel for over REXI terms
        p.compile.threading = 'off'
        p.compile.rexi_thread_parallel_sum = 'enable'
    else:
        p.compile.threading = 'omp'
        p.compile.rexi_thread_parallel_sum = 'disable'

#
# REXI method
# N=64, SX,SY=50 and MU=0 with circle primitive provide good results
#
p.runtime.rexi_method = ''
p.runtime.rexi_ci_n = 128
p.runtime.rexi_ci_max_real = -999
p.runtime.rexi_ci_max_imag = -999
p.runtime.rexi_ci_sx = -1
p.runtime.rexi_ci_sy = -1
p.runtime.rexi_ci_mu = 0
p.runtime.rexi_ci_primitive = 'circle'

#p.runtime.rexi_beta_cutoff = 1e-16
p.runtime.rexi_beta_cutoff = 0

#p.compile.debug_symbols = False

#p.runtime.gravitation= 1
#p.runtime.sphere_rotating_coriolis_omega = 1
#p.runtime.h0 = 1
#p.runtime.plane_domain_size = 1

p.runtime.viscosity = 0.0

# Candidate time step sizes (seconds) per integrator family
#timestep_sizes = [timestep_size_reference*(2.0**i) for i in range(0, 11)]
#timestep_sizes = [timestep_size_reference*(2**i) for i in range(2, 4)]
timestep_sizes_explicit = [10, 20, 30, 60, 120, 180]
timestep_sizes_implicit = [60, 120, 180, 360, 480, 600, 720]
timestep_sizes_rexi = [60, 120, 180, 240, 300, 360, 480, 600, 720]

# Reference run uses the smallest explicit step
timestep_size_reference = timestep_sizes_explicit[0]

#timestep_sizes = timestep_sizes[1:]
#print(timestep_sizes)
#sys.exit(1)

#p.runtime.max_simulation_time = timestep_sizes[-1]*10 #timestep_size_reference*2000
p.runtime.max_simulation_time = 432000 #timestep_size_reference*(2**6)*10
#p.runtime.output_timestep_size = p.runtime.max_simulation_time
p.runtime.output_filename = "-"
p.runtime.output_timestep_size = 60

p.runtime.sphere_extended_modes = 0
p.runtime.floating_point_output_digits = 14

# Groups to execute, see below
# l: linear
# ln: linear and nonlinear
#groups = ['l1', 'l2', 'ln1', 'ln2', 'ln4']
groups = ['ln2']

#
# MPI ranks
#
#mpi_ranks = [2**i for i in range(0, 12+1)]
#mpi_ranks = [1]

#
# allow including this file
#
if __name__ == "__main__":

    ####################################################
    # WE FOCUS ON 2ND ORDER ACCURATE METHODS HERE
    ####################################################
    groups = ['ln2']

    if len(sys.argv) > 1:
        groups = [sys.argv[1]]

    print("Groups: "+str(groups))

    for group in groups:
        # 1st order linear
        # 2nd order nonlinear
        if group == 'ln2':
            # Each entry: [timestepping method, order, order2, rexi_use_direct_solution]
            ts_methods = [
                ['ln_erk', 4, 4, 0],    # reference solution

                ###########
                # RK2/4
                ###########
                ['ln_erk', 2, 2, 0],    # reference solution
                ['ln_erk', 4, 4, 0],    # reference solution

                ###########
                # CN
                ###########
                ['lg_irk_lc_n_erk_ver0', 2, 2, 0],
                ['lg_irk_lc_n_erk_ver1', 2, 2, 0],
                ['l_irk_n_erk_ver0', 2, 2, 0],
                ['l_irk_n_erk_ver1', 2, 2, 0],

                ###########
                # REXI
                ###########
                ['lg_rexi_lc_n_erk_ver0', 2, 2, 0],
                ['lg_rexi_lc_n_erk_ver1', 2, 2, 0],
                ['l_rexi_n_erk_ver0', 2, 2, 0],
                ['l_rexi_n_erk_ver1', 2, 2, 0],

                ###########
                # ETDRK
                ###########
                ['lg_rexi_lc_n_etdrk', 2, 2, 0],
                ['l_rexi_n_etdrk', 2, 2, 0],
            ]

        # 4th order nonlinear
        if group == 'ln4':
            ts_methods = [
                ['ln_erk', 4, 4, 0],    # reference solution
                ['l_rexi_n_etdrk', 4, 4, 0],
                ['ln_erk', 4, 4, 0],
            ]

        #
        # OVERRIDE TS methods
        #
        # NOTE(review): the guard checks len(sys.argv) > 4 but sys.argv[5] is
        # indexed below, so exactly 5 arguments raises IndexError -- the
        # condition presumably should be > 5; confirm before changing.
        if len(sys.argv) > 4:
            ts_methods = [ts_methods[0]]+[[sys.argv[2], int(sys.argv[3]), int(sys.argv[4]), int(sys.argv[5])]]

        #
        # add prefix string to group benchmarks
        #
        prefix_string_template = group

        #
        # Parallelization models
        #
        # Use all cores on one domain for each MPI task even if only 1 thread is used
        # This avoid any bandwidth-related issues
        #

        #
        # Reference solution
        #
        if True:
            tsm = ts_methods[0]

            p.runtime.timestep_size = timestep_size_reference
            p.runtime.timestepping_method = tsm[0]
            p.runtime.timestepping_order = tsm[1]
            p.runtime.timestepping_order2 = tsm[2]
            p.runtime.rexi_use_direct_solution = tsm[3]

            # SPACE parallelization
            pspace = JobParallelizationDimOptions('space')
            pspace.num_cores_per_rank = p.platform_resources.num_cores_per_socket
            pspace.num_threads_per_rank = pspace.num_cores_per_rank
            pspace.num_ranks = 1
            pspace.setup()
            #pspace.print()

            # TIME parallelization
            ptime = JobParallelizationDimOptions('time')
            ptime.num_cores_per_rank = 1
            ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
            ptime.num_ranks = 1
            ptime.setup()
            #ptime.print()

            # Setup parallelization
            p.setup_parallelization([pspace, ptime])
            #p.parallelization.print()

            # wallclocktime
            p.parallelization.max_wallclock_seconds = 60*60 # allow at least one hour

            # turbomode
            p.parallelization.force_turbo_off = True

            if len(tsm) > 4:
                s = tsm[4]
                # NOTE(review): the per-timestep loop further down calls
                # p.runtime.load_from_dict(tsm[4]); here p.load_from_dict is
                # used instead -- confirm which receiver is intended.
                p.load_from_dict(tsm[4])

            p.write_jobscript('script_'+prefix_string_template+'_ref'+p.runtime.getUniqueID(p.compile)+'/run.sh')

        #
        # Create job scripts
        #
        for tsm in ts_methods[1:]:
            tsm_name = tsm[0]
            # Pick the time step candidates matching the integrator family
            if 'ln_erk' in tsm_name:
                timestep_sizes = timestep_sizes_explicit
            elif 'l_erk' in tsm_name or 'lg_erk' in tsm_name:
                timestep_sizes = timestep_sizes_explicit
            elif 'l_irk' in tsm_name or 'lg_irk' in tsm_name:
                timestep_sizes = timestep_sizes_implicit
            elif 'l_rexi' in tsm_name or 'lg_rexi' in tsm_name:
                timestep_sizes = timestep_sizes_rexi
            else:
                print("Unable to identify time stepping method "+tsm_name)
                sys.exit(1)

            for p.runtime.timestep_size in timestep_sizes:
                p.runtime.timestepping_method = tsm[0]
                p.runtime.timestepping_order = tsm[1]
                p.runtime.timestepping_order2 = tsm[2]
                p.runtime.rexi_use_direct_solution = tsm[3]

                if len(tsm) > 4:
                    s = tsm[4]
                    p.runtime.load_from_dict(tsm[4])

                p.parallelization.force_turbo_off = True

                if not '_rexi' in p.runtime.timestepping_method:
                    # Non-REXI method: single job script per time step size
                    p.runtime.rexi_method = ''

                    # SPACE parallelization
                    pspace = JobParallelizationDimOptions('space')
                    pspace.num_cores_per_rank = p.platform_resources.num_cores_per_socket
                    pspace.num_threads_per_rank = pspace.num_cores_per_rank
                    pspace.num_ranks = 1
                    pspace.setup()
                    #pspace.print()

                    # TIME parallelization
                    ptime = JobParallelizationDimOptions('time')
                    ptime.num_cores_per_rank = 1
                    ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
                    ptime.num_ranks = 1
                    ptime.setup()
                    #ptime.print()

                    # Setup parallelization
                    p.setup_parallelization([pspace, ptime])
                    #p.parallelization.print()

                    # wallclocktime
                    p.parallelization.max_wallclock_seconds = 60*60 # allow at least one hour

                    # turbomode
                    p.parallelization.force_turbo_off = True

                    p.write_jobscript('script_'+prefix_string_template+p.getUniqueID()+'/run.sh')
                else:
                    # REXI method: sweep over the Cauchy-integral parameters
                    c = 1
                    range_cores_node = [p.platform_resources.num_cores_per_socket]

                    if True:
                        #for N in [64, 128]:
                        #for N in [128, 256]:
                        #for N in [128, 256]:
                        for N in [128]:
                            # Powers of two up to N (candidate time-rank counts)
                            range_time_cores = []
                            i = 1
                            while i <= N:
                                range_time_cores.append(i)
                                i *= 2

                            #for r in [25, 50, 75]:
                            # Everything starting and above 40 results in significant errors
                            #for r in [30, 50]:
                            #for r in [30, 60]:
                            for r in [30]:
                                #for gf in [0.01, 0.005, 0.001, 0.0005, 0.0001, 0.0]:
                                #for gf in [0.01, 0.005, 0.001, 0.0005, 0.0001, 0.0]:
                                #for gf in [1, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001, 0.00000001]:
                                #for gf_exp_N in [2, 4, 6, 10, 20, 40]:
                                #for gf_exp_N in [2, 4, 10]:
                                for gf_exp_N in [0]:
                                    #for gf_scale in [0, 5, 10, 20, 50]:
                                    for gf_scale in [0]:
                                        #for ci_max_real in [10, 5]:
                                        for ci_max_real in [10.0]:
                                            p.runtime.load_from_dict({
                                                'rexi_method': 'ci',
                                                'ci_n':N,
                                                'ci_max_real':ci_max_real,
                                                'ci_max_imag':r,
                                                'half_poles':0,
                                                'ci_gaussian_filter_scale':gf_scale,
                                                #'ci_gaussian_filter_dt_norm':130.0, # unit scaling for T128 resolution
                                                'ci_gaussian_filter_dt_norm':0.0, # unit scaling for T128 resolution
                                                'ci_gaussian_filter_exp_N':gf_exp_N,
                                            })

                                            for par_time_cores in [range_time_cores[-1]]:
                                                if True:
                                                    # SPACE parallelization
                                                    pspace = JobParallelizationDimOptions('space')
                                                    pspace.num_cores_per_rank = p.platform_resources.num_cores_per_socket
                                                    pspace.num_threads_per_rank = pspace.num_cores_per_rank
                                                    pspace.num_ranks = 1
                                                    pspace.setup()
                                                    pspace.print()

                                                    # TIME parallelization
                                                    ptime = JobParallelizationDimOptions('time')
                                                    ptime.num_cores_per_rank = 1
                                                    ptime.num_threads_per_rank = 1 #pspace.num_cores_per_rank
                                                    ptime.num_ranks = min(par_time_cores, p.platform_resources.num_cores // pspace.num_cores_per_rank)
                                                    ptime.setup()
                                                    ptime.print()

                                                    # Setup parallelization
                                                    p.setup_parallelization([pspace, ptime])
                                                    p.parallelization.print()

                                                    # wallclocktime
                                                    p.parallelization.max_wallclock_seconds = 60*60 # allow at least one hour

                                                    # turbomode
                                                    p.parallelization.force_turbo_off = True

                                                    # Generate only scripts with max number of cores
                                                    p.write_jobscript('script_'+prefix_string_template+p.getUniqueID()+'/run.sh')

    p.write_compilecommands("./compile_platform_"+p.platforms.platform_id+".sh")
|
schreiberx/sweet
|
benchmarks_sphere/rexi_mass_energy_galewsky_martinium/jobs_create.py
|
Python
|
mit
| 10,962
|
[
"Gaussian"
] |
1b324a48509c7d29240e24e70c81cc923a77aa7b4719c56da27d502ca65f537e
|
from django.db import models
from django.contrib.auth.models import User
import requests
import json
from datetime import datetime, timedelta
import dateutil.parser
from pushbullet import PushBullet
from settings_local import PUSHBULLET_KEY, GITLAB_KEY, GITLAB_URL
import html5lib
from django.utils.crypto import get_random_string
from django.core import urlresolvers
# Create your models here.
class AcceptedEvent(models.Model):
    """An event type name that counts as coding activity (matched against
    the GitHub events API ``type`` field in ``Streak.update_streak``)."""
    name = models.TextField()


class Language(models.Model):
    """A programming language, referenced by coding ``Session`` records."""
    name = models.TextField()


class Session(models.Model):
    """A manually recorded coding session in a given language."""
    start = models.DateTimeField()
    end = models.DateTimeField()
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0
    language = models.ForeignKey(Language)
class Streak(models.Model):
    """A user's daily coding streak.

    ``update_streak`` is run periodically (at midnight, via huey) and checks
    several activity sources (GitLab, GitHub, freeCodeCamp and locally
    recorded sessions); success extends the streak, failure marks it as
    lost and the user is notified via Pushbullet.
    """
    uuid = models.CharField(max_length=36, primary_key=True)
    user = models.ForeignKey(User)
    streak = models.PositiveIntegerField(default=0)
    date = models.DateTimeField()
    utc_offset = models.PositiveIntegerField(default=0)
    lost = models.BooleanField(default=False)
    sessions = models.ManyToManyField(Session)

    def update_streak(self, test_for_success=False):
        """Updates the streak. Run by huey at midnight. True means streak is incremented, false means failure.

        Attribute:
        test_for_success: when true the result is simulated but not saved."""
        pb = PushBullet(PUSHBULLET_KEY)

        def github():
            """Checks if a commit has been made in the last 24 hours."""
            try:
                GITHUB_API = "https://api.github.com/users/{0}/events".format(self.user.username)
                accepted_events = AcceptedEvent.objects.values_list("name", flat=True)
                events = json.loads(requests.get(GITHUB_API).text)
                # Bugfix: the original returned False as soon as the FIRST
                # event failed to match, so later qualifying events were
                # never examined. Scan the whole list instead.
                for event in events:
                    # it needs to be either a commit or a pull request
                    # it must also be after the last update.
                    if event["type"] in accepted_events \
                            and self.date < dateutil.parser.parse(event["created_at"]):
                        return True
                return False
            except Exception:
                # network / JSON errors simply count as "no activity"
                return False

        def freecodecamp():
            """Checks your freecodecamp profile for progress matching today's date."""
            try:
                CODECAMP_URL = "https://www.freecodecamp.com/{0}".format(self.user.username)
                document = html5lib.parse(requests.get(CODECAMP_URL).text)
                if document.findtext((datetime.now()-timedelta(days=1)).strftime("%b %d, %Y"), default=None) is None:
                    return False
                return True
            except Exception:
                return False

        def gitlab():
            """Checks the configured GitLab instance for a recent commit by the user."""
            try:
                repos_endpoint = "/api/v3/projects"
                repos = json.loads(requests.get(
                    "{0}{1}?order_by=last_activity_at&private_token={2}".format(GITLAB_URL, repos_endpoint, GITLAB_KEY)).text)
                commits_endpoint = "/api/v3/projects/{0}/repository/commits"
                for repo in repos:
                    commits = json.loads(requests.get(
                        "{0}{1}?order_by=last_activity_at&private_token={2}".format(GITLAB_URL,
                                                                                    commits_endpoint.format(repo["id"]),
                                                                                    GITLAB_KEY)).text)
                    # if we get to a repo that hasn't been updated in the last 24 hours,
                    # return false (they are ordered by latest activity)
                    if self.date > dateutil.parser.parse(repo["last_activity_at"]):
                        return False
                    for commit in commits:  # if the date is not in the last day, break
                        if self.date < dateutil.parser.parse(commit["created_at"]):
                            # if we have the right guy, return true
                            if commit["author_name"] == self.user.username:
                                return True
                        else:
                            break
                return False
            except Exception:
                return False

        def session():
            """Checks for a locally recorded coding session in the last day."""
            date_from = datetime.now() - timedelta(days=1)
            # Bugfix: a related manager has no ``objects`` attribute; the
            # original ``self.sessions.objects.filter(...)`` raised
            # AttributeError at runtime.
            return self.sessions.filter(start__gte=date_from).exists()

        successful = gitlab() or github() or freecodecamp() or session()
        if test_for_success is False:
            # Bugfix: the increment was multiplied by int(self.lost), which
            # awarded points only AFTER the streak was lost -- the opposite
            # of the intent documented in the original comment.
            self.streak += (1*int(successful)*int(not self.lost))  # stops you getting more points after losing.
            self.lost = not successful or self.lost  # if you lost, it will stay until you open the app.
            self.date = datetime.now()
            if self.lost:
                # Bugfix: resolve() maps a URL path to a view; reverse() is
                # what builds the URL we want to push.
                push = pb.push_link(urlresolvers.reverse("codestreak:root"), "Your streak is over! Visit the app to reset.")
            self.save()
        else:
            if successful:
                push = pb.push_note("Well done. You made a commit today.", ":)")
            else:
                push = pb.push_note("You're risking your streak!", "It's quite late and you still haven't made a commit. Hurry!")
        return True if successful else False

    def notify_streak(self):
        """Simulate an update (without saving) and push the matching notification."""
        self.update_streak(test_for_success=True)
|
arlyon/codestreak
|
models.py
|
Python
|
mit
| 5,337
|
[
"VisIt"
] |
27cd7d12ec003b49730e3d0d63f4f38d68eaa446039da26eadd85157ff1f0912
|
import errno
import os
import os.path
import sys

# Make the bundled "lib" directory importable so ansible.release resolves
# against this source tree rather than any installed copy.
sys.path.insert(0, os.path.abspath('lib'))

from ansible.release import __version__, __author__

try:
    from setuptools import setup, find_packages
except ImportError:
    print("Ansible now needs setuptools in order to build. Install it using"
          " your package manager (usually python-setuptools) or via pip (pip"
          " install setuptools).")
    sys.exit(1)

with open('requirements.txt') as requirements_file:
    install_requirements = requirements_file.read().splitlines()
    if not install_requirements:
        # Bugfix: the two message fragments previously ran together
        # ("...requirements.txt fileThat indicates...").
        print("Unable to read requirements from the requirements.txt file. "
              "That indicates this copy of the source code is incomplete.")
        sys.exit(2)

# Every CLI name in the value set is installed as a symlink to the key.
SYMLINKS = {'ansible': frozenset(('ansible-console',
                                  'ansible-doc',
                                  'ansible-galaxy',
                                  'ansible-playbook',
                                  'ansible-pull',
                                  'ansible-vault'))}

for source in SYMLINKS:
    for dest in SYMLINKS[source]:
        dest_path = os.path.join('bin', dest)
        if not os.path.islink(dest_path):
            # dest exists as a regular file (or not at all): replace it
            try:
                os.unlink(dest_path)
            except OSError as e:
                # Bugfix: compare against errno.ENOENT instead of a bare 2,
                # and re-raise anything else instead of silently continuing
                # into a symlink() call that would fail anyway.
                if e.errno != errno.ENOENT:
                    raise
            os.symlink(source, dest_path)

setup(
    name='ansible',
    version=__version__,
    description='Radically simple IT automation',
    author=__author__,
    author_email='info@ansible.com',
    url='https://ansible.com/',
    license='GPLv3',
    # Ansible will also make use of a system copy of python-six and
    # python-selectors2 if installed but use a Bundled copy if it's not.
    install_requires=install_requirements,
    package_dir={'': 'lib'},
    packages=find_packages('lib'),
    package_data={
        '': [
            'module_utils/*.ps1',
            # Bugfix: this pattern was listed twice in the original
            'modules/windows/*.ps1',
            'galaxy/data/*/*.*',
            'galaxy/data/*/*/*.*',
            'galaxy/data/*/tests/inventory'
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    scripts=[
        'bin/ansible',
        'bin/ansible-playbook',
        'bin/ansible-pull',
        'bin/ansible-doc',
        'bin/ansible-galaxy',
        'bin/ansible-console',
        'bin/ansible-connection',
        'bin/ansible-vault',
    ],
    data_files=[],
)
|
gptech/ansible
|
setup.py
|
Python
|
gpl-3.0
| 3,121
|
[
"Galaxy"
] |
1bf733c54188d16ea04795087b217d071776348bf42c4e6c78b5fc6712b8dd1d
|
#!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# gui.py
# Purpose: control a continuously running LAMMPS simulation via a Tkinter GUI
# Syntax: gui.py in.lammps Nfreq
# in.lammps = LAMMPS input script
# Nfreq = query GUI every this many steps
# IMPORTANT: this script cannot yet be run in parallel via Pypar,
# because I can't seem to do a MPI-style broadcast in Pypar
from __future__ import print_function
import sys,time
# methods called by GUI
def go():
    """'Go' button callback: let the main loop advance the simulation."""
    global runflag
    runflag = 1

def stop():
    """'Stop' button callback: pause the simulation loop."""
    global runflag
    runflag = 0

def settemp(value):
    """Temperature slider callback.

    The bound event argument is ignored; the current slider position is
    read back directly via slider.get()."""
    global temptarget
    temptarget = slider.get()

def quit():
    """'Quit' button callback: request exit from the main loop."""
    global breakflag
    breakflag = 1
# parse command line

argv = sys.argv
if len(argv) != 3:
    print("Syntax: gui.py in.lammps Nfreq")
    sys.exit()

infile = sys.argv[1]        # LAMMPS input script
nfreq = int(sys.argv[2])    # query the GUI every this many steps

me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()

from lammps import lammps
lmp = lammps()

# run infile all at once
# assumed to have no run command in it

lmp.file(infile)
lmp.command("thermo %d" % nfreq)

# display GUI with go/stop/quit buttons and slider for temperature
# just proc 0 handles GUI

breakflag = 0
runflag = 0
temptarget = 1.0

if me == 0:
    try:
        from Tkinter import *   # Python 2
    except:
        from tkinter import *   # Python 3
    tkroot = Tk()
    tkroot.withdraw()
    root = Toplevel(tkroot)
    root.title("LAMMPS GUI")
    frame = Frame(root)
    Button(frame,text="Go",command=go).pack(side=LEFT)
    Button(frame,text="Stop",command=stop).pack(side=LEFT)
    slider = Scale(frame,from_=0.0,to=5.0,resolution=0.1,
                   orient=HORIZONTAL,label="Temperature")
    slider.bind('<ButtonRelease-1>',settemp)
    slider.set(temptarget)
    slider.pack(side=LEFT)
    Button(frame,text="Quit",command=quit).pack(side=RIGHT)
    frame.pack()
    tkroot.update()

# endless loop, checking status of GUI settings every Nfreq steps
# run with pre yes/no and post yes/no depending on go/stop status
# re-invoke fix langevin with new seed when temperature slider changes
# after re-invoke of fix langevin, run with pre yes

running = 0
temp = temptarget
seed = 12345
lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))

while 1:
    if me == 0: tkroot.update()
    if temp != temptarget:
        # slider moved: re-create the thermostat with a new seed and force
        # the next run to re-initialize (pre yes)
        temp = temptarget
        seed += me+1
        lmp.command("fix 2 all langevin %g %g 0.1 %d" % (temp,temp,seed))
        running = 0
    # pre/post choice avoids redundant setup between consecutive runs
    if runflag and running:
        lmp.command("run %d pre no post no" % nfreq)
    elif runflag and not running:
        lmp.command("run %d pre yes post no" % nfreq)
    elif not runflag and running:
        lmp.command("run %d pre no post yes" % nfreq)
    if breakflag: break
    if runflag: running = 1
    else: running = 0
    time.sleep(0.01)

# uncomment if running in parallel via Pypar
#print("Proc %d out of %d procs has" % (me,nprocs), lmp)
#pypar.finalize()
|
arielzn/lammps
|
python/examples/gui.py
|
Python
|
gpl-2.0
| 2,890
|
[
"LAMMPS"
] |
abd98aed4f274651d1242651e5f9d84f29a7116b9b170ac403e78e514bb09eb2
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import gevent
from gevent import Greenlet
from rrdtool import RRDToolClient
DEFAULT_INTERVAL = 60
class AvogadroAgent(RRDToolClient, Greenlet):
  """ Avogadro Agent base class implementation.

  Subclasses implement collect(); the greenlet loop (_run) samples that
  value every ``interval`` seconds and stores it via the RRDToolClient
  base class.
  """

  @property
  def name(self):
    # The metric name defaults to the concrete subclass name
    return self.__class__.__name__

  def __init__(self, interval=DEFAULT_INTERVAL, options=None):
    super(AvogadroAgent, self).__init__(options=options)
    # NOTE(review): assumes ``options`` is not None and carries an
    # ``interval`` attribute; also, a falsy options.interval (e.g. 0)
    # silently falls back to the ``interval`` argument -- confirm intended.
    self.interval = options.interval or interval

  def __repr__(self):
    return self.name

  def _run(self):
    # Greenlet entry point: sample forever, every self.interval seconds.
    # NOTE(review): a None value from collect() skips the sleep as well,
    # so repeated Nones busy-loop -- confirm whether that is intended.
    while True:
      value = self.collect() # collect() implemented in subclass
      if value is None:
        continue # skip non-values
      ts = time.time()
      print self, ts, value
      super(AvogadroAgent, self).store(value, ts=ts) # store() implemented in
                                                     # super()
      gevent.sleep(self.interval)

  @classmethod
  def addParserOptions(cls, parser):
    # Register the --interval option on the shared command-line parser
    super(AvogadroAgent, cls).addParserOptions(parser)
    parser.add_option("--interval",
                      default=DEFAULT_INTERVAL,
                      dest="interval",
                      help="Interval, in seconds, for metric collection",
                      metavar="SECONDS",
                      type="int")

  def collect(self):
    # Must be overridden to return the current metric value
    raise NotImplementedError("collect() not implemented in subclass")
|
numenta/nupic.rogue
|
avogadro/agent.py
|
Python
|
agpl-3.0
| 2,347
|
[
"Avogadro"
] |
1c94cbe0d30aa4260fb288a79d188addb7ae562219019c79ab23407b9d9b172e
|
from numpy import exp,concatenate,array,float,r_
from pylab import plot,ylabel,xlabel,gca,draw,legend,subplot,show,text,gcf,rand
from numpy import zeros,arange,ones,convolve,floor
import numpy as np
import matplotlib as mpl
mpl.rcParams['lines.linewidth'] = 3
mpl.rcParams['figure.figsize'] = (10,8)
mpl.rcParams['axes.grid']=True
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import h5py
import plasticnet as pn
import splikes as sp
def save(fname, sim, neurons=[], connections=[]):
    """Save a simulation, its neurons/connections and their monitors to HDF5.

    :param fname: output file name
    :param sim: simulation object (must provide .save(group) and .monitors)
    :param neurons: neuron objects, saved into "neuron %d" groups
    :param connections: connection objects, saved into "connection %d" groups;
        each group gets 'pre number'/'post number' attributes holding the
        indices of its endpoints within ``neurons`` (None when not found)
    """
    # Bugfix: sys.stdout.flush() below raised NameError -- ``sys`` was never
    # imported at module level in this file.
    import sys
    f = h5py.File(fname, 'w')
    try:
        f.attrs['plasticnet version'] = pn.version
        f.attrs['splikes version'] = sp.version
        group = f.create_group("simulation")
        sim.save(group)
        for n, neuron in enumerate(neurons):
            group = f.create_group("neuron %d" % n)
            if neuron.verbose:
                print("<<<< group neuron %d >>>>" % n)
                sys.stdout.flush()
            neuron.save(group)
            # attach every monitor watching this neuron
            for monitor_name in sim.monitors:
                m = sim.monitors[monitor_name]
                if m.container == neuron:
                    mgroup = group.create_group("monitor %s" % m.name)
                    m.save(mgroup)
        for c, connection in enumerate(connections):
            group = f.create_group("connection %d" % c)
            if connection.verbose:
                print("<<<< group connection %d >>>>" % c)
                sys.stdout.flush()
            connection.save(group)
            try:
                idx = neurons.index(connection.pre)
            except ValueError:
                idx = None
            group.attrs['pre number'] = idx
            try:
                idx = neurons.index(connection.post)
            except ValueError:
                idx = None
            group.attrs['post number'] = idx
            # attach every monitor watching this connection
            for monitor_name in sim.monitors:
                m = sim.monitors[monitor_name]
                if m.container == connection:
                    mgroup = group.create_group("monitor %s" % m.name)
                    m.save(mgroup)
    finally:
        # always close the HDF5 file, even on partial failure
        f.close()
def bigfonts(size=20, family='sans-serif'):
    """Set a uniform (large) font size for all matplotlib text elements:
    base font, axis labels, titles, tick labels and the legend."""
    from matplotlib import rc
    rc('font', size=size, family=family)
    for group, keyword in (('axes', 'labelsize'),
                           ('axes', 'titlesize'),
                           ('xtick', 'labelsize'),
                           ('ytick', 'labelsize'),
                           ('legend', 'fontsize')):
        rc(group, **{keyword: size})

# apply the larger fonts as soon as this module is imported
bigfonts()
def running_average(t, y, T):
    """Smooth *y* with a boxcar (moving-average) filter.

    The window length is the number of samples whose time is <= T, and the
    result has the same length as *y* (numpy 'same' convolution)."""
    window_size = int((t <= T).sum())
    kernel = np.full(window_size, 1.0 / window_size)
    return np.convolve(y, kernel, mode='same')
def paramtext(x, y, *args, **kwargs):
    """Draw a rounded, wheat-colored text box listing parameter strings.

    :param x, y: anchor position in axes coordinates (transAxes)
    :param args: one string per line; joined with newlines
    :param kwargs: forwarded to matplotlib's text()
    """
    paramstr = '\n'.join(args)
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    T = text(x, y, paramstr,
             ha='center',
             va='top',
             bbox=props,
             transform=gca().transAxes,
             multialignment='left',
             **kwargs)
def plot_spike_lines(neuron, color, label):
    """Draw each saved spike as a short vertical tick at the bottom of the
    current axes and print per-neuron spike counts.

    :param neuron: object with ``saved_spikes`` (list of (time, index)
        tuples) and ``N`` (number of neurons)
    :param color: indexable mapping neuron index -> matplotlib format string
    :param label: legend label (attached to the first tick only, so the
        legend shows a single entry)
    """
    yl = gca().get_ylim()
    dyl = yl[1] - yl[0]
    count = 0
    for t, n in neuron.saved_spikes:
        if count == 0:
            # only the first line carries the legend label
            plot([t, t], [yl[0], yl[0] + .1 * dyl], color[n], linewidth=3, label=label)
        else:
            plot([t, t], [yl[0], yl[0] + .1 * dyl], color[n], linewidth=3)
        count += 1
    print("Total number of %s spikes: %d " % (label, len(neuron.saved_spikes)))
    if neuron.N > 1:
        for i in range(neuron.N):
            print("   Number of spikes for neuron %d: %d" % (i, len([t for t, n in neuron.saved_spikes if n == i])))
def timeplot(*args, **kwargs):
    """Like plot(), but formats the x axis (assumed to be in seconds) as
    HH:MM:SS -- or as MM:SS.mmm when the whole range is under 10 seconds."""
    import matplotlib.pyplot as plt
    import matplotlib.ticker

    def HMSFormatter(value, loc):
        # seconds -> "HH:MM:SS"
        h = value // 3600
        m = (value - h * 3600) // 60
        s = value % 60
        return "%02d:%02d:%02d" % (h, m, s)

    def HMSFormatter2(value, loc):
        # seconds -> "MM:SS.mmm" for short ranges
        h = value // 3600  # NOTE(review): computed but unused in this formatter
        m = (value - h * 3600) // 60
        s = value % 60
        ms = value % 1
        return "%02d:%02d.%03d" % (m, s, ms * 1000)

    t = args[0]
    if max(t) < 10:  # use ms
        gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(HMSFormatter2))
    else:
        gca().xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(HMSFormatter))

    plot(*args, **kwargs)
    gcf().autofmt_xdate()
# Time/rate unit constants, expressed in seconds (so ``second`` == 1.0);
# used by plot_time_data() to pick a readable axis unit.
ms = 0.001
second = 1000 * ms
minute = 60 * second
hour = 60 * minute
day = 24 * hour
year = 365.25 * day
Hz = 1.0
def plot_time_data(t, y, style='.-'):
    """Plot y against t, rescaling the time axis to a readable unit
    (ms / sec / min / hour / day) and labelling the x axis accordingly."""
    t = array(t)
    try:
        largest = t.max()
    except ValueError:  # empty t
        largest = 0.0
    # choose the biggest unit such that the data spans more than 2 of them
    for unit, scale in (('day', day), ('hour', hour), ('min', minute), ('sec', second)):
        if largest > 2 * scale:
            t = t / scale
            break
    else:
        unit = 'ms'
    try:
        plot(t, y, style)
    except ValueError:
        # y is a scalar: wrap it so the lengths match
        plot(t, [y], style)
    xlabel('Time (%s)' % unit)
def spike_counts(tmat,spikes,window=200):
    """Count spikes per neuron in each [tmat[i], tmat[i+1]] bin.

    `spikes` is a sequence of (time, neuron_index) pairs.  Bin edges are
    inclusive at both ends, so a spike exactly on an interior edge is
    counted in both adjacent bins (as in the original implementation).
    NOTE: `window` is accepted but not used here.
    """
    times,neurons=zip(*spikes)
    times=np.asarray(times)
    neurons=np.asarray(neurons)
    n_cells=int(np.max(neurons))+1
    n_bins=len(tmat)-1
    counts=np.zeros( (n_cells,n_bins) )
    for b in range(n_bins):
        lo,hi=tmat[b],tmat[b+1]
        in_bin=np.where( (times>=lo) & (times<=hi) )[0]
        for k in in_bin:
            counts[neurons[k],b]+=1
    return counts
def average_rate_plot(monitors,window=200,neurons=[],xlim=None,ylim=None):
    """Plot the smoothed population firing rate from spike monitors.

    `monitors` may be a SpikeMonitor, a list of them, or a list of neuron
    groups (whose SpikeMonitors are then collected).  Counts are smoothed
    with a boxcar of `window` time-steps; the 1000.0 factor converts the
    per-step count to Hz -- presumably assuming millisecond steps (TODO
    confirm).  `neurons`, if given, restricts counting to those indices.
    `ylim` is accepted but unused.
    """
    if not isinstance(monitors,list):
        monitors=[monitors]
    # NOTE(review): this test uses sp.SpikeMonitor while raster_plot below
    # uses the bare name SpikeMonitor -- confirm the intended module alias.
    if not isinstance(monitors[0],sp.SpikeMonitor):
        nn=monitors
        monitors=[]
        for n in nn:
            for m in n.monitors:
                if isinstance(m,SpikeMonitor):
                    monitors.append(m)
    for m in monitors:
        if xlim:
            t_min=xlim[0]   # NOTE(review): t_min is computed but never used
            t_max=xlim[1]
        else:
            t_min=m.t.min()
            t_max=m.t.max()
        if m.between:
            # monitor recorded only inside [between[0], between[1]]
            t=arange(m.between[0],m.between[1]+1)
            offset=m.between[0]
        else:
            t=arange(0,t_max+1)
            offset=0
        count=zeros(t.shape)
        if not neurons:
            # count spikes from every neuron in the group
            for tt in m.t:
                count[int(tt)-offset]+=1
            N=m.cell.N
        else:
            # count only the requested neuron indices
            for ss,tt in zip(m.spikes,m.t):
                if ss in neurons:
                    count[int(tt)-offset]+=1
            N=len(neurons)
        filt=ones(window)
        # boxcar smoothing, normalized to a per-neuron rate in Hz
        rates=convolve(filt, count, mode='same')*1000.0/window/N
        plot_time_data(t,rates,'.-')
    ylabel('Rate (Hz)')
def raster_plot(monitors,xlim=None,ylim=None):
    """Raster plot of spikes from one or more SpikeMonitors (or neuron
    groups, whose monitors are collected), stacking neuron indices."""
    if not isinstance(monitors,list):
        monitors=[monitors]
    if not isinstance(monitors[0],SpikeMonitor):
        # neuron groups were passed; gather their spike monitors
        groups=monitors
        monitors=[]
        for g in groups:
            monitors.extend(m for m in g.monitors if isinstance(m,SpikeMonitor))
    offset=0
    for m in monitors:
        plot_time_data(m.t,m.spikes+offset,'.')
        offset+=m.cell.N
    ylabel('Neuron Number')
    ax=gca()
    if not ylim:
        ylim=ax.get_ylim()
    span=ylim[1]-ylim[0]
    # pad the y range by 10% on each side
    ax.set_ylim([ylim[0]-span*.1, ylim[1]+span*.1])
    # we don't adjust the xlim like the ylim, so you can more easily compare with the states plots
    if xlim:
        ax.set_xlim(xlim)
    draw()
def plot_state(neuron,*args,**kwargs):
    """Plot one or more recorded state variables of `neuron`.

    Positional args are variable names; keyword `items` selects specific
    neuron columns (a single index or a sequence) and `style` the plot
    style.  Returns (t, vals) for a single variable, or a list of such
    pairs when several variables are requested.
    """
    items=kwargs.get('items',None)
    style=kwargs.get('style','.-')
    variable_names=args
    if items is not None:
        try:
            items[0]
        except TypeError:
            # allow a single index as well as a sequence
            items=[items]
    data=[]
    for var in variable_names:
        for m in neuron.monitors:
            if m.var==var:
                t=m.t
                vals=m.vals
                if items is None:
                    plot_time_data(t,vals,style)
                    data.append( (t,vals) )
                else:
                    plot_time_data(t,vals[:,items],style)
                    # fix: was vals[:items] (a row slice); return the same
                    # column selection that was actually plotted
                    data.append( (t,vals[:,items]) )
    if len(variable_names)==1:
        ylabel(var)
        return data[0]
    else:
        ylabel('Value')
        legend(variable_names)
        return data
def plot_monitors(neuron):
    """Show every monitor of `neuron` in its own stacked subplot:
    state monitors as time plots, spike monitors as rasters."""
    total=len(neuron.monitors)
    for row,m in enumerate(neuron.monitors,start=1):
        subplot(total,1,row)
        if isinstance(m,StateMonitor):
            plot_state(neuron,m.var)
        else:
            raster_plot(neuron,[0,max(m.t)])
    show()
    draw()
def sq(x,width):
    """Unit-height square pulse of the given width, centered at 0,
    sampled at the points in array `x` (returns a float array of 0/1)."""
    return (abs(x)<=(width/2.0))*1.0
def make_square(N=10,sz=100,rates=[5,55],width=10,display=False):
    """Return an (N, sz) array of shifted square-pulse rate profiles.

    Each row is a pulse of height max_rate on a min_rate background; pulse
    width is drawn uniformly from `width` when it is a (min, max) pair, or
    fixed when it is a scalar.  Mirrors make_gaussian below.
    """
    min_rate,max_rate=rates
    try:
        min_width,max_width=width
    except TypeError:
        min_width,max_width=width,width
    centers=(r_[0:N]+0.5)*sz/N
    # fix: integer indices are required for slicing on Python 3
    # (make_gaussian already does this)
    centers=centers.astype(int)
    # reverse, so the shift works easier down below
    centers=sz-centers
    # fix: integer division, as in make_gaussian (sz/2 is a float on Python 3)
    mid=sz//2
    idx=r_[0:sz]-mid
    l=[]
    for c in centers:
        width=rand()*(max_width-min_width)+min_width
        g=sq(idx,width)*(max_rate-min_rate)+min_rate
        g=concatenate((g[mid:],g[0:mid]))
        r=concatenate((g[c:],g[0:c]))
        l.append(r)
    a=array(l,float)
    if display:
        for r in a:
            plot(r,'.-')
    return a
def make_gaussian(N=10,sz=100,rates=[5,55],sigma=10,display=False):
    """Return an (N, sz) array of shifted Gaussian-bump rate profiles.

    Each row is a bump of peak max_rate on a min_rate background; sigma is
    drawn uniformly from `sigma` when it is a (min, max) pair, or fixed
    when it is a scalar.
    """
    lo_rate,hi_rate=rates
    try:
        lo_sig,hi_sig=sigma
    except TypeError:
        lo_sig,hi_sig=sigma,sigma
    centers=((r_[0:N]+0.5)*sz/N).astype(int)
    # reverse, so the shift works easier down below
    centers=sz-centers
    half=sz//2
    x=r_[0:sz]-half
    rows=[]
    for c in centers:
        # one RNG draw per row, exactly as before (no-op for scalar sigma)
        s=rand()*(hi_sig-lo_sig)+lo_sig
        bump=exp(-x**2/(2.0*s**2))*(hi_rate-lo_rate)+lo_rate
        bump=concatenate((bump[half:],bump[0:half]))
        rows.append(concatenate((bump[c:],bump[0:c])))
    result=array(rows,float)
    if display:
        for row in result:
            plot(row,'.-')
    return result
def convert_neuron_equations(D):
import re
loop_var='__i'
equation_lines=[]
for line in D['equations']:
line=line.replace(' ','')
line=line.split('#')[0] # get rid of comment
if not line:
continue
if ':' in line:
line,shape_str=line.split(':') # get shape info
else:
shape_str=''
parts=line.split('=')
left_side=parts[0]
if '/' in left_side: # derivative
vpart=left_side.split('/')
varstr=vpart[0][1:].strip()
op_str='+=sim.dt*'
else: # equality
varstr=left_side
op_str='='
varstr="%s[%s]%s" % (varstr, loop_var,op_str)
line=parts[1]
# put the indexing code in
for var in D['variables']:
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
line,count=pattern.subn('%s[%s]' % (var,loop_var),line)
# put the parameters in
for var in D['parameters']:
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
# TODO: this should be done with a buffer
if not isinstance(D['parameters'][var],(int,float)):
line,count=pattern.subn('%s[%s]' % (var,loop_var),line)
else:
line,count=pattern.subn('%s' % var,line)
line=varstr+"("+line+")"
equation_lines.append(line)
flag=False
indent=' '
s="for %s in range(self.N):\n" % loop_var
for line in equation_lines:
s+=indent+line+"\n"
return s
def translate_neuron(neuron_str):
    """Generate Cython source for a neuron class from a YAML description.

    The YAML must provide 'name', 'equations', 'parameters' and 'spiking'
    ('threshold'-style, 'poisson', or empty).  Returns the generated
    source as a string (a `cdef class` deriving from `neuron`).
    """
    def indent(s,num):
        # shift every line of s right (num>0) or left (num<0) by |num| columns
        if num>0:
            indent_str=' '*num
            lines=s.split('\n')
            new_lines=[]
            for line in lines:
                new_lines.append(indent_str+line)
            s2='\n'.join(new_lines)
            return s2
        elif num<0:
            num=-num
            lines=s.split('\n')
            new_lines=[]
            for line in lines:
                new_lines.append(line[num:])
            s2='\n'.join(new_lines)
            return s2
        else:
            return s
    import yaml
    # fix: yaml.load without a Loader is a TypeError on PyYAML>=6 and unsafe;
    # the neuron description is plain scalars/lists, so safe_load suffices.
    D=yaml.safe_load(neuron_str)
    D['parameter lines']=D['parameters']
    del D['parameters']
    code_str=""
    code_str+="""
from splikes cimport *
cimport cython
import pylab
import numpy as np
cimport numpy as np
"""
    code_str+="\n"
    # collect state-variable names from the left-hand sides of the equations
    variables={}
    for p in D['equations']:
        parts=p.strip().split('=')
        left=parts[0].strip()
        if '/' in left:
            varname=left.split('/')[0].strip()[1:]
        else:
            varname=left
        variables[varname]=p
    D['variables']=variables
    # evaluate parameter assignment lines ('name=value')
    parameters={}
    for p in D['parameter lines']:
        parts=p.strip().split('=')
        varname=parts[0].strip()
        value=parts[1].strip()
        # NOTE(review): eval of YAML-sourced text -- trusted input only
        parameters[varname]=eval(value)
    D['parameters']=parameters
    code_str+="cdef class {name}(neuron):\n".format(**D)
    code_str+=indent("cdef public double "+",".join(parameters.keys()),4)+"\n"
    code_str+="    cdef public np.ndarray "+",".join(variables.keys())+"\n"
    code_str+="    cpdef _reset(self):\n"
    for varname in variables:
        if varname in ['rate']:
            continue
        code_str+="        self.%s=np.zeros(self.N,dtype=np.float)\n" % varname
    code_str+="        neuron._reset(self)\n"
    code_str+="""
    def __init__(self,N):
        neuron.__init__(self,N)
"""
    code_str+="\n"
    for p in parameters:
        code_str+="        self.%s=%s\n" % (p,str(parameters[p]))
    code_str+="        self._reset()\n"
    code_str+="""
    @cython.cdivision(True)
    @cython.boundscheck(False) # turn of bounds-checking for entire function
    cpdef update(self,double t,simulation sim):
        cdef int __i,__j
        cdef connection c
        cdef neuron pre
"""
    code_str+="\n"
    # raw C views of the state arrays and scalar copies of the parameters
    for varname in variables:
        code_str+="        cdef double *%s=<double *>self.%s.data\n" % (varname,varname)
    for varname in parameters:
        code_str+="        cdef double %s=self.%s\n" % (varname,varname)
    code_str+="""
        cdef double *W,*state
        cdef double spike_scale
        cdef int *spiking
"""
    # propagate incoming spikes into the connection state arrays
    code_str+="""
        for c in self.connections_pre:
            pre=c.pre
            W=c.W
            spiking=<int *>pre.spiking.data
            spike_scale=c.spike_scale
            if pre.is_spike and c.use_state:
                state=<double *>c.state.data
                for __j in range(pre.N):
                    if spiking[__j]:
                        for __i in range(self.N):
                            state[__i]+=spike_scale*W[__i*pre.N+__j]
"""
    code_str+="\n"
    code_str+=indent(convert_neuron_equations(D),8)
    # append the spike-generation mechanism
    spiking=D['spiking'].strip()
    if 'threshold' in spiking:
        var_threshold=spiking.split('>')[0].strip()
        if var_threshold not in variables:
            raise ValueError("%s not in variables %s" % (var_threshold,str(variables)))
        if 'reset' not in parameters:
            raise ValueError("'reset' not in parameters")
        if 'threshold' not in parameters:
            raise ValueError("'threshold' not in parameters")
        code_str+="""
        spiking=<int *>self.spiking.data
        self.is_spike=0
        for __i in range(self.N):
            if %s[__i]>self.threshold:
                spiking[__i]=1
                self.is_spike=1
                self.post_count+=1
                if self.save_spikes_begin<=t<=self.save_spikes_end:
                    self.saved_spikes.append( (t,__i) )
                %s[__i]=self.reset
            else:
                spiking[__i]=0
""" % (var_threshold,var_threshold)
    elif 'poisson' in spiking:
        if 'rate' not in variables:
            raise ValueError("'rate' not in variables")
        code_str+="""
        spiking=<int *>self.spiking.data
        self.is_spike=0
        for __i in range(self.N):
            if randu()<(rate[__i]*sim.dt):
                spiking[__i]=1
                self.is_spike=1
                self.post_count+=1
                if self.save_spikes_begin<=t<=self.save_spikes_end:
                    self.saved_spikes.append( (t,__i) )
            else:
                spiking[__i]=0
"""
    else:
        if spiking:
            raise ValueError("Spiking '%s' not implemented" % spiking)
    return code_str
def convert_connection_equations(D):
import re
loop_var1='__i'
loop_var2='__j'
equation_lines=[]
shape_lines=[]
if not D['equations']:
return ''
for eqn in D['equations']:
line=eqn.keys()[0]
shape=eqn[line]
line=line.replace(' ','')
line=line.split('#')[0] # get rid of comment
if not line:
continue
parts=line.split('=')
left_side=parts[0]
if '/' in left_side: # derivative
vpart=left_side.split('/')
varstr=vpart[0][1:].strip()
op_str='+=sim.dt*'
else: # equality
varstr=left_side
op_str='='
if shape=='pre':
varstr="%s[%s]%s" % (varstr, loop_var2,op_str)
elif shape=='post':
varstr="%s[%s]%s" % (varstr, loop_var1,op_str)
else:
varstr="%s[__wi]%s" % (varstr,op_str)
shape_lines.append(shape)
line=parts[1]
for var in D['variables']:
shape=D['variables'][var][1]
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
if shape=='pre':
line,count=pattern.subn('%s[%s]' % (var,loop_var2),line)
elif shape=='post':
line,count=pattern.subn('%s[%s]' % (var,loop_var1,),line)
else:
line,count=pattern.subn('%s[__wi]' % (var,),line)
var='pre'
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
line,count=pattern.subn('%s[%s]/sim.dt' % (var,loop_var2),line) # divide by dt
var='post'
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
line,count=pattern.subn('%s[%s]/sim.dt' % (var,loop_var1),line)
# put the parameters in
for var in D['parameters']:
pstr=r'\b%s\b' % var # match words (\b = word boundary)
pattern=re.compile(pstr)
if not isinstance(D['parameters'][var],(int,float)):
line,count=pattern.subn('%s[%s]' % (var,loop_var2),line)
else:
line,count=pattern.subn('%s' % var,line)
line=varstr+"("+line+")"
equation_lines.append(line)
old_shape='nothing'
s=''
for line,shape in zip(equation_lines,shape_lines):
if shape!=old_shape:
if shape=='pre':
s+="for %s in range(self.pre.N):\n" % loop_var2
indent=' '
elif shape=='post':
s+="for %s in range(self.post.N):\n" % loop_var1
indent=' '
else:
s+="for %s in range(self.post.N):\n for %s in range(self.pre.N):\n" % (loop_var1,loop_var2)
s+=" __wi=%s*self.pre.N+%s\n" % (loop_var1,loop_var2)
indent=' '*2
old_shape=shape
s+=indent+line+"\n"
return s
def translate_connection(connection_str):
    """Generate Cython source for a connection class from a YAML description.

    The YAML must provide 'name', 'equations' (strings or {eqn: shape}
    dicts) and 'parameters'.  Returns the generated source as a string
    (a `cdef class` deriving from `connection`).
    """
    def indent(s,num):
        # shift every line of s right (num>0) or left (num<0) by |num| columns
        if num>0:
            indent_str=' '*num
            lines=s.split('\n')
            new_lines=[]
            for line in lines:
                new_lines.append(indent_str+line)
            s2='\n'.join(new_lines)
            return s2
        elif num<0:
            num=-num
            lines=s.split('\n')
            new_lines=[]
            for line in lines:
                new_lines.append(line[num:])
            s2='\n'.join(new_lines)
            return s2
        else:
            return s
    import yaml
    # fix: yaml.load without a Loader is a TypeError on PyYAML>=6 and unsafe
    D=yaml.safe_load(connection_str)
    D['parameter lines']=D['parameters']
    del D['parameters']
    code_str=""
    variables={}
    new_equations=[]
    for p in D['equations']:
        try:
            # fix: keys() views are not indexable on Python 3; next(iter(...))
            # still raises AttributeError for plain strings, preserving the
            # try/except control flow below
            eqn=next(iter(p.keys()))
            shape=p[eqn]
            shape=shape.split('#')[0] # get rid of comments
            new_equations.append({eqn:shape})
        except AttributeError:
            # a bare string equation defaults to 'full' shape
            eqn=p
            eqn=eqn.split('#')[0] # get rid of comments
            shape='full'
            new_equations.append({eqn:shape})
        parts=eqn.strip().split('=')
        left=parts[0].strip()
        if '/' in left:
            varname=left.split('/')[0].strip()[1:]
        else:
            varname=left
        if varname in ['W','post_rate','pre_rate']:
            # these are built-in names, not extra state arrays
            continue
        variables[varname]=eqn,shape
    D['equations']=new_equations
    D['variables']=variables
    parameters={}
    for p in D['parameter lines']:
        parts=p.strip().split('=')
        varname=parts[0].strip()
        value=parts[1].strip()
        # NOTE(review): eval of YAML-sourced text -- trusted input only
        parameters[varname]=eval(value)
    D['parameters']=parameters
    code_str=""
    code_str+="""
from splikes cimport *
cimport cython
import pylab
import numpy as np
cimport numpy as np
"""
    code_str+="\n"
    # put in the original translated string
    lines2=connection_str.split('\n')
    lines2='\n'.join(['# ' + line for line in lines2])
    code_str+="\n"+lines2+"\n"
    code_str+="cdef class {name}(connection):\n".format(**D)
    code_str+=indent("cdef public double "+",".join(parameters.keys()),4)+"\n"
    code_str+="    cdef public np.ndarray "+",".join(variables.keys())+"\n"
    code_str+="    cpdef _reset(self):\n"
    for varname in variables:
        shape=variables[varname][1]
        if shape=='pre':
            code_str+="        self.%s=np.zeros(self.pre.N,dtype=np.float)\n" % varname
        elif shape=='post':
            code_str+="        self.%s=np.zeros(self.post.N,dtype=np.float)\n" % varname
        elif shape=='full':
            code_str+="        self.%s=np.zeros( (self.post.N,self.pre.N),dtype=np.float)\n" % varname
        else:
            raise ValueError('Illegal shape: %s' % shape)
    code_str+="        connection._reset(self)\n"
    code_str+="""
    def __init__(self,neuron pre,neuron post,initial_weight_range=None,state=None):
        connection.__init__(self,pre,post,initial_weight_range,state)
"""
    code_str+="\n"
    for p in parameters:
        code_str+="        self.%s=%s\n" % (p,str(parameters[p]))
    code_str+="        self._reset()\n"
    code_str+="""
    @cython.cdivision(True)
    @cython.boundscheck(False) # turn of bounds-checking for entire function
    cpdef update(self,double t,simulation sim):
        cdef int __i,__j
"""
    code_str+="\n"
    # raw C views of the state arrays and scalar copies of the parameters
    for varname in variables:
        code_str+="        cdef double *%s=<double *>self.%s.data\n" % (varname,varname)
    for varname in parameters:
        code_str+="        cdef double %s=self.%s\n" % (varname,varname)
    code_str+="""
        cdef double *W=self.W
        cdef double *post_rate=<double *>self.post.rate.data
        cdef double *pre_rate=<double *>self.pre.rate.data
        cdef int *pre,*post # spikes for pre and post
        cdef int __wi
        pre=<int *>self.pre.spiking.data
        post=<int *>self.post.spiking.data
"""
    code_str+="\n"
    code_str+=indent(convert_connection_equations(D),8)
    code_str+="\n"
    code_str+=indent("self.apply_weight_limits()\n",8)
    return code_str
|
bblais/Plasticnet
|
splikes/utils.py
|
Python
|
mit
| 24,339
|
[
"NEURON"
] |
8d5e37636d15dd34eba996ee8ce8329681a988b5051607fd9f8eae71ba76a146
|
from __future__ import absolute_import, print_function
from . import common_info
from . import c_spec
#----------------------------------------------------------------------------
# The "standard" conversion classes
#----------------------------------------------------------------------------
# Converters are tried in list order when weave marshals a Python object
# into generated C/C++ code; the first matching converter wins.
default = [c_spec.int_converter(),
           c_spec.float_converter(),
           c_spec.complex_converter(),
           c_spec.unicode_converter(),
           c_spec.string_converter(),
           c_spec.list_converter(),
           c_spec.dict_converter(),
           c_spec.tuple_converter(),
           c_spec.file_converter(),
           c_spec.instance_converter(),]
#common_spec.module_converter()]
#----------------------------------------------------------------------------
# add numpy array converters to the default
# converter list.
#----------------------------------------------------------------------------
# Optional: only registered when the numpy support modules import cleanly.
try:
    from . import standard_array_spec
    default.append(standard_array_spec.array_converter())
except ImportError:
    pass
#----------------------------------------------------------------------------
# add numpy scalar converters to the default
# converter list.
#----------------------------------------------------------------------------
try:
    from . import numpy_scalar_spec
    default.append(numpy_scalar_spec.numpy_complex_scalar_converter())
except ImportError:
    pass
#----------------------------------------------------------------------------
# Add VTK support
#----------------------------------------------------------------------------
# The VTK converter gets highest priority (inserted at the front).
try:
    from . import vtk_spec
    default.insert(0,vtk_spec.vtk_converter())
# fix: a missing/broken VTK stack raises ImportError, which the original
# 'except IndexError' never caught -- that would abort the whole module
# import.  IndexError is kept for backward compatibility.
except (ImportError, IndexError):
    pass
#----------------------------------------------------------------------------
# Add "sentinal" catchall converter
#
# if everything else fails, this one is the last hope (it always works)
#----------------------------------------------------------------------------
default.append(c_spec.catchall_converter())
# build info shared by every generated extension module
standard_info = [common_info.basic_module_info()]
standard_info += [x.generate_build_info() for x in default]
#----------------------------------------------------------------------------
# Blitz conversion classes
#
# same as default, but will convert numpy arrays to blitz C++ classes
#----------------------------------------------------------------------------
try:
    from . import blitz_spec
    blitz = [blitz_spec.array_converter()] + default
    #-----------------------------------
    # Add "sentinal" catchall converter
    #
    # if everything else fails, this one
    # is the last hope (it always works)
    #-----------------------------------
    # NOTE(review): 'default' already ends with a catchall converter, so
    # 'blitz' gets a second one here -- harmless, but presumably unintended.
    blitz.append(c_spec.catchall_converter())
# fix: was a bare 'except:', which also swallowed SystemExit and
# KeyboardInterrupt; only a failed blitz setup should be ignored.
except Exception:
    pass
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/scipy/weave/converters.py
|
Python
|
agpl-3.0
| 2,746
|
[
"VTK"
] |
4c009f00ba8d1d46f8ffb097d79fc63ef7d62622d549f30769f62265b3d9f9d9
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Miscellaneous tools used by OpenERP.
"""
from functools import wraps
import cProfile
from contextlib import contextmanager
import subprocess
import logging
import os
import socket
import sys
import threading
import time
import werkzeug.utils
import zipfile
from collections import defaultdict, Mapping
from datetime import datetime
from itertools import islice, izip, groupby
from lxml import etree
from which import which
from threading import local
import traceback
try:
from html2text import html2text
except ImportError:
html2text = None
from config import config
from cache import *
from .parse_version import parse_version
import openerp
# get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are moved to loglevels until we refactor tools.
from openerp.loglevels import get_encodings, ustr, exception_to_unicode # noqa
# Module-wide logger for the tools.misc helpers below.
_logger = logging.getLogger(__name__)
# List of etree._Element subclasses that we choose to ignore when parsing XML.
# We include the *Base ones just in case, currently they seem to be subclasses of the _* ones.
SKIPPED_ELEMENT_TYPES = (etree._Comment, etree._ProcessingInstruction, etree.CommentBase, etree.PIBase)
def find_in_path(name):
    """Locate executable `name` on the system PATH (extended with the
    configured 'bin_path'); return its full path, or None if absent."""
    search = os.environ.get('PATH', os.defpath).split(os.pathsep)
    bin_path = config.get('bin_path')
    if bin_path and bin_path != 'None':
        search.append(bin_path)
    try:
        return which(name, path=os.pathsep.join(search))
    except IOError:
        return None
def find_pg_tool(name):
    """Locate a PostgreSQL client tool (pg_dump, psql, ...), honoring the
    configured 'pg_path'; return its full path, or None if absent."""
    path = None
    # fix: use .get() as find_in_path does for 'bin_path', so a missing
    # 'pg_path' key cannot raise KeyError
    if config.get('pg_path') and config['pg_path'] != 'None':
        path = config['pg_path']
    try:
        return which(name, path=path)
    except IOError:
        return None
def exec_pg_command(name, *args):
    """Run PostgreSQL tool `name` with `args`, discarding its output;
    return the subprocess exit code.  Raises if the tool is not found."""
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    args2 = (prog,) + args
    # fix: devnull must be opened for *writing* -- it is used as the child's
    # stdout/stderr; a read-only handle makes the child's writes fail
    with open(os.devnull, 'w') as dn:
        return subprocess.call(args2, stdout=dn, stderr=subprocess.STDOUT)
def exec_pg_command_pipe(name, *args):
    """Start PostgreSQL tool `name` with `args` and return its
    (stdin, stdout) pipe pair.  Raises if the tool is not found."""
    prog = find_pg_tool(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # on win32, passing close_fds=True is not compatible
    # with redirecting std[in/err/out]
    proc = subprocess.Popen(
        (prog,) + args,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        close_fds=(os.name == "posix"),
    )
    return proc.stdin, proc.stdout
def exec_command_pipe(name, *args):
    """Start executable `name` (searched on PATH) with `args` and return
    its (stdin, stdout) pipe pair.  Raises if the tool is not found."""
    prog = find_in_path(name)
    if not prog:
        raise Exception('Couldn\'t find %s' % name)
    # on win32, passing close_fds=True is not compatible
    # with redirecting std[in/err/out]
    proc = subprocess.Popen(
        (prog,) + args,
        bufsize=-1,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        close_fds=(os.name == "posix"),
    )
    return proc.stdin, proc.stdout
#----------------------------------------------------------
# File paths
#----------------------------------------------------------
#file_path_root = os.getcwd()
#file_path_addons = os.path.join(file_path_root, 'addons')
def file_open(name, mode="r", subdir='addons', pathinfo=False):
    """Open a file from the OpenERP root, using a subdir folder.
    Example::
    >>> file_open('hr/report/timesheer.xsl')
    >>> file_open('addons/hr/report/timesheet.xsl')
    >>> file_open('../../base/report/rml_template.xsl', subdir='addons/hr/report', pathinfo=True)
    @param name name of the file
    @param mode file open mode
    @param subdir subdirectory
    @param pathinfo if True returns tuple (fileobject, filepath)
    @return fileobject if pathinfo is False else (fileobject, filepath)
    """
    import openerp.modules as addons
    adps = addons.module.ad_paths   # configured addons directories
    rtp = os.path.normcase(os.path.abspath(config['root_path']))
    basename = name
    if os.path.isabs(name):
        # It is an absolute path
        # Is it below 'addons_path' or 'root_path'?
        name = os.path.normcase(os.path.normpath(name))
        for root in adps + [rtp]:
            root = os.path.normcase(os.path.normpath(root)) + os.sep
            if name.startswith(root):
                # make the path relative to the matching root and fall
                # through to the normal lookup below
                base = root.rstrip(os.sep)
                name = name[len(base) + 1:]
                break
        else:
            # It is outside the OpenERP root: skip zipfile lookup.
            base, name = os.path.split(name)
            return _fileopen(name, mode=mode, basedir=base, pathinfo=pathinfo, basename=basename)
    # an 'addons/...' prefix forces the addons search path
    if name.replace(os.sep, '/').startswith('addons/'):
        subdir = 'addons'
        name2 = name[7:]
    elif subdir:
        name = os.path.join(subdir, name)
        if name.replace(os.sep, '/').startswith('addons/'):
            subdir = 'addons'
            name2 = name[7:]
        else:
            name2 = name
    # First, try to locate in addons_path
    if subdir:
        for adp in adps:
            try:
                return _fileopen(name2, mode=mode, basedir=adp,
                                 pathinfo=pathinfo, basename=basename)
            except IOError:
                pass
    # Second, try to locate in root_path
    return _fileopen(name, mode=mode, basedir=rtp, pathinfo=pathinfo, basename=basename)
def _fileopen(path, mode, basedir, pathinfo, basename=None):
    """Open `basedir`/`path`, first as a plain file and then from a zipped
    module along the path; raise IOError when neither exists.

    :param pathinfo: when True, return (fileobject, filepath)
    :param basename: name reported in error messages (defaults to the
                     resolved path)
    """
    name = os.path.normpath(os.path.join(basedir, path))
    if basename is None:
        basename = name
    # Give higher priority to module directories, which is
    # a more common case than zipped modules.
    if os.path.isfile(name):
        fo = open(name, mode)
        if pathinfo:
            return fo, name
        return fo
    # Support for loading modules in zipped form.
    # This will not work for zipped modules that are sitting
    # outside of known addons paths.
    head = os.path.normpath(path)
    zipname = False
    while os.sep in head:
        # peel one component per iteration: head shrinks toward the root
        # while zipname accumulates the remainder to look up inside the zip
        head, tail = os.path.split(head)
        if not tail:
            break
        if zipname:
            zipname = os.path.join(tail, zipname)
        else:
            zipname = tail
        zpath = os.path.join(basedir, head + '.zip')
        if zipfile.is_zipfile(zpath):
            # Python 2 module; py3 would need io.BytesIO -- TODO confirm port
            from cStringIO import StringIO
            zfile = zipfile.ZipFile(zpath)
            try:
                fo = StringIO()
                fo.write(zfile.read(os.path.join(
                    os.path.basename(head), zipname).replace(
                        os.sep, '/')))
                fo.seek(0)
                if pathinfo:
                    return fo, name
                return fo
            except Exception:
                # entry missing from this zip; keep peeling components
                pass
    # Not found
    if name.endswith('.rml'):
        raise IOError('Report %r doesn\'t exist or deleted' % basename)
    raise IOError('File not found: %s' % basename)
#----------------------------------------------------------
# iterables
#----------------------------------------------------------
def flatten(list):
    """Flatten a list of elements into a unique list
    Author: Christophe Simonis (christophe@tinyerp.com)
    Examples::
    >>> flatten(['a'])
    ['a']
    >>> flatten('b')
    ['b']
    >>> flatten( [] )
    []
    >>> flatten( [[], [[]]] )
    []
    >>> flatten( [[['a','b'], 'c'], 'd', ['e', [], 'f']] )
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> t = (1,2,(3,), [4, 5, [6, [7], (8, 9), ([10, 11, (12, 13)]), [14, [], (15,)], []]])
    >>> flatten(t)
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
    """
    def isiterable(x):
        # fix: strings are explicitly excluded so flatten('b') -> ['b'] on
        # Python 3 too (Python 2 str/unicode had no __iter__, so this check
        # is a no-op there)
        return hasattr(x, "__iter__") and not isinstance(x, (str, bytes))
    r = []
    for e in list:
        if isiterable(e):
            # fix: eager extend -- the original map(r.append, ...) is lazy
            # on Python 3 and would append nothing
            r.extend(flatten(e))
        else:
            r.append(e)
    return r
def reverse_enumerate(l):
    """Like enumerate but in the other sens
    Usage::
    >>> a = ['a', 'b', 'c']
    >>> it = reverse_enumerate(a)
    >>> it.next()
    (2, 'c')
    >>> it.next()
    (1, 'b')
    >>> it.next()
    (0, 'a')
    >>> it.next()
    Traceback (most recent call last):
    File "<stdin>", line 1, in <module>
    StopIteration
    """
    # NOTE(review): izip/xrange are the Python 2 spellings (imported at the
    # top of this file); the result is a lazy iterator of (index, item)
    # pairs counting down from len(l)-1, matching the .next() doctest.
    return izip(xrange(len(l)-1, -1, -1), reversed(l))
def topological_sort(elems):
    """ Return a list of elements sorted so that their dependencies are listed
    before them in the result.
    :param elems: specifies the elements to sort with their dependencies; it is
    a dictionary like `{element: dependencies}` where `dependencies` is a
    collection of elements that must appear before `element`. The elements
    of `dependencies` are not required to appear in `elems`; they will
    simply not appear in the result.
    :returns: a list with the keys of `elems` sorted according to their
    specification.
    """
    # the algorithm is inspired by [Tarjan 1976],
    # http://en.wikipedia.org/wiki/Topological_sorting#Algorithms
    result = []
    visited = set()
    def visit(n):
        if n not in visited:
            visited.add(n)
            if n in elems:
                # first visit all dependencies of n, then append n to result
                # fix: eager for-loop -- map() is lazy on Python 3 and would
                # never perform the recursive visits
                for dep in elems[n]:
                    visit(dep)
                result.append(n)
    for n in elems:
        visit(n)
    return result
class UpdateableStr(local):
    """Thread-local holder for a string that can be swapped out after
    creation (used in wizards)."""
    def __init__(self, string=''):
        self.string = string
    def __str__(self):
        return str(self.string)
    # repr and str render identically
    __repr__ = __str__
    def __nonzero__(self):
        # Python 2 truth value: empty string -> False
        return bool(self.string)
class UpdateableDict(local):
    """Thread-local wrapper around a plain dict, updateable in place
    (used in wizards).  All mapping operations delegate to self.dict.
    """
    def __init__(self, dict=None):
        if dict is None:
            dict = {}
        self.dict = dict
    def __str__(self):
        return str(self.dict)
    def __repr__(self):
        return str(self.dict)
    def clear(self):
        return self.dict.clear()
    def keys(self):
        return self.dict.keys()
    def __setitem__(self, i, y):
        self.dict.__setitem__(i, y)
    def __getitem__(self, i):
        return self.dict.__getitem__(i)
    def copy(self):
        return self.dict.copy()
    def iteritems(self):
        # Python 2 only
        return self.dict.iteritems()
    def iterkeys(self):
        # Python 2 only
        return self.dict.iterkeys()
    def itervalues(self):
        # Python 2 only
        return self.dict.itervalues()
    def pop(self, k, d=None):
        return self.dict.pop(k, d)
    def popitem(self):
        return self.dict.popitem()
    def setdefault(self, k, d=None):
        return self.dict.setdefault(k, d)
    def update(self, E, **F):
        # fix: dict.update takes at most one positional argument -- passing
        # F positionally raised TypeError whenever keyword args were used
        return self.dict.update(E, **F)
    def values(self):
        return self.dict.values()
    def get(self, k, d=None):
        return self.dict.get(k, d)
    def has_key(self, k):
        # Python 2 only
        return self.dict.has_key(k)
    def items(self):
        return self.dict.items()
    def __cmp__(self, y):
        return self.dict.__cmp__(y)
    def __contains__(self, k):
        return self.dict.__contains__(k)
    def __delitem__(self, y):
        return self.dict.__delitem__(y)
    def __eq__(self, y):
        return self.dict.__eq__(y)
    def __ge__(self, y):
        return self.dict.__ge__(y)
    def __gt__(self, y):
        return self.dict.__gt__(y)
    def __hash__(self):
        return self.dict.__hash__()
    def __iter__(self):
        return self.dict.__iter__()
    def __le__(self, y):
        return self.dict.__le__(y)
    def __len__(self):
        return self.dict.__len__()
    def __lt__(self, y):
        return self.dict.__lt__(y)
    def __ne__(self, y):
        return self.dict.__ne__(y)
class currency(float):
    """ Deprecate
    .. warning::
    Don't use ! Use res.currency.round()

    A float rounded to `accuracy` decimals at construction; keeps the
    rounding step on the instance.
    """
    def __new__(cls, value, accuracy=2, rounding=None):
        return float.__new__(cls, round(value, accuracy))
    def __init__(self, value, accuracy=2, rounding=None):
        self.accuracy = accuracy
        self.rounding = 10 ** -accuracy if rounding is None else rounding
def to_xml(s):
    """Escape &, < and > in `s` for inclusion in XML text content
    (ampersand first, so entities are not double-escaped)."""
    for raw, escaped in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')):
        s = s.replace(raw, escaped)
    return s
def get_iso_codes(lang):
    """Collapse a 'xx_XX' locale code to its bare language code 'xx' when
    the territory merely repeats the language; otherwise return unchanged."""
    if '_' in lang:
        parts = lang.split('_')
        if parts[0] == parts[1].lower():
            return parts[0]
    return lang
ALL_LANGUAGES = {
'ab_RU': u'Abkhazian / аҧсуа',
'am_ET': u'Amharic / አምሃርኛ',
'ar_SY': u'Arabic / الْعَرَبيّة',
'bg_BG': u'Bulgarian / български език',
'bs_BA': u'Bosnian / bosanski jezik',
'ca_ES': u'Catalan / Català',
'cs_CZ': u'Czech / Čeština',
'da_DK': u'Danish / Dansk',
'de_DE': u'German / Deutsch',
'de_CH': u'German (CH) / Deutsch (CH)',
'el_GR': u'Greek / Ελληνικά',
'en_CA': u'English (CA)',
'en_GB': u'English (UK)',
'en_US': u'English (US)',
'es_AR': u'Spanish (AR) / Español (AR)',
'es_BO': u'Spanish (BO) / Español (BO)',
'es_CL': u'Spanish (CL) / Español (CL)',
'es_CO': u'Spanish (CO) / Español (CO)',
'es_CR': u'Spanish (CR) / Español (CR)',
'es_DO': u'Spanish (DO) / Español (DO)',
'es_EC': u'Spanish (EC) / Español (EC)',
'es_ES': u'Spanish / Español',
'es_GT': u'Spanish (GT) / Español (GT)',
'es_HN': u'Spanish (HN) / Español (HN)',
'es_MX': u'Spanish (MX) / Español (MX)',
'es_NI': u'Spanish (NI) / Español (NI)',
'es_PA': u'Spanish (PA) / Español (PA)',
'es_PE': u'Spanish (PE) / Español (PE)',
'es_PR': u'Spanish (PR) / Español (PR)',
'es_PY': u'Spanish (PY) / Español (PY)',
'es_SV': u'Spanish (SV) / Español (SV)',
'es_UY': u'Spanish (UY) / Español (UY)',
'es_VE': u'Spanish (VE) / Español (VE)',
'et_EE': u'Estonian / Eesti keel',
'fa_IR': u'Persian / فارس',
'fi_FI': u'Finnish / Suomi',
'fr_BE': u'French (BE) / Français (BE)',
'fr_CA': u'French (CA) / Français (CA)',
'fr_CH': u'French (CH) / Français (CH)',
'fr_FR': u'French / Français',
'gl_ES': u'Galician / Galego',
'gu_IN': u'Gujarati / ગુજરાતી',
'he_IL': u'Hebrew / עִבְרִי',
'hi_IN': u'Hindi / हिंदी',
'hr_HR': u'Croatian / hrvatski jezik',
'hu_HU': u'Hungarian / Magyar',
'id_ID': u'Indonesian / Bahasa Indonesia',
'it_IT': u'Italian / Italiano',
'iu_CA': u'Inuktitut / ᐃᓄᒃᑎᑐᑦ',
'ja_JP': u'Japanese / 日本語',
'ko_KP': u'Korean (KP) / 한국어 (KP)',
'ko_KR': u'Korean (KR) / 한국어 (KR)',
'lo_LA': u'Lao / ພາສາລາວ',
'lt_LT': u'Lithuanian / Lietuvių kalba',
'lv_LV': u'Latvian / latviešu valoda',
'mk_MK': u'Macedonian / македонски јазик',
'ml_IN': u'Malayalam / മലയാളം',
'mn_MN': u'Mongolian / монгол',
'nb_NO': u'Norwegian Bokmål / Norsk bokmål',
'nl_NL': u'Dutch / Nederlands',
'nl_BE': u'Flemish (BE) / Vlaams (BE)',
'oc_FR': u'Occitan (FR, post 1500) / Occitan',
'pl_PL': u'Polish / Język polski',
'pt_BR': u'Portuguese (BR) / Português (BR)',
'pt_PT': u'Portuguese / Português',
'ro_RO': u'Romanian / română',
'ru_RU': u'Russian / русский язык',
'si_LK': u'Sinhalese / සිංහල',
'sl_SI': u'Slovenian / slovenščina',
'sk_SK': u'Slovak / Slovenský jazyk',
'sq_AL': u'Albanian / Shqip',
'sr_RS': u'Serbian (Cyrillic) / српски',
'sr@latin': u'Serbian (Latin) / srpski',
'sv_SE': u'Swedish / svenska',
'te_IN': u'Telugu / తెలుగు',
'tr_TR': u'Turkish / Türkçe',
'vi_VN': u'Vietnamese / Tiếng Việt',
'uk_UA': u'Ukrainian / українська',
'ur_PK': u'Urdu / اردو',
'zh_CN': u'Chinese (CN) / 简体中文',
'zh_HK': u'Chinese (HK)',
'zh_TW': u'Chinese (TW) / 正體字',
'th_TH': u'Thai / ภาษาไทย',
'tlh_TLH': u'Klingon',
}
def scan_languages():
    """ Returns all languages supported by OpenERP for translation
    :returns: a list of (lang_code, lang_name) pairs, sorted by name
    :rtype: [(str, unicode)]
    """
    # fix: .items() works on both Python 2 and 3; .iteritems() is Py2-only
    return sorted(ALL_LANGUAGES.items(), key=lambda k: k[1])
def get_user_companies(cr, user):
    """Return the id of `user`'s company followed by the ids of all of its
    descendant companies (children, grandchildren, ...).

    :param cr: database cursor
    :param user: res_users id
    :return: list of res_company ids; empty list when the user has no company
    """
    def _get_company_children(cr, ids):
        # recursively collect companies whose parent_id is in ids
        if not ids:
            return []
        cr.execute('SELECT id FROM res_company WHERE parent_id IN %s', (tuple(ids),))
        res = [x[0] for x in cr.fetchall()]
        res.extend(_get_company_children(cr, res))
        return res
    cr.execute('SELECT company_id FROM res_users WHERE id=%s', (user,))
    user_comp = cr.fetchone()[0]
    if not user_comp:
        return []
    return [user_comp] + _get_company_children(cr, [user_comp])
def mod10r(number):
    """
    Input number : account or invoice number
    Output return: the same number completed with the recursive mod10
    key
    """
    codec = [0, 9, 4, 6, 8, 2, 7, 1, 3, 5]
    carry = 0
    out = []
    for ch in number:
        out.append(ch)
        if ch.isdigit():
            # non-digits are copied through but do not affect the checksum
            carry = codec[(int(ch) + carry) % 10]
    return ''.join(out) + str((10 - carry) % 10)
def human_size(sz):
    """
    Return the size in a human readable format

    :param sz: a byte count, or a string whose length is measured instead
        (Python 2 ``basestring``)
    :return: formatted size such as ``"1.50 Kb"``, or False for an
        empty/zero input
    """
    if not sz:
        return False
    units = ('bytes', 'Kb', 'Mb', 'Gb')
    if isinstance(sz, basestring):
        sz = len(sz)
    size = float(sz)
    unit_index = 0
    # Divide down by 1024 until the value fits the current unit
    # (or we run out of units).
    while size >= 1024 and unit_index < len(units) - 1:
        size /= 1024
        unit_index += 1
    return "%0.2f %s" % (size, units[unit_index])
def logged(f):
    """Decorator: log every call to ``f`` -- its arguments, its result and
    the wall-clock duration -- as a single DEBUG record on the module
    logger."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        from pprint import pformat
        # Accumulate the whole report, then emit it as one log record.
        vector = ['Call -> function: %r' % f]
        for i, arg in enumerate(args):
            vector.append(' arg %02d: %s' % (i, pformat(arg)))
        for key, value in kwargs.items():
            vector.append(' kwarg %10s: %s' % (key, pformat(value)))
        timeb4 = time.time()  # wall-clock, not CPU time
        res = f(*args, **kwargs)
        vector.append(' result: %s' % pformat(res))
        vector.append(' time delta: %s' % (time.time() - timeb4))
        _logger.debug('\n'.join(vector))
        return res
    return wrapper
class profile(object):
    """Decorator profiling each call of the wrapped function with cProfile
    and dumping the collected stats to ``fname`` (defaulting to
    ``<function name>.cprof`` in the current directory)."""
    def __init__(self, fname=None):
        # fname: destination file for the profile stats; None derives the
        # name from the decorated function.
        self.fname = fname
    def __call__(self, f):
        @wraps(f)
        def wrapper(*args, **kwargs):
            profile = cProfile.Profile()
            result = profile.runcall(f, *args, **kwargs)
            # NOTE: f.func_name is the Python 2 spelling of f.__name__.
            profile.dump_stats(self.fname or ("%s.cprof" % (f.func_name,)))
            return result
        return wrapper
# Names of the icons shipped with the (GTK-era) client: the standard GTK
# stock icons plus the OpenERP-specific 'terp-*' set. Exposed to selection
# fields through icons() below.
__icons_list = ['STOCK_ABOUT', 'STOCK_ADD', 'STOCK_APPLY', 'STOCK_BOLD',
'STOCK_CANCEL', 'STOCK_CDROM', 'STOCK_CLEAR', 'STOCK_CLOSE', 'STOCK_COLOR_PICKER',
'STOCK_CONNECT', 'STOCK_CONVERT', 'STOCK_COPY', 'STOCK_CUT', 'STOCK_DELETE',
'STOCK_DIALOG_AUTHENTICATION', 'STOCK_DIALOG_ERROR', 'STOCK_DIALOG_INFO',
'STOCK_DIALOG_QUESTION', 'STOCK_DIALOG_WARNING', 'STOCK_DIRECTORY', 'STOCK_DISCONNECT',
'STOCK_DND', 'STOCK_DND_MULTIPLE', 'STOCK_EDIT', 'STOCK_EXECUTE', 'STOCK_FILE',
'STOCK_FIND', 'STOCK_FIND_AND_REPLACE', 'STOCK_FLOPPY', 'STOCK_GOTO_BOTTOM',
'STOCK_GOTO_FIRST', 'STOCK_GOTO_LAST', 'STOCK_GOTO_TOP', 'STOCK_GO_BACK',
'STOCK_GO_DOWN', 'STOCK_GO_FORWARD', 'STOCK_GO_UP', 'STOCK_HARDDISK',
'STOCK_HELP', 'STOCK_HOME', 'STOCK_INDENT', 'STOCK_INDEX', 'STOCK_ITALIC',
'STOCK_JUMP_TO', 'STOCK_JUSTIFY_CENTER', 'STOCK_JUSTIFY_FILL',
'STOCK_JUSTIFY_LEFT', 'STOCK_JUSTIFY_RIGHT', 'STOCK_MEDIA_FORWARD',
'STOCK_MEDIA_NEXT', 'STOCK_MEDIA_PAUSE', 'STOCK_MEDIA_PLAY',
'STOCK_MEDIA_PREVIOUS', 'STOCK_MEDIA_RECORD', 'STOCK_MEDIA_REWIND',
'STOCK_MEDIA_STOP', 'STOCK_MISSING_IMAGE', 'STOCK_NETWORK', 'STOCK_NEW',
'STOCK_NO', 'STOCK_OK', 'STOCK_OPEN', 'STOCK_PASTE', 'STOCK_PREFERENCES',
'STOCK_PRINT', 'STOCK_PRINT_PREVIEW', 'STOCK_PROPERTIES', 'STOCK_QUIT',
'STOCK_REDO', 'STOCK_REFRESH', 'STOCK_REMOVE', 'STOCK_REVERT_TO_SAVED',
'STOCK_SAVE', 'STOCK_SAVE_AS', 'STOCK_SELECT_COLOR', 'STOCK_SELECT_FONT',
'STOCK_SORT_ASCENDING', 'STOCK_SORT_DESCENDING', 'STOCK_SPELL_CHECK',
'STOCK_STOP', 'STOCK_STRIKETHROUGH', 'STOCK_UNDELETE', 'STOCK_UNDERLINE',
'STOCK_UNDO', 'STOCK_UNINDENT', 'STOCK_YES', 'STOCK_ZOOM_100',
'STOCK_ZOOM_FIT', 'STOCK_ZOOM_IN', 'STOCK_ZOOM_OUT',
'terp-account', 'terp-crm', 'terp-mrp', 'terp-product', 'terp-purchase',
'terp-sale', 'terp-tools', 'terp-administration', 'terp-hr', 'terp-partner',
'terp-project', 'terp-report', 'terp-stock', 'terp-calendar', 'terp-graph',
'terp-check','terp-go-month','terp-go-year','terp-go-today','terp-document-new','terp-camera_test',
'terp-emblem-important','terp-gtk-media-pause','terp-gtk-stop','terp-gnome-cpu-frequency-applet+',
'terp-dialog-close','terp-gtk-jump-to-rtl','terp-gtk-jump-to-ltr','terp-accessories-archiver',
'terp-stock_align_left_24','terp-stock_effects-object-colorize','terp-go-home','terp-gtk-go-back-rtl',
'terp-gtk-go-back-ltr','terp-personal','terp-personal-','terp-personal+','terp-accessories-archiver-minus',
'terp-accessories-archiver+','terp-stock_symbol-selection','terp-call-start','terp-dolar',
'terp-face-plain','terp-folder-blue','terp-folder-green','terp-folder-orange','terp-folder-yellow',
'terp-gdu-smart-failing','terp-go-week','terp-gtk-select-all','terp-locked','terp-mail-forward',
'terp-mail-message-new','terp-mail-replied','terp-rating-rated','terp-stage','terp-stock_format-scientific',
'terp-dolar_ok!','terp-idea','terp-stock_format-default','terp-mail-','terp-mail_delete'
]
def icons(*a, **kw):
    """Return the known icon names as (value, label) pairs; any arguments
    are accepted and ignored (selection-field callback signature)."""
    global __icons_list
    return [(name, name) for name in __icons_list]
def detect_ip_addr():
    """Try a very crude method to figure out a valid external
    IP or hostname for the current machine. Don't rely on this
    for binding to an interface, but it could be used as basis
    for constructing a remote URL to the server.

    :return: an IPv4 address string, or 'localhost' if detection fails.
    """
    def _detect_ip_addr():
        from array import array
        from struct import pack, unpack
        try:
            import fcntl
        except ImportError:
            # fcntl is POSIX-only; its absence means we are not on UNIX.
            fcntl = None
        ip_addr = None
        if not fcntl:  # not UNIX:
            # Fall back on resolving our own hostname.
            host = socket.gethostname()
            ip_addr = socket.gethostbyname(host)
        else:  # UNIX:
            # get all interfaces:
            nbytes = 128 * 32
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            # NOTE: Python 2 byte-string handling (array('B', str),
            # tostring(), slicing str as bytes) throughout this branch.
            names = array('B', '\0' * nbytes)
            # ioctl 0x8912 (SIOCGIFCONF): fill `names` with the interface
            # list; the unpacked first field is the byte count written.
            outbytes = unpack('iL', fcntl.ioctl( s.fileno(), 0x8912, pack('iL', nbytes, names.buffer_info()[0])))[0]
            namestr = names.tostring()
            # try 64 bit kernel (40-byte ifreq records):
            for i in range(0, outbytes, 40):
                name = namestr[i:i+16].split('\0', 1)[0]
                if name != 'lo':
                    # skip loopback; the IPv4 address follows the name
                    ip_addr = socket.inet_ntoa(namestr[i+20:i+24])
                    break
            # try 32 bit kernel (32-byte ifreq records):
            if ip_addr is None:
                ifaces = filter(None, [namestr[i:i+32].split('\0', 1)[0] for i in range(0, outbytes, 32)])
                for ifname in [iface for iface in ifaces if iface != 'lo']:
                    # ioctl 0x8915 (SIOCGIFADDR): query one interface's address.
                    ip_addr = socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, pack('256s', ifname[:15]))[20:24])
                    break
        return ip_addr or 'localhost'
    try:
        ip_addr = _detect_ip_addr()
    except Exception:
        # Any failure degrades gracefully to the loopback name.
        ip_addr = 'localhost'
    return ip_addr
# RATIONALE BEHIND TIMESTAMP CALCULATIONS AND TIMEZONE MANAGEMENT:
# The server side never does any timestamp calculation, always
# sends them in a naive (timezone agnostic) format supposed to be
# expressed within the server timezone, and expects the clients to
# provide timestamps in the server timezone as well.
# It stores all timestamps in the database in naive format as well,
# which also expresses the time in the server timezone.
# For this reason the server makes its timezone name available via the
# common/timezone_get() rpc method, which clients need to read
# to know the appropriate time offset to use when reading/writing
# times.
def get_win32_timezone():
    """Attempt to return the "standard name" of the current timezone on a win32 system.
    @return the standard name of the current win32 timezone, or False if it cannot be found.
    """
    res = False
    if sys.platform == "win32":
        try:
            # _winreg is the Python 2 name (winreg on Python 3).
            import _winreg
            hklm = _winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
            current_tz_key = _winreg.OpenKey(hklm, r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation", 0,_winreg.KEY_ALL_ACCESS)
            res = str(_winreg.QueryValueEx(current_tz_key,"StandardName")[0]) # [0] is value, [1] is type code
            _winreg.CloseKey(current_tz_key)
            _winreg.CloseKey(hklm)
        except Exception:
            # Best effort: an unreadable registry simply means "unknown".
            pass
    return res
def detect_server_timezone():
    """Attempt to detect the timezone to use on the server side.
    Defaults to UTC if no working timezone can be found.
    @return the timezone identifier as expected by pytz.timezone.
    """
    try:
        import pytz
    except Exception:
        _logger.warning("Python pytz module is not available. "
            "Timezone will be set to UTC by default.")
        return 'UTC'
    # Option 1: the configuration option (did not exist before, so no backwards compatibility issue)
    # Option 2: to be backwards compatible with 5.0 or earlier, the value from time.tzname[0], but only if it is known to pytz
    # Option 3: the environment variable TZ
    sources = [ (config['timezone'], 'OpenERP configuration'),
                (time.tzname[0], 'time.tzname'),
                (os.environ.get('TZ',False),'TZ environment variable'), ]
    # Option 4: OS-specific: /etc/timezone on Unix
    if os.path.exists("/etc/timezone"):
        tz_value = False
        f = None
        try:
            f = open("/etc/timezone")
            tz_value = f.read(128).strip()
        except Exception:
            pass
        finally:
            # BUGFIX: guard the close -- the previous code called f.close()
            # unconditionally, raising NameError when open() itself failed
            # (f was then unbound in the finally block).
            if f is not None:
                f.close()
        sources.append((tz_value,"/etc/timezone file"))
    # Option 5: timezone info from registry on Win32
    if sys.platform == "win32":
        # Timezone info is stored in windows registry.
        # However this is not likely to work very well as the standard name
        # of timezones in windows is rarely something that is known to pytz.
        # But that's ok, it is always possible to use a config option to set
        # it explicitly.
        sources.append((get_win32_timezone(),"Windows Registry"))
    # Return the first candidate pytz actually recognizes.
    for (value,source) in sources:
        if value:
            try:
                tz = pytz.timezone(value)
                _logger.info("Using timezone %s obtained from %s.", tz.zone, source)
                return value
            except pytz.UnknownTimeZoneError:
                _logger.warning("The timezone specified in %s (%s) is invalid, ignoring it.", source, value)
    _logger.warning("No valid timezone could be detected, using default UTC "
        "timezone. You can specify it explicitly with option 'timezone' in "
        "the server configuration.")
    return 'UTC'
def get_server_timezone():
    """The server stores and exchanges naive timestamps expressed in UTC."""
    return "UTC"
# Canonical formats for all date/time values exchanged with clients and
# stored in the database (naive strings, expressed in the server timezone).
DEFAULT_SERVER_DATE_FORMAT = "%Y-%m-%d"
DEFAULT_SERVER_TIME_FORMAT = "%H:%M:%S"
DEFAULT_SERVER_DATETIME_FORMAT = "%s %s" % (
    DEFAULT_SERVER_DATE_FORMAT,
    DEFAULT_SERVER_TIME_FORMAT)
# Python's strftime supports only the format directives
# that are available on the platform's libc, so in order to
# be cross-platform we map to the directives required by
# the C standard (1989 version), always available on platforms
# with a C standard implementation.
# Directives mapping to '' are stripped from format strings entirely.
DATETIME_FORMATS_MAP = {
        '%C': '', # century
        '%D': '%m/%d/%Y', # modified %y->%Y
        '%e': '%d',
        '%E': '', # special modifier
        '%F': '%Y-%m-%d',
        '%g': '%Y', # modified %y->%Y
        '%G': '%Y',
        '%h': '%b',
        '%k': '%H',
        '%l': '%I',
        '%n': '\n',
        '%O': '', # special modifier
        '%P': '%p',
        '%R': '%H:%M',
        '%r': '%I:%M:%S %p',
        '%s': '', #num of seconds since epoch
        '%T': '%H:%M:%S',
        '%t': ' ', # tab
        '%u': ' %w',
        '%V': '%W',
        '%y': '%Y', # Even if %y works, it's ambiguous, so we should use %Y
        '%+': '%Y-%m-%d %H:%M:%S',

        # %Z is a special case that causes 2 problems at least:
        #  - the timezone names we use (in res_user.context_tz) come
        #    from pytz, but not all these names are recognized by
        #    strptime(), so we cannot convert in both directions
        #    when such a timezone is selected and %Z is in the format
        #  - %Z is replaced by an empty string in strftime() when
        #    there is not tzinfo in a datetime value (e.g when the user
        #    did not pick a context_tz). The resulting string does not
        #    parse back if the format requires %Z.
        # As a consequence, we strip it completely from format strings.
        # The user can always have a look at the context_tz in
        # preferences to check the timezone.
        '%z': '',
        '%Z': '',
}
# Mapping from single-letter strftime directives to their LDML (CLDR date
# format pattern) equivalents; used by posix_to_ldml() below. Directives
# without an entry here are either handled specially (%%, %x, %X) or raise
# a KeyError on conversion.
POSIX_TO_LDML = {
    'a': 'E',
    'A': 'EEEE',
    'b': 'MMM',
    'B': 'MMMM',
    #'c': '',
    'd': 'dd',
    'H': 'HH',
    'I': 'hh',
    'j': 'DDD',
    'm': 'MM',
    'M': 'mm',
    'p': 'a',
    'S': 'ss',
    'U': 'w',
    'w': 'e',
    'W': 'w',
    'y': 'yy',
    'Y': 'yyyy',
    # see comments above, and babel's format_datetime assumes an UTC timezone
    # for naive datetime objects
    #'z': 'Z',
    #'Z': 'z',
}
def posix_to_ldml(fmt, locale):
    """ Converts a posix/strftime pattern into an LDML date format pattern.

    :param fmt: non-extended C89/C90 strftime pattern
    :param locale: babel locale used for locale-specific conversions (e.g. %x and %X)
    :return: unicode
    """
    out = []
    in_directive = False
    literal_run = []
    for ch in fmt:
        # LDML patterns treat letters as fields, so literal letters must be
        # quoted (and a literal quote is doubled).
        if not in_directive and ch.isalpha():
            literal_run.append(ch if ch != "'" else "''")
            continue
        if literal_run:
            # flush the pending quoted literal before handling this char
            out.append("'")
            out.append(''.join(literal_run))
            out.append("'")
            literal_run = []
        if in_directive:
            if ch == '%':  # escaped percent
                out.append('%')
            elif ch == 'x':  # date format, short seems to match
                out.append(locale.date_formats['short'].pattern)
            elif ch == 'X':  # time format, seems to include seconds. short does not
                out.append(locale.time_formats['medium'].pattern)
            else:
                # look up format char in static mapping (KeyError if unknown)
                out.append(POSIX_TO_LDML[ch])
            in_directive = False
        elif ch == '%':
            in_directive = True
        else:
            out.append(ch)
    # flush anything remaining in quoted buffer
    if literal_run:
        out.append("'")
        out.append(''.join(literal_run))
        out.append("'")
    return ''.join(out)
def server_to_local_timestamp(src_tstamp_str, src_format, dst_format, dst_tz_name,
        tz_offset=True, ignore_unparsable_time=True):
    """
    Convert a source timestamp string into a destination timestamp string, attempting to apply the
    correct offset if both the server and local timezone are recognized, or no
    offset at all if they aren't or if tz_offset is false (i.e. assuming they are both in the same TZ).

    WARNING: This method is here to allow formatting dates correctly for inclusion in strings where
             the client would not be able to format/offset it correctly. DO NOT use it for returning
             date fields directly, these are supposed to be handled by the client!!

    @param src_tstamp_str: the str value containing the timestamp in the server timezone.
    @param src_format: the format to use when parsing the server timestamp.
    @param dst_format: the format to use when formatting the resulting timestamp for the local/client timezone.
    @param dst_tz_name: name of the destination timezone (such as the 'tz' value of the client context)
    @param ignore_unparsable_time: if True, return False if src_tstamp_str cannot be parsed
                                   using src_format or formatted using dst_format.
    @return local/client formatted timestamp, expressed in the local/client timezone if possible
            and if tz_offset is true, or src_tstamp_str if timezone offset could not be determined.
    """
    if not src_tstamp_str:
        return False
    res = src_tstamp_str
    if src_format and dst_format:
        # find out server timezone
        server_tz = get_server_timezone()
        try:
            # dt_value needs to be a datetime.datetime object (so no time.struct_time or mx.DateTime.DateTime here!)
            dt_value = datetime.strptime(src_tstamp_str, src_format)
            if tz_offset and dst_tz_name:
                try:
                    import pytz
                    src_tz = pytz.timezone(server_tz)
                    dst_tz = pytz.timezone(dst_tz_name)
                    # is_dst=True resolves ambiguous local times to the DST side
                    src_dt = src_tz.localize(dt_value, is_dst=True)
                    dt_value = src_dt.astimezone(dst_tz)
                except Exception:
                    # Missing pytz or unknown tz name: silently skip the offset.
                    pass
            res = dt_value.strftime(dst_format)
        except Exception:
            # Normal ways to end up here are if strptime or strftime failed
            if not ignore_unparsable_time:
                return False
    return res
def split_every(n, iterable, piece_maker=tuple):
    """Splits an iterable into length-n pieces. The last piece will be shorter
    if ``n`` does not evenly divide the iterable length.

    @param ``piece_maker``: function to build the pieces
                            from the slices (tuple,list,...)
    """
    iterator = iter(iterable)
    while True:
        chunk = piece_maker(islice(iterator, n))
        # A falsy piece (e.g. empty tuple/list) marks exhaustion.
        if not chunk:
            break
        yield chunk
if __name__ == '__main__':
    # Running this module directly executes its embedded doctests.
    import doctest
    doctest.testmod()
class upload_data_thread(threading.Thread):
    """Background thread POSTing survey/usage data to openerp.com.
    Strictly best-effort: any failure (network, server, ...) is swallowed."""
    def __init__(self, email, data, type):
        # Form fields sent as an urlencoded POST body.
        self.args = [('email',email),('type',type),('data',data)]
        super(upload_data_thread,self).__init__()
    def run(self):
        try:
            # Python 2 stdlib API: urllib.urlencode / urllib.urlopen.
            import urllib
            args = urllib.urlencode(self.args)
            fp = urllib.urlopen('http://www.openerp.com/scripts/survey.php', args)
            fp.read()
            fp.close()
        except Exception:
            # Deliberately silent: reporting must never break the server.
            pass
def upload_data(email, data, type='SURVEY'):
    """Fire-and-forget upload of ``data`` in a background thread.
    Always returns True immediately, without waiting for the upload."""
    uploader = upload_data_thread(email, data, type)
    uploader.start()
    return True
def get_and_group_by_field(cr, uid, obj, ids, field, context=None):
    """ Read the values of ``field`` for the given ``ids`` and group ids by value.

    :param string field: name of the field we want to read and group by
    :return: mapping of field values to the list of ids that have it
    :rtype: dict
    """
    grouped = {}
    for record in obj.read(cr, uid, ids, [field], context=context):
        value = record[field]
        # many2one fields read back as (id, display_name): group on the id.
        if isinstance(value, tuple):
            value = value[0]
        grouped.setdefault(value, []).append(record['id'])
    return grouped
def get_and_group_by_company(cr, uid, obj, ids, context=None):
    # Convenience wrapper: group the given record ids by their company_id.
    return get_and_group_by_field(cr, uid, obj, ids, field='company_id', context=context)
# port of python 2.6's attrgetter with support for dotted notation
def resolve_attr(obj, attr):
for name in attr.split("."):
obj = getattr(obj, name)
return obj
def attrgetter(*items):
    """Port of Python 2.6's operator.attrgetter with dotted-path support.
    With one name the getter returns that value; with several names it
    returns a tuple of values, in order."""
    if len(items) == 1:
        single = items[0]
        def g(obj):
            return resolve_attr(obj, single)
    else:
        def g(obj):
            return tuple(resolve_attr(obj, name) for name in items)
    return g
class unquote(str):
    """A ``str`` subclass whose ``repr()`` is the string itself -- no
    surrounding quotes, no escaping. (The name comes from Lisp's unquote.)

    One of the uses for this is to preserve or insert bare variable names
    within dicts during eval() of a dict's repr(). Use with care.

    Some examples (notice that there are never quotes surrounding
    the ``active_id`` name:

    >>> unquote('active_id')
    active_id
    >>> d = {'test': unquote('active_id')}
    >>> d
    {'test': active_id}
    """
    def __repr__(self):
        # Return the raw string; callers embedding this in a container's
        # repr() therefore see the bare name.
        return self
class UnquoteEvalContext(defaultdict):
    """Defaultdict-based evaluation context that returns
    an ``unquote`` string for any missing name used during
    the evaluation.

    Mostly useful for evaluating OpenERP domains/contexts that
    may refer to names that are unknown at the time of eval,
    so that when the context/domain is converted back to a string,
    the original names are preserved.

    **Warning**: using an ``UnquoteEvalContext`` as context for ``eval()`` or
    ``safe_eval()`` will shadow the builtins, which may cause other
    failures, depending on what is evaluated.

    Example (notice that ``section_id`` is preserved in the final
    result) :

    >>> context_str = "{'default_user_id': uid, 'default_section_id': section_id}"
    >>> eval(context_str, UnquoteEvalContext(uid=1))
    {'default_user_id': 1, 'default_section_id': section_id}
    """
    def __init__(self, *args, **kwargs):
        # default_factory is None: missing keys go through __missing__ below.
        super(UnquoteEvalContext, self).__init__(None, *args, **kwargs)
    def __missing__(self, key):
        # Unknown names evaluate to themselves, as bare (unquoted) names.
        return unquote(key)
class mute_logger(object):
    """Temporarily suppress logging on the given logger names.

    Can be used as context manager or decorator::

        @mute_logger('openerp.plic.ploc')
        def do_stuff():
            blahblah()

        with mute_logger('openerp.foo.bar'):
            do_suff()
    """
    def __init__(self, *loggers):
        # loggers: names (strings) of the loggers to silence.
        self.loggers = loggers
    def filter(self, record):
        # logging.Filter protocol: returning 0 drops every record, so
        # installing *self* as a filter mutes the logger entirely.
        return 0
    def __enter__(self):
        for logger in self.loggers:
            # Python 2 basestring check: both str and unicode names accepted.
            assert isinstance(logger, basestring),\
                "A logger name must be a string, got %s" % type(logger)
            logging.getLogger(logger).addFilter(self)
    def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
        for logger in self.loggers:
            logging.getLogger(logger).removeFilter(self)
    def __call__(self, func):
        @wraps(func)
        def deco(*args, **kwargs):
            # Mute for the whole duration of the wrapped call.
            with self:
                return func(*args, **kwargs)
        return deco
_ph = object()
class CountingStream(object):
""" Stream wrapper counting the number of element it has yielded. Similar
role to ``enumerate``, but for use when the iteration process of the stream
isn't fully under caller control (the stream can be iterated from multiple
points including within a library)
``start`` allows overriding the starting index (the index before the first
item is returned).
On each iteration (call to :meth:`~.next`), increases its :attr:`~.index`
by one.
.. attribute:: index
``int``, index of the last yielded element in the stream. If the stream
has ended, will give an index 1-past the stream
"""
def __init__(self, stream, start=-1):
self.stream = iter(stream)
self.index = start
self.stopped = False
def __iter__(self):
return self
def next(self):
if self.stopped: raise StopIteration()
self.index += 1
val = next(self.stream, _ph)
if val is _ph:
self.stopped = True
raise StopIteration()
return val
def stripped_sys_argv(*strip_args):
    """Return sys.argv with some arguments stripped, suitable for reexecution or subprocesses"""
    # Always strip the one-shot options on top of whatever the caller asked for.
    strip_args = sorted(set(strip_args) | set(['-s', '--save', '-d', '--database', '-u', '--update', '-i', '--init']))
    assert all(config.parser.has_option(s) for s in strip_args)
    takes_value = dict((s, config.parser.get_option(s).takes_value()) for s in strip_args)
    # sorted() places every '--long' option before the '-short' ones, so
    # groupby() yields exactly two groups, in that order.
    longs, shorts = list(tuple(y) for _, y in groupby(strip_args, lambda x: x.startswith('--')))
    longs_eq = tuple(l + '=' for l in longs if takes_value[l])
    args = sys.argv[:]
    def strip(args, i):
        # Drop an argv entry when it is a stripped option itself
        # ('-d', '-dmydb', '--database', '--database=mydb') or the separate
        # value of the preceding stripped option ('--database mydb').
        return args[i].startswith(shorts) \
            or args[i].startswith(longs_eq) or (args[i] in longs) \
            or (i >= 1 and (args[i - 1] in strip_args) and takes_value[args[i - 1]])
    return [x for i, x in enumerate(args) if not strip(args, i)]
class ConstantMapping(Mapping):
    """
    An immutable mapping returning the provided value for every single key.

    Useful for default value to methods
    """
    __slots__ = ['_value']
    def __init__(self, val):
        # The one constant value returned for every key.
        self._value = val
    def __len__(self):
        # Unlike defaultdict, looking keys up never "grows" this mapping,
        # so it consistently reports itself empty.
        return 0
    def __iter__(self):
        # No keys are ever materialized; iteration yields nothing.
        return iter(())
    def __getitem__(self, item):
        # Any key whatsoever maps to the constant value.
        return self._value
def dumpstacks(sig=None, frame=None):
    """ Signal handler: dump a stack trace for each existing thread.

    The (sig, frame) parameters match the signal-handler signature but are
    not used; the combined dump is emitted as one INFO log record.
    """
    code = []
    def extract_stack(stack):
        # Render one frame stack in the familiar traceback format.
        for filename, lineno, name, line in traceback.extract_stack(stack):
            yield 'File: "%s", line %d, in %s' % (filename, lineno, name)
            if line:
                yield " %s" % (line.strip(),)
    # code from http://stackoverflow.com/questions/132058/getting-stack-trace-from-a-running-python-application#answer-2569696
    # modified for python 2.5 compatibility
    threads_info = dict([(th.ident, {'name': th.name, 'uid': getattr(th, 'uid', 'n/a')})
                        for th in threading.enumerate()])
    for threadId, stack in sys._current_frames().items():
        thread_info = threads_info.get(threadId)
        code.append("\n# Thread: %s (id:%s) (uid:%s)" %
                    (thread_info and thread_info['name'] or 'n/a',
                     threadId,
                     thread_info and thread_info['uid'] or 'n/a'))
        for line in extract_stack(stack):
            code.append(line)
    if openerp.evented:
        # code from http://stackoverflow.com/questions/12510648/in-gevent-how-can-i-dump-stack-traces-of-all-running-greenlets
        import gc
        from greenlet import greenlet
        # Greenlets are not OS threads, so they must be found via the GC.
        for ob in gc.get_objects():
            if not isinstance(ob, greenlet) or not ob:
                continue
            code.append("\n# Greenlet: %r" % (ob,))
            for line in extract_stack(ob.gr_frame):
                code.append(line)
    _logger.info("\n".join(code))
class frozendict(dict):
    """ An implementation of an immutable dictionary.

    Read access behaves exactly like ``dict``; every mutating method is
    overridden to raise :class:`NotImplementedError` instead.
    """
    def clear(self):
        raise NotImplementedError("'clear' not supported on frozendict")
    def pop(self, key, default=None):
        raise NotImplementedError("'pop' not supported on frozendict")
    def popitem(self):
        raise NotImplementedError("'popitem' not supported on frozendict")
    def setdefault(self, key, default=None):
        raise NotImplementedError("'setdefault' not supported on frozendict")
    def update(self, *args, **kwargs):
        raise NotImplementedError("'update' not supported on frozendict")
    def __setitem__(self, key, val):
        raise NotImplementedError("'__setitem__' not supported on frozendict")
    def __delitem__(self, key):
        raise NotImplementedError("'__delitem__' not supported on frozendict")
@contextmanager
def ignore(*exc):
    """Context manager silently swallowing the given exception classes,
    e.g. ``with ignore(OSError): os.remove(path)`` (predates
    contextlib.suppress)."""
    try:
        yield
    except exc:
        pass
# Avoid DeprecationWarning while still remaining compatible with werkzeug pre-0.9
if parse_version(getattr(werkzeug, '__version__', '0.0')) < parse_version('0.9.0'):
    def html_escape(text):
        # werkzeug < 0.9 only escapes quotes when asked explicitly.
        return werkzeug.utils.escape(text, quote=True)
else:
    def html_escape(text):
        # werkzeug >= 0.9 escapes quotes by default (and the kwarg warns).
        return werkzeug.utils.escape(text)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
BT-astauder/odoo
|
openerp/tools/misc.py
|
Python
|
agpl-3.0
| 45,247
|
[
"VisIt"
] |
ef3b625c955b7f1d6b1044459855fd0c93fbfdec5430e876173d0692f58b907d
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import Qt
from customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.view.controls import editors
from camelot.core.utils import ugettext as _, variant_to_pyobject
from camelot.view.proxy import ValueLoading
class BoolDelegate(CustomDelegate):
    """Custom delegate for boolean values"""
    __metaclass__ = DocumentationMetaclass
    # Editor widget instantiated when the cell enters edit mode.
    editor = editors.BoolEditor
    def paint(self, painter, option, index):
        # Render the cell as a native (styled) checkbox reflecting the value.
        painter.save()
        self.drawBackground(painter, option, index)
        checked = index.model().data(index, Qt.EditRole).toBool()
        background_color = QtGui.QColor(index.model().data(index, Qt.BackgroundRole))
        check_option = QtGui.QStyleOptionButton()
        rect = QtCore.QRect(option.rect.left(),
                            option.rect.top(),
                            option.rect.width(),
                            option.rect.height())
        check_option.rect = rect
        check_option.palette = option.palette
        # Background: selection highlight wins, then the read-only window
        # color, then the model-provided background.
        if (option.state & QtGui.QStyle.State_Selected):
            painter.fillRect(option.rect, option.palette.highlight())
        elif not self.editable:
            painter.fillRect(option.rect, option.palette.window())
        else:
            painter.fillRect(option.rect, background_color)
        if checked:
            check_option.state = option.state | QtGui.QStyle.State_On
        else:
            check_option.state = option.state | QtGui.QStyle.State_Off
        QtGui.QApplication.style().drawControl(QtGui.QStyle.CE_CheckBox,
                                               check_option,
                                               painter)
        painter.restore()
class TextBoolDelegate(CustomDelegate):
    """Delegate rendering a boolean as configurable yes/no text, with an
    optional font color per truth value."""
    editor = editors.TextBoolEditor
    def __init__(self, parent=None, editable=True, yes='Yes', no='No', color_yes=None, color_no=None, **kwargs):
        """
        :param yes: text displayed for True values
        :param no: text displayed for False values
        :param color_yes: optional color applied to the text of True values
        :param color_no: optional color applied to the text of False values
        """
        CustomDelegate.__init__(self, parent, editable, **kwargs)
        self.yes = yes
        self.no = no
        self.color_no = color_no
        self.color_yes = color_yes
    def paint(self, painter, option, index):
        painter.save()
        self.drawBackground(painter, option, index)
        field_attributes = variant_to_pyobject(index.data(Qt.UserRole))
        editable, background_color = True, None
        if field_attributes != ValueLoading:
            editable = field_attributes.get( 'editable', True )
            background_color = field_attributes.get( 'background_color', None )
        rect = option.rect
        value = index.model().data(index, Qt.EditRole).toBool()
        font_color = QtGui.QColor()
        # Pick the text and the (optional) color matching the value.
        # BUGFIX: ``color`` used to be read unconditionally below, raising
        # NameError whenever no color was configured for the current value
        # (which is the default, as color_yes/color_no default to None).
        if value:
            text = self.yes
            color = self.color_yes
        else:
            text = self.no
            color = self.color_no
        if color:
            font_color.setRgb(color.red(), color.green(), color.blue())
        if( option.state & QtGui.QStyle.State_Selected ):
            painter.fillRect(option.rect, option.palette.highlight())
        else:
            if editable:
                painter.fillRect(option.rect, background_color or option.palette.base())
            else:
                painter.fillRect(option.rect, background_color or option.palette.window())
        painter.setPen(font_color.toRgb())
        painter.drawText(
            rect.x() + 2,
            rect.y(),
            rect.width() - 4,
            rect.height(),
            Qt.AlignVCenter | Qt.AlignLeft,
            _(text)
        )
        painter.restore()
|
kurtraschke/camelot
|
camelot/view/controls/delegates/booldelegate.py
|
Python
|
gpl-2.0
| 4,796
|
[
"VisIt"
] |
5d9707476beb24aead1e22f39ae7dd25364f7b5dba3f3f9434e666b268869125
|
#!/usr/bin/env python
# Written by Soo Lee and Carl Vitzthum
# This code is based on the following open-source projects:
# pytabix (https://github.com/slowkow/pytabix)
# pysam (https://github.com/pysam-developers)
# The Github repo for this project is:
# https://github.com/4dn-dcic/pairix
# IMPORTANT: use Python 2.7 or above for this package
from setuptools import setup, find_packages, Extension
# C extension: the pypairix module, compiled from the bundled pairix/tabix
# sources and linked against zlib.
EXT_MODULES = [
    Extension("pypairix",
        sources=[
            "src/bgzf.c", "src/bgzip.c", "src/index.c",
            "src/knetfile.c", "src/kstring.c",
            "src/pairixmodule.c"
        ],
        include_dirs=["src"],
        libraries=["z"],
        # 64-bit file offsets; knetfile enables remote (http/ftp) access.
        define_macros=[("_FILE_OFFSET_BITS", 64), ("_USE_KNETFILE", 1)]
    )
]
# The version number is the last whitespace-separated token on the last
# line of VERSION.txt, possibly wrapped in quotes.
with open("VERSION.txt") as version_file:
    this_version = version_file.readlines()[-1].split()[-1].strip("\"'")
# Package metadata; building requires a C toolchain and zlib headers for
# the extension module declared above.
setup(
    name = "pypairix",
    version = this_version,
    description = "Pypairix is a Python module for fast querying on a pairix-indexed bgzipped text file that contains a pair of genomic coordinates per line. For more information, see: https://github.com/4dn-dcic/pairix/blob/master/README.md.",
    url = "https://github.com/4dn-dcic/pairix",
    download_url = "https://github.com/4dn-dcic/pairix/tarball/" + this_version,
    author = "Soo Lee, Carl Vitzthum",
    author_email = "duplexa@gmail.com",
    license = "MIT",
    keywords = ["pairix","tabix", "bgzip", "bioinformatics", "genomics","hi-c"],
    packages = find_packages(),
    # Ship the bgzipped test fixtures and their .px2 indexes with the package.
    package_data = { "": ["*.gz", "*.gz.px2"] },
    ext_modules = EXT_MODULES,
    test_suite = "test",
    classifiers = [
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: C",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Scientific/Engineering :: Bio-Informatics"
    ]
)
|
4dn-dcic/pairix
|
setup.py
|
Python
|
mit
| 2,134
|
[
"pysam"
] |
71ee7be862cd227ef0d4bca8286cc6852bb63a9f36db40f9c12cd24ecebfe8ce
|
"""
Soft Actor-Critic (SAC)
------------------
Actor policy in SAC is stochastic, with off-policy training.
And 'soft' in SAC indicates the trade-off between the entropy and expected return.
The additional consideration of entropy term helps with more explorative policy.
And this implementation contains an automatic update for the entropy factor.
This version of Soft Actor-Critic (SAC) implementation contains 5 networks:
2 Q net, 2 target Q net, 1 policy net.
It uses alpha loss.
Reference
---------
paper: https://arxiv.org/pdf/1812.05905.pdf
Environment
---
Openai Gym Pendulum-v0, continuous action space
https://gym.openai.com/envs/Pendulum-v0/
Prerequisites
--------------
tensorflow >=2.0.0a0
tensorflow-probability 0.6.0
tensorlayer >=2.0.0
&&
pip install box2d box2d-kengz --user
To run
------
python tutorial_SAC.py --train/test
"""
import argparse
import os
import random
import time
import gym
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import tensorlayer as tl
from tensorlayer.layers import Dense
from tensorlayer.models import Model
# Shorthand for the Gaussian distribution class used by the policy network.
Normal = tfp.distributions.Normal
tl.logging.set_verbosity(tl.logging.DEBUG)
# add arguments in command --train/test (default mode is test)
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=True)
args = parser.parse_args()
#####################  hyper parameters  ####################
ENV_ID = 'Pendulum-v0'  # environment id
RANDOM_SEED = 2  # random seed
RENDER = False  # render while training

# RL training
ALG_NAME = 'SAC'
TRAIN_EPISODES = 100  # total number of episodes for training
TEST_EPISODES = 10  # total number of episodes for testing
MAX_STEPS = 200  # total number of steps for each episode
EXPLORE_STEPS = 100  # 500 for random action sampling in the beginning of training
BATCH_SIZE = 256  # update batch size
HIDDEN_DIM = 32  # size of hidden layers for networks
UPDATE_ITR = 3  # repeated updates for single step
SOFT_Q_LR = 3e-4  # q_net learning rate
POLICY_LR = 3e-4  # policy_net learning rate
ALPHA_LR = 3e-4  # alpha learning rate
POLICY_TARGET_UPDATE_INTERVAL = 3  # delayed update for the policy network and target networks
REWARD_SCALE = 1.  # value range of reward
REPLAY_BUFFER_SIZE = 5e5  # size of the replay buffer
AUTO_ENTROPY = True  # automatically updating variable alpha for entropy
############################### SAC ####################################
class ReplayBuffer:
    """
    A fixed-capacity ring buffer of transitions with uniform random sampling.

    Each stored transition is a tuple:
    :state: (state_dim,)
    :action: (action_dim,)
    :reward: (,), scalar
    :next_state: (state_dim,)
    :done: (,), scalar (0 and 1) or bool (True and False)
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        # Grow the list until capacity, then overwrite the oldest slot.
        if len(self.buffer) < self.capacity:
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        # Advance the write cursor as a ring buffer.
        self.position = int((self.position + 1) % self.capacity)

    def sample(self, BATCH_SIZE):
        # Uniformly draw a batch, then transpose it: a list of transitions
        # becomes one stacked array per component (zip(*batch) unpacks the
        # tuples; np.stack turns each component list into an array).
        batch = random.sample(self.buffer, BATCH_SIZE)
        states, actions, rewards, next_states, dones = map(np.stack, zip(*batch))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        # Number of transitions currently stored (<= capacity).
        return len(self.buffer)
class SoftQNetwork(Model):
    """Soft Q-network: maps a concatenated (state, action) input to Q(s, a)."""

    def __init__(self, num_inputs, num_actions, hidden_dim, init_w=3e-3):
        super(SoftQNetwork, self).__init__()
        input_dim = num_inputs + num_actions
        # glorot initialization is better than uniform in practice
        # (alternative: tf.random_uniform_initializer(-init_w, init_w))
        w_init = tf.keras.initializers.glorot_normal(seed=None)
        self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=input_dim, name='q1')
        self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='q2')
        self.linear3 = Dense(n_units=1, W_init=w_init, in_channels=hidden_dim, name='q3')

    def forward(self, input):
        """Run the 3-layer MLP; output has one Q-value per batch row."""
        out = self.linear1(input)
        out = self.linear2(out)
        out = self.linear3(out)
        return out
class PolicyNetwork(Model):
    """ the network for generating non-deterministic (Gaussian distributed) action from the state input """

    def __init__(
        self, num_inputs, num_actions, hidden_dim, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2
    ):
        super(PolicyNetwork, self).__init__()
        # clamp range for the predicted log-std; keeps exp(log_std) numerically sane
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        w_init = tf.keras.initializers.glorot_normal(seed=None)
        # w_init = tf.random_uniform_initializer(-init_w, init_w)
        self.linear1 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=num_inputs, name='policy1')
        self.linear2 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy2')
        self.linear3 = Dense(n_units=hidden_dim, act=tf.nn.relu, W_init=w_init, in_channels=hidden_dim, name='policy3')
        # two output heads: mean and log-std of the Gaussian over pre-squash actions
        self.mean_linear = Dense(
            n_units=num_actions, W_init=w_init, b_init=tf.random_uniform_initializer(-init_w, init_w),
            in_channels=hidden_dim, name='policy_mean'
        )
        self.log_std_linear = Dense(
            n_units=num_actions, W_init=w_init, b_init=tf.random_uniform_initializer(-init_w, init_w),
            in_channels=hidden_dim, name='policy_logstd'
        )
        self.action_range = action_range
        self.num_actions = num_actions

    def forward(self, state):
        """Return (mean, clipped log_std) of the action distribution for `state`."""
        x = self.linear1(state)
        x = self.linear2(x)
        x = self.linear3(x)
        mean = self.mean_linear(x)
        log_std = self.log_std_linear(x)
        log_std = tf.clip_by_value(log_std, self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        """ generate action with state for calculating gradients """
        state = state.astype(np.float32)
        mean, log_std = self.forward(state)
        std = tf.math.exp(log_std)  # no clip in evaluation, clip affects gradients flow
        normal = Normal(0, 1)
        z = normal.sample(mean.shape)
        action_0 = tf.math.tanh(mean + std * z)  # TanhNormal distribution as actions; reparameterization trick
        action = self.action_range * action_0
        # log-prob of the squashed action: Gaussian log-prob minus the tanh change-of-variables
        # correction (epsilon avoids log(0)),
        # according to original paper, with an extra last term for normalizing different action range
        log_prob = Normal(mean, std).log_prob(mean + std * z) - tf.math.log(1. - action_0**2 +
                                                                            epsilon) - np.log(self.action_range)
        # both dims of normal.log_prob and -log(1-a**2) are (N,dim_of_action);
        # the Normal.log_prob outputs the same dim of input features instead of 1 dim probability,
        # needs sum up across the dim of actions to get 1 dim probability; or else use Multivariate Normal.
        log_prob = tf.reduce_sum(log_prob, axis=1)[:, np.newaxis]  # expand dim as reduce_sum causes 1 dim reduced
        return action, log_prob, z, mean, log_std

    def get_action(self, state, greedy=False):
        """ generate action with state for interaction with environment """
        mean, log_std = self.forward([state])
        std = tf.math.exp(log_std)
        normal = Normal(0, 1)
        z = normal.sample(mean.shape)
        action = self.action_range * tf.math.tanh(
            mean + std * z
        )  # TanhNormal distribution as actions; reparameterization trick
        # greedy: use the (squashed) distribution mean instead of a random sample
        action = self.action_range * tf.math.tanh(mean) if greedy else action
        return action.numpy()[0]

    def sample_action(self, ):
        """ generate random actions (uniform over the action range) for exploration """
        a = tf.random.uniform([self.num_actions], -1, 1)
        return self.action_range * a.numpy()
class SAC:
    """Soft Actor-Critic: twin soft Q-networks with Polyak-averaged targets,
    a tanh-Gaussian policy, and (optionally) automatic entropy-temperature tuning.
    """

    def __init__(
        self, state_dim, action_dim, action_range, hidden_dim, replay_buffer, SOFT_Q_LR=3e-4, POLICY_LR=3e-4,
        ALPHA_LR=3e-4
    ):
        self.replay_buffer = replay_buffer
        # initialize all networks
        self.soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim)
        self.soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim)
        self.target_soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim)
        self.target_soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim)
        self.policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range)
        # entropy temperature alpha is optimized through its log to keep it positive
        self.log_alpha = tf.Variable(0, dtype=np.float32, name='log_alpha')
        self.alpha = tf.math.exp(self.log_alpha)
        print('Soft Q Network (1,2): ', self.soft_q_net1)
        print('Policy Network: ', self.policy_net)
        # set mode (the original set these twice; once is enough)
        self.soft_q_net1.train()
        self.soft_q_net2.train()
        self.target_soft_q_net1.eval()
        self.target_soft_q_net2.eval()
        self.policy_net.train()
        # initialize weights of target networks with a hard copy
        self.target_soft_q_net1 = self.target_ini(self.soft_q_net1, self.target_soft_q_net1)
        self.target_soft_q_net2 = self.target_ini(self.soft_q_net2, self.target_soft_q_net2)
        self.soft_q_optimizer1 = tf.optimizers.Adam(SOFT_Q_LR)
        self.soft_q_optimizer2 = tf.optimizers.Adam(SOFT_Q_LR)
        self.policy_optimizer = tf.optimizers.Adam(POLICY_LR)
        self.alpha_optimizer = tf.optimizers.Adam(ALPHA_LR)

    def target_ini(self, net, target_net):
        """ hard-copy update for initializing target networks """
        for target_param, param in zip(target_net.trainable_weights, net.trainable_weights):
            target_param.assign(param)
        return target_net

    def target_soft_update(self, net, target_net, soft_tau):
        """ soft update the target net with Polyak averaging """
        for target_param, param in zip(target_net.trainable_weights, net.trainable_weights):
            target_param.assign(  # copy weight value into target parameters
                target_param * (1.0 - soft_tau) + param * soft_tau
            )
        return target_net

    def update(self, batch_size, reward_scale=10., auto_entropy=True, target_entropy=-2, gamma=0.99, soft_tau=1e-2):
        """ update all networks in SAC from one sampled minibatch """
        state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)
        reward = reward[:, np.newaxis]  # expand dim
        done = done[:, np.newaxis]
        # normalize reward with batch mean and std; plus a small number to prevent numerical problem
        reward = reward_scale * (reward - np.mean(reward, axis=0)) / (
            np.std(reward, axis=0) + 1e-6
        )
        # Training Q Function: regression target is r + gamma * (min target Q - alpha*log_prob)
        new_next_action, next_log_prob, _, _, _ = self.policy_net.evaluate(next_state)
        target_q_input = tf.concat([next_state, new_next_action], 1)  # the dim 0 is number of samples
        target_q_min = tf.minimum(
            self.target_soft_q_net1(target_q_input), self.target_soft_q_net2(target_q_input)
        ) - self.alpha * next_log_prob
        target_q_value = reward + (1 - done) * gamma * target_q_min  # if done==1, only reward
        q_input = tf.concat([state, action], 1)  # the dim 0 is number of samples
        with tf.GradientTape() as q1_tape:
            predicted_q_value1 = self.soft_q_net1(q_input)
            q_value_loss1 = tf.reduce_mean(tf.losses.mean_squared_error(predicted_q_value1, target_q_value))
        q1_grad = q1_tape.gradient(q_value_loss1, self.soft_q_net1.trainable_weights)
        self.soft_q_optimizer1.apply_gradients(zip(q1_grad, self.soft_q_net1.trainable_weights))
        with tf.GradientTape() as q2_tape:
            predicted_q_value2 = self.soft_q_net2(q_input)
            q_value_loss2 = tf.reduce_mean(tf.losses.mean_squared_error(predicted_q_value2, target_q_value))
        q2_grad = q2_tape.gradient(q_value_loss2, self.soft_q_net2.trainable_weights)
        self.soft_q_optimizer2.apply_gradients(zip(q2_grad, self.soft_q_net2.trainable_weights))
        # Training Policy Function: maximize expected (min Q - alpha*log_prob)
        with tf.GradientTape() as p_tape:
            new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state)
            new_q_input = tf.concat([state, new_action], 1)  # the dim 0 is number of samples
            """ implementation 1 """
            predicted_new_q_value = tf.minimum(self.soft_q_net1(new_q_input), self.soft_q_net2(new_q_input))
            # """ implementation 2 """
            # predicted_new_q_value = self.soft_q_net1(new_q_input)
            policy_loss = tf.reduce_mean(self.alpha * log_prob - predicted_new_q_value)
        p_grad = p_tape.gradient(policy_loss, self.policy_net.trainable_weights)
        self.policy_optimizer.apply_gradients(zip(p_grad, self.policy_net.trainable_weights))
        # Updating alpha w.r.t entropy
        # alpha: trade-off between exploration (max entropy) and exploitation (max Q)
        if auto_entropy is True:
            with tf.GradientTape() as alpha_tape:
                alpha_loss = -tf.reduce_mean((self.log_alpha * (log_prob + target_entropy)))
            alpha_grad = alpha_tape.gradient(alpha_loss, [self.log_alpha])
            self.alpha_optimizer.apply_gradients(zip(alpha_grad, [self.log_alpha]))
            self.alpha = tf.math.exp(self.log_alpha)
        else:  # fixed alpha
            self.alpha = 1.
            alpha_loss = 0
        # Soft update the target value nets
        self.target_soft_q_net1 = self.target_soft_update(self.soft_q_net1, self.target_soft_q_net1, soft_tau)
        self.target_soft_q_net2 = self.target_soft_update(self.soft_q_net2, self.target_soft_q_net2, soft_tau)

    def save(self):  # save trained weights
        path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID]))
        if not os.path.exists(path):
            os.makedirs(path)
        extend_path = lambda s: os.path.join(path, s)
        tl.files.save_npz(self.soft_q_net1.trainable_weights, extend_path('model_q_net1.npz'))
        tl.files.save_npz(self.soft_q_net2.trainable_weights, extend_path('model_q_net2.npz'))
        tl.files.save_npz(self.target_soft_q_net1.trainable_weights, extend_path('model_target_q_net1.npz'))
        tl.files.save_npz(self.target_soft_q_net2.trainable_weights, extend_path('model_target_q_net2.npz'))
        tl.files.save_npz(self.policy_net.trainable_weights, extend_path('model_policy_net.npz'))
        np.save(extend_path('log_alpha.npy'), self.log_alpha.numpy())  # save log_alpha variable

    def load_weights(self):  # load trained weights
        path = os.path.join('model', '_'.join([ALG_NAME, ENV_ID]))
        extend_path = lambda s: os.path.join(path, s)
        tl.files.load_and_assign_npz(extend_path('model_q_net1.npz'), self.soft_q_net1)
        tl.files.load_and_assign_npz(extend_path('model_q_net2.npz'), self.soft_q_net2)
        tl.files.load_and_assign_npz(extend_path('model_target_q_net1.npz'), self.target_soft_q_net1)
        tl.files.load_and_assign_npz(extend_path('model_target_q_net2.npz'), self.target_soft_q_net2)
        tl.files.load_and_assign_npz(extend_path('model_policy_net.npz'), self.policy_net)
        self.log_alpha.assign(np.load(extend_path('log_alpha.npy')))  # load log_alpha variable
if __name__ == '__main__':
    # initialization of env
    env = gym.make(ENV_ID).unwrapped
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    action_range = env.action_space.high  # scale action, [-action_range, action_range]
    # reproducible
    env.seed(RANDOM_SEED)
    random.seed(RANDOM_SEED)
    np.random.seed(RANDOM_SEED)
    tf.random.set_seed(RANDOM_SEED)
    # initialization of buffer
    replay_buffer = ReplayBuffer(REPLAY_BUFFER_SIZE)
    # initialization of trainer
    agent = SAC(state_dim, action_dim, action_range, HIDDEN_DIM, replay_buffer, SOFT_Q_LR, POLICY_LR, ALPHA_LR)
    t0 = time.time()
    # training loop
    if args.train:
        frame_idx = 0
        all_episode_reward = []
        # need an extra call here to make inside functions be able to use model.forward
        state = env.reset().astype(np.float32)
        agent.policy_net([state])
        for episode in range(TRAIN_EPISODES):
            state = env.reset().astype(np.float32)
            episode_reward = 0
            for step in range(MAX_STEPS):
                if RENDER:
                    env.render()
                # warm-up: purely random actions for the first EXPLORE_STEPS frames
                if frame_idx > EXPLORE_STEPS:
                    action = agent.policy_net.get_action(state)
                else:
                    action = agent.policy_net.sample_action()
                next_state, reward, done, _ = env.step(action)
                next_state = next_state.astype(np.float32)
                done = 1 if done is True else 0
                replay_buffer.push(state, action, reward, next_state, done)
                state = next_state
                episode_reward += reward
                frame_idx += 1
                # only update once the buffer can supply a full batch
                if len(replay_buffer) > BATCH_SIZE:
                    for i in range(UPDATE_ITR):
                        agent.update(
                            BATCH_SIZE, reward_scale=REWARD_SCALE, auto_entropy=AUTO_ENTROPY,
                            target_entropy=-1. * action_dim
                        )
                if done:
                    break
            # exponentially smoothed reward curve for plotting
            if episode == 0:
                all_episode_reward.append(episode_reward)
            else:
                all_episode_reward.append(all_episode_reward[-1] * 0.9 + episode_reward * 0.1)
            print(
                'Training  | Episode: {}/{}  | Episode Reward: {:.4f}  | Running Time: {:.4f}'.format(
                    episode + 1, TRAIN_EPISODES, episode_reward,
                    time.time() - t0
                )
            )
        agent.save()
        plt.plot(all_episode_reward)
        if not os.path.exists('image'):
            os.makedirs('image')
        plt.savefig(os.path.join('image', '_'.join([ALG_NAME, ENV_ID])))
    if args.test:
        agent.load_weights()
        # need an extra call here to make inside functions be able to use model.forward
        state = env.reset().astype(np.float32)
        agent.policy_net([state])
        for episode in range(TEST_EPISODES):
            state = env.reset().astype(np.float32)
            episode_reward = 0
            for step in range(MAX_STEPS):
                env.render()
                # greedy=True: deterministic action (distribution mean) for evaluation
                state, reward, done, info = env.step(agent.policy_net.get_action(state, greedy=True))
                state = state.astype(np.float32)
                episode_reward += reward
                if done:
                    break
            print(
                'Testing  | Episode: {}/{}  | Episode Reward: {:.4f}  | Running Time: {:.4f}'.format(
                    episode + 1, TEST_EPISODES, episode_reward,
                    time.time() - t0
                )
            )
|
zsdonghao/tensorlayer
|
examples/reinforcement_learning/tutorial_SAC.py
|
Python
|
apache-2.0
| 19,736
|
[
"Gaussian"
] |
3d003f5a30536189e0b822b72623755051e9217dd4be7647368ff0d35b9d6eb7
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Normal (Gaussian) distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
__all__ = [
"Normal",
"NormalWithSoftplusScale",
]
@tf_export("distributions.Normal")
class Normal(distribution.Distribution):
  """The Normal distribution with location `loc` and `scale` parameters.

  #### Mathematical details

  The probability density function (pdf) is,

  ```none
  pdf(x; mu, sigma) = exp(-0.5 (x - mu)**2 / sigma**2) / Z
  Z = (2 pi sigma**2)**0.5
  ```

  where `loc = mu` is the mean, `scale = sigma` is the std. deviation, and, `Z`
  is the normalization constant.

  The Normal distribution is a member of the [location-scale family](
  https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
  constructed as,

  ```none
  X ~ Normal(loc=0, scale=1)
  Y = loc + scale * X
  ```

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  import tensorflow_probability as tfp
  tfd = tfp.distributions

  # Define a single scalar Normal distribution.
  dist = tfd.Normal(loc=0., scale=3.)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued Normals.
  # The first has mean 1 and standard deviation 11, the second 2 and 22.
  dist = tfd.Normal(loc=[1, 2.], scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.prob([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Normals.
  # Both have mean 1, but different standard deviations.
  dist = tfd.Normal(loc=1., scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.prob(3.0)
  ```
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Normal"):
    """Construct Normal distributions with mean and stddev `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor; the means of the distribution(s).
      scale: Floating point tensor; the stddevs of the distribution(s).
        Must contain only positive values.
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      TypeError: if `loc` and `scale` have different `dtype`.
    """
    # Capture constructor arguments before locals are mutated below.
    parameters = dict(locals())
    with ops.name_scope(name, values=[loc, scale]) as name:
      # Positivity of `scale` is only asserted when validate_args is set.
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        check_ops.assert_same_float_dtype([self._loc, self._scale])
    super(Normal, self).__init__(
        dtype=self._scale.dtype,
        reparameterization_type=distribution.FULLY_REPARAMETERIZED,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=name)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both parameters share the sample shape (scalar event).
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the mean."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for standard deviation."""
    return self._scale

  def _batch_shape_tensor(self):
    # Batch shape is the broadcast of the two parameter shapes.
    return array_ops.broadcast_dynamic_shape(
        array_ops.shape(self.loc),
        array_ops.shape(self.scale))

  def _batch_shape(self):
    return array_ops.broadcast_static_shape(
        self.loc.get_shape(),
        self.scale.get_shape())

  def _event_shape_tensor(self):
    # Scalar distribution: empty event shape.
    return constant_op.constant([], dtype=dtypes.int32)

  def _event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Reparameterized sampling: loc + scale * standard_normal.
    shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
    sampled = random_ops.random_normal(
        shape=shape, mean=0., stddev=1., dtype=self.loc.dtype, seed=seed)
    return sampled * self.scale + self.loc

  def _log_prob(self, x):
    return self._log_unnormalized_prob(x) - self._log_normalization()

  def _log_cdf(self, x):
    return special_math.log_ndtr(self._z(x))

  def _cdf(self, x):
    return special_math.ndtr(self._z(x))

  def _log_survival_function(self, x):
    return special_math.log_ndtr(-self._z(x))

  def _survival_function(self, x):
    return special_math.ndtr(-self._z(x))

  def _log_unnormalized_prob(self, x):
    # -0.5 * ((x - mu) / sigma)**2
    return -0.5 * math_ops.square(self._z(x))

  def _log_normalization(self):
    # log Z = 0.5*log(2*pi) + log(sigma)
    return 0.5 * math.log(2. * math.pi) + math_ops.log(self.scale)

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast scale.
    scale = self.scale * array_ops.ones_like(self.loc)
    return 0.5 * math.log(2. * math.pi * math.e) + math_ops.log(scale)

  def _mean(self):
    return self.loc * array_ops.ones_like(self.scale)

  def _quantile(self, p):
    return self._inv_z(special_math.ndtri(p))

  def _stddev(self):
    return self.scale * array_ops.ones_like(self.loc)

  def _mode(self):
    # For a Gaussian the mode coincides with the mean.
    return self._mean()

  def _z(self, x):
    """Standardize input `x` to a unit normal."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale

  def _inv_z(self, z):
    """Reconstruct input `x` from a its normalized version."""
    with ops.name_scope("reconstruct", values=[z]):
      return z * self.scale + self.loc
class NormalWithSoftplusScale(Normal):
  """Normal with softplus applied to `scale`.

  Convenience subclass: allows an unconstrained `scale` argument by passing
  it through `softplus` so the effective standard deviation is positive.
  """

  @deprecation.deprecated(
      "2019-01-01",
      "Use `tfd.Normal(loc, tf.nn.softplus(scale)) "
      "instead.",
      warn_once=True)
  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="NormalWithSoftplusScale"):
    # Capture constructor arguments before they are transformed below.
    parameters = dict(locals())
    with ops.name_scope(name, values=[scale]) as name:
      super(NormalWithSoftplusScale, self).__init__(
          loc=loc,
          scale=nn.softplus(scale, name="softplus_scale"),
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=name)
    self._parameters = parameters
@kullback_leibler.RegisterKL(Normal, Normal)
def _kl_normal_normal(n_a, n_b, name=None):
  """Calculate the batched KL divergence KL(n_a || n_b) with n_a and n_b Normal.

  Args:
    n_a: instance of a Normal distribution object.
    n_b: instance of a Normal distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_normal_normal".

  Returns:
    Batchwise KL(n_a || n_b)
  """
  with ops.name_scope(name, "kl_normal_normal", [n_a.loc, n_b.loc]):
    one = constant_op.constant(1, dtype=n_a.dtype)
    two = constant_op.constant(2, dtype=n_a.dtype)
    half = constant_op.constant(0.5, dtype=n_a.dtype)
    var_a = math_ops.square(n_a.scale)
    var_b = math_ops.square(n_b.scale)
    # KL(N_a || N_b) = (mu_a - mu_b)^2 / (2 var_b)
    #                  + 0.5 * (var_a/var_b - 1 - log(var_a/var_b))
    ratio = var_a / var_b
    mean_term = math_ops.square(n_a.loc - n_b.loc) / (two * var_b)
    return mean_term + half * (ratio - one - math_ops.log(ratio))
|
dongjoon-hyun/tensorflow
|
tensorflow/python/ops/distributions/normal.py
|
Python
|
apache-2.0
| 9,800
|
[
"Gaussian"
] |
875666b4cdbfe946c7dfb242ff09f123466c7b1cddf8bfab004664c60b2aa8bc
|
import datetime
import math
import time
from django.utils.tzinfo import FixedOffset
from canvas.redis_models import RedisHash, RedisKey
from canvas import knobs
from django.conf import settings
class Sticker(object):
    """
    A datastructure to hold a Canvas sticker definition.
    Note that the `details` property (and `to_client()`) never gets updated after instantiation.
    """
    def __eq__(self, another_sticker):
        # Stickers are equal iff their type_ids match; anything without a
        # type_id compares unequal.
        # NOTE(review): no matching __ne__/__hash__ defined; relies on the
        # Python 2 default identity hash.
        try:
            return self.type_id == another_sticker.type_id
        except AttributeError:
            return False
    def __init__(self, type_id, name="", value=None, preference=None, limited=False, hidden=False, unusable=False,
                 cost=None, title="", shop_filename=None, shop_text="", purchasable=None, achievement=None,
                 admin_only=False, maximum=None, hide_from_inventory=False, seasonal=False):
        """
        admin:
            Whether this sticker is only available for admins.
        shop_filename:
            Defaults to "name.png" if unspecified here.
        purchasable:
            Leave this as `None` and it will be determined from the cost and achievement status.
            Set it to `False` to override this and force it to not be sold in the shop.
        """
        if cost is not None:
            cost = int(cost)
        # Derive a sort/score value from the cost when not given explicitly.
        if value is None:
            if cost is None or cost < 1:
                value = 1
            else:
                value = math.sqrt(cost+1)
        else:
            value = float(value)
        self._purchasable = purchasable
        if purchasable and not cost:
            raise ValueError('A sticker without a cost cannot be purchasable.')
        if shop_filename is None:
            shop_filename = u'{0}.png'.format(name)
        self.type_id = type_id
        self.name = name
        self.value = value
        self.preference = preference if preference is not None else type_id
        # A sticker is "limited" if explicitly flagged, if it has a cost, or
        # if its inventory is capped.
        self._is_limited = bool(limited or cost or maximum)
        # not placeable anymore, but should still show up on existing posts.
        self._is_unusable = unusable
        # not placeable anymore nor should it show up on already stickered posts.
        self.is_hidden = hidden
        self.cost = cost
        self.title = title
        self.shop_filename = shop_filename
        self.shop_text = shop_text
        self.achievement = achievement
        self.admin_only = admin_only
        self.maximum = maximum
        self.user_remaining = None
        self.inventory_hash = None
        if self.maximum:
            # Redis-backed counter of remaining units for capped-inventory stickers.
            self.inventory_hash = RedisKey("sticker:%s:remaining" % self.name)
        self.seasonal = seasonal
        self.hide_from_inventory = hide_from_inventory
    @property
    def active_seasonal(self):
        return self in get_active_seasonal_stickers()
    @property
    def is_unusable(self):
        # Seasonal stickers are usable only while their season is active.
        if self.seasonal:
            return not self.active_seasonal
        else:
            return self._is_unusable
    @property
    def is_limited(self):
        if self.seasonal:
            return self.active_seasonal
        else:
            return self._is_limited
    def is_epic(self):
        """ Recipients of Epic stickers get an exciting realtime notification. """
        # NOTE(review): assumes self.cost is not None here — presumably only
        # called on costed stickers; confirm against callers.
        return self.cost >= knobs.EPIC_STICKER_COST_THRESHOLD
    def is_star(self):
        from django.conf import settings
        if not hasattr(settings, 'STAR_STICKER_TYPE_ID'):
            return False
        return self.type_id == settings.STAR_STICKER_TYPE_ID
    def is_usable(self, user):
        """
        Whether this sticker can be used by the user. Takes several factors into account,
        not just `Sticker.is_unusable`.
        """
        return bool(self.cost
                    and not self.is_unusable
                    and not self.is_hidden
                    and (self.achievement is None or user.kv.achievements.by_id(self.achievement).get()))
    def is_limited_inventory(self):
        """ Whether this sticker has a limited number of units available. """
        # NOTE(review): `!= None` — prefer `is not None`.
        return self.maximum != None
    def is_purchasable(self, user):
        if self._purchasable is None:
            if self.is_limited_inventory() and self.is_out_of_stock():
                # Then it can be purchased if there are enough.
                # Note that we do not check for whether the user has already bought the
                # sticker here. We also do not check if the user can afford it.
                # This logic is done in api.store_buy
                return False
            return bool(self.cost and (self.achievement is None
                                       or user.kv.achievements.by_id(self.achievement).get()))
        return self._purchasable
    @property
    def remaining(self):
        """ Returns the number of stickers available. """
        if self.inventory_hash:
            # Was the value ever bootstrapped
            if self.inventory_hash.get() == None:
                self.inventory_hash.set(self.maximum)
            return int(self.inventory_hash.get())
        return None
    def is_out_of_stock(self):
        # None (unlimited) is intentionally treated as "in stock".
        return self.remaining == 0
    def decrement_inventory(self):
        # NOTE(review): bare except silently swallows all errors (including
        # missing inventory_hash); returns None on any failure.
        try:
            return int(self.inventory_hash.decr())
        except:
            pass
    def to_client(self, **kwargs):
        # Serialize the client-visible subset of attributes.
        keys = [
            'type_id',
            'name',
            'value',
            'preference',
            'is_limited',
            'is_unusable',
            'is_hidden',
            'cost',
            'title',
            'shop_filename',
            'shop_text',
            'achievement',
            'admin_only',
            'maximum',
            'user_remaining',
        ]
        return dict([(key, getattr(self, key)) for key in keys])
    def sort_key(self, count):
        # Rank by usage count weighted by cost, then limited status, then preference.
        score = count * (self.cost+1 if self.cost else 1)
        return (score, int(self.is_limited), self.preference)
    def __repr__(self):
        return unicode(self.to_client())
# Master registry of sticker definitions. Indexed into _id_lookup/_name_lookup
# below via add_sticker().
_stickers = [
    Sticker(0, 'dummy', value=0, unusable=True, hidden=True),
    # Upvotes.
    Sticker(1, 'smiley', preference=27),
    Sticker(2, 'frowny', preference=22),
    Sticker(3, 'monocle', preference=25),
    Sticker(4, 'lol', preference=26),
    Sticker(5, 'wtf', unusable=True),
    Sticker(6, 'question', preference=20),
    Sticker(7, 'num1', cost=1, limited=True, purchasable=False, hide_from_inventory=True),
    Sticker(8, 'cookie', preference=21),
    Sticker(9, 'heart', preference=24),
    Sticker(10, 'wow', preference=23),
    Sticker(11, 'empty', unusable=True),
    # Seasonal.
    Sticker(100, "note", seasonal=True),
    Sticker(101, "texas", seasonal=True),
    Sticker(102, "sxsw", seasonal=True),
    Sticker(104, "monocle-sombrero", seasonal=True),
    Sticker(105, "monocle-maracas", seasonal=True),
    Sticker(106, "monocle-margarita", seasonal=True),
    Sticker(107, "monocle-hungover", seasonal=True),
    Sticker(108, "zalgo-black", seasonal=True),
    Sticker(109, "america", seasonal=True),
    Sticker(110, "andrewwk", seasonal=True),
    Sticker(111, "partyhard", cost=300, purchasable=False, limited=True),
    Sticker(112, "wow-tired", seasonal=True),
    Sticker(113, "skull", seasonal=True),
    Sticker(114, "cthulhu", seasonal=True),
    Sticker(115, "jack-o-lantern", seasonal=True),
    Sticker(116, "turkey", seasonal=True),
    Sticker(117, "cornucopia", seasonal=True),
    Sticker(118, "pumpkin-pie", seasonal=True),
    Sticker(119, "gift", seasonal=True),
    Sticker(120, "snowflake", seasonal=True),
    Sticker(121, "rudolph", seasonal=True),
    Sticker(122, "twothousandtwelve", seasonal=True),
    Sticker(123, "champagne", seasonal=True),
    Sticker(124, "mayan", seasonal=True),
    Sticker(125, "sopa", seasonal=True),
    Sticker(126, "lantern", seasonal=True),
    Sticker(127, "cupcake", seasonal=True),
    Sticker(128, "groundhog", seasonal=True),
    Sticker(129, "teddy", seasonal=True),
    Sticker(130, "valentine", seasonal=True),
    Sticker(131, "forever-alone-seasonal", seasonal=True),
    Sticker(132, "clover", seasonal=True),
    Sticker(133, "green-beer", seasonal=True),
    Sticker(134, "pot-o-gold", seasonal=True),
    Sticker(135, "bill-fools1", seasonal=True),
    Sticker(136, "bill-fools2", seasonal=True),
    Sticker(137, "bill-fools3", seasonal=True),
    Sticker(138, "bill-fools4", seasonal=True),
    Sticker(139, "bill-fools5", seasonal=True),
    Sticker(140, "bill-fools6", seasonal=True),
    Sticker(141, "lobster", seasonal=True),
    Sticker(142, "dave", seasonal=True),
    Sticker(143, "egg-blue", seasonal=True),
    Sticker(144, "egg-pink", seasonal=True),
    Sticker(145, "egg-chocolate", seasonal=True),
    Sticker(146, "jason", seasonal=True),
    Sticker(147, "weed", seasonal=True),
    Sticker(148, "bloodshot", seasonal=True),
    Sticker(149, "joint", seasonal=True),
    Sticker(150, "donut-strawberry", seasonal=True),
    Sticker(151, "donut-chocolate", seasonal=True),
    Sticker(152, "donut-glazed", seasonal=True),
    Sticker(153, "usa-pin", seasonal=True),
    Sticker(154, "usa-hat", seasonal=True),
    Sticker(155, "usa-eagle", seasonal=True),
    Sticker(156, "medal-gold", seasonal=True),
    Sticker(157, "medal-silver", seasonal=True),
    Sticker(158, "medal-copper", seasonal=True),
    Sticker(159, "hurricane-sandy", seasonal=True),
    Sticker(160, "slowpoke-pumpkin", seasonal=True),
    Sticker(161, "slowpoke-ghost", seasonal=True),
    # Inventory
    Sticker(103, "banana", cost=5, title="Banana",
            shop_text="A sticker you may one day earn. Until then, the sticker shall remain locked in here."),
    Sticker(300, "nyancat", cost=25, title="Nyancat",
            shop_text="nyan nyan nyan nyan <a href='http://www.prguitarman.com'>prguitarman</a> nyan nyan nyan nyan "
                      "nyan nyan nyan nyan nyan nyan nyan nyan nyan nyan nyan"),
    Sticker(301, "number-oneocle", cost=100, title="Number Oneocle",
            shop_filename="number-oneocle.gif",
            shop_text="""For the post so classy it needs to be number one'd as well. "But wait," you say, "how is """
                      """it double sided if it's a sticker?" Shhhhhh... it's best not to question the number """
                      """oneocle."""),
    Sticker(302, "fuckyeah", cost=150, title="Fuck Yeah",
            shop_filename="fuckyeah.gif",
            shop_text="FUCK YEAH!"),
    Sticker(303, "cool", cost=5, title="Cool Guy",
            shop_text="Sometimes someone makes a really cool post and only a pair of Raybans can express how you "
                      "feel."),
    Sticker(304, "kawaii", cost=15, title="Kawaii",
            shop_text="For something so cute that just a heart won't do :3"),
    Sticker(305, "hipster", cost=20, achievement=0, title="Hipster",
            shop_text="You probably haven't heard of this sticker, it's still underground."),
    Sticker(306, "glove", cost=50, title="Glove of Power",
            shop_text="I love the power glove. It's so BAD. But it's still pretty nerdy..."),
    Sticker(307, "tacnayn", cost=25, title="Tacnayn", purchasable=False,
            shop_text="Tacnayn, destroyer of worlds. Sowing death and destruction wherever he goes. The only thing "
                      "standing between Tacnayn and complete annihilation of the universe is his rainbowed enemy "
                      "Nyancat."),
    Sticker(308, "super-lol", cost=30, title="Super LOL",
            shop_text="For when it's so good that you just can't stop laughing."),
    Sticker(309, "forever-alone", cost=10, title="Forever Alone",
            shop_text="Only for the loneliest of posts."),
    # Downvotes.
    Sticker(500, 'stale', unusable=True, hidden=True),
    Sticker(501, 'stop', unusable=True, hidden=True),
    Sticker(502, 'poop', unusable=True, hidden=True),
    # Note that there are two "downvote" stickers.
    # This one is the sticker that gets applied to the comment (hence the -1)
    # when someone uses the downvote action.
    Sticker(503, 'downvote', value=-1, hidden=True),
    # Sharing.
    Sticker(2001, 'facebook', unusable=True),
    Sticker(2002, 'twitter', unusable=True),
    Sticker(2003, 'stumbleupon', unusable=True),
    Sticker(2004, 'tumblr', unusable=True),
    Sticker(2005, 'reddit', unusable=True),
    Sticker(2006, 'email', unusable=True),
    # Actions.
    Sticker(3001, 'flag', unusable=True),
    # This is a down vote action, not a sticker. Hence the "unusable" flag.
    # When this action is applied to a comment, the comment gets a 503/downvote
    # sticker.
    Sticker(3002, 'downvote_action', unusable=True),
    Sticker(3003, 'pin', unusable=True),
    Sticker(3005, 'offtopic', unusable=True),
    Sticker(3007, 'remix'),
    Sticker(8902, 'curated', unusable=True, admin_only=True),
    Sticker(8903, 'sticky', unusable=True, admin_only=True),
]
#
# DRAWQUEST
#
# Ugly hack.
if settings.PROJECT == 'drawquest':
    # NOTE(review): `settings` is already imported at module level; this
    # re-import shadows it locally within the if-block.
    from django.conf import settings
    _stickers.append(Sticker(settings.STAR_STICKER_TYPE_ID, 'star'))
# Hashes for stickers by name and by id.
# This way we can look them up both ways.
_name_lookup = {}
_id_lookup = {}
def all_stickers():
    """Return every registered Sticker (the values of the id index)."""
    stickers = _id_lookup.values()
    return stickers
def add_sticker(sticker):
    """Register `sticker` in both module-level indexes.

    Keeps the two lookup tables in sync: one keyed by numeric type_id,
    the other keyed by the human-readable name.
    """
    _id_lookup[sticker.type_id] = sticker
    _name_lookup[sticker.name] = sticker
def remove_sticker(sticker):
    """Drop `sticker` from both indexes. Used by tests only!"""
    del _name_lookup[sticker.name]
    del _id_lookup[sticker.type_id]
# Populate both lookup tables.
# NOTE(review): relies on Python 2's eager map(); under Python 3 this map()
# would be a lazy no-op -- confirm the runtime before porting.
map(add_sticker, _stickers)
### Mutually-exclusive lists.
primary_types = [_id_lookup[id] for id in [1, 2, 3, 4, 10, 6, 7, 8, 9]]
sharing_types = [_id_lookup[id] for id in [2001, 2002, 2004, 2003, 2005, 2006]]
# The sticker (type_id 503) applied to a comment when it gets downvoted.
downvote = _name_lookup.get("downvote")
# Actions that are available to everyone.
actions_base = [_name_lookup.get(sticker_name) for sticker_name in ["flag", "downvote_action", "pin"]]
# Extra actions for group moderators and for staff, respectively.
actions_group_mod = [_name_lookup.get("offtopic")]
actions_staff = [_name_lookup.get("curated"), _name_lookup.get("sticky")]
# Actions that are predicated on a lab setting. get_actions looks for a sticker by the action/sticker name
# (Sticker.details.get("name"))
labs_actions = []
def get(name_or_id):
    """Resolve `name_or_id` (a Sticker, a numeric id, or a name) to a Sticker.

    Unknown values fall back to the sticker registered under id 0 instead of
    raising.
    """
    if isinstance(name_or_id, Sticker):
        # Already resolved; hand it straight back.
        return name_or_id
    try:
        found = _id_lookup[int(name_or_id)]
    except (KeyError, TypeError, ValueError):
        # Not a known integer id -- fall back to a name lookup.
        found = _name_lookup.get(name_or_id)
    if found is not None:
        return found
    # Historical behavior: resolve unknowns to the id-0 placeholder.
    #raise ValueError('No such sticker exists.')
    return _id_lookup[0]
def get_purchasable(user):
    """Return the Stickers that can be purchased by @user, cheapest first."""
    purchasable = [s for s in all_stickers() if s.is_purchasable(user)]
    purchasable.sort(key=lambda s: s.cost)
    return purchasable
def get_inventory(user):
    """Return the stickers @user holds and can use, ordered by cost."""
    # TODO: move to canvas_auth.models.User
    def _in_inventory(sticker):
        # Usable by this user, shown in the inventory view, and held (>0).
        return (sticker.is_usable(user)
                and not sticker.hide_from_inventory
                and user.kv.stickers[sticker.type_id].get())

    sticks = sorted(filter(_in_inventory, all_stickers()),
                    key=lambda s: s.cost)
    # Andrew WK :] -- this specific account always gets "partyhard" appended.
    if user.is_authenticated() and user.id == 56409:
        sticks.append(get("partyhard"))
    return sticks
def get_actions(user):
    """Return the list of action stickers available to @user."""
    available = list(actions_base)
    if user.is_authenticated():
        # Staff-only and group-moderator actions layer on top of the base set.
        if user.is_staff:
            available.extend(actions_staff)
        if user.userinfo.details()['moderatable']:
            available.extend(actions_group_mod)
        # Labs-gated actions: enabled per-user via a "labs:<name>" kv flag.
        if labs_actions:
            kv = user.redis.user_kv.hgetall()
            available.extend(action for action in labs_actions
                             if RedisHash.read_bool(kv, "labs:" + action.name))
    return available
def get_limited_stickers():
    """
    Stickers that can be purchased in limited quantity.

    Returns the Stickers flagged as limited or limited-inventory.
    """
    return [s for s in all_stickers()
            if s.is_limited or s.is_limited_inventory]
def get_managed_stickers():
    """Yield the limited-availability Stickers that also define a maximum."""
    for candidate in _stickers:
        if candidate.is_limited and candidate.maximum:
            yield candidate
def details_for(type_id=None, user=None, sticker=None):
    """Return the Sticker for `type_id` (or the given `sticker` directly).

    When a user is supplied and the sticker is limited, the sticker is
    annotated with how many the user has remaining.
    """
    target = sticker or get(type_id)
    if user and target.is_limited:
        # Attach the per-user remaining count for limited-run stickers.
        target.user_remaining = user.kv.stickers[target.type_id].get()
    return target
def all_details(user=None):
    """Map type_id -> detailed Sticker for every sticker (plus @user's actions)."""
    pool = all_stickers()
    if user:
        pool += get_actions(user)
    return dict((s.type_id, details_for(user=user, sticker=s)) for s in pool)
def sorted_counts(counts):
    """Order a {sticker: count} mapping for display.

    Hidden stickers are dropped; the remaining (sticker, count) pairs are
    sorted by each sticker's own `sort_key(count)`, highest first.

    Returns a list of (sticker, count) tuples.

    NOTE: rewritten without Python-2-only tuple-parameter lambdas
    (`lambda (s, c): ...`), which are a syntax error on Python 3 (PEP 3113);
    behavior is unchanged.
    """
    visible = [(sticker, count) for sticker, count in counts.items()
               if not sticker.is_hidden]
    visible.sort(key=lambda pair: pair[0].sort_key(pair[1]), reverse=True)
    return visible
class SeasonalEvent(object):
    """A time-boxed event during which users are granted seasonal stickers."""

    def __init__(self, name, start=0, duration=0, stickers=None, count=0, grace_period=2*60, enabled_locally=False):
        """
        name:
            The historically and globally unique name given to this season, to ensure sticks are delivered once and only once.
        start:
            a unixtime indicating the start time of the seasonal event, in the UTC timezone.
        duration:
            seconds indicating the duration of the event.
        stickers:
            a list of names or type_ids of stickers to give out.
            (Previously defaulted to a mutable `[]`; a None sentinel is used
            now to avoid the shared-mutable-default pitfall. Passing None or
            [] behaves identically.)
        count:
            the number of stickers to give the user; may also be a sequence
            parallel to `stickers` (see `sticker_counts`).
        grace_period:
            number of seconds the event stays active after the time
            officially ends (time spent at blinking 00:00:00) -- the default
            2*60 is two minutes expressed in seconds.
        enabled_locally:
            Ignores start and duration locally to allow easy testing
        """
        self.name = name
        self.start = start
        self.duration = duration
        self.stickers = [get(stick) for stick in (stickers or [])]
        self.count = count
        self.enabled_locally = enabled_locally
        self.grace_period = grace_period

    @property
    def sticker_counts(self):
        """Map sticker -> count; `count` may be scalar or per-sticker sequence."""
        try:
            return dict(zip(self.stickers, self.count))
        except TypeError:
            # `count` is a scalar: apply the same count to every sticker.
            return dict((sticker, self.count) for sticker in self.stickers)

    @property
    def end(self):
        # Unixtime at which the event officially ends (grace period excluded).
        return self.start + self.duration

    @property
    def active(self):
        """True while the event (plus grace period) is running, or always on a local sandbox with enabled_locally."""
        return self.start <= time.time() <= (self.end + self.grace_period) or (settings.LOCAL_SANDBOX and self.enabled_locally)

    def to_client(self, **kwargs):
        return {
            'end_time': self.end,
        }
# Historical seasonal events. Old entries are kept because event names must
# stay unique across all time (see SeasonalEvent.__init__ docstring).
_seasonal_events = [
    SeasonalEvent(
        "groundhog_wtf_redux",
        start = 1332342980,
        duration = 5 * 60,
        grace_period = 2 * 60,
        stickers = ["groundhog"],
        count = 1,
    ),
    SeasonalEvent(
        "april_fools_crazy",
        start = 1333246256,
        duration = 12 * 60 * 60,
        grace_period = 2 * 60,
        stickers = ["bill-fools1","bill-fools2","bill-fools3","bill-fools4","bill-fools5","bill-fools6"],
        count = 0,
    ),
    SeasonalEvent(
        "seafood_fools",
        start = 1333295699,
        duration = 12 * 60 * 60,
        grace_period = 2 * 60,
        stickers = ["lobster"],
        count = 2,
    ),
    SeasonalEvent(
        "easter",
        start = 1333890000,
        duration = 24 * 60 * 60,
        stickers = ["egg-pink", "egg-blue", "egg-chocolate"],
        count = 3,
    ),
    SeasonalEvent(
        "friday_13_april_2012",
        start = 1334307600,
        duration = 24 * 60 * 60,
        stickers = ["jason"],
        count = 5,
    ),
    SeasonalEvent(
        "420",
        start = 1334916000,
        duration = 24 * 60 * 60,
        stickers = ["weed", "bloodshot", "joint"],
        count = 3,
    ),
    SeasonalEvent(
        "donut_day",
        start = 1338544800,
        duration = 24 * 60 * 60,
        stickers = ["donut-strawberry", "donut-chocolate", "donut-glazed"],
        count = 3,
    ),
    SeasonalEvent(
        "independence_day",
        start = 1341399600,
        duration = 24 * 60 * 60,
        stickers = ["usa-pin", "usa-hat", "usa-eagle"],
        count = 3,
    ),
    SeasonalEvent(
        "olympics",
        start = 1344574800,
        duration = 3 * 24 * 60 * 60,
        # Per-sticker counts: 1 gold, 3 silver, 5 copper (list parallels stickers).
        stickers = ["medal-gold", "medal-silver", "medal-copper"],
        count = [1, 3, 5],
    ),
    SeasonalEvent(
        "halloween",
        start = 1351843200,
        duration = 3 * 24 * 60 * 60,
        stickers = ["hurricane-sandy", "slowpoke-pumpkin", "slowpoke-ghost"],
        count = 5,
    ),
    SeasonalEvent(
        "thanksgiving",
        start = 1353495600,
        duration = 3 * 24 * 60 * 60,
        stickers = ["turkey", "cornucopia", "pumpkin-pie"],
        count = 5,
    ),
    SeasonalEvent(
        "dec212012",
        start = 1356048000,
        duration = 1 * 24 * 60 * 60,
        stickers = ["mayan"],
        count = 5,
    ),
    SeasonalEvent(
        "christmas",
        start = 1356325200,
        duration = 3 * 24 * 60 * 60,
        stickers = ["gift", "snowflake", "rudolph"],
        count = 5,
    ),
    # In progress
    SeasonalEvent(
        "groundhog_day",
        start = 1359799200,
        duration = 1 * 24 * 60 * 60,
        stickers = ["groundhog"],
        count = 5,
    ),
]
def get_active_event():
    """Return the first currently-active SeasonalEvent, or None."""
    return next((event for event in _seasonal_events if event.active), None)
def get_active_seasonal_stickers():
    """Stickers for the active seasonal event ([] when nothing is running)."""
    event = get_active_event()
    if event is None:
        return []
    return event.stickers
|
drawquest/drawquest-web
|
website/canvas/stickers.py
|
Python
|
bsd-3-clause
| 21,635
|
[
"exciting"
] |
67dcfb86298b82d8086543b40aca11f39f38358c4a7e11f3ad453c5087dde66c
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.testing as npt
import pandas as pd
from scipy.spatial.distance import pdist
from unittest import TestCase, main
from skbio import OrdinationResults
from skbio.stats.ordination import ca
from skbio.util import get_data_path, assert_ordination_results_equal
def chi_square_distance(data_table, between_rows=True):
    """Compute chi-square distances between the rows (or columns) of a table.

    The measure has no upper limit and excludes double-zeros.

    Parameters
    ----------
    data_table : 2D array_like
        Frequency table of shape (n, p): all values non-negative and
        summing to 1.
    between_rows : bool (defaults to True)
        Compute distances between rows (default) or between columns.

    Returns
    -------
    Y : ndarray
        Condensed distance matrix, as produced by
        ``scipy.spatial.distance.pdist`` (see
        ``scipy.spatial.distance.squareform``).

    References
    ----------
    Legendre & Legendre (1998), formula 7.54 (D_{16}). See also
    http://www.springerreference.com/docs/html/chapterdbid/60817.html
    """
    table = np.asarray(data_table, dtype=np.float64)
    if not np.allclose(table.sum(), 1):
        raise ValueError("Input is not a frequency table: if it is an"
                         " abundance table you could scale it as"
                         " `data_table / data_table.sum()`.")
    if np.any(table < 0):
        raise ValueError("A frequency table can't have negative values.")
    # Distances are always computed between the rows, so transpose first
    # when column distances were requested.
    if not between_rows:
        table = table.T
    row_totals = table.sum(axis=1, keepdims=True)
    column_totals = table.sum(axis=0)
    # Scaling each cell by its row sum and sqrt of its column sum turns the
    # euclidean distance on the scaled table into the chi-square distance.
    scaled = table / (row_totals * np.sqrt(column_totals))
    return pdist(scaled, 'euclidean')
class TestChiSquareDistance(TestCase):
    """Unit tests for the chi_square_distance helper."""

    def test_errors(self):
        # Tables with negative entries are rejected.
        negative = np.array([[-0.5, 0],
                             [1, 0.5]])
        with npt.assert_raises(ValueError):
            chi_square_distance(negative)
        # Tables that do not sum to 1 (not frequencies) are rejected.
        not_frequencies = np.array([[0.5, 0],
                                    [0.5, 0.1]])
        with npt.assert_raises(ValueError):
            chi_square_distance(not_frequencies)

    def test_results(self):
        """Some random numbers."""
        table = np.array([[0.02808988764, 0.056179775281, 0.084269662921,
                           0.140449438202],
                          [0.01404494382, 0.196629213483, 0.109550561798,
                           0.033707865169],
                          [0.02808988764, 0.112359550562, 0.056179775281,
                           0.140449438202]])
        observed = chi_square_distance(table)
        expected = [0.91413919964333856,
                    0.33651110106124049,
                    0.75656884966269089]
        npt.assert_almost_equal(observed, expected)

    def test_results2(self):
        """A tiny example from Legendre & Legendre 1998, p. 285."""
        table = np.array([[0, 1, 1],
                          [1, 0, 0],
                          [0, 4, 4]])
        observed = chi_square_distance(table / table.sum())
        # L&L report 3.477 -- apparently a rounding/calculator error. :(
        expected = [3.4785054261852175, 0, 3.4785054261852175]
        npt.assert_almost_equal(observed, expected)
class TestCAResults(TestCase):
    """Validate CA scores against the worked example in L&L 1998."""

    def setUp(self):
        """Data from table 9.11 in Legendre & Legendre 1998."""
        self.X = np.loadtxt(get_data_path('L&L_CA_data'))
        self.sample_ids = ['Site1', 'Site2', 'Site3']
        self.feature_ids = ['Species1', 'Species2', 'Species3']
        self.pc_ids = ['CA1', 'CA2']
        self.contingency = pd.DataFrame(self.X, self.sample_ids,
                                        self.feature_ids)

    def _expected_results(self, feature_rows, sample_rows):
        # Assemble the OrdinationResults object we expect `ca` to produce.
        eigvals = pd.Series(np.array([0.09613302, 0.04094181]), self.pc_ids)
        features = pd.DataFrame(np.array(feature_rows),
                                self.feature_ids, self.pc_ids)
        samples = pd.DataFrame(np.array(sample_rows),
                               self.sample_ids, self.pc_ids)
        return OrdinationResults('CA', 'Correspondance Analysis',
                                 eigvals=eigvals, features=features,
                                 samples=samples)

    def test_scaling2(self):
        # p. 460 L&L 1998: F_hat (features) and V_hat (samples).
        expected = self._expected_results(
            [[0.40887, -0.06955],
             [-0.11539, 0.29977],
             [-0.30997, -0.18739]],
            [[-0.84896, -0.88276],
             [-0.22046, 1.34482],
             [1.66697, -0.47032]])
        observed = ca(self.contingency, 2)
        assert_ordination_results_equal(expected, observed, decimal=5,
                                        ignore_directionality=True)

    def test_scaling1(self):
        # p. 458: V (features) and F (samples).
        expected = self._expected_results(
            [[1.31871, -0.34374],
             [-0.37215, 1.48150],
             [-0.99972, -0.92612]],
            [[-0.26322, -0.17862],
             [-0.06835, 0.27211],
             [0.51685, -0.09517]])
        observed = ca(self.contingency, 1)
        assert_ordination_results_equal(expected, observed, decimal=5,
                                        ignore_directionality=True)

    def test_maintain_chi_square_distance_scaling1(self):
        """In scaling 1, chi^2 distance among rows (samples) is equal to
        euclidean distance between them in transformed space."""
        frequencies = self.X / self.X.sum()
        chi2_distances = chi_square_distance(frequencies)
        transformed = ca(self.contingency, 1).samples.values
        npt.assert_almost_equal(chi2_distances,
                                pdist(transformed, 'euclidean'))

    def test_maintain_chi_square_distance_scaling2(self):
        """In scaling 2, chi^2 distance among columns (features) is
        equal to euclidean distance between them in transformed space."""
        frequencies = self.X / self.X.sum()
        chi2_distances = chi_square_distance(frequencies, between_rows=False)
        transformed = ca(self.contingency, 2).features.values
        npt.assert_almost_equal(chi2_distances,
                                pdist(transformed, 'euclidean'))
class TestCAErrors(TestCase):
    """Error handling for correspondence analysis."""

    def setUp(self):
        pass

    def test_negative(self):
        # CA must reject contingency tables with negative values.
        contingency = np.array([[1, 2], [-0.1, -2]])
        with npt.assert_raises(ValueError):
            ca(pd.DataFrame(contingency))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    main()
|
xguse/scikit-bio
|
skbio/stats/ordination/tests/test_correspondence_analysis.py
|
Python
|
bsd-3-clause
| 7,852
|
[
"scikit-bio"
] |
956a6a58726b22358323e9cc249d822e0304d840627a575eaede9a07f814c4e4
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 15:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the `type` field from the Visit model."""

    # Must be applied after migration 0103 of the `visit` app.
    dependencies = [
        ('visit', '0103_visit_answerset'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='visit',
            name='type',
        ),
    ]
|
koebbe/homeworks
|
visit/migrations/0104_remove_visit_type.py
|
Python
|
mit
| 384
|
[
"VisIt"
] |
f7e7082c26c68ca23cb67a7b3a078daf67349ff31600cb75bf10d5dcacd70520
|
# -*- coding: utf-8 -*-
#
##############################################################################
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest (module metadata evaluated by the framework).
{
    "name": "Companyweb",
    "version": "8.0.1.0.0",
    "author": "ACSONE SA/NV,Odoo Community Association (OCA)",
    "category": "Generic Modules/Accounting",
    "website": "http://www.acsone.eu",
    "depends": [
        'account_financial_report_webkit',
        'base_vat',
        # TODO: account voucher is required
        # for the test suite only
        # (need to refactor the test suite)
        'account_voucher',
    ],
    # Python libraries that must be importable for the addon to install.
    'external_dependencies': {
        'python': ['lxml', 'xlwt', 'xlrd'],
    },
    "description": """
Companyweb - Know who you are dealing with
==========================================
This module provides access to financial health information about Belgian
companies right from the OpenERP Customer form. Information is obtained
from the Companyweb database (www.companyweb.be).
You must be a Companyweb customer to use this module in production.
Please visit www.companyweb.be and use login 'cwacsone',
with password 'demo' to obtain test credentials.
Main Features
-------------
* Obtain crucial information about Belgian companies,
based on their VAT number: name, address,
credit limit, health barometer, financial informations
such as turnover or equity capital, and more.
* Update address and credit limit in your OpenERP database.
* Generate reports about payment habits of your customers.
* Access to detailed company information on www.companyweb.be.
Technical information
---------------------
This module depends on module account_financial_report_webkit which
provides an accurate algorithm for open invoices report.
Contributors
------------
* Stéphane Bidoul <stephane.bidoul@acsone.eu>
* Adrien Peiffer <adrien.peiffer@acsone.eu>
""",
    # XML data files loaded on installation.
    "data": [
        "wizard/account_companyweb_report_wizard_view.xml",
        "wizard/account_companyweb_wizard_view.xml",
        "view/res_config_view.xml",
        "view/res_partner_view.xml",
    ],
    "demo": [],
    "license": "AGPL-3",
    "installable": True,
}
|
acsone/l10n-belgium
|
account_companyweb/__openerp__.py
|
Python
|
agpl-3.0
| 2,962
|
[
"VisIt"
] |
07076977d05dc0d4c317bc10e01bd3cd783aec552c7a5e93094b8382c7c8c89a
|
import py
import pytest
import cfme
import subprocess
import sys
# Directory that contains the `cfme` package.
ROOT = py.path.local(cfme.__file__).dirpath()
# Every non-test Python module under the package, in a stable (sorted) order.
MODULES = sorted(x for x in ROOT.visit("*.py") if 'test_' not in x.basename)
# Modules known to fail a bare import (import-time side effects, loops, ...);
# these are skipped rather than failing the suite.
KNOWN_FAILURES = set(ROOT.dirpath().join(x) for x in [
    'cfme/utils/ports.py',  # module object
    'cfme/utils/dockerbot/check_prs.py',  # unprotected script
    'cfme/utils/conf.py',  # config object that replaces the module
    'cfme/intelligence/rss.py',  # import loops
    'cfme/intelligence/chargeback/rates.py',
    'cfme/intelligence/chargeback/assignments.py',
    'cfme/intelligence/chargeback/__init__.py',
    'cfme/fixtures/widgets.py',
    'cfme/dashboard.py',
    'cfme/configure/tasks.py',
])
@pytest.mark.parametrize('module_path', MODULES, ids=ROOT.dirpath().bestrelpath)
@pytest.mark.long_running
def test_import_own_module(module_path):
    """Every module must be importable on its own, in a fresh interpreter."""
    if module_path in KNOWN_FAILURES:
        relpath = ROOT.dirpath().bestrelpath(module_path)
        pytest.skip("{} is a known failed path".format(relpath))
    # Import in a clean subprocess so modules cannot lean on prior imports.
    loader = 'import sys, py;py.path.local(sys.argv[1]).pyimport()'
    subprocess.check_call([sys.executable, '-c', loader, str(module_path)])
|
anurag03/integration_tests
|
cfme/tests/test_modules_importable.py
|
Python
|
gpl-2.0
| 1,129
|
[
"VisIt"
] |
26d74a1423331e087529b99d15a7d51a17dae1e9e3d7b65e9fff126afc2209aa
|
#!/usr/bin/env python
#
# simple script to repeatedly publish an MQTT message
#
# uses the Python MQTT client from the Paho project
# http://eclipse.org/paho
#
# Andy Piper @andypiper http://andypiper.co.uk
#
# 2011/09/15 first version
# 2012/05/28 updated to use new pure python client from mosquitto
# 2014/02/03 updated to use the Paho client
#
# pip install paho-mqtt
# python blast.py
import paho.mqtt.client as paho
import os
import time
import sys
import random
# NOTE: Python 2 script (uses the `print` statement below).
random.seed()

# Create a client with a per-process unique client id.
mypid = os.getpid()
client_uniq = "pubclient_"+str(mypid)
mqttc = paho.Client(client_uniq, True) #clean session

# connect to broker
broker = "localhost"
port = 1883
mqttc.connect(broker, port, 60)

msg = "blue"

# Remain connected and publish; loop() returns 0 while the connection is up.
while mqttc.loop() == 0:
    mqttc.publish("vote", "!" + msg + "~", 0, False) #qos=0, retain=n
    print "published: %s" % msg
    # Randomized delay between publishes (0.1 to 1 second).
    time.sleep(random.uniform(0.1, 1))
    pass
|
xively/ee_live_2014
|
python/pull-blue.py
|
Python
|
mit
| 920
|
[
"BLAST"
] |
899f0bf41a7d44022f530ba639821c29d0a81206084740a3813e7d02528a105f
|
#!/usr/bin/env python
# -*- coding: utf-8
"""This file contains Kegg related classes."""
import os
import shutil
import glob
import re
import copy
import statistics
import json
import time
import hashlib
import pandas as pd
import numpy as np
from scipy import stats
import anvio
import anvio.db as db
import anvio.utils as utils
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
import anvio.tables as t
import anvio.ccollections as ccollections
from anvio.errors import ConfigError
from anvio.drivers.hmmer import HMMer
from anvio.parsers import parser_modules
from anvio.tables.genefunctions import TableForGeneFunctions
from anvio.dbops import ContigsSuperclass, ContigsDatabase, ProfileSuperclass, ProfileDatabase
from anvio.genomedescriptions import MetagenomeDescriptions, GenomeDescriptions
# Standard anvi'o module metadata.
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2020, the Meren Lab (http://merenlab.org/)"
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Iva Veseli"
__email__ = "iveseli@uchicago.edu"

# Module-wide terminal helpers; the *_quiet variants are constructed with
# verbose=False for library callers that want no console output.
run = terminal.Run()
progress = terminal.Progress()
run_quiet = terminal.Run(log_file_path=None, verbose=False)
progress_quiet = terminal.Progress(verbose=False)
# Short aliases for the terminal formatting helpers.
pp = terminal.pretty_print
P = terminal.pluralize
"""Some critical constants for metabolism estimation output formatting."""
# dict containing possible output modes
# output_suffix should be unique to a mode so that multiple output modes can be used at once
# data_dict indicates which data dictionary is used for generating the output (modules or kofams)
# headers list describes which information to include in the output file; see OUTPUT_HEADERS dict below for more info
# description is what is printed when --list-available-modes parameter is used
OUTPUT_MODES = {'kofam_hits_in_modules': {
'output_suffix': "kofam_hits_in_modules.txt",
'data_dict': "modules",
'headers': ["unique_id", "kegg_module", "module_is_complete",
"module_completeness", "path_id", "path", "path_completeness",
"kofam_hit", "gene_caller_id", "contig"],
'description': "Information on each KOfam hit that belongs to a KEGG module"
},
'modules': {
'output_suffix': "modules.txt",
'data_dict': "modules",
'headers': ["unique_id", "kegg_module", "module_name", "module_class", "module_category",
"module_subcategory", "module_definition", "module_completeness", "module_is_complete",
"kofam_hits_in_module", "gene_caller_ids_in_module", "warnings"],
'description': "Information on KEGG modules"
},
'modules_custom': {
'output_suffix': "modules_custom.txt",
'data_dict': "modules",
'headers': None,
'description': "A custom tab-delimited output file where you choose the included KEGG modules data using --custom-output-headers"
},
'kofam_hits': {
'output_suffix': "kofam_hits.txt",
'data_dict': "kofams",
'headers': ["unique_id", "ko", "gene_caller_id", "contig", "modules_with_ko", "ko_definition"],
'description': "Information on all KOfam hits in the contigs DB, regardless of KEGG module membership"
},
}
# dict containing matrix headers of information that we can output in custom mode
# key corresponds to the header's key in output dictionary (returned from generate_output_dict_for_modules() function)
# cdict_key is the header's key in modules or kofams data dictionary (if any)
# mode_type indicates which category of output modes (modules or kofams) this header can be used for. If both, this is 'all'
# description is printed when --list-available-output-headers parameter is used
OUTPUT_HEADERS = {'unique_id' : {
'cdict_key': None,
'mode_type': 'all',
'description': "Just an integer that keeps our data organized. No real meaning here. Always included in output, so no need to specify it on the command line"
},
'kegg_module' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "KEGG module number"
},
'module_is_complete' : {
'cdict_key': 'complete',
'mode_type': 'modules',
'description': "Whether a KEGG module is considered complete or not based on its percent completeness and the completeness threshold"
},
'module_completeness' : {
'cdict_key': 'percent_complete',
'mode_type': 'modules',
'description': "Percent completeness of a KEGG module"
},
'module_name' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "English name/description of a KEGG module"
},
'module_class' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Metabolism class of a KEGG module"
},
'module_category' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Metabolism category of a KEGG module"
},
'module_subcategory' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Metabolism subcategory of a KEGG module"
},
'module_definition' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "KEGG-formatted definition of a KEGG module. Describes the metabolic pathway "
"in terms of the KOS that belong to the module"
},
'module_substrates' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Comma-separated list of compounds that serve as initial input to the metabolic pathway "
"(that is, substrate(s) to the initial reaction(s) in the pathway)"
},
'module_products' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Comma-separated list of compounds that serve as final output from the metabolic pathway "
"(that is, product(s) of the final reaction(s) in the pathway)"
},
'module_intermediates' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Comma-separated list of compounds that are intermediates the metabolic pathway "
"(compounds that are both outputs and inputs of reaction(s) in the pathway)"
},
'gene_caller_ids_in_module': {
'cdict_key': None,
'mode_type': 'modules',
'description': "Comma-separated list of gene caller IDs of KOfam hits in a module"
},
'gene_caller_id': {
'cdict_key': None,
'mode_type': 'all',
'description': "Gene caller ID of a single KOfam hit in the contigs DB. If you choose this header, each "
"line in the output file will be a KOfam hit"
},
'kofam_hits_in_module' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Comma-separated list of KOfam hits in a module"
},
'kofam_hit' : {
'cdict_key': 'kofam_hits',
'mode_type': 'modules',
'description': "KO number of a single KOfam hit. If you choose this header, each line in the output file "
"will be a KOfam hit"
},
'contig' : {
'cdict_key': 'genes_to_contigs',
'mode_type': 'all',
'description': "Contig that a KOfam hit is found on. If you choose this header, each line in the output "
"file will be a KOfam hit"
},
'path_id' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "Integer ID for a path through a KEGG module. No real meaning and just for data organization. "
"If you choose this header, each line in the output file will be a KOfam hit"
},
'path' : {
'cdict_key': None,
'mode_type': 'modules',
'description': "A path through a KEGG module (a linear sequence of KOs that together represent each metabolic step "
"in the module. Most modules have several of these due to KO redundancy). If you choose this header, "
"each line in the output file will be a KOfam hit"
},
'path_completeness' : {
'cdict_key': 'pathway_completeness',
'mode_type': 'modules',
'description': "Percent completeness of a particular path through a KEGG module. If you choose this header, each line "
"in the output file will be a KOfam hit"
},
'warnings' : {
'cdict_key': 'warnings',
'mode_type': 'modules',
'description': "If we are missing a KOfam profile for one of the KOs in a module, there will be a note in this column. "
},
'ko' : {
'cdict_key': None,
'mode_type': 'kofams',
'description': 'KEGG Orthology (KO) number of a KOfam hit'
},
'modules_with_ko': {
'cdict_key': 'modules',
'mode_type': 'kofams',
'description': 'A comma-separated list of modules that the KO belongs to'
},
'ko_definition': {
'cdict_key': None,
'mode_type': 'kofams',
'description': 'The functional annotation associated with the KO number'
},
}
# global metadata header lists for matrix format
# if you want to add something here, don't forget to add it to the dictionary in the corresponding
# get_XXX_metadata_dictionary() function
MODULE_METADATA_HEADERS = ["module_name", "module_class", "module_category", "module_subcategory"]
KO_METADATA_HEADERS = ["ko_definition", "modules_with_ko"]
class KeggContext(object):
"""The purpose of this base class is to define shared functions and file paths for all KEGG operations."""
def __init__(self, args):
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
# default data directory will be called KEGG and will store the KEGG Module data as well
self.default_kegg_dir = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/KEGG')
self.kegg_data_dir = A('kegg_data_dir') or self.default_kegg_dir
self.orphan_data_dir = os.path.join(self.kegg_data_dir, "orphan_data")
self.module_data_dir = os.path.join(self.kegg_data_dir, "modules")
self.hmm_data_dir = os.path.join(self.kegg_data_dir, "HMMs")
self.pathway_data_dir = os.path.join(self.kegg_data_dir, "pathways")
self.quiet = A('quiet') or False
self.just_do_it = A('just_do_it')
# shared variables for all KEGG subclasses
self.kofam_hmm_file_path = os.path.join(self.hmm_data_dir, "Kofam.hmm") # file containing concatenated KOfam hmms
self.ko_list_file_path = os.path.join(self.kegg_data_dir, "ko_list.txt")
self.kegg_module_file = os.path.join(self.kegg_data_dir, "modules.keg")
self.kegg_pathway_file = os.path.join(self.kegg_data_dir, "pathways.keg")
self.kegg_modules_db_path = os.path.join(self.kegg_data_dir, "MODULES.db")
# sanity check to prevent automatic overwriting of non-default kegg data dir
if A('reset') and A('kegg_data_dir'):
raise ConfigError("You are attempting to run KEGG setup on a non-default data directory (%s) using the --reset flag. "
"To avoid automatically deleting a directory that may be important to you, anvi'o refuses to reset "
"directories that have been specified with --kegg-data-dir. If you really want to get rid of this "
"directory and regenerate it with KEGG data inside, then please remove the directory yourself using "
"a command like `rm -r %s`. We are sorry to make you go through this extra trouble, but it really is "
"the safest way to handle things." % (self.kegg_data_dir, self.kegg_data_dir))
def setup_ko_dict(self):
"""The purpose of this function is to process the ko_list file into usable form by KEGG sub-classes.
The ko_list file (which is downloaded along with the KOfam HMM profiles) contains important
information for each KEGG Orthology number (KO, or knum), incuding pre-defined scoring thresholds
for limiting HMM hits and annotation information.
It looks something like this:
knum threshold score_type profile_type F-measure nseq nseq_used alen mlen eff_nseq re/pos definition
K00001 329.57 domain trim 0.231663 1473 1069 1798 371 17.12 0.590 alcohol dehydrogenase [EC:1.1.1.1]
Since this information is useful for both the setup process (we need to know all the knums) and HMM process,
all KEGG subclasses need to have access to this dictionary.
This is a dictionary (indexed by knum) of dictionaries(indexed by column name).
Here is an example of the dictionary structure:
self.ko_dict["K00001"]["threshold"] = 329.57
"""
self.ko_dict = utils.get_TAB_delimited_file_as_dictionary(self.ko_list_file_path)
self.ko_skip_list, self.ko_no_threshold_list = self.get_ko_skip_list()
# if we are currently setting up KEGG, we should generate a text file with the ko_list entries
# of the KOs that have no scoring threshold
if self.__class__.__name__ in ['KeggSetup']:
orphan_ko_dict = {ko:self.ko_dict[ko] for ko in self.ko_skip_list}
orphan_ko_dict.update({ko:self.ko_dict[ko] for ko in self.ko_no_threshold_list})
if not os.path.exists(self.orphan_data_dir): # should not happen but we check just in case
raise ConfigError("Hmm. Something is out of order. The orphan data directory %s does not exist "
"yet, but it needs to in order for the setup_ko_dict() function to work." % self.orphan_data_dir)
orphan_ko_path = os.path.join(self.orphan_data_dir, "01_ko_fams_with_no_threshold.txt")
orphan_ko_headers = ["threshold","score_type","profile_type","F-measure","nseq","nseq_used","alen","mlen","eff_nseq","re/pos", "definition"]
utils.store_dict_as_TAB_delimited_file(orphan_ko_dict, orphan_ko_path, key_header="knum", headers=orphan_ko_headers)
[self.ko_dict.pop(ko) for ko in self.ko_skip_list]
[self.ko_dict.pop(ko) for ko in self.ko_no_threshold_list]
def get_ko_skip_list(self):
    """Determine which KO numbers have no associated data or no score threshold in the ko_list file.

    A KO with no data at all has hyphens in every column except the first (knum) and last (definition):
        K14936	-	-	-	-	-	-	-	-	-	-	small nucleolar RNA snR191
    These entries are RNAs.

    A KO with no score threshold has a hyphen only in the threshold column:
        K23749 - - - - 1 1 2266 2266 0.39 0.592 spectinabilin polyketide synthase system NorC [EC:2.3.1.290]

    Returns
    =======
    skip_list : list of str
        KO numbers that have no associated data at all (ie, RNAs)
    no_threshold_list : list of str
        KO numbers that have data but no scoring threshold
    """
    cols_to_check = ["threshold", "score_type", "profile_type", "F-measure", "nseq",
                     "nseq_used", "alen", "mlen", "eff_nseq", "re/pos"]
    skip_list = []
    no_threshold_list = []
    for knum, entry in self.ko_dict.items():
        if all(entry[col] == "-" for col in cols_to_check):
            # every data column is blank: this is an RNA-like entry with no HMM data
            skip_list.append(knum)
        elif entry["threshold"] == "-":
            # some data exists, but no bit score threshold for filtering hits
            no_threshold_list.append(knum)
    return skip_list, no_threshold_list
def get_module_metadata_dictionary(self, mnum):
    """Return a dictionary of metadata for the given module.

    The returned dictionary contains every header from MODULE_METADATA_HEADERS,
    using those headers as keys.
    """
    class_info = self.kegg_modules_db.get_kegg_module_class_dict(mnum, class_value=self.all_modules_in_db[mnum]['CLASS'])
    return {
        "module_name": self.all_modules_in_db[mnum]['NAME'],
        "module_class": class_info["class"],
        "module_category": class_info["category"],
        "module_subcategory": class_info["subcategory"],
    }
def get_ko_metadata_dictionary(self, knum):
    """Return a dictionary of metadata for the given KO.

    The returned dictionary contains every header from KO_METADATA_HEADERS,
    using those headers as keys.
    """
    modules_for_ko = self.all_kos_in_db[knum] if knum in self.all_kos_in_db else None
    modules_str = ",".join(modules_for_ko) if modules_for_ko else "None"
    if knum not in self.ko_dict:
        raise ConfigError("Something is mysteriously wrong. Your contigs database "
                          f"has an annotation for KO {knum} but this KO is not in "
                          "the KO dictionary. This should never have happened.")
    return {
        "ko_definition": self.ko_dict[knum]['definition'],
        "modules_with_ko": modules_str,
    }
class KeggSetup(KeggContext):
"""Class for setting up KEGG Kofam HMM profiles and modules.
It performs sanity checks and downloads, unpacks, and prepares the profiles for later use by `hmmscan`.
It also downloads module files and creates the MODULES.db.
Parameters
==========
args: Namespace object
All the arguments supplied by user to anvi-setup-kegg-kofams
skip_init: Boolean
Developers can use this flag to skip the sanity checks and creation of directories when testing this class
"""
def __init__(self, args, run=run, progress=progress, skip_init=False):
    """Set up the KeggSetup context: validates input options, initializes the base class,
    creates output directories (when downloading), and records download URLs.

    Parameters
    ==========
    args : Namespace object
        All the arguments supplied by user to anvi-setup-kegg-kofams
    run, progress
        anvi'o terminal run/progress objects (default to the module-level instances)
    skip_init : bool
        Developers can use this flag to skip the sanity checks and directory creation
    """
    self.args = args
    self.run = run
    self.progress = progress
    self.kegg_archive_path = args.kegg_archive
    self.download_from_kegg = True if args.download_from_kegg else False
    self.kegg_snapshot = args.kegg_snapshot
    # the three setup modes (archive, direct download, snapshot) are mutually exclusive
    if self.kegg_archive_path and self.download_from_kegg:
        raise ConfigError("You provided two incompatible input options, --kegg-archive and --download-from-kegg. "
                          "Please pick either just one or none of these. ")
    if self.kegg_snapshot and self.download_from_kegg or self.kegg_snapshot and self.kegg_archive_path:
        raise ConfigError("You cannot request setup from an anvi'o KEGG snapshot at the same time as from KEGG directly or from one of your "
                          "KEGG archives. Please pick just one setup option and try again.")
    # initializing this to None here so that it doesn't break things downstream
    self.pathway_dict = None
    # init the base class
    KeggContext.__init__(self, self.args)
    filesnpaths.is_program_exists('hmmpress')
    # this is to avoid a strange os.path.dirname() bug that returns nothing if the input doesn't look like a path
    if '/' not in self.kegg_data_dir:
        self.kegg_data_dir += '/'
    filesnpaths.is_output_dir_writable(os.path.dirname(self.kegg_data_dir))
    # refuse to clobber a previous setup unless --reset / --debug / skip_init
    if not args.reset and not anvio.DEBUG and not skip_init:
        self.is_database_exists()
    # only the direct-download mode needs the full directory scaffolding up front;
    # archive/snapshot setup unpacks its own directory structure later
    if self.download_from_kegg and not self.kegg_archive_path and not skip_init:
        filesnpaths.gen_output_directory(self.kegg_data_dir, delete_if_exists=args.reset)
        filesnpaths.gen_output_directory(self.hmm_data_dir, delete_if_exists=args.reset)
        filesnpaths.gen_output_directory(self.orphan_data_dir, delete_if_exists=args.reset)
        filesnpaths.gen_output_directory(self.module_data_dir, delete_if_exists=args.reset)
        filesnpaths.gen_output_directory(self.pathway_data_dir, delete_if_exists=args.reset)
    # get KEGG snapshot info for default setup
    self.target_snapshot = self.kegg_snapshot or 'v2020-12-23'
    self.target_snapshot_yaml = os.path.join(os.path.dirname(anvio.__file__), 'data/misc/KEGG-SNAPSHOTS.yaml')
    self.snapshot_dict = utils.get_yaml_as_dict(self.target_snapshot_yaml)
    if self.target_snapshot not in self.snapshot_dict.keys():
        self.run.warning(None, header="AVAILABLE KEGG SNAPSHOTS", lc="yellow")
        available_snapshots = sorted(list(self.snapshot_dict.keys()))
        for snapshot_name in available_snapshots:
            self.run.info_single(snapshot_name + (' (latest)' if snapshot_name == available_snapshots[-1] else ''))
        raise ConfigError("Whoops. The KEGG snapshot you requested is not one that is known to anvi'o. Please try again, and "
                          "this time pick from the list shown above.")
    # default download path for KEGG snapshot
    self.default_kegg_data_url = self.snapshot_dict[self.target_snapshot]['url']
    self.default_kegg_archive_file = self.snapshot_dict[self.target_snapshot]['archive_name']
    # download from KEGG option: ftp path for HMM profiles and KO list
    #   for ko list, add /ko_list.gz to end of url
    #   for profiles, add /profiles.tar.gz to end of url
    self.database_url = "ftp://ftp.genome.jp/pub/db/kofam"
    # dictionary mapping downloaded file name to final decompressed file name or folder location
    self.files = {'ko_list.gz': self.ko_list_file_path, 'profiles.tar.gz': self.kegg_data_dir}
    # download from KEGG option: module/pathway map htext files and API link
    self.kegg_module_download_path = "https://www.genome.jp/kegg-bin/download_htext?htext=ko00002.keg&format=htext&filedir="
    self.kegg_pathway_download_path = "https://www.genome.jp/kegg-bin/download_htext?htext=br08901.keg&format=htext&filedir="
    self.kegg_rest_api_get = "http://rest.kegg.jp/get"
def is_database_exists(self):
    """This function determines whether the user has already downloaded the Kofam HMM profiles and KEGG modules.

    It raises a ConfigError (telling the user to use --reset or delete the directory) if any
    of the expected KEGG artifacts already exist on disk; it returns nothing when the data
    directory is clean and setup can proceed.
    """
    # concatenated/pressed KOfam HMM file from a previous setup
    if os.path.exists(self.kofam_hmm_file_path):
        raise ConfigError("It seems you already have KOfam HMM profiles installed in '%s', please use the --reset flag "
                          "or delete this directory manually if you want to re-download it." % self.kegg_data_dir)
    # KEGG module htext file from a previous setup
    if os.path.exists(self.kegg_module_file):
        raise ConfigError("Interestingly, though KOfam HMM profiles are not installed on your system, KEGG module "
                          "information seems to have been already downloaded in %s. Please use the --reset flag or "
                          "delete this directory manually to let this script re-download everything from scratch."
                          % self.kegg_data_dir)
    # KEGG pathway htext file from a previous setup
    if os.path.exists(self.kegg_pathway_file):
        raise ConfigError("Interestingly, though KOfam HMM profiles are not installed on your system, KEGG pathway "
                          "information seems to have been already downloaded in %s. Please use the --reset flag or "
                          "delete this directory manually to let this script re-download everything from scratch."
                          % self.kegg_data_dir)
    # per-module data directory from a previous setup
    if os.path.exists(self.module_data_dir):
        raise ConfigError("It seems the KEGG module directory %s already exists on your system. This is even more "
                          "strange because Kofam HMM profiles have not been downloaded. We suggest you to use the "
                          "--reset flag or delete the KEGG directory (%s) manually to download everything from scratch."
                          % (self.module_data_dir, self.kegg_data_dir))
    # per-pathway data directory from a previous setup
    if os.path.exists(self.pathway_data_dir):
        raise ConfigError("It seems the KEGG pathway directory %s already exists on your system. This is even more "
                          "strange because Kofam HMM profiles have not been downloaded. We suggest you to use the "
                          "--reset flag or delete the KEGG directory (%s) manually to download everything from scratch."
                          % (self.pathway_data_dir, self.kegg_data_dir))
def download_profiles(self):
    """This function downloads the Kofam profiles.

    It fetches each file listed in self.files (ko_list.gz and profiles.tar.gz) from the
    KOfam FTP site into self.kegg_data_dir. Any download failure is converted into a
    ConfigError with guidance for the user.
    """
    self.run.info("Kofam Profile Database URL", self.database_url)
    try:
        for file_name in self.files.keys():
            utils.download_file(self.database_url + '/' + file_name,
                os.path.join(self.kegg_data_dir, file_name), progress=self.progress, run=self.run)
    except Exception as e:
        # print the original exception so the user can see what actually went wrong
        # before we raise a friendlier error
        print(e)
        raise ConfigError("Anvi'o failed to download KEGG KOfam profiles from the KEGG website. Something "
                          "likely changed on the KEGG end. Please contact the developers to see if this is "
                          "a fixable issue. If it isn't, we may be able to provide you with a legacy KEGG "
                          "data archive that you can use to setup KEGG with the --kegg-archive flag.")
def process_module_file(self):
    """This function reads the kegg module file into a dictionary. It should be called during setup to get the KEGG module numbers so that KEGG modules can be downloaded.

    The structure of this file is like this:

    +D	Module
    #<h2><a href="/kegg/kegg2.html"><img src="/Fig/bget/kegg3.gif" align="middle" border=0></a>&nbsp; KEGG Modules</h2>
    !
    A<b>Pathway modules</b>
    B
    B  <b>Carbohydrate metabolism</b>
    C    Central carbohydrate metabolism
    D      M00001  Glycolysis (Embden-Meyerhof pathway), glucose => pyruvate [PATH:map00010 map01200 map01100]
    D      M00002  Glycolysis, core module involving three-carbon compounds [PATH:map00010 map01200 map01230 map01100]
    D      M00003  Gluconeogenesis, oxaloacetate => fructose-6P [PATH:map00010 map00020 map01100]

    In other words, a bunch of initial lines to be ignored, and thereafter the line's information can be determined by the one-letter code at the start.
    A = Pathway modules (metabolic pathways) or signature modules (gene sets that indicate a phenotypic trait, ie toxins).
    B = Category of module (a type of metabolism for pathway modules. For signature modules, either Gene Set or Module Set)
    C = Sub-category of module
    D = Module

    Populates self.module_dict, a dict keyed by module number with name/type/category/subcategory values.
    """
    self.module_dict = {}
    filesnpaths.is_file_exists(self.kegg_module_file)
    filesnpaths.is_file_plain_text(self.kegg_module_file)
    self.progress.new("Parsing KEGG Module file")
    current_module_type = None
    current_category = None
    current_subcategory = None
    # NOTE: the original code opened this file with mode 'rU', which was removed in
    # Python 3.11 (universal newlines are the default in text mode anyway), and never
    # closed the file handle. A context manager fixes both problems.
    with open(self.kegg_module_file, 'r') as f:
        for line in f:
            line = line.strip('\n')
            if not line:
                # guard against blank lines so that line[0] below cannot raise an IndexError
                continue
            first_char = line[0]
            # garbage lines
            if first_char in ["+", "#", "!"]:
                continue
            # module type
            if first_char == "A":
                fields = re.split('<[^>]*>', line)  # we split by the html tag here
                current_module_type = fields[1]
            # Category
            elif first_char == "B":
                fields = re.split('<[^>]*>', line)  # we split by the html tag here
                if len(fields) == 1:  # sometimes this level has lines with only a B
                    continue
                current_category = fields[1]
            # Sub-category
            elif first_char == "C":
                fields = re.split(r'\s{2,}', line)  # don't want to split the subcategory name, so we have to split at least 2 spaces
                current_subcategory = fields[1]
            # module
            elif first_char == "D":
                fields = re.split(r'\s{2,}', line)
                mnum = fields[1]
                self.module_dict[mnum] = {"name" : fields[2], "type" : current_module_type, "category" : current_category, "subcategory" : current_subcategory}
            # unknown code
            else:
                raise ConfigError("While parsing the KEGG file %s, we found an unknown line code %s. This has "
                                  "made the file unparseable. It is likely that an update to KEGG has broken "
                                  "things such that anvi'o doesn't know what is going on anymore. Sad, we know. :( "
                                  "Please contact the developers to see if this is a fixable issue, and in the "
                                  "meantime use an older version of the KEGG data directory (if you have one). "
                                  "If we cannot fix it, we may be able to provide you with a legacy KEGG "
                                  "data archive that you can use to setup KEGG with the --kegg-archive flag." % (self.kegg_module_file, first_char))
    self.progress.end()
def process_pathway_file(self):
    """This function reads the kegg pathway map file into a dictionary. It should be called during setup to get the KEGG pathway ids so the pathways can be downloaded.

    The structure of this file is like this:

    +C	Map number
    #<h2><a href="/kegg/kegg2.html"><img src="/Fig/bget/kegg3.gif" align="middle" border=0></a>&nbsp; KEGG Pathway Maps</h2>
    !
    A<b>Metabolism</b>
    B  Global and overview maps
    C    01100  Metabolic pathways
    C    01110  Biosynthesis of secondary metabolites

    Initial lines can be ignored and thereafter the line's information can be determined by the one-letter code at the start.
    A = Category of Pathway Map
    B = Sub-category of Pathway Map
    C = Pathway Map identifier number and name

    Note that not all Pathway Maps that we download will have ORTHOLOGY fields. We don't exclude these here, but processing later
    will have to be aware of the fact that not all pathways will have associated KOs.

    We do, however, exclude Pathway Maps that don't have existing `koXXXXX` identifiers (these yield 404 errors when attempting to
    download them). For instance, we exclude those that start with the code 010 (chemical structure maps) or with 07 (drug structure maps).

    Populates self.pathway_dict, a dict keyed by 'koXXXXX' id with name/category/subcategory values.
    """
    self.pathway_dict = {}
    filesnpaths.is_file_exists(self.kegg_pathway_file)
    filesnpaths.is_file_plain_text(self.kegg_pathway_file)
    self.progress.new("Parsing KEGG Pathway file")
    current_category = None
    current_subcategory = None
    # NOTE: the original code opened this file with mode 'rU', which was removed in
    # Python 3.11 (universal newlines are the default in text mode anyway), and never
    # closed the file handle. A context manager fixes both problems.
    with open(self.kegg_pathway_file, 'r') as f:
        for line in f:
            line = line.strip('\n')
            if not line:
                # guard against blank lines so that line[0] below cannot raise an IndexError
                continue
            first_char = line[0]
            # garbage lines
            if first_char in ["+", "#", "!"]:
                continue
            # Category
            if first_char == "A":
                fields = re.split('<[^>]*>', line)  # we split by the html tag here
                current_category = fields[1]
            # Sub-category
            elif first_char == "B":
                fields = re.split(r'\s{2,}', line)  # don't want to split the subcategory name, so we have to split at least 2 spaces
                current_subcategory = fields[1]
            elif first_char == "C":
                fields = re.split(r'\s{2,}', line)
                konum = "ko" + fields[1]
                # skip chemical structure maps (010XX) and drug structure maps (07XXX),
                # which have no downloadable koXXXXX entries
                if konum[:5] != "ko010" and konum[:4] != "ko07":
                    self.pathway_dict[konum] = {"name" : fields[2], "category" : current_category, "subcategory" : current_subcategory}
            # unknown code
            else:
                raise ConfigError("While parsing the KEGG file %s, we found an unknown line code %s. This has "
                                  "made the file unparseable. It is likely that an update to KEGG has broken "
                                  "things such that anvi'o doesn't know what is going on anymore. Sad, we know. :( "
                                  "Please contact the developers to see if this is a fixable issue, and in the "
                                  "meantime use an older version of the KEGG data directory (if you have one). "
                                  "If we cannot fix it, we may be able to provide you with a legacy KEGG "
                                  "data archive that you can use to setup KEGG with the --kegg-archive flag." % (self.kegg_pathway_file, first_char))
    self.progress.end()
def download_modules(self):
    """This function downloads the KEGG modules.

    To do so, it also processes the KEGG module file into a dictionary via the process_module_file() function.
    To verify that each file has been downloaded properly, we check that the last line is '///'.
    """
    self.run.info("KEGG Module Database URL", self.kegg_rest_api_get)
    # download the kegg module file, which lists all modules
    try:
        utils.download_file(self.kegg_module_download_path, self.kegg_module_file, progress=self.progress, run=self.run)
    except Exception as e:
        print(e)
        raise ConfigError("Anvi'o failed to download the KEGG Module htext file from the KEGG website. Something "
                          "likely changed on the KEGG end. Please contact the developers to see if this is "
                          "a fixable issue. If it isn't, we may be able to provide you with a legacy KEGG "
                          "data archive that you can use to setup KEGG with the --kegg-archive flag.")
    # get module dict
    self.process_module_file()
    self.run.info("Number of KEGG Modules", len(self.module_dict.keys()))
    # download all modules
    for mnum in self.module_dict.keys():
        file_path = os.path.join(self.module_data_dir, mnum)
        utils.download_file(self.kegg_rest_api_get + '/' + mnum,
            file_path, progress=self.progress, run=self.run)
        # verify entire file has been downloaded by checking its last line.
        # NOTE: the original code used mode 'rU' (removed in Python 3.11), never closed
        # the handle, and seeked relative to f.tell() in text mode (which is undefined
        # behavior for text streams and crashed on files shorter than 4 bytes). These
        # files are small, so simply reading the whole file is safe and correct.
        with open(file_path, 'r') as f:
            lines = f.read().splitlines()
        last_line = lines[-1] if lines else ''
        if not last_line == '///':
            raise ConfigError("The KEGG module file %s was not downloaded properly. We were expecting the last line in the file "
                              "to be '///', but instead it was %s. Formatting of these files may have changed on the KEGG website. "
                              "Please contact the developers to see if this is a fixable issue. If it isn't, we may be able to "
                              "provide you with a legacy KEGG data archive that you can use to setup KEGG with the --kegg-archive flag."
                              % (file_path, last_line))
def download_pathways(self):
    """This function downloads the KEGG Pathways.

    To do so, it first processes a KEGG file containing pathway and map identifiers into a dictionary via the process_pathway_file()
    function. To verify that each file has been downloaded properly, we check that the last line is '///'.
    """
    # note that this is the same as the REST API for modules - perhaps at some point this should be printed elsewhere so we don't repeat ourselves.
    self.run.info("KEGG Pathway Database URL", self.kegg_rest_api_get)
    # download the kegg pathway file, which lists all modules
    try:
        utils.download_file(self.kegg_pathway_download_path, self.kegg_pathway_file, progress=self.progress, run=self.run)
    except Exception as e:
        print(e)
        raise ConfigError("Anvi'o failed to download the KEGG Pathway htext file from the KEGG website. Something "
                          "likely changed on the KEGG end. Please contact the developers to see if this is "
                          "a fixable issue. If it isn't, we may be able to provide you with a legacy KEGG "
                          "data archive that you can use to setup KEGG with the --kegg-archive flag.")
    # get pathway dict
    self.process_pathway_file()
    self.run.info("Number of KEGG Pathways", len(self.pathway_dict.keys()))
    # download all pathways
    for konum in self.pathway_dict.keys():
        file_path = os.path.join(self.pathway_data_dir, konum)
        utils.download_file(self.kegg_rest_api_get + '/' + konum,
            file_path, progress=self.progress, run=self.run)
        # verify entire file has been downloaded by checking its last line.
        # NOTE: the original code used mode 'rU' (removed in Python 3.11), never closed
        # the handle, and seeked relative to f.tell() in text mode (which is undefined
        # behavior for text streams and crashed on files shorter than 4 bytes). These
        # files are small, so simply reading the whole file is safe and correct.
        with open(file_path, 'r') as f:
            lines = f.read().splitlines()
        last_line = lines[-1] if lines else ''
        if not last_line == '///':
            raise ConfigError("The KEGG pathway file %s was not downloaded properly. We were expecting the last line in the file "
                              "to be '///', but instead it was %s. Formatting of these files may have changed on the KEGG website. "
                              "Please contact the developers to see if this is a fixable issue. If it isn't, we may be able to "
                              "provide you with a legacy KEGG data archive that you can use to setup KEGG with the --kegg-archive flag."
                              % (file_path, last_line))
def decompress_files(self):
    """Decompresses the downloaded KOfam files (the gzipped ko_list and the tarred profiles)."""
    self.progress.new('Decompressing files')
    for file_name, target_path in self.files.items():
        self.progress.update('Decompressing file %s' % file_name)
        compressed_path = os.path.join(self.kegg_data_dir, file_name)
        # tarballs get extracted into a directory; everything else is plain gzip
        if compressed_path.endswith("tar.gz"):
            utils.tar_extract_file(compressed_path, output_file_path=target_path, keep_original=False)
        else:
            utils.gzip_decompress_file(compressed_path, output_file_path=target_path, keep_original=False)
        self.progress.update("File decompressed. Yay.")
    self.progress.end()
def confirm_downloaded_profiles(self):
    """Verifies that all Kofam profiles have been properly downloaded.

    Intended to be run after the files have been decompressed. The profiles directory should contain
    an hmm file for every KO number in the ko_list file, except for those in ko_skip_list.
    """
    for knum in self.ko_dict.keys():
        if knum in self.ko_skip_list:
            continue
        hmm_path = os.path.join(self.kegg_data_dir, "profiles/%s.hmm" % knum)
        if not os.path.exists(hmm_path):
            raise ConfigError("The KOfam HMM profile at %s does not exist. This probably means that something went wrong "
                              "while downloading the KOfam database. Please run `anvi-setup-kegg-kofams` with the --reset "
                              "flag. If that still doesn't work, please contact the developers to see if the issue is fixable. "
                              "If it isn't, we may be able to provide you with a legacy KEGG data archive that you can use to "
                              "setup KEGG with the --kegg-archive flag." % (hmm_path))
def move_orphan_files(self):
    """This function moves the following to the orphan files directory:

    - profiles that do not have ko_list entries
    - profiles whose ko_list entries have no scoring threshold (in ko_no_threshold_list)

    And, the following profiles should not have been downloaded, but if they were then we move them, too:
    - profiles whose ko_list entries have no data at all (in ko_skip_list)

    NOTE: by the time this runs, setup_ko_dict() has already popped the skip-list and
    no-threshold KOs out of self.ko_dict, which is why the membership test against
    self.ko_dict below identifies the orphans.
    """
    if not os.path.exists(self.orphan_data_dir): # should not happen but we check just in case
        raise ConfigError("Hmm. Something is out of order. The orphan data directory %s does not exist "
                          "yet, but it needs to in order for the move_orphan_files() function to work." % self.orphan_data_dir)
    # destination files (concatenations of the orphan HMM profiles) and the lists of
    # source profiles that will be concatenated into each
    no_kofam_path = os.path.join(self.orphan_data_dir, "00_hmm_profiles_with_no_ko_fams.hmm")
    no_kofam_file_list = []
    no_threshold_path = os.path.join(self.orphan_data_dir, "02_hmm_profiles_with_ko_fams_with_no_threshold.hmm")
    no_threshold_file_list = []
    no_data_path = os.path.join(self.orphan_data_dir, "03_hmm_profiles_with_ko_fams_with_no_data.hmm")
    no_data_file_list = []
    hmm_list = [k for k in glob.glob(os.path.join(self.kegg_data_dir, 'profiles/*.hmm'))]
    for hmm_file in hmm_list:
        # extract the KO number (e.g. 'K00001') from the profile file name
        ko = re.search('profiles/(K\d{5})\.hmm', hmm_file).group(1)
        if ko not in self.ko_dict.keys():
            if ko in self.ko_no_threshold_list:
                no_threshold_file_list.append(hmm_file)
            elif ko in self.ko_skip_list: # these should not have been downloaded, but if they were we will move them
                no_data_file_list.append(hmm_file)
            else:
                no_kofam_file_list.append(hmm_file)
    # now we concatenate the orphan KO hmms into the orphan data directory
    # (the source profiles are removed so they don't end up in the final database)
    if no_kofam_file_list:
        utils.concatenate_files(no_kofam_path, no_kofam_file_list, remove_concatenated_files=True)
        self.progress.reset()
        self.run.warning("Please note that while anvi'o was building your databases, she found %d "
                         "HMM profiles that did not have any matching KOfam entries. We have removed those HMM "
                         "profiles from the final database. You can find them under the directory '%s'."
                         % (len(no_kofam_file_list), self.orphan_data_dir))
    if no_threshold_file_list:
        utils.concatenate_files(no_threshold_path, no_threshold_file_list, remove_concatenated_files=True)
        self.progress.reset()
        self.run.warning("Please note that while anvi'o was building your databases, she found %d "
                         "KOfam entries that did not have any threshold to remove weak hits. We have removed those HMM "
                         "profiles from the final database. You can find them under the directory '%s'."
                         % (len(no_threshold_file_list), self.orphan_data_dir))
    if no_data_file_list:
        utils.concatenate_files(no_data_path, no_data_file_list, remove_concatenated_files=True)
        self.progress.reset()
        self.run.warning("Please note that while anvi'o was building your databases, she found %d "
                         "HMM profiles that did not have any associated data (besides an annotation) in their KOfam entries. "
                         "We have removed those HMM profiles from the final database. You can find them under the directory '%s'."
                         % (len(no_data_file_list), self.orphan_data_dir))
def run_hmmpress(self):
    """This function concatenates the Kofam profiles and runs hmmpress on them.

    It first verifies the downloaded profiles and moves orphans aside, then builds the single
    concatenated HMM file and presses it for use with hmmscan. Raises a ConfigError if
    `hmmpress` exits with a non-zero status.
    """
    self.progress.new('Preparing Kofam HMM Profiles')
    self.progress.update('Verifying the Kofam directory %s contains all HMM profiles' % self.kegg_data_dir)
    self.confirm_downloaded_profiles()
    self.progress.update('Handling orphan files')
    self.move_orphan_files()
    self.progress.update('Concatenating HMM profiles into one file...')
    hmm_list = [k for k in glob.glob(os.path.join(self.kegg_data_dir, 'profiles/*.hmm'))]
    utils.concatenate_files(self.kofam_hmm_file_path, hmm_list, remove_concatenated_files=False)
    # there is no reason to keep the original HMM profiles around, unless we are debugging
    if not anvio.DEBUG:
        shutil.rmtree((os.path.join(self.kegg_data_dir, "profiles")))
    self.progress.update('Running hmmpress...')
    cmd_line = ['hmmpress', self.kofam_hmm_file_path]
    log_file_path = os.path.join(self.hmm_data_dir, '00_hmmpress_log.txt')
    ret_val = utils.run_command(cmd_line, log_file_path)
    # a non-zero return value means hmmpress failed; keep the log for troubleshooting
    if ret_val:
        raise ConfigError("Hmm. There was an error while running `hmmpress` on the Kofam HMM profiles. "
                          "Check out the log file ('%s') to see what went wrong." % (log_file_path))
    else:
        # getting rid of the log file because hmmpress was successful
        os.remove(log_file_path)
    self.progress.end()
def setup_modules_db(self):
    """This function creates the Modules DB from the KEGG Module files.

    It instantiates a KeggModulesDatabase with the module and pathway dictionaries gathered
    earlier in setup and calls create() on it. Any failure is surfaced to the user as a
    ConfigError (the original exception is printed first for context).
    """
    try:
        mod_db = KeggModulesDatabase(self.kegg_modules_db_path, args=self.args, module_dictionary=self.module_dict, pathway_dictionary=self.pathway_dict, run=run, progress=progress)
        mod_db.create()
    except Exception as e:
        print(e)
        raise ConfigError("While attempting to build the MODULES.db, anvi'o encountered an error, which should be printed above. "
                          "If you look at that error and it seems like something you cannot handle, please contact the developers "
                          "for assistance, as it may be possible that a recent update to KEGG has broken our setup process. If that "
                          "is the case, we may be able to provide you with a legacy KEGG data archive that you can use to set up "
                          "KEGG with the --kegg-archive flag, while we try to fix things. :) ")
def kegg_archive_is_ok(self, unpacked_archive_path):
    """This function checks the structure and contents of an unpacked KEGG archive and returns True if it is as expected.

    Please note that we check for existence of the files that are necessary to run KEGG scripts, but we don't check the file
    formats. This means that people could technically trick this function into returning True by putting a bunch of crappy files
    with the right names/paths into the archive file. But what would be the point of that?

    We also don't care about the contents of certain folders (ie modules) because they are not being directly used
    when running KEGG scripts. In the case of modules, all the information should already be in the MODULES.db so we don't
    waste our time checking that all the module files are there. We only check that the directory is there. If later changes
    to the implementation require the direct use of the files in these folders, then this function should be updated
    to check for those.

    Parameters
    ==========
    unpacked_archive_path : str
        path to the directory into which the archive was unpacked (must contain a 'KEGG' subdirectory)

    Returns
    =======
    bool
        True if all expected files and directories are present, False otherwise
    """
    is_ok = True
    # check top-level files and folders
    path_to_kegg_in_archive = os.path.join(unpacked_archive_path, "KEGG")
    expected_directories_and_files = [self.orphan_data_dir,
                                      self.module_data_dir,
                                      self.hmm_data_dir,
                                      #self.pathway_data_dir, #TODO: uncomment me when we start incorporating pathways
                                      self.ko_list_file_path,
                                      self.kegg_module_file,
                                      #self.kegg_pathway_file, #TODO: uncomment me when we start incorporating pathways
                                      self.kegg_modules_db_path]
    for f in expected_directories_and_files:
        path_to_f_in_archive = os.path.join(path_to_kegg_in_archive, os.path.basename(f))
        if not os.path.exists(path_to_f_in_archive):
            is_ok = False
            if anvio.DEBUG:
                # (typo 'folllowing' fixed in this and the warnings below)
                self.run.warning("The KEGG archive does not contain the following expected file or directory: %s"
                                 % (path_to_f_in_archive))
    # check hmm files
    path_to_hmms_in_archive = os.path.join(path_to_kegg_in_archive, os.path.basename(self.hmm_data_dir))
    kofam_hmm_basename = os.path.basename(self.kofam_hmm_file_path)
    expected_hmm_files = [kofam_hmm_basename]
    for h in expected_hmm_files:
        path_to_h_in_archive = os.path.join(path_to_hmms_in_archive, h)
        if not os.path.exists(path_to_h_in_archive):
            is_ok = False
            if anvio.DEBUG:
                self.run.warning("The KEGG archive does not contain the following expected hmm file: %s"
                                 % (path_to_h_in_archive))
        # hmmpress generates these four auxiliary index files next to the pressed HMM file
        expected_extensions = ['.h3f', '.h3i', '.h3m', '.h3p']
        for ext in expected_extensions:
            path_to_expected_hmmpress_file = path_to_h_in_archive + ext
            if not os.path.exists(path_to_expected_hmmpress_file):
                is_ok = False
                if anvio.DEBUG:
                    self.run.warning("The KEGG archive does not contain the following expected `hmmpress` output: %s"
                                     % (path_to_expected_hmmpress_file))
    return is_ok
def check_modules_db_version(self):
    """This function checks if the MODULES.db is out of date and if so warns the user to migrate it.

    It compares the version stored in the database against the current anvi'o target version
    for 'modules'-type databases and prints a warning (it does not migrate anything itself).
    """
    # get current version of db
    db_conn = db.DB(self.kegg_modules_db_path, None, ignore_version=True)
    current_db_version = int(db_conn.get_meta_value('version'))
    db_conn.disconnect()
    # if modules.db is out of date, give warning
    target_version = int(anvio.tables.versions_for_db_types['modules'])
    if current_db_version != target_version:
        # BUGFIX: the middle segment of this message was a plain string, so the literal text
        # '{current_db_version}' was printed instead of the actual version number. It is an
        # f-string now.
        self.run.warning(f"Just so you know, the KEGG archive that was just set up contains an outdated MODULES.db (version: "
                         f"{current_db_version}). You may want to run `anvi-migrate` on this database before you do anything else. "
                         f"Here is the path to the database: {self.kegg_modules_db_path}")
def setup_from_archive(self):
    """This function sets up the KEGG data directory from an archive of a previously-setup KEGG data directory.

    To do so, it unpacks the archive and checks its structure and that all required components are there.
    If the archive is valid, its 'KEGG' directory is moved into place at self.kegg_data_dir; otherwise a
    ConfigError is raised (and the unpacked archive is kept only under --debug).
    """
    self.run.info("KEGG archive", self.kegg_archive_path)
    self.progress.new('Unzipping KEGG archive file...')
    if not self.kegg_archive_path.endswith("tar.gz"):
        self.progress.reset()
        raise ConfigError("The provided archive file %s does not appear to be an archive at all. Perhaps you passed "
                          "the wrong file to anvi'o?" % (self.kegg_archive_path))
    # unpack into a fixed-name directory in the current working directory
    unpacked_archive_name = "KEGG_archive_unpacked"
    utils.tar_extract_file(self.kegg_archive_path, output_file_path=unpacked_archive_name, keep_original=True)
    self.progress.update('Checking KEGG archive structure and contents...')
    archive_is_ok = self.kegg_archive_is_ok(unpacked_archive_name)
    self.progress.end()
    if archive_is_ok:
        # refuse to silently delete a user-specified (non-default) data directory;
        # NOTE(review): self.default_kegg_dir is presumably set by KeggContext — confirm
        if os.path.exists(self.kegg_data_dir) and self.kegg_data_dir != self.default_kegg_dir:
            raise ConfigError("You are attempting to set up KEGG from a KEGG data archive in a non-default data directory (%s) which already exists. "
                              "To avoid automatically deleting a directory that may be important to you, anvi'o refuses to get rid of "
                              "directories that have been specified with --kegg-data-dir. If you really want to get rid of this "
                              "directory and replace it with the KEGG archive data, then please remove the directory yourself using "
                              "a command like `rm -r %s`. We are sorry to make you go through this extra trouble, but it really is "
                              "the safest way to handle things." % (self.kegg_data_dir, self.kegg_data_dir))
        elif os.path.exists(self.kegg_data_dir):
            # default data directory: safe to replace
            shutil.rmtree(self.kegg_data_dir)
        path_to_kegg_in_archive = os.path.join(unpacked_archive_name, "KEGG")
        shutil.move(path_to_kegg_in_archive, self.kegg_data_dir)
        shutil.rmtree(unpacked_archive_name)
        # if necessary, warn user about migrating the modules db
        self.check_modules_db_version()
    else:
        # the archive failed validation; keep the unpacked copy only when debugging
        debug_output = "We kept the unpacked archive for you to take a look at it. It is at %s and you may want " \
                       "to delete it after you are done checking its contents." % os.path.abspath(unpacked_archive_name)
        if not anvio.DEBUG:
            shutil.rmtree(unpacked_archive_name)
            debug_output = "The unpacked archive has been deleted, but you can re-run the script with the --debug " \
                           "flag to keep it if you want to see its contents."
        else:
            self.run.warning("The unpacked archive file %s was kept for debugging purposes. You may want to "
                             "clean it up after you are done looking through it." % (os.path.abspath(unpacked_archive_name)))
        raise ConfigError("The provided archive file %s does not appear to be a KEGG data directory, so anvi'o is unable "
                          "to use it. %s" % (self.kegg_archive_path, debug_output))
def setup_kegg_snapshot(self):
"""This is the default setup strategy in which we unpack a specific KEGG archive.
We do this so that everyone who uses the same release of anvi'o will also have the same default KEGG
data, which facilitates sharing and also means they do not have to continuously re-annotate their datasets
when KEGG is updated.
It is essentially a special case of setting up from an archive.
"""
if anvio.DEBUG:
self.run.info("Downloading from: ", self.default_kegg_data_url)
self.run.info("Downloading to: ", self.default_kegg_archive_file)
utils.download_file(self.default_kegg_data_url, self.default_kegg_archive_file, progress=self.progress, run=self.run)
# a hack so we can use the archive setup function
self.kegg_archive_path = self.default_kegg_archive_file
self.setup_from_archive()
# if all went well, let's get rid of the archive we used and the log file
if not anvio.DEBUG:
os.remove(self.default_kegg_archive_file)
else:
self.run.warning(f"Because you used the --debug flag, the KEGG archive file at {self.default_kegg_archive_file} "
"has been kept. You may want to remove it later.")
def setup_data(self):
"""This is a driver function which executes the KEGG setup process.
It downloads, decompresses, and hmmpresses the KOfam profiles.
It also downloads and processes the KEGG Module files into the MODULES.db.
"""
if self.kegg_archive_path:
self.setup_from_archive()
elif self.download_from_kegg:
# mostly for developers and the adventurous
self.download_profiles()
self.decompress_files()
self.download_modules()
#self.download_pathways() # This is commented out because we do not do anything with pathways downstream, but we will in the future.
self.setup_ko_dict()
self.run_hmmpress()
self.setup_modules_db()
else:
# the default, set up from frozen KEGG release
self.setup_kegg_snapshot()
class RunKOfams(KeggContext):
    """Class for running `hmmscan` against the KOfam database and adding the resulting hits to contigs DB for later metabolism prediction.

    Parameters
    ==========
    args: Namespace object
        All the arguments supplied by user to anvi-run-kegg-kofams
    """

    def __init__(self, args, run=run, progress=progress):
        self.args = args
        self.run = run
        self.progress = progress

        # convenience accessor that returns None for any argument the user did not supply
        A = lambda x: args.__dict__[x] if x in args.__dict__ else None
        self.contigs_db_path = A('contigs_db')
        self.num_threads = A('num_threads')
        self.hmm_program = A('hmmer_program') or 'hmmsearch'
        self.keep_all_hits = True if A('keep_all_hits') else False
        self.log_bitscores = True if A('log_bitscores') else False
        self.skip_bitscore_heuristic = True if A('skip_bitscore_heuristic') else False
        self.bitscore_heuristic_e_value = A('heuristic_e_value')
        self.bitscore_heuristic_bitscore_fraction = A('heuristic_bitscore_fraction')
        self.ko_dict = None  # should be set up by setup_ko_dict()

        # init the base class
        KeggContext.__init__(self, self.args)

        filesnpaths.is_program_exists(self.hmm_program)

        # verify that Kofam HMM profiles have been set up
        if not os.path.exists(self.kofam_hmm_file_path):
            # report the path we actually checked (previously this message substituted the HMM data
            # directory, which is not the file the text promises to name)
            raise ConfigError("Anvi'o is unable to find the Kofam.hmm file at %s. This can happen one of two ways. Either you "
                              "didn't specify the correct KEGG data directory using the flag --kegg-data-dir, or you haven't "
                              "yet set up the Kofam data by running `anvi-setup-kegg-kofams`. Hopefully you now know what to do "
                              "to fix this problem. :) " % self.kofam_hmm_file_path)

        utils.is_contigs_db(self.contigs_db_path)

        self.setup_ko_dict()  # read the ko_list file into self.ko_dict

        # load existing kegg modules db
        self.kegg_modules_db = KeggModulesDatabase(self.kegg_modules_db_path, args=self.args)

        # reminder to be a good citizen
        self.run.warning("Anvi'o will annotate your database with the KEGG KOfam database, as described in "
                         "Aramaki et al (doi:10.1093/bioinformatics/btz859) When you publish your findings, "
                         "please do not forget to properly credit this work.", lc='green', header="CITATION")


    def check_hash_in_contigs_db(self):
        """Checks the contigs DB self table to make sure it was not already annotated.

        Raises
        ======
        ConfigError
            If a `modules_db_hash` meta value is already present and the user did not pass
            --just-do-it (self.just_do_it presumably comes from the KeggContext base — TODO confirm).
        """
        A = lambda x: self.args.__dict__[x] if x in self.args.__dict__ else None
        self.contigs_db_path = A('contigs_db')

        contigs_db = ContigsDatabase(self.contigs_db_path)
        current_module_hash_in_contigs_db = contigs_db.db.get_meta_value('modules_db_hash', return_none_if_not_in_table=True)
        # disconnect on every path — previously the handle was only closed before raising,
        # leaking the DB connection on the success path
        contigs_db.disconnect()

        if current_module_hash_in_contigs_db and not self.just_do_it:
            raise ConfigError("The contigs database (%s) has already been annotated with KOfam hits. If you really want to "
                              "overwrite these annotations with new ones, please re-run the command with the flag --just-do-it. "
                              "For those who need this information, the Modules DB used to annotate this contigs database previously "
                              "had the following hash: %s" % (self.contigs_db_path, current_module_hash_in_contigs_db))


    def set_hash_in_contigs_db(self):
        """Modifies the contigs DB self table to indicate which MODULES.db has been used to annotate it."""
        A = lambda x: self.args.__dict__[x] if x in self.args.__dict__ else None
        self.contigs_db_path = A('contigs_db')

        contigs_db = ContigsDatabase(self.contigs_db_path)
        contigs_db.db.set_meta_value('modules_db_hash', self.kegg_modules_db.db.get_meta_value('hash'))
        contigs_db.disconnect()


    def get_annotation_from_ko_dict(self, knum, ok_if_missing_from_dict=False):
        """Returns the functional annotation of the provided KO number.

        Parameters
        ==========
        knum : str
            The KO number for which to get an annotation for
        ok_if_missing_from_dict : bool
            If false, not finding the KO will raise an error. If true, the function will quietly return an "Unknown" annotation string for the missing KO

        Returns
        =======
        annotation : str
        """
        if not self.ko_dict:
            raise ConfigError("Oops! The ko_list file has not been properly loaded, so get_annotation_from_ko_dict() is "
                              "extremely displeased and unable to function properly. Please refrain from calling this "
                              "function until after setup_ko_dict() has been called.")

        if knum not in self.ko_dict:
            if ok_if_missing_from_dict:
                return "Unknown function with KO num %s" % knum
            else:
                raise ConfigError("It seems %s found a KO number that does not exist "
                                  "in the KOfam ko_list file: %s" % (self.hmm_program, knum))

        return self.ko_dict[knum]['definition']


    def _add_annotations_for_hit(self, entry_key, gcid, knum, e_value):
        """Record one accepted KOfam hit in the annotation dictionaries.

        This resolves the long-standing TODO about the duplicated annotation code in
        parse_kofam_hits() and update_dict_for_genes_with_missing_annotations().
        Populates self.functions_dict, self.gcids_to_functions_dict, and — when the KO
        belongs to one or more KEGG modules — self.kegg_module_names_dict and
        self.kegg_module_classes_dict, all under the same key.

        Parameters
        ==========
        entry_key : int
            The integer key to use in the annotation dictionaries
        gcid : int
            The gene callers id the hit belongs to
        knum : str
            The KO number of the hit
        e_value : float
            The e-value of the hit (module-level entries carry no e-value)
        """
        self.functions_dict[entry_key] = {
            'gene_callers_id': gcid,
            'source': 'KOfam',
            'accession': knum,
            'function': self.get_annotation_from_ko_dict(knum, ok_if_missing_from_dict=True),
            'e_value': e_value,
        }

        # later, we will need to know if a particular gene call has hits or not. So here we are just saving for each
        # gene caller id the keys for its corresponding hits in the function dictionary.
        if gcid not in self.gcids_to_functions_dict:
            self.gcids_to_functions_dict[gcid] = [entry_key]
        else:
            self.gcids_to_functions_dict[gcid].append(entry_key)

        # add associated KEGG module information to database
        mods = self.kegg_modules_db.get_modules_for_knum(knum)
        names = self.kegg_modules_db.get_module_names_for_knum(knum)
        classes = self.kegg_modules_db.get_module_classes_for_knum_as_list(knum)

        if mods:
            mod_annotation = "!!!".join(mods)
            mod_class_annotation = "!!!".join(classes) # why do we split by '!!!'? Because that is how it is done in COGs. So so sorry. :'(
            mod_name_annotation = ""

            for mod in mods:
                if mod_name_annotation:
                    mod_name_annotation += "!!!" + names[mod]
                else:
                    mod_name_annotation = names[mod]

            self.kegg_module_names_dict[entry_key] = {
                'gene_callers_id': gcid,
                'source': 'KEGG_Module',
                'accession': mod_annotation,
                'function': mod_name_annotation,
                'e_value': None,
            }
            self.kegg_module_classes_dict[entry_key] = {
                'gene_callers_id': gcid,
                'source': 'KEGG_Class',
                'accession': mod_annotation,
                'function': mod_class_annotation,
                'e_value': None,
            }


    def parse_kofam_hits(self, hits_dict):
        """This function applies bitscore thresholding (if requested) to establish the self.functions_dict
        which can then be used to store annotations in the contigs DB.

        If self.keep_all_hits is True, all hits will be added to the self.functions_dict regardless of bitscore
        threshold.

        Note that the input hits_dict contains bitscores, but the self.functions_dict does not (because the DB
        tables do not have a column for it, at least at the time of writing this).

        PARAMETERS
        ===========
        hits_dict : dictionary
            The output from the hmmsearch parser, which should contain all hits (ie, weak hits not yet removed)

        RETURNS
        ========
        counter : int
            The number of functions added to self.functions_dict. Useful for downstream functions that want to
            add to this dictionary, since it is the next available integer key.
        """
        total_num_hits = len(hits_dict)
        self.progress.new("Parsing KOfam hits", progress_total_items=total_num_hits)

        self.functions_dict = {}
        self.kegg_module_names_dict = {}
        self.kegg_module_classes_dict = {}
        self.gcids_to_hits_dict = {}
        self.gcids_to_functions_dict = {}

        counter = 0
        num_hits_removed = 0
        cur_num_hit = 0
        for hit_key, hmm_hit in hits_dict.items():
            cur_num_hit += 1
            knum = hmm_hit['gene_name']
            gcid = hmm_hit['gene_callers_id']
            keep = False

            if cur_num_hit % 1000 == 0:
                self.progress.update("Removing weak hits [%d of %d KOs]" % (cur_num_hit, total_num_hits))
                self.progress.increment(increment_to=cur_num_hit)

            # later, we will need to quickly access the hits for each gene call. So we map gcids to the keys in the raw hits dictionary
            if gcid not in self.gcids_to_hits_dict:
                self.gcids_to_hits_dict[gcid] = [hit_key]
            else:
                self.gcids_to_hits_dict[gcid].append(hit_key)

            if knum not in self.ko_dict:
                self.progress.reset()
                raise ConfigError("Something went wrong while parsing the KOfam HMM hits. It seems that KO "
                                  f"{knum} is not in the noise cutoff dictionary for KOs. That means we do "
                                  "not know how to distinguish strong hits from weak ones for this KO. "
                                  "Anvi'o will fail now :( Please contact a developer about this error to "
                                  "get this mess fixed. ")

            # if hit is above the bitscore threshold, we will keep it
            if self.ko_dict[knum]['score_type'] == 'domain':
                if hmm_hit['domain_bit_score'] >= float(self.ko_dict[knum]['threshold']):
                    keep = True
            elif self.ko_dict[knum]['score_type'] == 'full':
                if hmm_hit['bit_score'] >= float(self.ko_dict[knum]['threshold']):
                    keep = True
            else:
                self.progress.reset()
                raise ConfigError(f"The KO noise cutoff dictionary for {knum} has a strange score type which "
                                  f"is unknown to anvi'o: {self.ko_dict[knum]['score_type']}")

            if keep or self.keep_all_hits:
                self._add_annotations_for_hit(counter, gcid, knum, hmm_hit['e_value'])
                counter += 1
            else:
                num_hits_removed += 1

        self.progress.end()
        self.run.info("Number of weak hits removed by KOfam parser", num_hits_removed)
        self.run.info("Number of hits remaining in annotation dict ", len(self.functions_dict.keys()))

        return counter


    def update_dict_for_genes_with_missing_annotations(self, gcids_list, hits_dict, next_key):
        """This function adds functional annotations for genes with missing hits to the dictionary.

        The reason this is necessary is that the bitscore thresholds can be too stringent, causing
        us to miss legitimate annotations. To find these annotations, we adopt the following heuristic:
        For every gene without a KOfam annotation, we examine all the hits with an e-value below X
        and a bitscore above Y% of the threshold. If those hits are all to a unique KO profile,
        then we annotate the gene call with that KO.

        X is self.bitscore_heuristic_e_value, Y is self.bitscore_heuristic_bitscore_fraction

        For reasons that are hopefully obvious, this function must be called after parse_kofam_hits(),
        which establishes the self.functions_dict attribute.

        PARAMETERS
        ===========
        gcids_list : list
            The list of gene caller ids in the contigs database. We will use this to figure out which
            genes have no annotations
        hits_dict : dictionary
            The output from the hmmsearch parser, which should contain all hits (ie, weak hits not yet removed)
        next_key : int
            The next integer key that is available for adding functions to self.functions_dict
        """
        self.run.warning("Anvi'o will now re-visit genes without KOfam annotations to see if potentially valid "
                         "functional annotations were missed. These genes will be annotated with a KO only if "
                         f"all KOfam hits to this gene with e-value <= {self.bitscore_heuristic_e_value} and bitscore > "
                         f"({self.bitscore_heuristic_bitscore_fraction} * KEGG threshold) are hits to the same KO. Just "
                         "so you know what is going on here. If this sounds like A Very Bad Idea to you, then please "
                         "feel free to turn off this behavior with the flag --skip-bitscore-heuristic or to change "
                         "the e-value/bitscore parameters (see the help page for more info).")

        num_annotations_added = 0
        total_num_genes = len(gcids_list)
        self.progress.new("Relaxing bitscore threshold", progress_total_items=total_num_genes)

        # for each gene call, check for annotation in self.functions_dict
        current_gene_num = 0
        for gcid in gcids_list:
            current_gene_num += 1
            if current_gene_num % 1000 == 0:
                self.progress.update("Adding back decent hits [%d of %d gene calls]" % (current_gene_num, total_num_genes))
                self.progress.increment(increment_to=current_gene_num)

            if gcid in self.gcids_to_functions_dict:
                # this gene call already has at least one annotation
                continue

            decent_hit_kos = set()
            best_e_value = 100  # just an arbitrary positive value that will be larger than any evalue
            best_hit_key = None

            # if no annotation, get all hits for gene caller id from hits_dict
            if gcid in self.gcids_to_hits_dict:
                for hit_key in self.gcids_to_hits_dict[gcid]:
                    knum = hits_dict[hit_key]['gene_name']
                    ko_threshold = float(self.ko_dict[knum]['threshold'])

                    if self.ko_dict[knum]['score_type'] == 'domain':
                        hit_bitscore = hits_dict[hit_key]['domain_bit_score']
                    elif self.ko_dict[knum]['score_type'] == 'full':
                        hit_bitscore = hits_dict[hit_key]['bit_score']
                    else:
                        # previously an unknown score type left `hit_bitscore` unbound and caused a
                        # confusing UnboundLocalError below; fail loudly like parse_kofam_hits() does
                        self.progress.reset()
                        raise ConfigError(f"The KO noise cutoff dictionary for {knum} has a strange score type which "
                                          f"is unknown to anvi'o: {self.ko_dict[knum]['score_type']}")

                    # get set of hits that fit specified heuristic parameters
                    if hits_dict[hit_key]['e_value'] <= self.bitscore_heuristic_e_value and hit_bitscore > (self.bitscore_heuristic_bitscore_fraction * ko_threshold):
                        decent_hit_kos.add(knum)
                        # keep track of hit with lowest e-value we've seen so far
                        if hits_dict[hit_key]['e_value'] <= best_e_value:
                            best_e_value = hits_dict[hit_key]['e_value']
                            best_hit_key = hit_key

                # if unique KO, add annotation with best e-value to self.functions_dict
                if len(decent_hit_kos) == 1:
                    best_knum = hits_dict[best_hit_key]['gene_name']
                    self._add_annotations_for_hit(next_key, gcid, best_knum, hits_dict[best_hit_key]['e_value'])
                    next_key += 1
                    num_annotations_added += 1

        self.progress.end()
        self.run.info("Number of decent hits added back after relaxing bitscore threshold", num_annotations_added)
        self.run.info("Total number of hits in annotation dictionary after adding these back", len(self.functions_dict.keys()))


    def store_annotations_in_db(self):
        """Takes the dictionary of function annotations (already parsed, if necessary) and puts them in the DB.

        Should be called after the function that parses the HMM hits and creates self.functions_dict :) which is
        parse_kofam_hits()
        """
        # get an instance of gene functions table
        gene_function_calls_table = TableForGeneFunctions(self.contigs_db_path, self.run, self.progress)

        if self.functions_dict:
            gene_function_calls_table.create(self.functions_dict)
            if self.kegg_module_names_dict:
                gene_function_calls_table.create(self.kegg_module_names_dict)
            if self.kegg_module_classes_dict:
                gene_function_calls_table.create(self.kegg_module_classes_dict)
        else:
            self.run.warning("There are no KOfam hits to add to the database. Returning empty handed, "
                             "but still adding KOfam as a functional source.")
            gene_function_calls_table.add_empty_sources_to_functional_sources({'KOfam'})


    def process_kofam_hmms(self):
        """This is a driver function for running HMMs against the KOfam database and processing the hits into the provided contigs DB."""
        tmp_directory_path = filesnpaths.get_temp_directory_path()
        contigs_db = ContigsSuperclass(self.args) # initialize contigs db

        # we will need the gene caller ids later
        all_gcids_in_contigs_db = contigs_db.genes_in_contigs_dict.keys()

        # safety check for previous annotations so that people don't overwrite those if they don't want to
        self.check_hash_in_contigs_db()

        # get AA sequences as FASTA
        target_files_dict = {'AA:GENE': os.path.join(tmp_directory_path, 'AA_gene_sequences.fa')}
        contigs_db.get_sequences_for_gene_callers_ids(output_file_path=target_files_dict['AA:GENE'],
                                                      simple_headers=True,
                                                      report_aa_sequences=True)

        # run hmmscan
        hmmer = HMMer(target_files_dict, num_threads_to_use=self.num_threads, program_to_use=self.hmm_program)
        hmm_hits_file = hmmer.run_hmmer('KOfam', 'AA', 'GENE', None, None, len(self.ko_dict), self.kofam_hmm_file_path, None, None)

        if not hmm_hits_file:
            # consistency: use self.run here, like everywhere else in this class,
            # instead of the module-level run object
            self.run.info_single("The HMM search returned no hits :/ So there is nothing to add to the contigs database. But "
                                 "now anvi'o will add KOfam as a functional source with no hits, clean the temporary directories "
                                 "and gracefully quit.", nl_before=1, nl_after=1)
            if not anvio.DEBUG:
                shutil.rmtree(tmp_directory_path)
                hmmer.clean_tmp_dirs()
            else:
                self.run.warning("Because you ran this script with the --debug flag, anvi'o will not clean up the temporary "
                                 "directories located at %s and %s. Please be responsible for cleaning up this directory yourself "
                                 "after you are finished debugging :)" % (tmp_directory_path, ', '.join(hmmer.tmp_dirs)), header="Debug")
            gene_function_calls_table = TableForGeneFunctions(self.contigs_db_path, self.run, self.progress)
            gene_function_calls_table.add_empty_sources_to_functional_sources({'KOfam'})
            return

        # parse hmmscan output
        parser = parser_modules['search']['hmmer_table_output'](hmm_hits_file, alphabet='AA', context='GENE', program=self.hmm_program)
        search_results_dict = parser.get_search_results()

        # add functions and KEGG modules info to database
        next_key_in_functions_dict = self.parse_kofam_hits(search_results_dict)
        if not self.skip_bitscore_heuristic:
            self.update_dict_for_genes_with_missing_annotations(all_gcids_in_contigs_db, search_results_dict, next_key=next_key_in_functions_dict)
        self.store_annotations_in_db()

        # If requested, store bit scores of each hit in file
        if self.log_bitscores:
            self.bitscore_log_file = os.path.splitext(os.path.basename(self.contigs_db_path))[0] + "_bitscores.txt"
            anvio.utils.store_dict_as_TAB_delimited_file(search_results_dict, self.bitscore_log_file, key_header='entry_id')
            self.run.info("Bit score information file: ", self.bitscore_log_file)

        # mark contigs db with hash of modules.db content for version tracking
        self.set_hash_in_contigs_db()

        if anvio.DEBUG:
            self.run.warning("The temp directories, '%s' and '%s' are kept. Please don't forget to clean those up "
                             "later" % (tmp_directory_path, ', '.join(hmmer.tmp_dirs)), header="Debug")
        else:
            self.run.info_single("Cleaning up the temp directory (you can use `--debug` if you would "
                                 "like to keep it for testing purposes)", nl_before=1, nl_after=1)
            shutil.rmtree(tmp_directory_path)
            hmmer.clean_tmp_dirs()
class KeggEstimatorArgs():
    """Base class that normalizes user arguments shared by the KeggMetabolism estimator classes."""

    def __init__(self, args, format_args_for_single_estimator=False, run=run, progress=progress):
        """A base class to assign arguments for KeggMetabolism estimator classes.

        Parameters
        ==========
        format_args_for_single_estimator: bool
            This is a special case where an args instance is generated to be passed to the
            single estimator from within multi estimator. More specifically, the multi estimator
            class is nothing but one that iterates through all contigs DBs
            given to it using the single estimator class. So it needs to create instances of
            single estimators, and collect results at upstream. The problem is, if a single
            estimator is initiated with the args of a multi estimator, the sanity check will
            go haywire. This flag nullifies most common offenders.
        """

        # convenience accessor that returns None for any argument the user did not supply
        A = lambda x: args.__dict__[x] if x in args.__dict__ else None
        self.metagenome_mode = True if A('metagenome_mode') else False
        # NOTE: `or 0.75` also replaces an explicit 0 with the default; the 0 case is
        # restored further below once A() can be consulted again
        self.module_completion_threshold = A('module_completion_threshold') or 0.75
        self.output_file_prefix = A('output_file_prefix') or "kegg-metabolism"
        self.write_dict_to_json = True if A('get_raw_data_as_json') else False
        self.json_output_file_path = A('get_raw_data_as_json')
        self.store_json_without_estimation = True if A('store_json_without_estimation') else False
        self.estimate_from_json = A('estimate_from_json') or None
        # single estimators use 'kegg_output_modes'; multi estimators use 'output_modes'
        self.output_modes = A('kegg_output_modes') or A('output_modes') or "modules"
        self.custom_output_headers = A('custom_output_headers') or None
        self.matrix_format = True if A('matrix_format') else False
        self.matrix_include_metadata = True if A('include_metadata') else False
        self.exclude_zero_modules = False if A('include_zeros') else True
        self.only_complete = True if A('only_complete') else False
        self.add_coverage = True if A('add_coverage') else False
        self.module_specific_matrices = A('module_specific_matrices') or None
        self.no_comments = True if A('no_comments') else False
        self.external_genomes_file = A('external_genomes') or None
        self.internal_genomes_file = A('internal_genomes') or None
        self.metagenomes_file = A('metagenomes') or None
        self.kegg_data_dir = A('kegg_data_dir')
        self.modules_unique_id = None
        self.ko_unique_id = None
        self.genome_mode = False  ## controls some warnings output, will be set to True downstream if necessary

        # if necessary, assign 0 completion threshold, which evaluates to False above
        if A('module_completion_threshold') == 0:
            self.module_completion_threshold = 0.0

        # output modes and headers that we can handle
        # NOTE(review): these alias the module-level OUTPUT_MODES/OUTPUT_HEADERS dicts (no copy),
        # so the mutations below (and in subclasses) modify the shared globals — confirm that
        # this cross-instance sharing is intended
        self.available_modes = OUTPUT_MODES
        self.available_headers = OUTPUT_HEADERS

        if format_args_for_single_estimator:
            # to fool a single estimator into passing sanity checks, nullify multi estimator args here
            self.databases = None
            self.matrix_format = False # we won't be storing data from the single estimator anyway
            self.module_specific_matrices = None

        # parse requested output modes if necessary
        if isinstance(self.output_modes, str):
            # parse requested output modes and make sure we can handle them all
            self.output_modes = self.output_modes.split(",")

        # parse requested output headers if necessary
        if self.custom_output_headers and isinstance(self.custom_output_headers, str):
            self.custom_output_headers = self.custom_output_headers.split(",")

            # 'unique_id' must always be the first column in custom output
            if "unique_id" not in self.custom_output_headers:
                self.custom_output_headers = ["unique_id"] + self.custom_output_headers
            elif self.custom_output_headers.index("unique_id") != 0:
                self.custom_output_headers.remove("unique_id")
                self.custom_output_headers = ["unique_id"] + self.custom_output_headers
            self.available_modes['modules_custom']['headers'] = self.custom_output_headers

        # parse specific matrix modules if necessary
        if self.module_specific_matrices:
            self.module_specific_matrices = [_m.strip() for _m in self.module_specific_matrices.split(",")]


    def setup_output_for_appending(self):
        """Initializes and returns a dictionary of AppendableFile objects, one for each output mode"""

        output_dict = {}
        for mode in self.output_modes:
            output_path = self.output_file_prefix + "_" + self.available_modes[mode]["output_suffix"]
            if filesnpaths.is_file_exists(output_path, dont_raise=True):
                raise ConfigError("It seems like output files with your requested prefix already exist, for "
                                  f"example: {output_path}. Please delete the existing files or provide a "
                                  "different output prefix.")
            # fail_if_file_exists=False is safe: we just verified above that the file does not exist
            output_file_for_mode = filesnpaths.AppendableFile(output_path, append_type=dict, fail_if_file_exists=False)
            output_dict[mode] = output_file_for_mode

            self.run.info(f"Output file for {mode} mode", output_path)

        return output_dict


    def init_data_from_modules_db(self):
        """This function reads mucho data from the MODULES.db into dictionaries for later access.

        It generates the self.all_modules_in_db dictionary, which contains all data values for all modules
        in the db, keyed by module number.
        It also generates the self.all_kos_in_db dictionary, which maps each KO in the db to its list of modules.

        We do this once at the start so as to reduce the number of on-the-fly database queries
        that have to happen during the estimation process.
        """
        self.all_modules_in_db = self.kegg_modules_db.get_modules_table_data_values_as_dict()

        # build the reverse mapping: KO number -> list of modules containing it
        self.all_kos_in_db = {}
        for mod in self.all_modules_in_db:
            # ORTHOLOGY holds a single value for one-KO modules and a list otherwise; normalize to a list
            ko_list = self.all_modules_in_db[mod]['ORTHOLOGY']
            if not isinstance(ko_list, list):
                ko_list = [ko_list]

            # we convert to a set because some modules have duplicate orthology lines for the same KO
            for k in set(ko_list):
                if k not in self.all_kos_in_db:
                    self.all_kos_in_db[k] = []
                self.all_kos_in_db[k].append(mod)
class KeggMetabolismEstimator(KeggContext, KeggEstimatorArgs):
""" Class for reconstructing/estimating metabolism for a SINGLE contigs DB based on hits to KEGG databases.
==========
args: Namespace object
All the arguments supplied by user to anvi-estimate-metabolism
"""
def __init__(self, args, run=run, progress=progress):
self.args = args
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.contigs_db_path = A('contigs_db')
self.profile_db_path = A('profile_db')
self.collection_name = A('collection_name')
self.bin_id = A('bin_id')
self.bin_ids_file = A('bin_ids_file')
self.contigs_db_project_name = "Unknown"
self.database_name = A('database_name')
self.multi_mode = True if A('multi_mode') else False
# This can be initialized later if necessary using init_gene_coverage()
self.profile_db = None
KeggEstimatorArgs.__init__(self, self.args)
self.name_header = None
if self.metagenome_mode:
self.name_header = 'contig_name'
elif self.profile_db_path and self.collection_name and not self.metagenome_mode:
self.name_header = 'bin_name'
else:
self.name_header = 'genome_name'
# update available modes and headers with appropriate genome/bin/metagenome identifier
for m in self.available_modes:
if m != 'modules_custom' and self.name_header not in self.available_modes[m]['headers']:
self.available_modes[m]['headers'].insert(1, self.name_header)
if self.metagenome_mode and self.available_modes[m]['headers'] and 'contig' in self.available_modes[m]['headers']:
# avoid duplicate columns since contig_name is the name_header in metagenome_mode
self.available_modes[m]['headers'].remove('contig')
self.available_headers[self.name_header] = {
'cdict_key': None,
'mode_type' : 'all',
'description': "Name of genome/bin/metagenome in which we find KOfam hits and/or KEGG modules"
}
# input options sanity checks
if not self.estimate_from_json and not self.contigs_db_path:
raise ConfigError("NO INPUT PROVIDED. You must provide (at least) a contigs database or genomes file to this program, unless you are using the --estimate-from-json "
"flag, in which case you must provide a JSON-formatted file.")
self.bin_ids_to_process = None
if self.bin_id and self.bin_ids_file:
raise ConfigError("You have provided anvi'o with both the individual bin id %s and a file with bin ids (%s). "
"Please make up your mind. Which one do you want an estimate for? :)" % (self.bin_id, self.bin_ids_file))
elif self.bin_id:
self.bin_ids_to_process = [self.bin_id]
elif self.bin_ids_file:
filesnpaths.is_file_exists(self.bin_ids_file)
self.bin_ids_to_process = [line.strip() for line in open(self.bin_ids_file).readlines()]
if (self.bin_id or self.bin_ids_file or self.collection_name) and not self.profile_db_path:
raise ConfigError("You have requested metabolism estimation for a bin or set of bins, but you haven't provided "
"a profiles database. Unfortunately, this just does not work. Please try again.")
if self.profile_db_path and not (self.collection_name or self.add_coverage or self.metagenome_mode):
raise ConfigError("If you provide a profile DB, you should also provide either a collection name (to estimate metabolism "
"on a collection of bins) or use the --add-coverage flag (so that coverage info goes into the output "
"files), or both. Otherwise the profile DB is useless.")
if self.store_json_without_estimation and not self.json_output_file_path:
raise ConfigError("Whoops. You seem to want to store the metabolism dictionary in a JSON file, but you haven't provided the name of that file. "
"Please use the --get-raw-data-as-json flag to do so.")
if self.store_json_without_estimation and self.estimate_from_json:
raise ConfigError("It is impossible to both estimate metabolism from JSON data and produce a JSON file without estimation at the same time... "
"anvi'o is judging you SO hard right now.")
if self.profile_db_path:
utils.is_profile_db_and_contigs_db_compatible(self.profile_db_path, self.contigs_db_path)
if self.add_coverage and not self.profile_db_path:
raise ConfigError("Adding coverage values requires a profile database. Please provide one if you can. :)")
elif self.add_coverage and utils.is_blank_profile(self.profile_db_path):
raise ConfigError("You have provided a blank profile database, which sadly will not contain any coverage "
"values, so the --add-coverage flag will not work.")
elif self.add_coverage:
self.add_gene_coverage_to_headers_list()
# output options sanity checks
if anvio.DEBUG:
run.info("Output Modes", ", ".join(self.output_modes))
run.info("Module completeness threshold", self.module_completion_threshold)
run.info("Only complete modules included in output", self.only_complete)
run.info("Zero-completeness modules excluded from output", self.exclude_zero_modules)
illegal_modes = set(self.output_modes).difference(set(self.available_modes.keys()))
if illegal_modes:
raise ConfigError("You have requested some output modes that we cannot handle. The offending modes "
"are: %s. Please use the flag --list-available-modes to see which ones are acceptable."
% (", ".join(illegal_modes)))
if self.custom_output_headers and "modules_custom" not in self.output_modes:
raise ConfigError("You seem to have provided a list of custom headers without actually requesting a 'custom' output "
"mode. We think perhaps you missed something, so we are stopping you right there.")
if "modules_custom" in self.output_modes and not self.custom_output_headers:
raise ConfigError("You have requested a 'custom' output mode, but haven't told us what headers to include in that output. "
"You should be using the --custom-output-headers flag to do this.")
if self.custom_output_headers:
if anvio.DEBUG:
self.run.info("Custom Output Headers", ", ".join(self.custom_output_headers))
illegal_headers = set(self.custom_output_headers).difference(set(self.available_headers.keys()))
if illegal_headers:
raise ConfigError("You have requested some output headers that we cannot handle. The offending ones "
"are: %s. Please use the flag --list-available-output-headers to see which ones are acceptable."
% (", ".join(illegal_headers)))
# check if any headers requested for modules_custom mode are reserved for KOfams mode
if "modules_custom" in self.output_modes:
for header in self.custom_output_headers:
if self.available_headers[header]['mode_type'] != "modules" and self.available_headers[header]['mode_type'] != "all":
raise ConfigError(f"Oh dear. You requested the 'modules_custom' output mode, but gave us a header ({header}) "
"that is suitable only for %s mode(s). Not good." % (self.available_headers[header]['mode_type']))
outputs_require_ko_dict = [m for m in self.output_modes if self.available_modes[m]['data_dict'] == 'kofams']
output_string = ", ".join(outputs_require_ko_dict)
if self.estimate_from_json and len(outputs_require_ko_dict):
raise ConfigError("You have requested to estimate metabolism from a JSON file and produce the following KOfam hit "
f"output mode(s): {output_string}. Unforunately, this is not possible because "
"our JSON estimation function does not currently produce the required data for KOfam hit output. "
"Please instead request some modules-oriented output mode(s) for your JSON input.")
if self.matrix_format:
raise ConfigError("You have asked for output in matrix format, but unfortunately this currently only works in "
"multi-mode. Please give this program an input file contining multiple bins or contigs databases instead "
"of the single contigs database that you have provided. We are very sorry for any inconvenience.")
# init the base class
KeggContext.__init__(self, self.args)
# let user know what they told anvi'o to work
self.run.info("Contigs DB", self.contigs_db_path, quiet=self.quiet)
self.run.info("Profile DB", self.profile_db_path, quiet=self.quiet)
self.run.info('Metagenome mode', self.metagenome_mode)
if self.collection_name:
self.run.info('Collection', self.collection_name)
if self.bin_id:
self.run.info('Bin ID', self.bin_id)
elif self.bin_ids_file:
self.run.info('Bin IDs file', self.bin_ids_file)
# init the KO dictionary
self.setup_ko_dict()
if not self.estimate_from_json:
utils.is_contigs_db(self.contigs_db_path)
# load existing kegg modules db
if not os.path.exists(self.kegg_modules_db_path):
raise ConfigError("It appears that a modules database (%s) does not exist in the KEGG data directory %s. "
"Perhaps you need to specify a different KEGG directory using --kegg-data-dir. Or perhaps you didn't run "
"`anvi-setup-kegg-kofams`, though we are not sure how you got to this point in that case "
"since you also cannot run `anvi-run-kegg-kofams` without first having run KEGG setup. But fine. Hopefully "
"you now know what you need to do to make this message go away." % ("MODULES.db", self.kegg_data_dir))
kegg_modules_db = KeggModulesDatabase(self.kegg_modules_db_path, args=self.args, quiet=self.quiet)
if not self.estimate_from_json:
# here we load the contigs DB just for sanity check purposes.
# We will need to load it again later just before accessing data to avoid SQLite error that comes from different processes accessing the DB
contigs_db = ContigsDatabase(self.contigs_db_path, run=self.run, progress=self.progress)
self.contigs_db_project_name = contigs_db.meta['project_name']
# sanity check that contigs db was annotated with same version of MODULES.db that will be used for metabolism estimation
if 'modules_db_hash' not in contigs_db.meta:
raise ConfigError("Based on the contigs DB metadata, the contigs DB that you are working with has not been annotated with hits to the "
"KOfam database, so there are no KOs to estimate metabolism from. Please run `anvi-run-kegg-kofams` on this contigs DB "
"before you attempt to run this script again.")
contigs_db_mod_hash = contigs_db.meta['modules_db_hash']
mod_db_hash = kegg_modules_db.db.get_meta_value('hash')
if contigs_db_mod_hash != mod_db_hash:
raise ConfigError("The contigs DB that you are working with has been annotated with a different version of the MODULES.db than you are working with now. "
"Perhaps you updated your KEGG setup after running `anvi-run-kegg-kofams` on this contigs DB? Or maybe you have multiple KEGG data "
"directories set up on your computer, and the one you are using now is different from the one that you used for `anvi-run-kegg-kofams`? "
"Well. The solution to the first problem is to re-run `anvi-run-kegg-kofams` on the contigs DB (%s) using the updated MODULES.db "
"(located in the KEGG data directory %s). The solution to the second problem is to specify the appropriate KEGG data directory using "
"the --kegg-data-dir flag. If neither of those things make this work, then you should contact the developers to see if they can help you "
"figure this out. For those who need this information, the Modules DB used to annotate this contigs database previously had the "
"following hash: %s. And the hash of the current Modules DB is: %s" % (self.contigs_db_path, self.kegg_data_dir, contigs_db_mod_hash, mod_db_hash))
contigs_db.disconnect()
kegg_modules_db.disconnect()
if not self.quiet:
self.run.warning("Anvi'o will reconstruct metabolism for modules in the KEGG MODULE database, as described in "
"Kanehisa and Goto et al (doi:10.1093/nar/gkr988). When you publish your findings, "
"please do not forget to properly credit this work.", lc='green', header="CITATION")
def list_output_modes(self):
"""This function prints out the available output modes for the metabolism estimation script."""
run.warning(None, header="AVAILABLE OUTPUT MODES", lc="green")
for mode, mode_meta in self.available_modes.items():
self.run.info(mode, mode_meta['description'])
def list_output_headers(self):
"""This function prints out the available output headers for the 'custom' output mode"""
run.warning(None, header="AVAILABLE OUTPUT HEADERS", lc="green")
for header, header_meta in self.available_headers.items():
desc_str = header_meta['description']
type_str = header_meta['mode_type']
mode_str = "output modes" if header_meta['mode_type'] == 'all' else "output mode"
self.run.info(header, f"{desc_str} [{type_str} {mode_str}]")
######### ATOMIC ESTIMATION FUNCTIONS #########
def init_hits_and_splits(self, annotation_sources=['KOfam']):
"""This function loads KOfam hits, gene calls, splits, and contigs from the contigs DB.
We will need the hits with their KO numbers (accessions) so that we can go through the MODULES.db and determine
which steps are present in each module. And we will need the other information so that we can determine which hits belong
to which genomes/bins when we are handling multiple of these, and for help in computing redundancy.
This function gets this info as a list of tuples (one tuple per kofam hit), and it makes sure that these lists don't include
hits that we shouldn't be considering.
PARAMETERS
==========
annotation_sources : list
which functional annotation sources to obtain gene calls from. Should at least contain 'Kofam' for
default usage. Adding other sources may be necessary when working with user-defined metabolic modules.
RETURNS
=======
kofam_gene_split_contig : list
(ko_num, gene_call_id, split, contig) tuples, one per KOfam hit in the splits we are considering
"""
self.progress.new("Loading split data from contigs DB")
split_names_in_contigs_db = set(utils.get_all_item_names_from_the_database(self.contigs_db_path))
splits_to_use = split_names_in_contigs_db
# first, resolve differences in splits between profile and contigs db
if self.profile_db_path:
self.progress.update("Loading split data from profile DB")
# if we were given a blank profile, we will assume we want all splits and pull all splits from the contigs DB
if utils.is_blank_profile(self.profile_db_path):
self.progress.reset()
self.run.warning("You seem to have provided a blank profile. No worries, we can still estimate metabolism "
"for you. But we cannot load splits from the profile DB, so instead we are assuming that "
"you are interested in ALL splits and we will load those from the contigs database.")
else:
split_names_in_profile_db = set(utils.get_all_item_names_from_the_database(self.profile_db_path))
splits_missing_in_profile_db = split_names_in_contigs_db.difference(split_names_in_profile_db)
if len(splits_missing_in_profile_db):
min_contig_length_in_profile_db = pp(ProfileDatabase(self.profile_db_path).meta['min_contig_length'])
num_splits_contig = pp(len(split_names_in_contigs_db))
num_splits_profile = pp(len(split_names_in_profile_db))
num_missing = pp(len(splits_missing_in_profile_db))
self.progress.reset()
self.run.warning(f"Please note that anvi'o found {num_splits_contig} splits in your contigs database. "
f"But only {num_splits_profile} of them appear in the profile database. As a result, "
f"anvi'o will now remove the {num_missing} splits that occur only in the contigs db "
f"from all downstream analyses. Where is this difference coming from though? Well. This "
f"is often the case because the 'minimum contig length parameter' set during the `anvi-profile` "
f"step can exclude many contigs from downstream analyses (often for good reasons, too). For "
f"instance, in your case the minimum contig length set in the profile database is "
f"{min_contig_length_in_profile_db} nts. Anvi'o hopes that this explains some things.")
splits_to_use = split_names_in_profile_db
self.progress.update('Loading gene call data from contigs DB')
contigs_db = ContigsDatabase(self.contigs_db_path, run=self.run, progress=self.progress)
split_list = ','.join(["'%s'" % split_name for split_name in splits_to_use])
splits_where_clause = f'''split IN ({split_list})'''
genes_in_splits = contigs_db.db.get_some_columns_from_table(t.genes_in_splits_table_name, "gene_callers_id, split",
where_clause=splits_where_clause)
gene_list = ','.join(["'%s'" % gcid for gcid,split in genes_in_splits])
contigs_where_clause = f'''gene_callers_id IN ({gene_list})'''
genes_in_contigs = contigs_db.db.get_some_columns_from_table(t.genes_in_contigs_table_name, "gene_callers_id, contig",
where_clause=contigs_where_clause)
source_list = ','.join(["'%s'" % src for src in annotation_sources])
hits_where_clause = f'''source IN ({source_list}) AND gene_callers_id IN ({gene_list})'''
kofam_hits = contigs_db.db.get_some_columns_from_table(t.gene_function_calls_table_name, "gene_callers_id, accession",
where_clause=hits_where_clause)
contigs_db.disconnect()
# combine the information for each gene call into neat tuples for returning
# each gene call is only on one split of one contig, so we can convert these lists of tuples into dictionaries for easy access
# but some gene calls have multiple kofam hits (and some kofams have multiple gene calls), so we must keep the tuple structure for those
self.progress.update("Organizing KOfam hit data")
gene_calls_splits_dict = {tpl[0] : tpl[1] for tpl in genes_in_splits}
gene_calls_contigs_dict = {tpl[0] : tpl[1] for tpl in genes_in_contigs}
assert len(gene_calls_splits_dict.keys()) == len(genes_in_contigs)
kofam_gene_split_contig = []
for gene_call_id, ko in kofam_hits:
kofam_gene_split_contig.append((ko, gene_call_id, gene_calls_splits_dict[gene_call_id], gene_calls_contigs_dict[gene_call_id]))
self.progress.update("Done")
self.progress.end()
self.run.info("KOfam hits", "%d found" % len(kofam_hits), quiet=self.quiet)
if not self.quiet and not len(kofam_hits):
self.run.warning("Hmmm. No KOfam hits were found in this contigs DB, so all metabolism estimate outputs will be empty. This is fine, and "
"could even be biologically correct. But we thought we'd mention it just in case you thought it was weird. "
"Other, technical reasons that this could have happened include: 1) you didn't annotate with `anvi-run-kegg-kofams` "
"and 2) you imported KEGG functional annotations but the 'source' was not 'KOfam'.")
return kofam_gene_split_contig
def init_paths_for_modules(self):
"""This function unrolls the module DEFINITION for each module and places it in an attribute variable for
all downstream functions to access.
It unrolls the module definition into a list of all possible paths, where each path is a list of atomic steps.
Atomic steps include singular KOs, protein complexes, modules, non-essential steps, and steps without associated KOs.
"""
self.module_paths_dict = {}
modules = self.all_modules_in_db.keys()
for m in modules:
module_definition = self.all_modules_in_db[m]["DEFINITION"]
# the below function expects a list
if not isinstance(module_definition, list):
module_definition = [module_definition]
self.module_paths_dict[m] = self.kegg_modules_db.unroll_module_definition(m, def_lines=module_definition)
def init_gene_coverage(self, gcids_for_kofam_hits):
"""This function initializes gene coverage/detection values from the provided profile DB.
The profile DB should be already initialized for this to work (currently add_gene_coverage_to_headers_list()
handles this). The reason we split the initalization of the profile db from the initialization of gene
coverage/detection values is so that we only work on the set of gene calls with KOfam hits rather than all
genes in the contigs DB.
PARAMETERS
==========
gcids_for_kofam_hits : set
The gene caller ids for all genes with KOfam hits in the contigs DB
"""
if not self.profile_db:
raise ConfigError("A profile DB has not yet been initialized, so init_gene_coverage() will not work. "
"If you are a programmer, you should probably either 1) call this function after "
"add_gene_coverage_to_headers_list() or 2) extend this function so that it initializes "
"the profile db. If you are not a programmer, you should probably find one :) ")
self.run.info_single("Since the --add-coverage flag was provided, we are now loading the relevant "
"coverage information from the provided profile database.")
self.profile_db.init_gene_level_coverage_stats_dicts(gene_caller_ids_of_interest=gcids_for_kofam_hits)
def add_gene_coverage_to_headers_list(self):
"""Updates the headers lists for relevant output modes with coverage and detection column headers.
The profile DB is initialized in this function in order to get access to the sample names that will
be part of the available coverage/detection headers.
"""
if not self.profile_db:
self.args.skip_consider_gene_dbs = True
self.profile_db = ProfileSuperclass(self.args)
# first we get lists of all the headers we will need to add.
# there will be one column per sample for both coverage and detection (for individual genes and for module averages)
kofam_hits_coverage_headers = []
kofam_hits_detection_headers = []
modules_coverage_headers = []
modules_detection_headers = []
samples_in_profile_db = self.profile_db.p_meta['samples']
for s in samples_in_profile_db:
# we update the available header list so that these additional headers pass the sanity checks
kofam_hits_coverage_headers.append(s + "_coverage")
self.available_headers[s + "_coverage"] = {'cdict_key': None,
'mode_type': 'kofam_hits_in_modules',
'description': f"Mean coverage of gene with KOfam hit in sample {s}"
}
kofam_hits_detection_headers.append(s + "_detection")
self.available_headers[s + "_detection"] = {'cdict_key': None,
'mode_type': 'kofam_hits_in_modules',
'description': f"Detection of gene with KOfam hit in sample {s}"
}
modules_coverage_headers.extend([s + "_gene_coverages", s + "_avg_coverage"])
self.available_headers[s + "_gene_coverages"] = {'cdict_key': None,
'mode_type': 'all',
'description': f"Comma-separated coverage values for each gene in module in sample {s}"
}
self.available_headers[s + "_avg_coverage"] = {'cdict_key': None,
'mode_type': 'modules',
'description': f"Average coverage of all genes in module in sample {s}"
}
modules_detection_headers.extend([s + "_gene_detection", s + "_avg_detection"])
self.available_headers[s + "_gene_detection"] = {'cdict_key': None,
'mode_type': 'modules',
'description': f"Comma-separated detection values for each gene in module in sample {s}"
}
self.available_headers[s + "_avg_detection"] = {'cdict_key': None,
'mode_type': 'modules',
'description': f"Average detection of all genes in module in sample {s}"
}
# we update the header list for the affected modes
self.available_modes["kofam_hits_in_modules"]["headers"].extend(kofam_hits_coverage_headers + kofam_hits_detection_headers)
self.available_modes["kofam_hits"]["headers"].extend(kofam_hits_coverage_headers + kofam_hits_detection_headers)
self.available_modes["modules"]["headers"].extend(modules_coverage_headers + modules_detection_headers)
def mark_kos_present_for_list_of_splits(self, kofam_hits_in_splits, split_list=None, bin_name=None):
"""This function generates two bin-level dictionaries of dictionaries to store metabolism data.
The first dictionary of dictionaries is the module completeness dictionary, which associates modules with the KOs
that are present in the bin for each module.
The structure of the dictionary is like this example:
{mnum: {"gene_caller_ids" : set([132, 133, 431, 6777]),
"kofam_hits" : {'K00033' : [431, 6777],
'K01057' : [133],
'K00036' : [132] },
"genes_to_contigs": {132: 0,
133: 0,
431: 2,
6777: 1 },
"contigs_to_genes": { 0: set([132, 133]),
1: set(6777),
2: set(431) },}}
This dictionary will be expanded later by other functions.
The second dictionary of dictionaries is the KOfam hit dictionary, which stores all of the KOfam hits in the bin
regardless of whether they are in a KEGG module or not.
The structure of the dictionary is like this example:
{ko: {"gene_caller_ids" : set([431, 6777]),
"modules" : ["M00001", "M00555"], **Can be None if KO does not belong to any KEGG modules
"genes_to_contigs": { 431: 2,
6777: 1 },
"contigs_to_genes": { 1: set(6777),
2: set(431) }}}
PARAMETERS
==========
kofam_hits_in_splits : list
(ko_num, gene_call_id, split, contig) tuples, one per KOfam hit in the splits we are considering
split_list : list
splits we are considering, this is only for debugging output
bin_name : str
name of the bin containing these splits, this is only for debugging output
RETURNS
=======
bin_level_module_dict : dictionary of dictionaries
initialized module completeness dictionary for the list of splits (genome, metagenome, or bin) provided
bin_level_ko_dict : dictionary of dictionaries
dictionary of ko hits within the list of splits provided
"""
bin_level_module_dict = {}
bin_level_ko_dict = {}
if anvio.DEBUG:
self.run.info("Marking KOs present for bin", bin_name)
self.run.info("Number of splits", len(split_list))
# initialize all modules with empty lists and dicts for kos, gene calls
modules = self.all_modules_in_db.keys()
all_kos = self.all_kos_in_db.keys()
for mnum in modules:
bin_level_module_dict[mnum] = {"gene_caller_ids" : set(),
"kofam_hits" : {},
"genes_to_contigs" : {},
"contigs_to_genes" : {},
"warnings" : []
}
for knum in all_kos:
if knum not in self.ko_dict:
mods_it_is_in = self.all_kos_in_db[knum]
if mods_it_is_in:
if anvio.DEBUG:
mods_str = ", ".join(mods_it_is_in)
self.run.warning(f"Oh dear. We do not appear to have a KOfam profile for {knum}. This means "
"that any modules this KO belongs to can never be fully complete (this includes "
f"{mods_str}). ")
for m in mods_it_is_in:
if knum[0] != 'M':
bin_level_module_dict[m]["warnings"].append(f"No KOfam profile for {knum}")
continue
bin_level_ko_dict[knum] = {"gene_caller_ids" : set(),
"modules" : None,
"genes_to_contigs" : {},
"contigs_to_genes" : {}
}
kos_not_in_modules = []
for ko, gene_call_id, split, contig in kofam_hits_in_splits:
if ko not in self.all_kos_in_db:
kos_not_in_modules.append(ko)
# KOs that are not in modules will not be initialized above in the ko hit dictionary, so we add them here if we haven't already
if ko not in bin_level_ko_dict:
bin_level_ko_dict[ko] = {"gene_caller_ids" : set(),
"modules" : None,
"genes_to_contigs" : {},
"contigs_to_genes" : {}
}
else:
present_in_mods = self.all_kos_in_db[ko]
bin_level_ko_dict[ko]["modules"] = present_in_mods
for m in present_in_mods:
bin_level_module_dict[m]["gene_caller_ids"].add(gene_call_id)
if ko in bin_level_module_dict[m]["kofam_hits"] and gene_call_id not in bin_level_module_dict[m]["kofam_hits"][ko]:
bin_level_module_dict[m]["kofam_hits"][ko].append(gene_call_id)
else:
bin_level_module_dict[m]["kofam_hits"][ko] = [gene_call_id]
bin_level_module_dict[m]["genes_to_contigs"][gene_call_id] = contig
if contig in bin_level_module_dict[m]["contigs_to_genes"]:
bin_level_module_dict[m]["contigs_to_genes"][contig].add(gene_call_id)
else:
bin_level_module_dict[m]["contigs_to_genes"][contig] = set([gene_call_id])
bin_level_ko_dict[ko]["gene_caller_ids"].add(gene_call_id)
bin_level_ko_dict[ko]["genes_to_contigs"][gene_call_id] = contig
if contig in bin_level_ko_dict[ko]["contigs_to_genes"]:
bin_level_ko_dict[ko]["contigs_to_genes"][contig].add(gene_call_id)
else:
bin_level_ko_dict[ko]["contigs_to_genes"][contig] = set([gene_call_id])
if anvio.DEBUG:
self.run.info("KOs processed", "%d in bin" % len(kofam_hits_in_splits))
if kos_not_in_modules:
self.run.warning("Just so you know, the following KOfam hits did not belong to any KEGG modules in the MODULES.db: %s"
% ", ".join(kos_not_in_modules))
return bin_level_module_dict, bin_level_ko_dict
    def compute_module_completeness_for_bin(self, mnum, meta_dict_for_bin):
        """This function calculates the completeness of the specified module within the given bin metabolism dictionary.

        To do this, it works with the unrolled module definition: a list of all possible paths, where each path is a list of atomic steps.
        Atomic steps include singular KOs, protein complexes, modules, non-essential steps, and steps without associated KOs.
        An atomic step (or parts of a protein complex) can be considered 'present' if the corresponding KO(s) has a hit in the bin.
        For each path, the function computes the path completeness as the number of present (essential) steps divided by the number of total steps in the path.
        The module completeness is simply the highest path completeness.

        There are some special cases to consider here.
        1) Non-essential steps. These are steps that are marked with a preceding "-" to indicate that they are not required for the module to
           be considered complete. They often occur in pathways with multiple forks. What we do with these is save and count them separately as
           non-essential steps, but we do not use them in our module completeness calculations. Another thing we do is continue parsing the rest
           of the module steps as normal, even though some of them may affect steps after the non-essential one. That may eventually change.
           See comments in the code below.
        2) Steps without associated KOs. These are steps marked as "--". They may require an enzyme, but if so that enzyme is not in the KOfam
           database, so we can't know whether they are complete or not from our KOfam hits. Therefore, we assume these steps are incomplete, and
           warn the user to go back and check the module manually.
        3) Steps defined by entire modules. These steps have module numbers instead of KOs, so they require an entire module to be complete in
           order to be complete. We can't figure this out until after we've evaluated all modules, so we simply parse these steps without marking
           them complete, and later will go back to adjust the completeness score once all modules have been marked complete or not.

        PARAMETERS
        ==========
        mnum : string
            module number to work on
        meta_dict_for_bin : dictionary of dictionaries
            metabolism completeness dict for the current bin, to be modified in-place

        NEW KEYS ADDED TO METABOLISM COMPLETENESS DICT
        =======
        "pathway_completeness"          a list of the completeness of each pathway
        "present_nonessential_kos"      a list of non-essential KOs in the module that were found to be present
        "most_complete_paths"           a list of the paths with maximum completeness
        "percent_complete"              the completeness of the module, which is the maximum pathway completeness
        "complete"                      whether the module completeness falls over the completeness threshold

        RETURNS
        =======
        over_complete_threshold : boolean
            whether or not the module is considered "complete" overall based on the threshold fraction of completeness
        has_nonessential_step : boolean
            whether or not the module contains non-essential steps. Used for warning the user about these.
        has_no_ko_step : boolean
            whether or not the module contains steps without associated KOs. Used for warning the user about these.
        defined_by_modules : boolean
            whether or not the module contains steps defined by other modules. Used for going back to adjust completeness later.
        """
        present_list_for_mnum = meta_dict_for_bin[mnum]["kofam_hits"].keys()
        if not present_list_for_mnum:
            # no KOs in this module are present
            if anvio.DEBUG:
                self.run.warning("No KOs present for module %s. Parsing for completeness is still being done to obtain module information." % mnum)

        # stuff to put in the module's dictionary
        module_nonessential_kos = [] # KOs that are present but unnecessary for module completeness

        # stuff that will be returned
        over_complete_threshold = False
        has_nonessential_step = False
        has_no_ko_step = False
        defined_by_modules = False

        meta_dict_for_bin[mnum]["pathway_completeness"] = []

        for p in self.module_paths_dict[mnum]:
            num_complete_steps_in_path = 0
            num_nonessential_steps_in_path = 0 # so that we don't count nonessential steps when computing completeness
            for atomic_step in p:
                # there are 5 types of atomic steps to take care of
                # 1) regular old single KOs, ie Kxxxxx
                if atomic_step[0] == "K" and len(atomic_step) == 6:
                    if atomic_step in present_list_for_mnum:
                        num_complete_steps_in_path += 1
                # 2) protein complexes, ie Kxxxxx+Kyyyyy-Kzzzzz (2 types of complex components - essential and nonessential)
                elif atomic_step[0] == "K" and (atomic_step[6] == "+" or atomic_step[6] == "-"):
                    # walk the string 7 characters at a time: each component is a 6-char KO preceded by
                    # '+' (essential) or '-' (nonessential); the first KO (chars 0-5) is always essential
                    idx = 6
                    essential_components = [atomic_step[0:idx]]
                    while idx < len(atomic_step):
                        component_ko = atomic_step[idx+1:idx+7]
                        if atomic_step[idx] == "+":
                            essential_components.append(component_ko)
                        else:
                            has_nonessential_step = True
                            if component_ko not in module_nonessential_kos:
                                module_nonessential_kos.append(component_ko)
                        idx += 7

                    # a complex contributes fractional completeness: the fraction of its essential components present
                    num_present_components = 0
                    for c in essential_components:
                        if c in present_list_for_mnum:
                            num_present_components += 1
                    component_completeness = num_present_components / len(essential_components)
                    num_complete_steps_in_path += component_completeness
                # 3) non-essential KOs, ie -Kxxxxx
                elif atomic_step[0:2] == "-K" and len(atomic_step) == 7:
                    """
                    OKAY, SO HERE WE HAVE SOME POOPINESS THAT MAY NEED TO BE FIXED EVENTUALLY.
                    Basically, some DEFINITION lines have KOs that seem to be marked non-essential;
                    ie, "-K11024" in "K11023 -K11024 K11025 K11026 K11027".
                    It was difficult to decide whether we should consider only K11024, or K11024 and all following KOs, to be non-essential.
                    For instance, the module M00778 is a complex case that gave us pause - see Fiesta issue 955.
                    But for now, we have decided to just track only the one KO as a 'non-essential step', and to not include such steps in
                    the module completeness estimate.
                    """
                    if atomic_step[1:] not in module_nonessential_kos:
                        module_nonessential_kos.append(atomic_step[1:])
                    num_nonessential_steps_in_path += 1
                    has_nonessential_step = True
                # 4) steps without associated KOs, ie --
                elif atomic_step == "--":
                    # when '--' in a DEFINITION line happens, it signifies a reaction step that has no associated KO.
                    # we assume that such steps are not complete, because we really can't know if it is from the KOfam hits alone
                    has_no_ko_step = True
                    warning_str = "'--' steps are assumed incomplete"
                    if warning_str not in meta_dict_for_bin[mnum]["warnings"]:
                        meta_dict_for_bin[mnum]["warnings"].append(warning_str)
                # 5) Module numbers, ie Mxxxxx
                elif atomic_step[0] == "M" and len(atomic_step) == 6:
                    """
                    This happens when a module is defined by other modules. For example, photosynthesis module M00611 is defined as
                    (M00161,M00163) M00165 === (photosystem II or photosystem I) and calvin cycle

                    We need all the modules to have been evaluated before we can determine completeness of steps with module numbers.
                    So what we will do here is to use a flag variable to keep track of the modules that have this sort of definition
                    in a list so we can go back and evaluate completeness of steps with module numbers later.
                    """
                    defined_by_modules = True
                else:
                    raise ConfigError("Well. While estimating completeness for module %s, we found an atomic step in the pathway that we "
                                      "are not quite sure what to do with. Here it is: %s" % (mnum, atomic_step))

            # NOTE(review): if a path consisted entirely of nonessential steps this would divide by zero -- TODO confirm that cannot happen
            path_completeness = num_complete_steps_in_path / (len(p) - num_nonessential_steps_in_path)
            meta_dict_for_bin[mnum]["pathway_completeness"].append(path_completeness)

        # once all paths have been evaluated, we find the path(s) of maximum completeness and set that as the overall module completeness
        # this is not very efficient as it takes two passes over the list but okay
        meta_dict_for_bin[mnum]["percent_complete"] = max(meta_dict_for_bin[mnum]["pathway_completeness"])
        if meta_dict_for_bin[mnum]["percent_complete"] > 0:
            meta_dict_for_bin[mnum]["most_complete_paths"] = [self.module_paths_dict[mnum][i] for i, pc in enumerate(meta_dict_for_bin[mnum]["pathway_completeness"]) if pc == meta_dict_for_bin[mnum]["percent_complete"]]
        else:
            meta_dict_for_bin[mnum]["most_complete_paths"] = []

        if anvio.DEBUG and len(meta_dict_for_bin[mnum]["most_complete_paths"]) > 1:
            self.run.warning("Found %d complete paths for module %s with completeness %s. " % (len(meta_dict_for_bin[mnum]["most_complete_paths"]), mnum, meta_dict_for_bin[mnum]["percent_complete"]),
                             header='DEBUG OUTPUT', lc='yellow')

        over_complete_threshold = True if meta_dict_for_bin[mnum]["percent_complete"] >= self.module_completion_threshold else False
        meta_dict_for_bin[mnum]["complete"] = over_complete_threshold
        meta_dict_for_bin[mnum]["present_nonessential_kos"] = module_nonessential_kos
        if over_complete_threshold:
            meta_dict_for_bin["num_complete_modules"] += 1

        return over_complete_threshold, has_nonessential_step, has_no_ko_step, defined_by_modules
def adjust_module_completeness_for_bin(self, mod, meta_dict_for_bin):
"""This function adjusts completeness of modules that are defined by other modules.
This can only be done after all other modules have been evaluated for completeness.
The function uses similar logic as compute_module_completeness_for_bin() to re-assess whether steps defined
by other modules are complete, and updates the metabolism completess dictionary accordingly.
PARAMETERS
==========
mod : string
the module number to adjust
meta_dict_for_bin : dictionary of dictionaries
metabolism completeness dictionary for the current bin
RETURNS
=======
now_complete : boolean
whether or not the module is NOW considered "complete" overall based on the threshold fraction of completeness
"""
for i in range(len(self.module_paths_dict[mod])):
p = self.module_paths_dict[mod][i]
num_essential_steps_in_path = 0 # note that the len(p) will include nonessential steps; we should count only essential ones
num_complete_module_steps = 0
for atomic_step in p:
# single KOs and protein complexes and '--' steps; were already counted as complete by previous function
if atomic_step[0] == "K" or atomic_step == "--":
num_essential_steps_in_path += 1
# non-essential KO, don't count as a step in the path
elif atomic_step[0:2] == "-K" and len(atomic_step) == 7:
pass
# module step; we need to count these based on previously computed module completeness
elif atomic_step[0] == "M" and len(atomic_step) == 6:
num_complete_module_steps += meta_dict_for_bin[atomic_step]["percent_complete"]
num_essential_steps_in_path += 1
else:
raise ConfigError("Well. While adjusting completeness estimates for module %s, we found an atomic step in the pathway that we "
"are not quite sure what to do with. Here it is: %s" % (mod, atomic_step))
# now we adjust the previous pathway completeness
old_complete_steps_in_path = meta_dict_for_bin[mod]["pathway_completeness"][i] * num_essential_steps_in_path
adjusted_num_complete_steps_in_path = old_complete_steps_in_path + num_complete_module_steps
meta_dict_for_bin[mod]["pathway_completeness"][i] = adjusted_num_complete_steps_in_path / num_essential_steps_in_path
# after adjusting for all paths, adjust overall module completeness
meta_dict_for_bin[mod]["percent_complete"] = max(meta_dict_for_bin[mod]["pathway_completeness"])
if meta_dict_for_bin[mod]["percent_complete"] > 0:
meta_dict_for_bin[mod]["most_complete_paths"] = [self.module_paths_dict[mod][i] for i, pc in enumerate(meta_dict_for_bin[mod]["pathway_completeness"]) if pc == meta_dict_for_bin[mod]["percent_complete"]]
else:
meta_dict_for_bin[mod]["most_complete_paths"] = []
was_already_complete = meta_dict_for_bin[mod]["complete"]
now_complete = True if meta_dict_for_bin[mod]["percent_complete"] >= self.module_completion_threshold else False
meta_dict_for_bin[mod]["complete"] = now_complete
if now_complete and not was_already_complete:
meta_dict_for_bin["num_complete_modules"] += 1
return now_complete
def add_module_coverage(self, mod, meta_dict_for_bin):
    """Update the metabolism dictionary with coverage values for the given module.

    It must be called after init_gene_coverage() or add_gene_coverage_to_headers_list() so that
    the self.profile_db attribute is established.

    NEW KEYS ADDED TO METABOLISM COMPLETENESS DICT
    =======
    "genes_to_coverage"             dictionary of mean coverage in each sample for each gene
                                    coverage = meta_dict_for_bin[module]["genes_to_coverage"][sample][gcid]
    "genes_to_detection"            dictionary of detection in each sample for each gene
                                    detection = meta_dict_for_bin[module]["genes_to_detection"][sample][gcid]
    "average_coverage_per_sample"   dictionary of average mean coverage of all genes in module, per sample
                                    avg_coverage = meta_dict_for_bin[module]["average_coverage_per_sample"][sample]
    "average_detection_per_sample"  dictionary of average detection of all genes in module, per sample
                                    avg_detection = meta_dict_for_bin[module]["average_detection_per_sample"][sample]

    PARAMETERS
    ==========
    mod : str
        the module number to add coverage info for
    meta_dict_for_bin : dict
        metabolism completeness dict for the current bin, modified in place
    """
    if not self.profile_db:
        raise ConfigError("The add_module_coverage() function cannot work without a properly initialized "
                          "profile database.")

    if self.custom_output_headers:
        # determine the specific set of samples we are interested in so we don't make the dictionary huge
        sample_set = set()
        for h in self.custom_output_headers:
            if 'coverage' in h or 'detection' in h:
                if '_gene_coverages' in h:
                    sample = h.replace('_gene_coverages', '')
                elif '_avg_coverage' in h:
                    sample = h.replace('_avg_coverage', '')
                elif '_gene_detection' in h:
                    sample = h.replace('_gene_detection', '')
                elif '_avg_detection' in h:
                    sample = h.replace('_avg_detection', '')
                else:
                    # the header mentions coverage/detection but is not one of the per-sample
                    # header patterns. Previously this case re-used a stale `sample` value from
                    # an earlier iteration (or raised a NameError on the first one), so we skip it.
                    continue
                sample_set.add(sample)
        self.coverage_sample_list = list(sample_set)
    else:
        self.coverage_sample_list = self.profile_db.p_meta['samples']

    meta_dict_for_bin[mod]["genes_to_coverage"] = {}
    meta_dict_for_bin[mod]["genes_to_detection"] = {}
    meta_dict_for_bin[mod]["average_coverage_per_sample"] = {}
    meta_dict_for_bin[mod]["average_detection_per_sample"] = {}

    num_genes = len(meta_dict_for_bin[mod]["gene_caller_ids"])
    for s in self.coverage_sample_list:
        meta_dict_for_bin[mod]["genes_to_coverage"][s] = {}
        meta_dict_for_bin[mod]["genes_to_detection"][s] = {}
        coverage_sum = 0
        detection_sum = 0
        for g in meta_dict_for_bin[mod]["gene_caller_ids"]:
            cov = self.profile_db.gene_level_coverage_stats_dict[g][s]['mean_coverage']
            det = self.profile_db.gene_level_coverage_stats_dict[g][s]['detection']
            coverage_sum += cov
            detection_sum += det
            meta_dict_for_bin[mod]["genes_to_coverage"][s][g] = cov
            meta_dict_for_bin[mod]["genes_to_detection"][s][g] = det

        # avoid division by zero for modules with no gene calls
        if num_genes == 0:
            meta_dict_for_bin[mod]["average_coverage_per_sample"][s] = 0
            meta_dict_for_bin[mod]["average_detection_per_sample"][s] = 0
        else:
            meta_dict_for_bin[mod]["average_coverage_per_sample"][s] = coverage_sum / num_genes
            meta_dict_for_bin[mod]["average_detection_per_sample"][s] = detection_sum / num_genes
def estimate_for_list_of_splits(self, metabolism_dict_for_list_of_splits, bin_name=None):
    """This is the atomic metabolism estimator function, which builds up the metabolism completeness dictionary for an arbitrary list of splits.

    For example, the list of splits may represent a bin, a single isolate genome, or an entire metagenome.
    The function takes in a metabolism completeness dictionary already initialized with the relevant KOfam hits per module, and updates it
    with the individual steps and completion estimates for each module.

    PARAMETERS
    ==========
    metabolism_dict_for_list_of_splits : dictionary of dictionaries
        the metabolism completeness dictionary of dictionaries for this list of splits. It contains
        one dictionary of module steps and completion information for each module (keyed by module number),
        as well as one key num_complete_modules that tracks the number of complete modules found in these splits.
        Calling functions should assign this dictionary to a metabolism superdict with the bin name as a key.
    bin_name : str
        the name of the bin/genome/metagenome that we are working with

    RETURNS
    =======
    metabolism_dict_for_list_of_splits : dictionary of dictionaries
        the same dictionary, updated in place with completeness (and redundancy) estimates
    """
    metabolism_dict_for_list_of_splits["num_complete_modules"] = 0

    complete_mods = []
    mods_def_by_modules = [] # a list of modules that have module numbers in their definitions
    # modules to warn about
    mods_with_unassociated_ko = [] # a list of modules that have "--" steps without an associated KO
    mods_with_nonessential_steps = [] # a list of modules that have nonessential steps like "-K11024"

    # estimate completeness of each module
    for mod in metabolism_dict_for_list_of_splits.keys():
        if mod == "num_complete_modules":
            continue
        mod_is_complete, has_nonessential_step, has_no_ko_step, defined_by_modules \
            = self.compute_module_completeness_for_bin(mod, metabolism_dict_for_list_of_splits)
        if mod_is_complete:
            complete_mods.append(mod)
        if has_nonessential_step:
            mods_with_nonessential_steps.append(mod)
        if has_no_ko_step:
            mods_with_unassociated_ko.append(mod)
        if defined_by_modules:
            mods_def_by_modules.append(mod)

        if self.add_coverage:
            self.add_module_coverage(mod, metabolism_dict_for_list_of_splits)

    # go back and adjust completeness of modules that are defined by other modules
    if mods_def_by_modules:
        for mod in mods_def_by_modules:
            mod_is_complete = self.adjust_module_completeness_for_bin(mod, metabolism_dict_for_list_of_splits)
            # only append if not already present: a module that was complete before adjustment AND
            # defined by other modules was previously listed twice in the "Complete modules" output
            if mod_is_complete and mod not in complete_mods:
                complete_mods.append(mod)

    # estimate redundancy of each module
    for mod in metabolism_dict_for_list_of_splits.keys():
        if mod == "num_complete_modules":
            continue
        self.compute_module_redundancy_for_bin(mod, metabolism_dict_for_list_of_splits)

    # notify user of the modules that gave some fishy results -- but only for genome mode because it's too wordy otherwise
    if not self.quiet and self.genome_mode:
        if mods_with_nonessential_steps:
            self.run.warning("Please note that anvi'o found one or more non-essential steps in the following KEGG modules: %s. "
                             "At this time, we are not counting these steps in our percent completion estimates."
                             % (", ".join(mods_with_nonessential_steps)))
        if mods_with_unassociated_ko:
            self.run.warning("Just so you know, while estimating the completeness of some KEGG modules, anvi'o saw "
                             "'--' in the module DEFINITION. This indicates a step in the pathway that has no "
                             "associated KO. So we really cannot know just based on KOfam hits whether or not this "
                             "step is present. By default, anvi'o marks these steps incomplete. But they may not be, "
                             "and as a result their modules may be falsely considered incomplete. So it may be in your "
                             "interest to go back and take a look at these individual modules to see if you can find the "
                             "missing enzyme in some other way. Best of luck to you. Here is the list of modules to check out: %s"
                             % (", ".join(mods_with_unassociated_ko)))

    if anvio.DEBUG or self.genome_mode:
        self.run.info("Bin name", bin_name)
        self.run.info("Module completion threshold", self.module_completion_threshold)
        self.run.info("Number of complete modules", metabolism_dict_for_list_of_splits["num_complete_modules"])
        if complete_mods:
            self.run.info("Complete modules", ", ".join(complete_mods))

    return metabolism_dict_for_list_of_splits
######### REDUNDANCY FUNCTIONS (UNUSED) #########
def compute_naive_redundancy_for_path(self, num_ko_hits_in_path_dict):
    """Return a naive redundancy measure for a module path, given the number of hits per KO in the path.

    naive redundancy = # extra hits / len(path), where a hit is "extra" if it is not the first hit to its KO.
    """
    total_extra = sum(max(count - 1, 0) for count in num_ko_hits_in_path_dict.values())
    return total_extra / len(num_ko_hits_in_path_dict)
def compute_copywise_redundancy_for_path(self, num_ko_hits_in_path_dict, aggregation_measure="average"):
    """Compute redundancy based on the completeness of each extra copy of a path.

    The 'base' redundancy score is determined by the number of extra copies with 100% completeness.
    The completeness measurements of all other extra copies are aggregated (using the aggregation_measure)
    and added to this 'base' redundancy to get the overall path redundancy.

    Returns a tuple of (redundancy score, list of extra-copy completeness fractions).
    """
    accepted_aggregation_measures = ["average", "median", "weighted_sum", "geometric_mean"]
    path_length = len(num_ko_hits_in_path_dict.keys())
    extra_hits = [max(count - 1, 0) for count in num_ko_hits_in_path_dict.values()]
    base_redundancy = min(extra_hits)  # number of extra copies of the path that are 100% complete

    # completeness fraction of every extra copy beyond the fully-complete ones
    extra_copy_completeness = []
    for copy_number in range(base_redundancy + 1, max(extra_hits) + 1):
        kos_present_in_copy = sum(1 for num_hits in extra_hits if num_hits >= copy_number)
        extra_copy_completeness.append(kos_present_in_copy / path_length)

    if not extra_copy_completeness:
        # this handles the case when ALL extra copies are 100% complete
        aggregated_completeness = 0
    elif aggregation_measure == "average":
        aggregated_completeness = statistics.mean(extra_copy_completeness)
    elif aggregation_measure == "median":
        aggregated_completeness = statistics.median(extra_copy_completeness)
    elif aggregation_measure == "weighted_sum":
        aggregated_completeness = 0
        for idx, completeness in enumerate(extra_copy_completeness):
            aggregated_completeness += 1/(idx+1) * completeness
    elif aggregation_measure == "geometric_mean":
        aggregated_completeness = stats.gmean(extra_copy_completeness)
    else:
        raise ConfigError("The function compute_copywise_redundancy_for_path() doesn't know how to handle the aggregation measure '%s'. "
                          "Accepted aggregation measures include: %s " % (aggregation_measure, ", ".join(accepted_aggregation_measures)))

    return (base_redundancy + aggregated_completeness), extra_copy_completeness
def compute_entropy_weighted_redundancy_for_bin(self, num_ko_hits_in_path_dict):
    """Compute naive redundancy, weighted by the entropy of the extra-hit distribution."""
    extra_hits = [max(count - 1, 0) for count in num_ko_hits_in_path_dict.values()]
    if not any(extra_hits):
        return 0.0

    num_kos = len(num_ko_hits_in_path_dict)
    total_extra_hits = sum(extra_hits)
    naive_redundancy = total_extra_hits / num_kos

    observed_entropy = stats.entropy(extra_hits)
    # the maximally-even spread of the same number of extra hits over the same KOs
    evenly_spread = [total_extra_hits // num_kos] * num_kos
    for idx in range(total_extra_hits % num_kos):
        evenly_spread[idx] += 1
    max_entropy = stats.entropy(evenly_spread)
    # avoid divide by 0
    max_entropy += 1e-20

    return naive_redundancy * observed_entropy / max_entropy
def compute_module_redundancy_for_bin(self, mnum, meta_dict_for_bin):
    """This function calculates the redundancy of the specified module within the given bin metabolism dictionary.

    Each module can have multiple paths, but we only compute redundancy on the paths with the highest completeness
    (stored under the "most_complete_paths" key). If there are no paths in this list (which only happens when there
    are 0 KOfam hits to the module), then we do not compute redundancy.

    PARAMETERS
    ==========
    mnum : string
        module number to work on
    meta_dict_for_bin : dictionary of dictionaries
        metabolism completeness dict for the current bin, to be modified in-place
    """
    meta_dict_for_bin[mnum]["naive_redundancy"] = []
    meta_dict_for_bin[mnum]["copywise_average"] = []
    meta_dict_for_bin[mnum]["copywise_completeness_distributions"] = []
    meta_dict_for_bin[mnum]["copywise_median"] = []
    meta_dict_for_bin[mnum]["copywise_weighted-sum"] = []
    meta_dict_for_bin[mnum]["copywise_geometric-mean"] = []
    meta_dict_for_bin[mnum]["entropy_weighted"] = []

    paths_of_highest_completeness = meta_dict_for_bin[mnum]["most_complete_paths"]
    if not paths_of_highest_completeness:
        # no paths (no KOfam hits to the module); leave all redundancy lists empty
        return

    for p in paths_of_highest_completeness:
        # number of hits per KO, restricted to the KOs in this path
        kofam_hits_in_path = { ko : meta_dict_for_bin[mnum]["kofam_hits"][ko] for ko in meta_dict_for_bin[mnum]["kofam_hits"].keys() if ko in p }
        num_hits_per_kofam = { ko : len(kofam_hits_in_path[ko]) for ko in kofam_hits_in_path.keys() }
        # make sure every KO in the path has an entry, even those with 0 hits
        for ko in p:
            if ko not in num_hits_per_kofam:
                num_hits_per_kofam[ko] = 0

        # for now, we will try a bunch of different redundancy calculations and put them all into the dictionary until we find the ones we like
        meta_dict_for_bin[mnum]["naive_redundancy"].append(self.compute_naive_redundancy_for_path(num_hits_per_kofam))
        cw_avg_redundancy, copy_completeness_distribution = self.compute_copywise_redundancy_for_path(num_hits_per_kofam, aggregation_measure="average")
        meta_dict_for_bin[mnum]["copywise_average"].append(cw_avg_redundancy)
        meta_dict_for_bin[mnum]["copywise_completeness_distributions"].append(copy_completeness_distribution)
        cw_med_redundancy, copy_completeness_distribution = self.compute_copywise_redundancy_for_path(num_hits_per_kofam, aggregation_measure="median")
        meta_dict_for_bin[mnum]["copywise_median"].append(cw_med_redundancy)
        cw_ws_redundancy, copy_completeness_distribution = self.compute_copywise_redundancy_for_path(num_hits_per_kofam, aggregation_measure="weighted_sum")
        meta_dict_for_bin[mnum]["copywise_weighted-sum"].append(cw_ws_redundancy)
        cw_gm_redundancy, copy_completeness_distribution = self.compute_copywise_redundancy_for_path(num_hits_per_kofam, aggregation_measure="geometric_mean")
        # bug fix: this result was previously appended to the "copywise_weighted-sum" list (a
        # copy-paste error), which gave that list two entries per path and lost the geometric mean
        meta_dict_for_bin[mnum]["copywise_geometric-mean"].append(cw_gm_redundancy)
        meta_dict_for_bin[mnum]["entropy_weighted"].append(self.compute_entropy_weighted_redundancy_for_bin(num_hits_per_kofam))

    return
######### ESTIMATION DRIVER FUNCTIONS #########
def estimate_for_genome(self, kofam_gene_split_contig):
    """Metabolism estimation for a contigs DB that contains a single genome.

    Assuming this contigs DB contains only one genome, all of its splits and their KOfam hits
    are sent to the atomic estimation function at once. The resulting metabolism and KO
    completion dictionaries are returned wrapped in the superdict format.

    PARAMETERS
    ==========
    kofam_gene_split_contig : list
        (ko_num, gene_call_id, split, contig) tuples, one per KOfam hit in the splits we are considering

    RETURNS
    =======
    genome_metabolism_superdict : dictionary of dictionary of dictionaries
        dictionary mapping genome name to its metabolism completeness dictionary
    genome_ko_superdict : dictionary of dictionary of dictionaries
        maps genome name to its KOfam hit dictionary
    """
    genome_name = self.contigs_db_project_name

    # since all hits belong to one genome, we can take the UNIQUE splits from all the hits
    unique_splits = list({hit[2] for hit in kofam_gene_split_contig})
    module_dict, ko_dict = self.mark_kos_present_for_list_of_splits(kofam_gene_split_contig,
                                                                    split_list=unique_splits,
                                                                    bin_name=genome_name)

    if self.store_json_without_estimation:
        genome_metabolism_superdict = {genome_name: module_dict}
    else:
        genome_metabolism_superdict = {genome_name: self.estimate_for_list_of_splits(module_dict, bin_name=genome_name)}
    genome_ko_superdict = {genome_name: ko_dict}

    # append to file
    self.append_kegg_metabolism_superdicts(genome_metabolism_superdict, genome_ko_superdict)

    return genome_metabolism_superdict, genome_ko_superdict
def estimate_for_bins_in_collection(self, kofam_gene_split_contig):
    """Run metabolism estimation for every bin the user requests.

    PARAMETERS
    ==========
    kofam_gene_split_contig : list
        (ko_num, gene_call_id, split, contig) tuples, one per KOfam hit in the splits we are considering

    RETURNS
    =======
    bins_metabolism_superdict : dictionary of dictionary of dictionaries
        dictionary mapping bin name to its metabolism completeness dictionary
    bins_ko_superdict : dictionary of dictionary of dictionaries
        dictionary mapping bin name to its KOfam hits dictionary
    """
    bins_metabolism_superdict = {}
    bins_ko_superdict = {}

    bin_name_to_split_names_dict = ccollections.GetSplitNamesInBins(self.args).get_dict()
    num_bins = len(bin_name_to_split_names_dict)
    total_splits = sum(len(split_names) for split_names in bin_name_to_split_names_dict.values())
    self.run.info_single("%s split names associated with %s bins in collection '%s' have been "
                         "successfully recovered 🎊" % (pp(total_splits), pp(num_bins), self.collection_name),
                         nl_before=1, nl_after=1)

    self.progress.new("Estimating metabolism for each bin", progress_total_items=num_bins)

    for bin_name, splits_in_bin in bin_name_to_split_names_dict.items():
        self.progress.update("[%d of %d] %s" % (self.progress.progress_current_item + 1, num_bins, bin_name))
        hits_in_bin = [hit for hit in kofam_gene_split_contig if hit[2] in splits_in_bin]
        module_dict, ko_dict = self.mark_kos_present_for_list_of_splits(hits_in_bin, split_list=splits_in_bin, bin_name=bin_name)

        if self.store_json_without_estimation:
            bins_metabolism_superdict[bin_name] = module_dict
            single_bin_module_superdict = {bin_name: module_dict}
        else:
            bins_metabolism_superdict[bin_name] = self.estimate_for_list_of_splits(module_dict, bin_name=bin_name)
            single_bin_module_superdict = {bin_name: bins_metabolism_superdict[bin_name]}
        bins_ko_superdict[bin_name] = ko_dict

        # append individual bin to file
        self.append_kegg_metabolism_superdicts(single_bin_module_superdict, {bin_name: ko_dict})

        self.progress.increment()
        self.progress.reset()

    self.progress.end()

    return bins_metabolism_superdict, bins_ko_superdict
def estimate_for_contigs_db_for_metagenome(self, kofam_gene_split_contig):
    """Handle metabolism estimation for an entire metagenome.

    We treat each contig in the metagenome to be its own 'bin' or 'genome' and estimate
    metabolism separately for each one.

    PARAMETERS
    ==========
    kofam_gene_split_contig : list
        (ko_num, gene_call_id, split, contig) tuples, one per KOfam hit in the splits we are considering

    RETURNS
    =======
    metagenome_metabolism_superdict : dictionary of dictionary of dictionaries
        dictionary mapping contig name to its metabolism completeness dictionary
    metagenome_ko_superdict : dictionary of dictionary of dictionaries
        dictionary mapping contig name to its KOfam hits dictionary
    """
    metagenome_metabolism_superdict = {}
    metagenome_ko_superdict = {}

    contigs_in_metagenome = list({hit[3] for hit in kofam_gene_split_contig})
    num_contigs = len(contigs_in_metagenome)

    self.progress.new("Estimating metabolism for each contig in metagenome", progress_total_items=num_contigs)

    for contig in contigs_in_metagenome:
        self.progress.update("[%d of %d] %s" % (self.progress.progress_current_item + 1, num_contigs, contig))

        # get unique split names associated with this contig
        splits_in_contig = list({hit[2] for hit in kofam_gene_split_contig if hit[3] == contig})
        if anvio.DEBUG:
            self.run.info_single(f"{len(splits_in_contig)} splits recovered from contig {contig} ✌")

        hits_in_contig = [hit for hit in kofam_gene_split_contig if hit[2] in splits_in_contig]
        module_dict, ko_dict = self.mark_kos_present_for_list_of_splits(hits_in_contig, split_list=splits_in_contig, bin_name=contig)

        if self.store_json_without_estimation:
            metagenome_metabolism_superdict[contig] = module_dict
            single_contig_module_superdict = {contig: module_dict}
        else:
            metagenome_metabolism_superdict[contig] = self.estimate_for_list_of_splits(module_dict, bin_name=contig)
            single_contig_module_superdict = {contig: metagenome_metabolism_superdict[contig]}
        metagenome_ko_superdict[contig] = ko_dict

        # append individual contig to file
        self.append_kegg_metabolism_superdicts(single_contig_module_superdict, {contig: ko_dict})

        self.progress.increment()
        self.progress.reset()

    self.progress.end()

    return metagenome_metabolism_superdict, metagenome_ko_superdict
def estimate_metabolism_from_json_data(self):
    """Run the estimation functions on data obtained from a provided JSON file.

    Does NOT currently produce KO hits output.

    RETURNS
    =======
    new_kegg_metabolism_superdict : dictionary of dictionaries
        freshly-estimated metabolism completeness dictionaries, keyed by bin name
    """
    self.run.info("JSON input file", self.estimate_from_json)

    filesnpaths.is_file_json_formatted(self.estimate_from_json)
    # parse_int=int keeps integer-looking values as ints rather than floats
    kegg_metabolism_superdict = json.load(open(self.estimate_from_json), parse_int=int)

    new_kegg_metabolism_superdict = {}

    # these module-level keys are required for estimation; everything else is ignored
    expected_keys_for_module = {"gene_caller_ids", "kofam_hits", "genes_to_contigs", "contigs_to_genes"}
    bins_found = []
    additional_keys = set([])

    self.init_data_from_modules_db()
    self.init_paths_for_modules()

    for bin_name, meta_dict_for_bin in kegg_metabolism_superdict.items():
        bins_found.append(bin_name)
        for mod, mod_dict in meta_dict_for_bin.items():
            # a num_complete_modules key means the JSON was generated from already-estimated data;
            # we warn and ignore the pre-existing estimates
            if mod == "num_complete_modules":
                self.run.warning("Your JSON file appears to have been generated from data that already contains metabolic module completeness information. "
                                 "We say this because the key 'num_complete_modules' was found. This isn't a problem; however you should know that anvi'o "
                                 "won't take any of the existing estimation information into account. The only module-level keys that will be used from this file "
                                 "are: %s" % (expected_keys_for_module))
                continue
            # verify that dict contains the necessary keys for estimation
            if not expected_keys_for_module.issubset(set(mod_dict.keys())):
                missing_keys = expected_keys_for_module.difference(set(mod_dict.keys()))
                raise ConfigError("Your JSON file is incorrectly formatted for metabolism estimation. We expect the following keys: %s. "
                                  "However, we didn't find some of them for module %s in %s. Here are the missing keys: %s"
                                  % (expected_keys_for_module, mod, bin_name, missing_keys))

            additional_keys = additional_keys.union(set(mod_dict.keys()).difference(expected_keys_for_module))

            # convert gene_caller_ids and contigs_to_genes lists to sets (JSON cannot store sets)
            mod_dict['gene_caller_ids'] = set(mod_dict['gene_caller_ids'])
            for contig, gene_list in mod_dict['contigs_to_genes'].items():
                mod_dict['contigs_to_genes'][contig] = set(gene_list)
            # JSON object keys are always strings, so gene caller ids must be converted back to ints
            mod_dict['genes_to_contigs'] = {int(g):c for g,c in mod_dict['genes_to_contigs'].items()}

        new_kegg_metabolism_superdict[bin_name] = self.estimate_for_list_of_splits(meta_dict_for_bin, bin_name=bin_name)
        single_bin_module_superdict = {bin_name: new_kegg_metabolism_superdict[bin_name]}
        self.append_kegg_metabolism_superdicts(single_bin_module_superdict, ko_superdict_for_list_of_splits={})

    if not self.quiet and additional_keys:
        self.run.warning("Just to let you know, we found the following module-level keys in your JSON file that were totally ignored during metabolism estimation "
                         "(no harm was done by including them): %s" % (additional_keys))

    self.run.info("Bins/genomes/metagenomes found", ", ".join(bins_found))
    return new_kegg_metabolism_superdict
def estimate_metabolism(self, skip_storing_data=False, output_files_dictionary=None, return_superdicts=False,
                        return_subset_for_matrix_format=False, all_modules_in_db=None, all_kos_in_db=None):
    """This is the driver function for estimating metabolism for a single contigs DB.

    It will decide what to do based on whether the input contigs DB is a genome or metagenome.
    It usually avoids returning the metabolism data to save on memory (as this data is typically appended to
    files immediately), but this behavior can be changed by setting return_superdicts to True (for the entire
    modules/ko superdictionaries) or return_subset_for_matrix_format to True (for a subset of these dicts that
    multi-estimators need for matrix output generation).

    PARAMETERS
    ==========
    skip_storing_data : boolean
        set to True if we don't want the metabolism data dictionary to be stored as a file (useful when using this function
        for on-the-fly visualization or for generating matrix format output from a multi estimator class)
    output_files_dictionary : dictionary of mode, AppendableFile object pairs
        contains an initialized AppendableFile object to append output to for each output mode
        (used in multi-mode to direct all output from several estimators to the same files)
    return_superdicts : boolean
        set to True if you want the kegg_metabolism_superdict and kofam_hits_superdict to be returned.
        we don't return these by default to save on memory
    return_subset_for_matrix_format : boolean
        set to True if you want subsets of the superdicts to be returned: one subdict for module completeness scores, one
        subdict for module presence/absence, and one subdict for KO hits. Used for matrix format output.
    all_modules_in_db : dictionary
        if this function is called from the KeggMetabolismEstimatorMulti class, this parameter contains the module information
        loaded from the MODULES.db in init_data_from_modules_db(). Otherwise, it is None and this function will have to call
        init_data_from_modules_db()
    all_kos_in_db : dictionary
        This is the same deal as the all_modules_in_db param - it should only have a value if passed from the
        KeggMetabolismEstimatorMulti class

    RETURNS
    =======
    kegg_metabolism_superdict : dictionary of dictionaries of dictionaries
        a complex data structure containing the metabolism estimation data for each genome/bin in the contigs DB
        (only returned if return_superdicts is True)
    kofam_hits_superdict : dictionary of dictionaries of dictionaries
        a complex data structure containing the KOfam hits information for each genome/bin in the contigs DB
        (only returned if return_superdicts is True)
    """
    kegg_metabolism_superdict = {}
    kofam_hits_superdict = {}

    self.kegg_modules_db = KeggModulesDatabase(self.kegg_modules_db_path, args=self.args, run=run_quiet, quiet=self.quiet)

    # decide where (if anywhere) the long-format output should be appended
    if skip_storing_data or self.write_dict_to_json:
        self.output_file_dict = {}
    else:
        if output_files_dictionary:
            # multi-mode: several estimators share the same output files
            self.output_file_dict = output_files_dictionary
        else:
            self.output_file_dict = self.setup_output_for_appending()

    if self.estimate_from_json:
        kegg_metabolism_superdict = self.estimate_metabolism_from_json_data()
    else:
        # we either get the modules DB info from the previous class, or we have to initialize it here
        if all_modules_in_db:
            self.all_modules_in_db = all_modules_in_db
            self.all_kos_in_db = all_kos_in_db
        else:
            self.init_data_from_modules_db()

        kofam_hits_info = self.init_hits_and_splits()
        self.init_paths_for_modules()

        if self.add_coverage:
            self.init_gene_coverage(gcids_for_kofam_hits={int(tpl[1]) for tpl in kofam_hits_info})

        # dispatch on input type: collection of bins, single genome, or metagenome
        if self.profile_db_path and self.collection_name and not self.metagenome_mode:
            kegg_metabolism_superdict, kofam_hits_superdict = self.estimate_for_bins_in_collection(kofam_hits_info)
        elif not self.collection_name and not self.metagenome_mode:
            self.genome_mode = True
            kegg_metabolism_superdict, kofam_hits_superdict = self.estimate_for_genome(kofam_hits_info)
        elif self.metagenome_mode:
            kegg_metabolism_superdict, kofam_hits_superdict = self.estimate_for_contigs_db_for_metagenome(kofam_hits_info)
        else:
            raise ConfigError("This class doesn't know how to deal with that yet :/")

    if self.write_dict_to_json:
        self.store_metabolism_superdict_as_json(kegg_metabolism_superdict, self.json_output_file_path + ".json")

    self.kegg_modules_db.disconnect()

    # in multi-mode the shared output files are closed by the caller, not here
    if not self.multi_mode:
        for mode, file_object in self.output_file_dict.items():
            file_object.close()

    # at this point, if we are generating long-format output, the data has already been appended to files
    # so we needn't keep it in memory. We don't return it, unless the programmer wants us to.
    if return_superdicts:
        return kegg_metabolism_superdict, kofam_hits_superdict
    # on the other hand, if we are generating matrix output, we need a limited subset of this data downstream
    # so in this case, we can extract and return smaller dictionaries for module completeness, module presence/absence,
    # and KO hits.
    elif return_subset_for_matrix_format:
        return self.generate_subsets_for_matrix_format(kegg_metabolism_superdict, kofam_hits_superdict)
    # otherwise we return nothing at all
    return
######### OUTPUT DICTIONARY FUNCTIONS #########
def generate_output_dict_for_modules(self, kegg_superdict, headers_to_include=None, only_complete_modules=False, exclude_zero_completeness=True):
    """Convert the metabolism superdict into a two-level dict of output rows keyed by unique integer IDs.

    The metabolism superdict is a three-to-four-level dictionary. The first three levels are:
    genomes/metagenomes/bins, modules, and module completion information. The module completion
    dictionary also contains dictionaries (eg, 'kofam_hits', 'genes_to_contigs'), which make up
    the fourth level.

    To distill this information into one line per row, we flatten the superdict on-the-fly so
    that each bin-module-path-kofam_hit-gene_caller_id combination is keyed by an arbitrary
    integer. There will be a lot of redundant information in the rows.

    PARAMETERS
    ==========
    kegg_superdict : dictionary of dictionaries of dictionaries
        The metabolism superdict containing KO hit and KEGG module information for each bin/genome/metagenome
    headers_to_include : list
        Which headers to include in the output dictionary (defaults to the 'modules' output mode headers)
    only_complete_modules : boolean
        If True, we only put information into the output dictionary for modules whose completeness is above the threshold
    exclude_zero_completeness : boolean
        If True, we don't put modules with a 0 completeness score in the dictionary

    RETURNS
    =======
    d : dictionary of dictionaries
        The output dictionary whose format is compatible for printing to a tab-delimited file
    """
    if not headers_to_include:
        headers_to_include = set(OUTPUT_MODES['modules']['headers'])
    else:
        headers_to_include = set(headers_to_include)

    # make sure all requested headers are available
    avail_headers = set(self.available_headers.keys())
    illegal_headers = headers_to_include.difference(avail_headers)
    if illegal_headers:
        raise ConfigError("Some unavailable headers were requested. These include: %s" % (", ".join(illegal_headers)))

    module_level_headers = set(["module_name", "module_class", "module_category", "module_subcategory", "module_definition",
                                "module_substrates", "module_products", "module_intermediates"])
    path_and_ko_level_headers = set(["path_id", "path", "path_completeness", "kofam_hit", "gene_caller_id", "contig"])
    keys_not_in_superdict = set([h for h in self.available_headers.keys() if self.available_headers[h]['cdict_key'] is None])
    remaining_headers = headers_to_include.difference(keys_not_in_superdict)
    remaining_headers = remaining_headers.difference(module_level_headers)
    remaining_headers = remaining_headers.difference(path_and_ko_level_headers)

    # convert to two-level dict where unique id keys for a dictionary of information for each bin/module pair
    d = {}
    if not self.modules_unique_id:
        self.modules_unique_id = 0
    # FIXME: the unique_id is problematic when generating multiple output files because it is shared between
    # all of them (we append to each file in turn), so within one file the unique_id column has 'jumps' in
    # value. This causes no real problems, but it looks weird.

    def fill_common_fields(entry, bin_name, mnum, metadata_dict, module_def,
                           module_substrate_list, module_product_list, module_intermediate_list, c_dict):
        """Populate one output row with the fields shared by both output layouts.

        Returns the ordered list of gene caller IDs (as strings) in the module, which the caller
        needs when adding per-module coverage columns. This helper exists because the original
        implementation duplicated all of this logic in both branches below.
        """
        # top-level keys and keys not in superdict
        if self.name_header in headers_to_include:
            entry[self.name_header] = bin_name
        if "db_name" in headers_to_include:
            entry["db_name"] = self.database_name
        if "kegg_module" in headers_to_include:
            entry["kegg_module"] = mnum

        # module-specific info from the modules DB
        for h in ["module_name", "module_class", "module_category", "module_subcategory"]:
            if h in headers_to_include:
                entry[h] = metadata_dict[h]
        if "module_definition" in headers_to_include:
            entry["module_definition"] = module_def
        for h, compounds in [("module_substrates", module_substrate_list),
                             ("module_products", module_product_list),
                             ("module_intermediates", module_intermediate_list)]:
            if h in headers_to_include:
                entry[h] = ",".join(compounds) if compounds else "None"

        # comma-separated lists of KOs and gene calls in module
        # (gene call list is kept in the same order as the KO list)
        gcids_in_mod = []
        kos_in_mod_list = []
        for ko in sorted(c_dict['kofam_hits'].keys()):
            for g in c_dict["kofam_hits"][ko]:
                gcids_in_mod.append(str(g))
                kos_in_mod_list.append(ko)
        if "kofam_hits_in_module" in headers_to_include:
            entry["kofam_hits_in_module"] = ",".join(kos_in_mod_list)
        if "gene_caller_ids_in_module" in headers_to_include:
            entry["gene_caller_ids_in_module"] = ",".join(gcids_in_mod)

        # everything else at c_dict level
        for h in remaining_headers:
            if h not in self.available_headers.keys():
                raise ConfigError("Requested header %s not available." % (h))
            h_cdict_key = self.available_headers[h]['cdict_key']
            if not h_cdict_key:
                raise ConfigError("We don't know the corresponding key in metabolism completeness dict for header %s." % (h))
            value = c_dict[h_cdict_key]
            if isinstance(value, list):
                value = ",".join(value) if value else "None"
            entry[h] = value

        return gcids_in_mod

    for bin_name, mod_dict in kegg_superdict.items():
        for mnum, c_dict in mod_dict.items():
            if mnum == "num_complete_modules":
                continue
            if anvio.DEBUG:
                self.run.info("Generating output for module", mnum)
            if only_complete_modules and not c_dict["complete"]:
                continue
            if exclude_zero_completeness and c_dict["percent_complete"] == 0:
                continue

            # fetch module info from db
            metadata_dict = self.get_module_metadata_dictionary(mnum)
            definition_list = self.all_modules_in_db[mnum]["DEFINITION"]
            if not isinstance(definition_list, list):
                definition_list = [definition_list]
            module_def = '"' + " ".join(definition_list) + '"'
            module_substrate_list, module_intermediate_list, module_product_list = self.kegg_modules_db.get_human_readable_compound_lists_for_module(mnum)

            if headers_to_include.intersection(path_and_ko_level_headers):
                # one row per bin-module-path-KO-gene_caller_id combination
                for p_index, p in enumerate(self.module_paths_dict[mnum]):
                    # some paths include protein complexes, so we must look for KOs within these complexes as well.
                    # NOTE: this set was previously rebuilt per-KO inside the loop below and used a non-raw regex
                    # pattern ('\+|\-'), which raised invalid escape sequence warnings; both fixed here.
                    kos_in_path = set()
                    for ko_or_complex in p:
                        kos_in_path.update(re.split(r'[+-]', ko_or_complex))

                    for ko in c_dict['kofam_hits']:
                        if ko not in kos_in_path:
                            continue
                        for gc_id in c_dict["kofam_hits"][ko]:
                            entry = d[self.modules_unique_id] = {}
                            # kofam hit specific info
                            if "kofam_hit" in headers_to_include:
                                entry["kofam_hit"] = ko
                            if "gene_caller_id" in headers_to_include:
                                entry["gene_caller_id"] = gc_id
                            if "contig" in headers_to_include:
                                entry["contig"] = c_dict["genes_to_contigs"][gc_id]
                            # add per-gene coverage if requested
                            if self.add_coverage:
                                for s in self.coverage_sample_list:
                                    entry[s + "_coverage"] = c_dict["genes_to_coverage"][s][gc_id]
                                    entry[s + "_detection"] = c_dict["genes_to_detection"][s][gc_id]
                            # path specific info (repeated for each hit)
                            if "path_id" in headers_to_include:
                                entry["path_id"] = p_index
                            if "path" in headers_to_include:
                                entry["path"] = ",".join(p)
                            if "path_completeness" in headers_to_include:
                                entry["path_completeness"] = c_dict["pathway_completeness"][p_index]

                            fill_common_fields(entry, bin_name, mnum, metadata_dict, module_def,
                                               module_substrate_list, module_product_list, module_intermediate_list, c_dict)
                            self.modules_unique_id += 1
            else:
                # one row per bin-module combination
                entry = d[self.modules_unique_id] = {}
                gcids_in_mod = fill_common_fields(entry, bin_name, mnum, metadata_dict, module_def,
                                                  module_substrate_list, module_product_list, module_intermediate_list, c_dict)
                # add per-module coverage (comma-separated per-gene values plus per-sample averages) if requested
                if self.add_coverage:
                    for s in self.coverage_sample_list:
                        gene_coverages_in_mod = [c_dict["genes_to_coverage"][s][int(gc)] for gc in gcids_in_mod]
                        gene_detection_in_mod = [c_dict["genes_to_detection"][s][int(gc)] for gc in gcids_in_mod]
                        entry[s + "_gene_coverages"] = ",".join([str(c) for c in gene_coverages_in_mod])
                        entry[s + "_gene_detection"] = ",".join([str(det) for det in gene_detection_in_mod])
                        entry[s + "_avg_coverage"] = c_dict["average_coverage_per_sample"][s]
                        entry[s + "_avg_detection"] = c_dict["average_detection_per_sample"][s]
                self.modules_unique_id += 1
    return d
def generate_output_dict_for_kofams(self, ko_superdict, headers_to_include=None):
    """Flatten the kofam hits superdict into a two-level dict of output rows.

    The kofam hits superdict is a three-to-four-level dictionary: the first three levels are
    genomes/metagenomes/bins, KOs, and KO hit information (some of whose values are themselves
    dictionaries, eg 'genes_to_contigs'). Each bin-KO-gene_caller_id combination becomes one
    row of output, keyed by an arbitrary integer.

    PARAMETERS
    ==========
    ko_superdict : dictionary of dictionaries of dictionaries
        The metabolism superdict containing all KO hits in each bin/genome/metagenome
    headers_to_include : list
        Which headers to include in the output dictionary (defaults to the 'kofam_hits' output
        mode header set)

    RETURNS
    =======
    d : dictionary of dictionaries
        The output dictionary whose format is compatible for printing to a tab-delimited file
    """
    headers_to_include = set(OUTPUT_MODES["kofam_hits"]["headers"] if not headers_to_include else headers_to_include)

    d = {}
    if not self.ko_unique_id:
        self.ko_unique_id = 0
    # NOTE: the unique_id shares the same multi-output-file quirk described in the FIXME of
    # generate_output_dict_for_modules() -- harmless, but the column values can 'jump'.

    for bin_name, ko_dict in ko_superdict.items():
        for knum, k_dict in ko_dict.items():
            if anvio.DEBUG:
                self.run.info("Generating output for KO", knum)

            metadata_dict = self.get_ko_metadata_dictionary(knum)

            for gc_id in k_dict["gene_caller_ids"]:
                entry = {}
                if self.name_header in headers_to_include:
                    entry[self.name_header] = bin_name
                if "db_name" in headers_to_include:
                    entry["db_name"] = self.database_name
                if "ko" in headers_to_include:
                    entry["ko"] = knum
                if "gene_caller_id" in headers_to_include:
                    entry["gene_caller_id"] = gc_id
                if "contig" in headers_to_include:
                    entry["contig"] = k_dict["genes_to_contigs"][gc_id]
                if "modules_with_ko" in headers_to_include:
                    entry["modules_with_ko"] = metadata_dict["modules_with_ko"]
                if "ko_definition" in headers_to_include:
                    entry["ko_definition"] = metadata_dict["ko_definition"]

                if self.add_coverage:
                    if not self.profile_db:
                        raise ConfigError("We're sorry that you came all this way, but it seems the profile database has "
                                          "not been initialized, therefore we cannot add coverage values to your output. "
                                          "This is likely a bug or developer mistake. It is a sad day :(")
                    for s in self.coverage_sample_list:
                        per_sample_stats = self.profile_db.gene_level_coverage_stats_dict[gc_id][s]
                        entry[s + "_coverage"] = per_sample_stats['mean_coverage']
                        entry[s + "_detection"] = per_sample_stats['detection']

                d[self.ko_unique_id] = entry
                self.ko_unique_id += 1
    return d
def generate_subsets_for_matrix_format(self, module_superdict, ko_hits_superdict):
    """Extract and return the data subsets needed for matrix-formatted output.

    The subsets we need are: module completeness scores, module presence/absence, and KO hit
    frequency. The first two go into one dictionary (keyed by bin, then module) and the last
    into another (keyed by bin, then KO); both are returned.
    """
    mod_completeness_presence_subdict = {
        bin_name: {
            mnum: {'percent_complete': c_dict['percent_complete'], 'complete': c_dict['complete']}
            for mnum, c_dict in mod_dict.items()
            if mnum != "num_complete_modules"
        }
        for bin_name, mod_dict in module_superdict.items()
    }

    ko_hits_subdict = {
        bin_name: {
            # number of hits to this KO in the bin
            knum: {'num_hits': len(k_dict['gene_caller_ids'])}
            for knum, k_dict in ko_dict.items()
        }
        for bin_name, ko_dict in ko_hits_superdict.items()
    }

    return mod_completeness_presence_subdict, ko_hits_subdict
######### OUTPUT GENERATION FUNCTIONS #########
def append_kegg_metabolism_superdicts(self, module_superdict_for_list_of_splits, ko_superdict_for_list_of_splits):
    """Append the metabolism superdicts for one genome/bin/metagenome-contig to the output files.

    For each output mode, the corresponding superdict is flattened and appended to the
    already-initialized AppendableFile object in self.output_file_dict. This is an alternative
    to store_kegg_metabolism_superdicts(), which prints the entire metabolism superdicts for
    all genomes/bins/contigs in metagenome at once.
    """
    for mode, file_obj in self.output_file_dict.items():
        header_list = self.available_modes[mode]["headers"]
        if not header_list:
            raise ConfigError("Oh, dear. You've come all this way only to realize that we don't know which headers to use "
                              "for the %s output mode. Something is terribly wrong, and it is probably a developer's fault. :("
                              % (mode))

        source_dict_type = self.available_modes[mode]["data_dict"]
        if source_dict_type == 'modules':
            output_dict = self.generate_output_dict_for_modules(module_superdict_for_list_of_splits,
                                                                headers_to_include=header_list,
                                                                only_complete_modules=self.only_complete,
                                                                exclude_zero_completeness=self.exclude_zero_modules)
        elif source_dict_type == 'kofams':
            output_dict = self.generate_output_dict_for_kofams(ko_superdict_for_list_of_splits, headers_to_include=header_list)
        else:
            raise ConfigError(f"Uh oh. You've requested to generate output from the {self.available_modes[mode]['data_dict']} "
                              "data dictionary, but we don't know about that one.")

        file_obj.append(output_dict, key_header="unique_id", headers=header_list)

        if anvio.DEBUG:
            self.run.warning(f"Appended metabolism dictionary to {file_obj.path}",
                             header='DEBUG OUTPUT', lc='yellow')
def store_kegg_metabolism_superdicts(self, module_superdict, ko_superdict):
    """Write the metabolism superdicts (in their entirety) to tab-delimited files.

    The user can request a variety of output 'modes'. For each requested mode we look up the
    output format details stored in self.available_modes, use them to generate a dictionary of
    dictionaries, and store that dictionary as a tab-delimited file.

    This is an alternative to append_kegg_metabolism_superdicts(), which adds to the output
    files one genome/bin/contig in metagenome at a time for better memory management.
    """
    for mode in self.output_modes:
        output_path = self.output_file_prefix + "_" + self.available_modes[mode]["output_suffix"]
        header_list = self.available_modes[mode]["headers"]
        if not header_list:
            raise ConfigError("Oh, dear. You've come all this way only to realize that we don't know which headers to use "
                              "for the %s output mode. Something is terribly wrong, and it is probably a developer's fault. :("
                              % (mode))

        source_dict_type = self.available_modes[mode]["data_dict"]
        if source_dict_type == 'modules':
            output_dict = self.generate_output_dict_for_modules(module_superdict,
                                                                headers_to_include=header_list,
                                                                only_complete_modules=self.only_complete,
                                                                exclude_zero_completeness=self.exclude_zero_modules)
        elif source_dict_type == 'kofams':
            output_dict = self.generate_output_dict_for_kofams(ko_superdict, headers_to_include=header_list)
        else:
            raise ConfigError(f"Uh oh. You've requested to generate output from the {self.available_modes[mode]['data_dict']} "
                              "data dictionary, but we don't know about that one.")

        utils.store_dict_as_TAB_delimited_file(output_dict, output_path, key_header="unique_id", headers=header_list)
        self.run.info("%s output file" % mode, output_path, nl_before=1)
def store_metabolism_superdict_as_json(self, kegg_superdict, file_path):
    """Write the metabolism superdict into one JSON file.

    PARAMETERS
    ==========
    kegg_superdict : dict
        the metabolism superdict to serialize; may contain sets, which are converted to lists
    file_path : str
        path to the output JSON file (must be writable)
    """
    def set_to_list(obj):
        # json cannot serialize sets, so convert them to lists.
        # NOTE: for any other non-serializable type this returns None (serialized as `null`)
        # instead of raising TypeError; kept as-is for backward compatibility.
        if isinstance(obj, set):
            return list(obj)

    filesnpaths.is_output_file_writable(file_path)

    # use a context manager so the file handle is closed (and flushed) promptly even on error;
    # the original `open(...).write(...)` left the handle open until garbage collection
    with open(file_path, 'w') as output_file:
        output_file.write(json.dumps(kegg_superdict, indent=4, default=set_to_list))

    self.run.info("JSON Output", file_path)
######### INTERACTIVE VISUALIZATION FUNCTIONS #########
def get_metabolism_data_for_visualization(self):
    """Return a dictionary of metabolism data for visualization on the interactive interface.

    This function should be called from the interactive interface class to obtain metabolism
    data. It runs metabolism estimation on-the-fly, then keeps only certain keys from each
    module's completion dictionary before returning the result to the interface.
    """
    # add keys to this list to include more of the completion data in the visualization dictionary
    keys_to_keep = ['percent_complete']

    metabolism_dict, _ = self.estimate_metabolism(skip_storing_data=True, return_superdicts=True)

    data_for_visualization = {}
    for bin_name, mod_dict in metabolism_dict.items():
        data_for_visualization[bin_name] = {
            mnum: {key: value for key, value in c_dict.items() if key in keys_to_keep}
            for mnum, c_dict in mod_dict.items()
            if mnum != "num_complete_modules"
        }
    return data_for_visualization
class KeggMetabolismEstimatorMulti(KeggContext, KeggEstimatorArgs):
"""Class for reconstructing/estimating metabolism for multiple contigs DBs at a time.
Iterates through the provided DBs and estimates metabolism for each one using the KeggMetabolismEstimator class.
==========
args: Namespace object
All the arguments supplied by user to anvi-estimate-metabolism
"""
def __init__(self, args, run=run, progress=progress):
    """Initialize the multi-contigs-DB metabolism estimator.

    Performs sanity checks on the input/output arguments, initializes the shared KEGG
    context, and sets the name header for output according to the input file type.

    PARAMETERS
    ==========
    args : Namespace object
        All the arguments supplied by user to anvi-estimate-metabolism
    run : terminal run object (module-level default)
    progress : terminal progress object (module-level default)
    """
    self.args = args
    self.run = run
    self.progress = progress

    # this class works on multi-DB input text files, so a single contigs DB is a conflicting input
    if args.contigs_db:
        raise ConfigError("You appear to have provided both an input text file and a contigs database, and "
                          "now anvi'o is not quite sure which one to work on. Please choose only one. :) ")

    # init the base class for access to shared paths and such
    KeggContext.__init__(self, args)
    KeggEstimatorArgs.__init__(self, self.args)

    if anvio.DEBUG:
        self.run.info("Completeness threshold: multi estimator", self.module_completion_threshold)
        self.run.info("Output Modes", ", ".join(self.output_modes))
        self.run.info("Matrix format", self.matrix_format)
        self.run.info("Matrix will include metadata", self.matrix_include_metadata)
        if self.module_specific_matrices:
            self.run.info("Matrices for specific modules: ", ", ".join(self.module_specific_matrices))

    # populated later by init_metagenomes() or init_external_internal_genomes()
    self.databases = None

    # input sanity checks: exactly one of the three multi-DB input files may be provided
    if (self.external_genomes_file and (self.internal_genomes_file or self.metagenomes_file)) \
        or (self.internal_genomes_file and (self.external_genomes_file or self.metagenomes_file)) \
        or (self.metagenomes_file and (self.external_genomes_file or self.internal_genomes_file)):
        raise ConfigError("Multiple file inputs were provided. Please choose only one at a time to make "
                          "things easier on everybody.")

    # JSON input/output is only supported when working on a single contigs DB
    if args.estimate_from_json or args.store_json_without_estimation or args.get_raw_data_as_json:
        raise ConfigError("You've provided some JSON parameters. We are sorry to say that these parameters don't "
                          "work for input files with multiple contigs DBs. :( ")

    # output sanity checks: long-format and matrix output are mutually exclusive, and
    # the matrix-only options require --matrix-format
    if self.matrix_format and args.kegg_output_modes:
        raise ConfigError("Please request EITHER long-format output modes OR matrix format. When you ask for both "
                          "like this, anvi'o is confused. :) ")
    if self.matrix_include_metadata and not self.matrix_format:
        raise ConfigError("The option --include-metadata is only available when you also use the flag --matrix-format "
                          "to get matrix output. :) Plz try again.")
    if self.module_specific_matrices and not self.matrix_format:
        raise ConfigError("The option --module-specific-matrices is only available when you also use the flag --matrix-format "
                          "to get matrix output. :) Plz try again.")
    if self.matrix_format:
        # refuse to clobber matrix output files that already exist under this prefix
        for stat in ['completeness', 'presence', 'ko_hits']:
            matrix_output_file = '%s-%s-MATRIX.txt' % (self.output_file_prefix, stat)
            if filesnpaths.is_file_exists(matrix_output_file, dont_raise=True):
                raise ConfigError(f"Uh oh... there is already matrix output (such as {matrix_output_file}) "
                                  "using this file prefix in the current directory. Please either remove these files "
                                  "or give us a different prefix (with -O).")

    # set name header according to the type of input file
    if self.metagenomes_file:
        self.name_header = 'contig_name'
    elif self.external_genomes_file:
        self.name_header = 'genome_name'
    elif self.internal_genomes_file:
        self.name_header = 'bin_name'

    if not self.quiet:
        self.run.warning("Anvi'o will reconstruct metabolism for modules in the KEGG MODULE database, as described in "
                         "Kanehisa and Goto et al (doi:10.1093/nar/gkr988). When you publish your findings, "
                         "please do not forget to properly credit this work.", lc='green', header="CITATION")
def list_output_modes(self):
    """Print the available output modes for the metabolism estimation script.

    NOTE: previously the header was printed via the module-level `run` object while the mode
    descriptions used `self.run`; both now go through `self.run` consistently so the output
    respects whatever run object this instance was constructed with.
    """
    self.run.warning(None, header="AVAILABLE OUTPUT MODES", lc="green")
    for mode, mode_meta in self.available_modes.items():
        self.run.info(mode, mode_meta['description'])
def update_available_headers_for_multi(self):
    """Update the available headers dictionary to reflect all possibilities in the multiple DB case."""
    self.available_headers["db_name"] = {
        'cdict_key': None,
        'mode_type': 'all',
        'description': "Name of contigs DB. Always included in multi-mode output (so no need to specify on the command line)"
    }

    # register the name header matching the current input type (genome, bin, or metagenome contig)
    name_header_descriptions = {
        'genome_name': "Name of genome in which we find KOfam hits and/or KEGG modules",
        'bin_name': "Name of bin in which we find KOfam hits and/or KEGG modules",
        'contig_name': "Name of contig (in a metagenome) in which we find KOfam hits and/or KEGG modules",
    }
    if self.name_header in name_header_descriptions:
        self.available_headers[self.name_header] = {
            'cdict_key': None,
            'mode_type': 'all',
            'description': name_header_descriptions[self.name_header]
        }

    # here we make sure db_name is always included in the multi-mode output
    for mode in self.available_modes:
        mode_headers = self.available_modes[mode]["headers"]
        if mode_headers and "db_name" not in mode_headers:
            mode_headers.insert(1, "db_name")
def list_output_headers(self):
    """Print the available output headers for the 'custom' output mode.

    NOTE: previously the header banner was printed via the module-level `run` object while the
    per-header lines used `self.run`; both now go through `self.run` consistently so the output
    respects whatever run object this instance was constructed with.
    """
    # include all possibilities for genome/bin/metagenome name in the output since we don't know
    # which cases we will find in the metagenomes file
    self.update_available_headers_for_multi()

    self.run.warning(None, header="AVAILABLE OUTPUT HEADERS", lc="green")
    for header, header_meta in self.available_headers.items():
        desc_str = header_meta['description']
        type_str = header_meta['mode_type']
        mode_str = "output modes" if header_meta['mode_type'] == 'all' else "output mode"
        self.run.info(header, f"{desc_str} [{type_str} {mode_str}]")
######### DRIVER ESTIMATION FUNCTIONS -- MULTI #########
def init_metagenomes(self):
    """Parse the input metagenomes file and adjust class attributes as needed."""
    descriptions = MetagenomeDescriptions(self.args, run=self.run, progress=self.progress, enforce_single_profiles=False)
    descriptions.load_metagenome_descriptions()

    # estimation from a metagenomes file always happens in metagenome mode
    self.metagenome_mode = True

    self.databases = copy.deepcopy(descriptions.metagenomes)
    self.database_names = copy.deepcopy(descriptions.metagenome_names)
def init_external_internal_genomes(self):
    """Parse the input internal/external genomes file and adjust class attributes as needed.

    Raises a ConfigError if any of the input genomes lacks KOfam function annotations.
    """
    g = GenomeDescriptions(self.args, run=self.run, progress=progress_quiet)
    g.load_genomes_descriptions(skip_functions=True, init=False)

    bad_genomes = [v['name'] for v in g.genomes.values() if not v['gene_function_sources'] or 'KOfam' not in v['gene_function_sources']]
    if len(bad_genomes):
        bad_genomes_txt = [f"'{bad_genome}'" for bad_genome in bad_genomes]
        # NOTE: fixed a formatting defect in this message -- the backtick around the program
        # name `anvi-estimate-metabolism` was previously never closed
        raise ConfigError(f"Bad news :/ It seems {len(bad_genomes)} of your {P('genome', len(g.genomes))} "
                          f"{P('are', len(bad_genomes), alt='is')} lacking any function annotations for "
                          f"`KOfam`. This means you either need to run the program `anvi-run-kegg-kofams` "
                          f"on them, or remove them from your internal and/or external genomes files "
                          f"before re-running `anvi-estimate-metabolism`. Here is the list of offenders: "
                          f"{', '.join(bad_genomes_txt)}.")

    # estimation from genomes never happens in metagenome mode
    if self.metagenome_mode:
        self.metagenome_mode = False

    self.databases = copy.deepcopy(g.genomes)
    if self.external_genomes_file:
        self.database_names = copy.deepcopy(g.external_genome_names)
    else:
        self.database_names = copy.deepcopy(g.internal_genome_names)
def get_args_for_single_estimator(self, db_name):
    """Return args formatted for an instance of KeggMetabolismEstimator that will work on one contigs DB. Very tricksy.

    PARAMETERS
    ==========
    db_name : string
        the name of the contigs DB that the estimator will work on

    RETURNS
    =======
    args : Namespace object
        a set of arguments modified for use by a single estimator
    """
    args = KeggEstimatorArgs(self.args, format_args_for_single_estimator=True)

    if db_name not in self.databases:
        raise ConfigError("We cannot initialize a single estimator for the contigs DB '%s' because it is not in the metagenomes dictionary."
                          % (db_name))

    db_info = self.databases[db_name]
    args.contigs_db = db_info['contigs_db_path']
    # profile/collection/bin info is only present for some input types, so copy what exists
    for source_key, arg_name in (('profile_db_path', 'profile_db'),
                                 ('collection_id', 'collection_name'),
                                 ('bin_id', 'bin_id')):
        if source_key in db_info:
            setattr(args, arg_name, db_info[source_key])

    args.metagenome_mode = self.metagenome_mode
    args.quiet = True
    args.database_name = db_name
    args.multi_mode = True
    args.include_metadata = self.matrix_include_metadata

    self.update_available_headers_for_multi()

    if anvio.DEBUG:
        self.run.info("Completeness threshold: single estimator", args.module_completion_threshold)
        self.run.info("Database name: single estimator", args.database_name)
        self.run.info("Matrix metadata: single estimator", args.matrix_include_metadata)

    return args
def get_metabolism_superdict_multi(self):
    """Call metabolism estimation on each individual contigs DB.

    When matrix-format output is requested, per-DB results are aggregated into one dictionary
    for modules and one for KOs and returned. Otherwise, each DB's results are appended to the
    long-format output files as we go, and two empty dictionaries are returned.
    """
    metabolism_super_dict = {}
    ko_hits_super_dict = {}

    total_num_metagenomes = len(self.database_names)
    self.progress.new("Estimating metabolism for contigs DBs", progress_total_items=total_num_metagenomes)

    appending_to_files = not self.matrix_format
    if appending_to_files:
        files_dict = self.setup_output_for_appending()

    for db_name in self.database_names:
        single_args = self.get_args_for_single_estimator(db_name)
        self.progress.update("[%d of %d] %s" % (self.progress.progress_current_item + 1, total_num_metagenomes, db_name))

        estimator = KeggMetabolismEstimator(single_args, progress=progress_quiet, run=run_quiet)
        if appending_to_files:
            estimator.estimate_metabolism(output_files_dictionary=files_dict)
        else:
            metabolism_super_dict[db_name], ko_hits_super_dict[db_name] = \
                estimator.estimate_metabolism(skip_storing_data=True,
                                              return_subset_for_matrix_format=True,
                                              all_modules_in_db=self.all_modules_in_db,
                                              all_kos_in_db=self.all_kos_in_db)

        self.progress.increment()
        self.progress.reset()

    self.progress.end()

    if appending_to_files:
        for mode, file_obj in files_dict.items():
            file_obj.close()

    return metabolism_super_dict, ko_hits_super_dict
def estimate_metabolism(self):
"""A driver function to run metabolism estimation on each provided contigs DB."""
if not self.databases:
self.progress.new("Initializing contigs DBs")
self.progress.update("...")
if self.metagenomes_file:
self.progress.reset()
self.run.info("Metagenomes file", self.metagenomes_file)
self.init_metagenomes()
elif self.external_genomes_file:
self.progress.reset()
self.run.info("External genomes file", self.external_genomes_file)
self.init_external_internal_genomes()
elif self.internal_genomes_file:
self.progress.reset()
self.run.info("Internal genomes file", self.internal_genomes_file)
self.init_external_internal_genomes()
else:
self.progress.reset()
raise ConfigError("Whooops. We are not sure how you got to this point without an input file, "
"but you did, and now we have to crash becasue we cannot estimate metabolism "
"without inputs. :/")
self.progress.end()
self.run.info("Num Contigs DBs in file", len(self.database_names))
self.run.info('Metagenome Mode', self.metagenome_mode)
self.kegg_modules_db = KeggModulesDatabase(self.kegg_modules_db_path, args=self.args)
self.init_data_from_modules_db()
# these will be empty dictionaries unless matrix format
kegg_metabolism_superdict_multi, ko_hits_superdict_multi = self.get_metabolism_superdict_multi()
if self.matrix_format:
self.store_metabolism_superdict_multi_matrix_format(kegg_metabolism_superdict_multi, ko_hits_superdict_multi)
self.kegg_modules_db.disconnect()
######### OUTPUT GENERATION FUNCTIONS -- MULTI #########
    def write_stat_to_matrix(self, stat_name, stat_header, stat_key, stat_dict, item_list, stat_metadata_headers,
                             write_rows_with_all_zeros=False, comment_dictionary=None):
        """A generic function to write a statistic to a matrix file.

        Accesses the provided stat_dict and writes the statistic to a tab-delimited matrix file.
        Should work for module completeness, module presence/absence, and ko hits.

        PARAMETERS
        ==========
        stat_name : str
            Which statistic we are working on, ie "completeness". Used in output file name.
        stat_header : str
            The header for the items reporting this statistic in the matrix output, ie 'module' or 'ko'.
        stat_key : str
            The key used to access this statistic in the stat_dict
        stat_dict : dictionary
            A multi-level dictionary (a subset of metabolism estimation output) in which the statistic and
            relevant metadata can be found.
        item_list : list of str
            The row (item) names of the matrix. Ideally would be sorted for consistency in the output.
        stat_metadata_headers : list of str
            A list of the headers for metadata columns (which must also be keys for this metadata in the stat_dict)
            that will be included in the matrix output if self.matrix_include_metadata is True
        write_rows_with_all_zeros : boolean
            If true, rows with all zeros are included in the matrix. Otherwise we leave those out.
        comment_dictionary : dictionary
            A dictionary in which each key is the INDEX of an item (from item_list) and the corresponding value is a
            (str) comment that should be printed before that item's line. When we reach this item in the list, the
            comment str will be printed (after a '#' character) before printing the item's line. Trailing "\n" should not be in the comment
            str but if this needs to be a multi-line comment internal "\n# " strings should separate each line.
        """

        output_file_path = '%s-%s-MATRIX.txt' % (self.output_file_prefix, stat_name)

        sample_list = list(stat_dict.keys())
        # here we figure out if there is more than one bin to work with in any given sample;
        # samples with several bins get one column per bin, named 'sample_bin'
        sample_columns = []
        sample_bin_list = {}
        for s in sample_list:
            bins = list(stat_dict[s].keys())
            bins.sort()
            sample_bin_list[s] = bins
            if len(bins) > 1:
                for b in bins:
                    sample_columns.append(s + "_" + b)
            else:
                sample_columns.append(s)

        if self.matrix_include_metadata:
            cols = [stat_header] + stat_metadata_headers + sample_columns
        else:
            cols = [stat_header] + sample_columns

        # we could be fancier with this, but we are not that cool
        with open(output_file_path, 'w') as output:
            output.write('\t'.join(cols) + '\n')
            cur_index = 0
            for m in item_list:
                # write comment, if necessary
                if comment_dictionary and cur_index in comment_dictionary:
                    comment_line = "# " + comment_dictionary[cur_index] + "\n"
                    output.write(comment_line)

                line = [m]

                # metadata columns come right after the item name, in the order of stat_metadata_headers
                if self.matrix_include_metadata:
                    if stat_header == 'module':
                        metadata_dict = self.get_module_metadata_dictionary(m)
                    elif stat_header == 'KO':
                        metadata_dict = self.get_ko_metadata_dictionary(m)
                    else:
                        raise ConfigError(f"write_stat_to_matrix() speaking. I need to access metadata for {stat_header} "
                                          "statistics but there is no function defined for this.")

                    for h in stat_metadata_headers:
                        if h not in metadata_dict.keys():
                            raise ConfigError(f"We couldn't find the key '{h}' in the metadata dictionary for {stat_header}s. "
                                              "Please check that your metadata accessor function obtains this data.")
                        line.append(metadata_dict[h])

                for s in sample_list:
                    bins = sample_bin_list[s]

                    for b in bins:
                        # if its not in the dict, we know it is zero
                        if m not in stat_dict[s][b].keys():
                            value = 0
                        else:
                            value = stat_dict[s][b][m][stat_key]

                        # handle presence/absence values as integers
                        if isinstance(value, bool):
                            line.append(int(value))
                        else:
                            line.append(value)

                if not write_rows_with_all_zeros:
                    only_numbers = [n for n in line if isinstance(n, (int, float))]
                    if sum(only_numbers) == 0:
                        continue

                output.write('\t'.join([str(f) for f in line]) + '\n')
                # NOTE(review): cur_index only advances for rows that get written, yet comment_dictionary is
                # keyed by item_list index. These stay in sync only when no rows are skipped, which appears to
                # hold for current callers (comments are only passed with write_rows_with_all_zeros=True) — confirm
                cur_index += 1

        self.run.info('Output matrix for "%s"' % stat_name, output_file_path)
    def store_metabolism_superdict_multi_matrix_format(self, module_superdict_multi, ko_superdict_multi):
        """Stores the multi-contigs DB metabolism data in several matrices.

        Contigs DBs are arranged in columns and KEGG modules/KOs are arranged in rows.
        Each module statistic (ie, completeness, presence/absence) will be in a different file.

        The parameters to this function are superdictionaries where each top-level entry is one
        of the multi-mode sample inputs (ie a metagenome, internal, or external genome) and its
        corresponding value comes from running estimate_metabolism() with return_subset_for_matrix_format=True.

        That is:
         module % completeness = module_superdict_multi[sample][bin][mnum]['percent_complete']
         module is complete = module_superdict_multi[sample][bin][mnum]['complete']
         # hits for KO = ko_superdict_multi[sample][bin][knum]['num_hits']

        If self.matrix_include_metadata was True, these superdicts will also include relevant metadata.
        """

        include_zeros = not self.exclude_zero_modules

        # module stats that each will be put in separate matrix file
        # key is the stat, value is the corresponding header in superdict
        module_matrix_stats = {"completeness" : "percent_complete", "presence" : "complete"}
        # all samples/bins have the same modules in the dict so we can pull the item list from the first pair
        first_sample = list(module_superdict_multi.keys())[0]
        first_bin = list(module_superdict_multi[first_sample].keys())[0]
        module_list = list(module_superdict_multi[first_sample][first_bin].keys())
        module_list.sort()

        for stat, key in module_matrix_stats.items():
            self.write_stat_to_matrix(stat_name=stat, stat_header='module', stat_key=key, stat_dict=module_superdict_multi, \
                                      item_list=module_list, stat_metadata_headers=MODULE_METADATA_HEADERS, \
                                      write_rows_with_all_zeros=include_zeros)

        # now we make a KO hit count matrix
        # note that the row list here comes from self.ko_dict (ie, all KOs), not from the superdict
        self.setup_ko_dict()
        ko_list = list(self.ko_dict.keys())
        ko_list.sort()
        self.write_stat_to_matrix(stat_name='ko_hits', stat_header='KO', stat_key='num_hits', stat_dict=ko_superdict_multi, \
                                  item_list=ko_list, stat_metadata_headers=KO_METADATA_HEADERS, \
                                  write_rows_with_all_zeros=include_zeros)

        # if necessary, make module specific KO matrices
        if self.module_specific_matrices:
            skipped_mods = []
            for mod in self.module_specific_matrices:
                if mod not in module_list:
                    skipped_mods.append(mod)
                    continue

                kos_in_mod = self.kegg_modules_db.get_kos_from_module_definition(mod)
                mod_big_steps = self.kegg_modules_db.get_top_level_steps_in_module_definition(mod)

                if not self.no_comments:
                    # determine where to place comments containing module steps
                    step_comments = {}
                    lines_with_comment = []
                    for s in mod_big_steps:
                        # what is the first KO in this step?
                        first_k = s.find("K")

                        # we skip making comments on steps without KOs like '--'
                        if first_k < 0:
                            continue

                        # figure out where this KO is in the list
                        # (the slice takes 6 characters, the length of a KO identifier like K00001)
                        first_ko = s[first_k:first_k+6]
                        first_ko_indices = [i for i, x in enumerate(kos_in_mod) if x == first_ko]
                        if not first_ko_indices:
                            raise ConfigError(f"Something went wrong while writing a comment for step '{s}' in the "
                                              f"matrix for {mod}. We couldn't find the first KO, {first_ko}, in the "
                                              "KO list for this module.")

                        # where should we put the step comment?
                        # if the same KO starts multiple steps, advance to its next occurrence so that
                        # each comment lands on a line that does not already carry one
                        idx = first_ko_indices[0]
                        if len(first_ko_indices) > 1:
                            next_index = 0
                            while idx in lines_with_comment:
                                next_index += 1
                                idx = first_ko_indices[next_index]

                        step_comments[idx] = s
                        lines_with_comment.append(idx)
                else:
                    step_comments = None

                stat = f"{mod}_ko_hits"
                self.write_stat_to_matrix(stat_name=stat, stat_header="KO", stat_key='num_hits', stat_dict=ko_superdict_multi, \
                                          item_list=kos_in_mod, stat_metadata_headers=KO_METADATA_HEADERS, \
                                          write_rows_with_all_zeros=True, comment_dictionary=step_comments)

            if skipped_mods:
                skipped_list = ", ".join(skipped_mods)
                self.run.warning(f"We couldn't recognize the following module(s): {skipped_list}. So we didn't generate "
                                 "output matrices for them. Maybe you made a typo? Or put an extra comma in somewhere?")
class KeggModulesDatabase(KeggContext):
"""To create or access a Modules DB.
This DB should be created in the Kegg Data folder during KEGG setup, and will be populated with information from the
Kegg Module files.
If you want to load an existing database from the python terminal, all you need is the path to the database and an
empty args object:
```
>>> import argparse
>>> from anvio import kegg
>>> path_to_db = "YOUR/PATH/HERE/MODULES.db"
>>> args = argparse.Namespace()
>>> kegg.KeggModulesDatabase(path_to_db, args)
```
"""
    def __init__(self, db_path, args, module_dictionary=None, pathway_dictionary=None, run=run, progress=progress, quiet=False):
        """Sets up access to the Modules DB at db_path, loading it if it already exists on disk.

        PARAMETERS
        ==========
        db_path : str
            path to the Modules database
        args : namespace
            arguments passed on to the KeggContext base class
        module_dictionary : dictionary
            module data from setup; required when the database does not exist on disk yet
        pathway_dictionary : dictionary
            pathway data; currently unused (see the commented-out sanity check below)
        run : object for printing run info
        progress : object for progress reporting
        quiet : boolean
            whether to suppress run info output
        """
        self.db = None
        self.db_path = db_path
        self.module_dict = module_dictionary
        self.pathway_dict = pathway_dictionary
        self.run = run
        self.progress = progress
        self.quiet = quiet

        if anvio.DEBUG:
            self.run.info("Modules DB quiet param", self.quiet)

        # init the base class for access to shared paths and such
        KeggContext.__init__(self, args)

        # modules table info
        self.module_table_name = t.module_table_name
        self.module_table_structure = t.module_table_structure
        self.module_table_types = t.module_table_types

        # pathway maps table info
        self.pathway_table_name = t.pathway_table_name
        self.pathway_table_structure = t.pathway_table_structure
        self.pathway_table_types = t.pathway_table_types

        if os.path.exists(self.db_path):
            # an existing database: verify it is a modules db and load it
            utils.is_kegg_modules_db(self.db_path)
            self.db = db.DB(self.db_path, anvio.__kegg_modules_version__, new_database=False)

            if not self.quiet:
                self.run.info('Modules database', 'An existing database, %s, has been loaded.' % self.db_path, quiet=self.quiet)
                self.run.info('Kegg Modules', '%d found' % self.db.get_meta_value('num_modules'), quiet=self.quiet)
        else:
            # if self.module_dict is None, then we tried to initialize the DB outside of setup
            if not self.module_dict:
                raise ConfigError("ERROR - a new KeggModulesDatabase() cannot be initialized without providing a modules dictionary. This "
                                  "usually happens when you try to access a Modules DB before one has been setup. Running `anvi-setup-kegg-kofams` may fix this.")

            # This is commented out because we are not yet using pathways. But it should be uncommented when we get to the point of using them :)
            # if not self.pathway_dict:
            #     raise ConfigError("ERROR - a new KeggModulesDatabase() cannot be initialized without providing a pathway dictionary. This "
            #                       "usually happens when you try to access a Modules DB before one has been setup. Running `anvi-setup-kegg-kofams` may fix this.")
######### DB GENERATION FUNCTIONS #########
def touch(self):
"""Creates an empty Modules database on disk, and sets `self.db` to access to it.
At some point self.db.disconnect() must be called to complete the creation of the new db.
"""
# sanity check to avoid overriding previous Modules DB
# this will probably never happen as long as this function is called through the setup script, but we check just in case
if os.path.exists(self.db_path):
raise ConfigError("A modules database at %s already exists. Please use the --reset flag when you restart the setup "
"if you really want to get rid of this one and make a new one." % (self.db_path))
self.db = db.DB(self.db_path, anvio.__kegg_modules_version__, new_database=True)
self.db.create_table(self.module_table_name, self.module_table_structure, self.module_table_types)
def data_vals_sanity_check(self, data_vals, current_data_name, current_module_num):
"""This function checks if the data values were correctly parsed from a line in a KEGG module file.
This is a sadly necessary step because some KEGG module file lines are problematic and don't follow the right format (ie, 2+ spaces
between different fields). So here we check if the values that we parsed look like they are the right format, without any extra bits.
Each data name (ORTHOLOGY, DEFINITION, etc) has a different format to check for.
Note that we don't check the following data name types: NAME, CLASS, REFERENCE
WARNING: The error checking and correction is by no means perfect and may well fail when KEGG is next updated. :(
PARAMETERS
==========
data_vals : str
the data values field (split from the kegg module line)
current_data_name : str
which data name we are working on. It should never be None because we should have already figured this out by parsing the line.
current_module_num : str
which module we are working on. We need this to keep track of which modules throw parsing errors.
RETURNS
=======
is_ok : bool
whether the values look correctly formatted or not
"""
is_ok = True
is_corrected = False
corrected_vals = None
corrected_def = None
if not current_data_name:
raise ConfigError("data_vals_sanity_check() cannot be performed when the current data name is None. Something was not right "
"when parsing the KEGG module line.")
elif current_data_name == "ENTRY":
# example format: M00175
if data_vals[0] != 'M' or len(data_vals) != 6:
is_ok = False
self.parsing_error_dict['bad_kegg_code_format'].append(current_module_num)
elif current_data_name == "DEFINITION":
# example format: (K01647,K05942) (K01681,K01682) (K00031,K00030) (K00164+K00658+K00382,K00174+K00175-K00177-K00176)
# another example: (M00161,M00163) M00165
knums = [x for x in re.split('\(|\)|,| |\+|-',data_vals) if x]
for k in knums:
if k[0] not in ['K','M'] or len(k) != 6:
is_ok = False
if not is_ok: # this goes here to avoid counting multiple errors for the same line
self.parsing_error_dict['bad_kegg_code_format'].append(current_module_num)
elif current_data_name == "ORTHOLOGY":
# example format: K00234,K00235,K00236,K00237
# more complex example: (K00163,K00161+K00162)+K00627+K00382-K13997
# another example: (M00161 [ie, from (M00161 Photosystem II)]
knums = [x for x in re.split('\(|\)|,|\+|-', data_vals) if x]
for k in knums:
if k[0] not in ['K','M'] or len(k) != 6:
is_ok = False
# try to fix it by splitting on first space
if not is_ok:
self.parsing_error_dict['bad_line_splitting'].append(current_module_num)
split_data_vals = data_vals.split(" ", maxsplit=1)
corrected_vals = split_data_vals[0]
corrected_def = split_data_vals[1]
# double check that we don't have a knum in the new definition
if re.match("K\d{5}",corrected_def):
corrected_vals = "".join([corrected_vals,corrected_def])
corrected_def = None
is_corrected = True
elif current_data_name == "PATHWAY":
# example format: map00020
if data_vals[0:3] != "map" or len(data_vals) != 8:
is_ok = False
self.parsing_error_dict['bad_line_splitting'].append(current_module_num)
split_data_vals = data_vals.split(" ", maxsplit=1)
corrected_vals = split_data_vals[0]
corrected_def = split_data_vals[1]
is_corrected = True
elif current_data_name == "REACTION":
# example format: R01899+R00268,R00267,R00709
rnums = [x for x in re.split(',|\+', data_vals) if x]
for r in rnums:
if r[0] != 'R' or len(r) != 6:
is_ok = False
if not is_ok:
self.parsing_error_dict['bad_line_splitting'].append(current_module_num)
split_data_vals = data_vals.split(" ", maxsplit=1)
corrected_vals = split_data_vals[0]
corrected_def = split_data_vals[1]
is_corrected = True
elif current_data_name == "COMPOUND":
# example format: C00024
if data_vals[0] not in ['C','G'] or len(data_vals) != 6:
is_ok = False
self.parsing_error_dict['bad_kegg_code_format'].append(current_module_num)
elif current_data_name == "RMODULE":
# example format: RM003
if data_vals[0:2] != "RM" or len(data_vals) != 5:
is_ok = False
self.parsing_error_dict['bad_kegg_code_format'].append(current_module_num)
if not is_ok and not is_corrected:
self.num_uncorrected_errors += 1
if self.just_do_it:
self.progress.reset()
self.run.warning("While parsing, anvi'o found an uncorrectable issue with a KEGG Module line in module %s, but "
"since you used the --just-do-it flag, anvi'o will quietly ignore this issue and add the line "
"to the MODULES.db anyway. Please be warned that this may break things downstream. In case you "
"are interested, the line causing this issue has data name %s and data value %s."
% (current_module_num, current_data_name, data_vals))
is_ok = True # let's pretend that everything is alright so that the next function will take the original parsed values
else:
raise ConfigError("While parsing, anvi'o found an uncorrectable issue with a KEGG Module line in module %s. The "
"current data name is %s, here is the incorrectly-formatted data value field: %s. If you think "
"this is totally fine and want to ignore errors like this, please re-run the setup with the "
"--just-do-it flag. But if you choose to do that of course we are obliged to inform you that things "
"may eventually break as a result." % (current_module_num, current_data_name, data_vals))
if is_corrected:
self.num_corrected_errors += 1
if anvio.DEBUG and not self.quiet:
self.progress.reset()
self.run.warning("While parsing a KEGG Module line, we found an issue with the formatting. We did our very best to parse "
"the line correctly, but please check that it looks right to you by examining the following values.")
self.run.info("Incorrectly parsed data value field", data_vals)
self.run.info("Corrected data values", corrected_vals)
self.run.info("Corrected data definition", corrected_def)
return is_ok, corrected_vals, corrected_def
    def parse_kegg_modules_line(self, line, current_module, line_num=None, current_data_name=None, error_dictionary=None):
        """This function parses information from one line of a KEGG module file.

        These files have fields separated by 2 or more spaces. Fields can include data name (not always), data value (always), and data definition (not always).
        Lines for pathway module files can have between 1 and 4 fields, but in fact the only situation where there should be 4 lines is the ENTRY data,
        which for some inexplicable reason has multiple spaces between "Pathway" and "Module" in the data definition field. We can safely ignore this last "Module", I think.

        Some lines will have multiple entities in the data_value field (ie, multiple KOs or reaction numbers) and will be split into multiple db entries.

        PARAMETERS
        ==========
        line : str
            the line to parse
        current_module : str
            which module we are working on. We need this to keep track of which modules throw parsing errors
        line_num : int
            which line number we are working on. We need this to keep track of which entities come from the same line of the file.
        current_data_name : str
            which data name we are working on. If this is None, we need to parse this info from the first field in the line.
        error_dictionary : dictionary
            not used anywhere in this function; presumably a placeholder for an error tracker — confirm before relying on it

        RETURNS
        =======
        line_entries : list
            tuples, each containing information for one db entry, namely data name, data value, data definition, and line number.
            Not all parts of the db entry will be included (module num, for instance), so this information must be parsed and combined with
            the missing information before being added to the database.
        """

        if anvio.DEBUG:
            self.progress.reset()
            self.run.info("[DEBUG] Parsing line", line, mc='red', lc='yellow')

        # fields in KEGG module files are separated by runs of 2 or more spaces
        fields = re.split('\s{2,}', line)
        data_vals = None
        data_def = None
        line_entries = []

        # when data name unknown, parse from first field
        if not current_data_name:
            # sanity check: if line starts with space then there is no data name field and we should have passed a current_data_name
            if line[0] == ' ':
                raise ConfigError("Oh, please. Some silly developer (you know who you are) has tried to call parse_kegg_modules_line() on "
                                  "a line without a data name field, and forgot to give it the current data name. Shame on you, go fix "
                                  "this. (For reference here is the line: %s)" % (line))

            current_data_name = fields[0]
        # note that if data name is known, first field still exists but is actually the empty string ''
        # so no matter the situation, data value is field 1 (index 0) and data definition (if any) is field 2 (index 1)
        # the only exception is that sometimes there is nothing in the data definition field (REFERENCE lines sometimes do this)
        if len(fields) > 1:
            data_vals = fields[1]

            # need to sanity check data value field because SOME modules don't follow the 2-space separation formatting
            vals_are_okay, corrected_vals, corrected_def = self.data_vals_sanity_check(data_vals, current_data_name, current_module)

            if vals_are_okay and len(fields) > 2: # not all lines have a definition field
                data_def = fields[2]
            elif not vals_are_okay:
                data_vals = corrected_vals
                data_def = corrected_def
        else: # only the data name was in the line
            # these are the data types that we don't care if they have an empty line
            data_types_can_be_empty = ['REFERENCE', 'AUTHORS', 'TITLE', 'JOURNAL']
            if current_data_name in data_types_can_be_empty or self.just_do_it:
                if anvio.DEBUG:
                    self.run.warning(f"While parsing module {current_module} we found an empty {current_data_name} line. "
                                     "We think it is okay and it probably won't cause issues downstream.",
                                     header="DEBUG OUTPUT", lc='yellow')
            else:
                raise ConfigError(f"While parsing module {current_module} we found an empty {current_data_name} line. "
                                  "We are quitting here so you can check it, because this data type might be important. "
                                  "However, if you disagree, you can re-run the setup with --just-do-it and we will quietly "
                                  "incorporate this empty line into the MODULES.db (you may also need the --reset flag when you re-run). ")

        # some types of information may need to be split into multiple db entries
        data_types_to_split = ["ORTHOLOGY","REACTION"] # lines that fall under these categories need to have data_vals split on comma
        if current_data_name in data_types_to_split:
            # here we should NOT split on any commas within parentheses
            # NOTE(review): if this line carried no data value field, data_vals is None here and re.split()
            # would raise a TypeError; presumably ORTHOLOGY/REACTION lines always have values — confirm
            vals = [x for x in re.split('\(|\)|,|\+|-', data_vals) if x]
            for val in vals:
                line_entries.append((current_data_name, val, data_def, line_num))
        else:
            line_entries.append((current_data_name, data_vals, data_def, line_num))

        return line_entries
def create(self):
"""Creates the Modules DB"""
self.touch()
self.progress.new("Loading %s KEGG modules into Modules DB..." % len(self.module_dict.keys()))
# sanity check that we setup the modules previously.
# It shouldn't be a problem since this function should only be called during the setup process after modules download, but just in case.
if not os.path.exists(self.module_data_dir) or len(self.module_dict.keys()) == 0:
raise ConfigError("Appparently, the Kegg Modules were not correctly setup and now all sorts of things are broken. The "
"Modules DB cannot be created from broken things. BTW, this error is not supposed to happen to anyone "
"except maybe developers, so if you do not fall into that category you are likely in deep doo-doo. "
"Maybe re-running setup with --reset will work? (if not, you probably should email/Slack/telepathically "
"cry out for help to the developers). Anyway, if this helps make things any clearer, the number of modules "
"in the module dictionary is currently %s" % len(self.module_dict.keys()))
# init the Modules table
mod_table = KeggModulesTable(self.module_table_name)
# keep track of errors encountered while parsing
self.parsing_error_dict = {"bad_line_splitting" : [], "bad_kegg_code_format" : []}
self.num_corrected_errors = 0
self.num_uncorrected_errors = 0
num_modules_parsed = 0
line_number = 0
for mnum in self.module_dict.keys():
self.progress.update("Parsing KEGG Module %s" % mnum)
mod_file_path = os.path.join(self.module_data_dir, mnum)
f = open(mod_file_path, 'rU')
prev_data_name_field = None
for line in f.readlines():
line = line.strip('\n')
line_number += 1
# check for last line ///. We don't want to send the last line to the parsing function because it will break.
# we also check here that the line is not entirely blank (this happens sometimes in KEGG modules, inexplicably)
if not line == '///' and re.search(r"\S+", line):
# parse the line into a tuple
entries_tuple_list = None
# here is the tricky bit about parsing these files. Not all lines start with the data_name field; those that don't start with a space.
# if this is the case, we need to tell the parsing function what the previous data_name field has been.
if line[0] == ' ':
entries_tuple_list = self.parse_kegg_modules_line(line, mnum, line_number, prev_data_name_field)
else:
entries_tuple_list = self.parse_kegg_modules_line(line, mnum, line_number)
prev_data_name_field = entries_tuple_list[0][0]
for name, val, definition, line in entries_tuple_list:
# there is one situation in which we want to ignore the entry, and that is Modules appearing in the ORTHOLOGY category, like so:
# (M00531 Assimilatory nitrate reduction, nitrate => ammonia)
if not (name == "ORTHOLOGY" and val[0] == '('):
# append_and_store will collect db entries and store every 10000 at a time
mod_table.append_and_store(self.db, mnum, name, val, definition, line)
else:
line -= 1
f.close()
num_modules_parsed += 1
# once we are done parsing all modules, we store whatever db entries remain in the db_entries list
# this is necessary because append_and_store() above only stores every 10000 entries
self.progress.update("Storing final batch of module entries into DB")
mod_table.store(self.db)
self.progress.end()
# warn user about parsing errors
if anvio.DEBUG:
self.run.warning("Several parsing errors were encountered while building the KEGG Modules DB. "
"Below you will see which modules threw each type of parsing error. Note that modules which "
"threw multiple errors will occur in the list as many times as it threw each error.")
self.run.info("Bad line splitting (usually due to rogue or missing spaces)", self.parsing_error_dict["bad_line_splitting"])
self.run.info("Bad KEGG code format (not corrected; possibly problematic)", self.parsing_error_dict["bad_kegg_code_format"])
else: # less verbose
self.run.warning("First things first - don't panic. Several parsing errors were encountered while building the KEGG Modules DB. "
"But that is probably okay, because if you got to this point it is likely that we already fixed all of them "
"ourselves. So don't worry too much. Below you will see how many of each type of error was encountered. If "
"you would like to see which modules threw these errors, please re-run the setup using the `--debug` flag (you "
"will also probably need the `--reset` flag). When doing so, you will also see which lines caused issues; this "
"can be a lot of output, so you can suppress the line-specific output with the `--quiet` flag if that makes things "
"easier to read. So, in summary: You can probably ignore this warning. But if you want more info: run setup again "
"with `--reset --debug --quiet` to see exactly which modules had issues, or run with `--reset --debug` to see exactly "
"which lines in which modules had issues. Anvi'o developers thank you for your attention and patience 😇")
self.run.info("Bad line splitting (usually due to rogue or missing spaces)", len(self.parsing_error_dict["bad_line_splitting"]))
self.run.info("Bad KEGG code format (usually not correctable)", len(self.parsing_error_dict["bad_kegg_code_format"]))
# give some run info
self.run.info('Modules database', 'A new database, %s, has been created.' % (self.db_path), quiet=self.quiet)
self.run.info('Number of KEGG modules', num_modules_parsed, quiet=self.quiet)
self.run.info('Number of entries', mod_table.get_total_entries(), quiet=self.quiet)
self.run.info('Number of parsing errors (corrected)', self.num_corrected_errors, quiet=self.quiet)
self.run.info('Number of parsing errors (uncorrected)', self.num_uncorrected_errors, quiet=self.quiet)
# record some useful metadata
self.db.set_meta_value('db_type', 'modules')
self.db.set_meta_value('num_modules', num_modules_parsed)
self.db.set_meta_value('total_entries', mod_table.get_total_entries())
self.db.set_meta_value('creation_date', time.time())
self.db.set_meta_value('hash', self.get_db_content_hash())
self.db.set_meta_value('version', t.metabolic_modules_db_version)
self.db.disconnect()
    def disconnect(self):
        """Closes the connection to the underlying Modules database."""
        self.db.disconnect()
######### SELF TABLE ACCESS FUNCTIONS #########
def get_days_since_creation(self):
"""Returns the time (in days) since MODULES.db was created.
The time units are seconds, and there are 60*60*24 = 86400 seconds per day,
so we do the appropriate division to get the time in days.
"""
return round((time.time() - float(self.db.get_meta_value('creation_date'))) / 86400)
def get_db_content_hash(self):
"""Compute hash of all KOs and module numbers present in the db (used for tracking major changes to db content with future KEGG updates)"""
mods = self.get_all_modules_as_list()
mods.sort()
orths = self.get_all_knums_as_list()
orths.sort()
mods_and_orths = mods + orths
mods_and_orths = "".join(mods_and_orths)
return str(hashlib.sha224(mods_and_orths.encode('utf-8')).hexdigest())[0:12]
######### MODULES TABLE ACCESS FUNCTIONS #########
def get_modules_table_data_values_as_dict(self, data_names_of_interest=[]):
"""This function loads the modules table and returns it as a dictionary (of data values only) keyed by module.
PARAMETERS
==========
data_names_of_interest : list of str
the returned dictionary will contain only data names from this list. If the list is empty,
all data names are returned.
RETURNS
=======
module_dictionary : dict of dicts
data for each module in the modules table. Outer dictionary is keyed by module number and
inner dictionary is keyed by data name
"""
if data_names_of_interest:
data_names_list = [f"'{n}'" for n in data_names_of_interest]
where_clause_string = f"data_name in ({','.join(data_names_list)})"
# this WILL fail if you ask for a data name that doesn't exist, so know your data before you query
dict_from_mod_table = self.db.get_some_rows_from_table_as_dict(self.module_table_name, where_clause_string, row_num_as_key=True)
else:
dict_from_mod_table = self.db.get_table_as_dict(self.module_table_name, row_num_as_key=True)
# the returned dictionary is keyed by an arbitrary integer, and each value is a dict containing one row from the modules table
# ex of one row in this dict: 0: {'module': 'M00001', 'data_name': 'ENTRY', 'data_value': 'M00001', 'data_definition': 'Pathway', 'line': 1}
# now we convert this to a per-module dictionary
module_dictionary = {}
for entry in dict_from_mod_table:
mod = dict_from_mod_table[entry]['module']
data_name = dict_from_mod_table[entry]['data_name']
data_value = dict_from_mod_table[entry]['data_value']
if mod not in module_dictionary:
module_dictionary[mod] = {}
if data_name not in module_dictionary[mod]:
module_dictionary[mod][data_name] = data_value
else:
if isinstance(module_dictionary[mod][data_name], list):
module_dictionary[mod][data_name].append(data_value)
else:
# this is a data name that has multiple values, so we need to convert it to a list
existing_val = module_dictionary[mod][data_name]
module_dictionary[mod][data_name] = [existing_val, data_value]
return module_dictionary
def get_data_value_entries_for_module_by_data_name(self, module_num, data_name):
    """This function returns data_value elements from the modules table for the specified module and data_name pair.

    All elements corresponding to the pair (ie, M00001 and ORTHOLOGY) will be returned.
    The function relies on the db.get_some_rows_from_table_as_dict() function to first fetch all rows corresponding
    to a particular module, and then parses the resulting dictionary to find all the elements with the given data_name field.

    PARAMETERS
    ==========
    module_num : str
        the module to fetch data for
    data_name : str
        which data_name field we want

    RETURNS
    =======
    data_values_to_ret : list of str
        the data_values corresponding to the module/data_name pair (empty if nothing matched)
    """
    where_clause_string = "module = '%s'" % (module_num)
    dict_from_mod_table = self.db.get_some_rows_from_table_as_dict(self.module_table_name, where_clause_string, row_num_as_key=True)
    # the returned dictionary is keyed by an arbitrary integer, and each value is a dict containing one row from the modules table
    # ex of one row in this dict: 0: {'module': 'M00001', 'data_name': 'ENTRY', 'data_value': 'M00001', 'data_definition': 'Pathway', 'line': 1}
    data_values_to_ret = []
    for key in dict_from_mod_table.keys():
        if dict_from_mod_table[key]['data_name'] == data_name:
            data_values_to_ret.append(dict_from_mod_table[key]['data_value'])

    if not data_values_to_ret:
        # FIX: the warning previously contained '%s' placeholders but was never given the
        # corresponding arguments, so the literal '%s' appeared in the user-facing message
        self.run.warning("Just so you know, we tried to fetch data from the KEGG Modules database for the data_name field %s "
                         "and KEGG module %s, but didn't come up with anything, so an empty list is being returned. This may "
                         "cause errors down the line, and if so we're very sorry for that." % (data_name, module_num))

    return data_values_to_ret
def get_data_definition_entries_for_module_by_data_name(self, module_num, data_name):
    """This function returns data_definition elements from the modules table for the specified module and data_name pair.

    All elements corresponding to the pair (ie, M00001 and ORTHOLOGY) will be returned.
    The function relies on the db.get_some_rows_from_table_as_dict() function to first fetch all rows corresponding
    to a particular module, and then parses the resulting dictionary to find all the elements with the given data_name field.

    PARAMETERS
    ==========
    module_num : str
        the module to fetch data for
    data_name : str
        which data_name field we want

    RETURNS
    =======
    data_defs_to_ret : list of str
        the data_definitions corresponding to the module/data_name pair (empty if nothing matched)
    """
    where_clause_string = "module = '%s'" % (module_num)
    dict_from_mod_table = self.db.get_some_rows_from_table_as_dict(self.module_table_name, where_clause_string, row_num_as_key=True)
    data_defs_to_ret = []
    for key in dict_from_mod_table.keys():
        if dict_from_mod_table[key]['data_name'] == data_name:
            data_defs_to_ret.append(dict_from_mod_table[key]['data_definition'])

    if not data_defs_to_ret and anvio.DEBUG:
        # FIX: the warning previously contained '%s' placeholders but was never given the
        # corresponding arguments, so the literal '%s' appeared in the user-facing message
        self.run.warning("Just so you know, we tried to fetch data definitions from the KEGG Modules database for the data_name field %s "
                         "and KEGG module %s, but didn't come up with anything, so an empty list is being returned. This may "
                         "cause errors down the line, and if so we're very sorry for that." % (data_name, module_num))

    return data_defs_to_ret
def get_all_modules_as_list(self):
    """Return the identifier of every module in the database as a list."""
    module_column = self.db.get_single_column_from_table(self.module_table_name, 'module', unique=True)
    return module_column
def get_all_knums_as_list(self):
    """Return every KO number found in ORTHOLOGY rows of the database as a list."""
    orthology_filter = "data_name = 'ORTHOLOGY'"
    return self.db.get_single_column_from_table(self.module_table_name, 'data_value',
                                                unique=True, where_clause=orthology_filter)
def get_modules_for_knum(self, knum):
    """Return the list of modules to which the given KO belongs."""
    return self.db.get_single_column_from_table(self.module_table_name, 'module', unique=True,
                                                where_clause=f"data_value = '{knum}'")
def get_module_classes_for_knum_as_dict(self, knum):
    """Return the CLASS info of every module containing the given KO, keyed by module number.

    Each value is the parsed class dictionary produced by get_kegg_module_class_dict().
    """
    return {mnum: self.get_kegg_module_class_dict(mnum)
            for mnum in self.get_modules_for_knum(knum)}
def get_module_classes_for_knum_as_list(self, knum):
    """Return the CLASS strings of every module containing the given KO, as a list."""
    # each module has exactly one CLASS line, so the first fetched entry is the one we want
    return [self.get_data_value_entries_for_module_by_data_name(mnum, "CLASS")[0]
            for mnum in self.get_modules_for_knum(knum)]
def get_module_name(self, mnum):
    """Return the NAME of the specified KEGG module."""
    # a module carries exactly one NAME line, so the first entry is the whole answer
    names = self.get_data_value_entries_for_module_by_data_name(mnum, "NAME")
    return names[0]
def get_module_names_for_knum(self, knum):
    """Return the name of every KEGG module containing the given KO, keyed by module number."""
    return {mnum: self.get_module_name(mnum) for mnum in self.get_modules_for_knum(knum)}
def parse_kegg_class_value(self, class_data_val):
    """This function takes a data_value string for the CLASS field in the modules table and parses it into a dictionary.

    The data_value string of CLASS fields should look something like this:
        Pathway modules; Amino acid metabolism; Lysine metabolism
    so they can be parsed into 3 parts: class, category, and subcategory.

    PARAMETERS
    ==========
    class_data_val : str
        the raw CLASS string, with fields separated by '; '

    RETURNS
    =======
    class_dict : dict
        keys are 'class', 'category', and 'subcategory'. Fields missing from a truncated
        input string map to None instead of raising an IndexError.
    """
    fields = class_data_val.split("; ")
    # FIX: 'category' was previously accessed unconditionally (fields[1]), so a CLASS string
    # with fewer than two fields crashed with an IndexError even though 'subcategory' was
    # guarded. Guard both the same way. ('class' is always present: split() never returns
    # an empty list.)
    class_dict = {"class": fields[0],
                  "category": fields[1] if len(fields) > 1 else None,
                  "subcategory": fields[2] if len(fields) > 2 else None}
    return class_dict
def get_kegg_module_class_dict(self, mnum, class_value=None):
    """Return the parsed CLASS field of a module as a dictionary.

    A convenience wrapper that couples the data fetch with the parsing step.

    PARAMETERS
    ==========
    mnum : str
        the module number
    class_value : str
        The 'CLASS' string for the module. Optional; when not provided (or falsy),
        the value is queried from the modules DB.
    """
    resolved_class_value = class_value
    if not resolved_class_value:
        # there is exactly one CLASS line per module, so take the first list element
        resolved_class_value = self.get_data_value_entries_for_module_by_data_name(mnum, "CLASS")[0]
    return self.parse_kegg_class_value(resolved_class_value)
def get_kegg_module_definition(self, mnum):
    """Return all DEFINITION lines of the module joined into a single space-separated string."""
    definition_lines = self.get_data_value_entries_for_module_by_data_name(mnum, "DEFINITION")
    return " ".join(definition_lines)
def get_ko_definition_from_modules_table(self, ko_num):
    """This function returns the definition for the given KO from the modules data table.

    Note that the modules table will only contain information for KOs that belong to modules, so this
    function returns None for those KOs that are not in modules. If your use case depends on accessing
    definitions for all KOs, you are better off calling KeggContext.setup_ko_dict() and taking the
    definition from that dictionary.

    PARAMETERS
    ==========
    ko_num : str
        the KO number to look up

    RETURNS
    =======
    str or None
        the data_definition of the first matching ORTHOLOGY row, or None if the KO is not in any module
    """
    where_clause_string = "data_name = 'ORTHOLOGY' AND data_value = '%s'" % (ko_num)
    dict_from_mod_table = self.db.get_some_rows_from_table_as_dict(self.module_table_name, where_clause_string, row_num_as_key=True, error_if_no_data=False)
    if not dict_from_mod_table:
        # FIX: the message previously named a non-existent function 'get_ko_definition()'
        self.run.warning("get_ko_definition_from_modules_table() speaking: No ORTHOLOGY entry found for KO %s - returning None."
                         % (ko_num))
        return None
    else:
        # there could be several rows for the same KO in different modules, but each definition should be
        # the same or similar, so we arbitrarily return the first one.
        # FIX: the rows are keyed by row number, which is not guaranteed to include key 0, so we
        # take the first key instead of assuming dict_from_mod_table[0] exists.
        first_key = next(iter(dict_from_mod_table))
        return dict_from_mod_table[first_key]['data_definition']
def get_kos_in_module(self, mnum):
    """Return the KOs of the given module, straight from its ORTHOLOGY lines.

    Please note that these KOs are not always in the same order as the module definition, and
    may even contain duplicate entries for a KO. A good example of this is
    http://rest.kegg.jp/get/M00091 (K00551 is in two ORTHOLOGY lines) and
    http://rest.kegg.jp/get/M00176 (see KOs in the first top-level step). If this will be a
    problem, you should use the function get_kos_from_module_definition() instead.
    """
    orthology_entries = self.get_data_value_entries_for_module_by_data_name(mnum, "ORTHOLOGY")
    return orthology_entries
def get_kos_from_module_definition(self, mnum):
    """Return the KOs of the given module in the order they appear in its DEFINITION string.

    An alternative to get_kos_in_module() that avoids that function's ordering and
    duplication quirks.
    """
    mod_def = self.get_kegg_module_definition(mnum)
    # a KO identifier is the letter 'K' followed by 5 digits, so take the 6 characters
    # starting at every 'K' found in the definition string
    return [mod_def[i:i + 6] for i, ch in enumerate(mod_def) if ch == 'K']
def get_kegg_module_compound_lists(self, mnum):
    """This function returns a list of substrates, a list of intermediates, and a list of products for the given module.

    We define 'substrate' to be any compound that is an input to but not an output from reactions in the module pathway.
    Likewise, a 'product' is any compound that is an output from but not an input to reactions in the module pathway.
    'Intermediate' is a compound that is both an input to and an output from reactions in the pathway.

    Note that this function refers to compounds by their KEGG identifier (format is 'C#####' where # is a digit).
    A separate function is used to convert these lists to human-readable compound names.

    RETURNS
    =======
    substrates : list
        Compounds that are only inputs to the module's metabolic pathway
    intermediates : list
        Compounds that are both outputs and inputs in the module's metabolic reactions
    products : list
        Compounds that are only outputs from the module's metabolic pathway
    """
    reactions_list = self.get_data_definition_entries_for_module_by_data_name(mnum, "REACTION")
    if not reactions_list:
        if anvio.DEBUG:
            self.run.warning(f"No REACTION entries found for module {mnum}, so no compounds will be returned by "
                             "get_kegg_module_compound_lists()")

    inputs = set([])
    outputs = set([])

    for rxn_string in reactions_list:
        # '<->' must be checked first because '->' is a substring of it
        if '<->' in rxn_string:
            split_rxn = rxn_string.split('<->')
        else:
            split_rxn = rxn_string.split('->')
        if len(split_rxn) != 2:
            # FIX: the second string was missing its f-prefix (so '{split_rxn}' appeared literally
            # in the error message) and the two strings concatenated without a space
            raise ConfigError(f"get_kegg_module_compound_lists('{mnum}') ran into an issue splitting the reaction {rxn_string} "
                              f"into 2 parts. Here is what the split looks like: {split_rxn}")

        rxn_inputs = [x.strip() for x in split_rxn[0].split('+')]
        rxn_outputs = [x.strip() for x in split_rxn[1].split('+')]
        inputs = inputs.union(set(rxn_inputs))
        outputs = outputs.union(set(rxn_outputs))

    substrates = inputs.difference(outputs)
    products = outputs.difference(inputs)
    intermediates = inputs.intersection(outputs)

    return list(substrates), list(intermediates), list(products)
def get_compound_dict_for_module(self, mnum, raise_error_if_no_data=False):
    """Map each COMPOUND identifier of the given module to its human-readable name.

    If the module has no compounds, this function will either raise an error or return an
    empty dictionary depending on raise_error_if_no_data. A compound without a human-readable
    name in the database maps to its own identifier instead.

    PARAMETERS
    ==========
    mnum : str
        module number to get compounds for
    raise_error_if_no_data : bool
        whether to quit all things if we don't get what we want
    """
    where_clause_string = "data_name = 'COMPOUND' AND module = '%s'" % (mnum)
    rows = self.db.get_some_rows_from_table_as_dict(self.module_table_name, where_clause_string,
                                                    row_num_as_key=True,
                                                    error_if_no_data=raise_error_if_no_data)
    # fall back to the compound ID whenever the data_definition (the name) is empty/None
    return {row['data_value']: row['data_definition'] or row['data_value'] for row in rows.values()}
def get_human_readable_compound_lists_for_module(self, mnum):
    """Return human-readable substrate, intermediate, and product lists for the given module.

    We define 'substrate' to be any compound that is an input to but not an output from
    reactions in the module pathway; a 'product' is any compound that is an output from but
    not an input to those reactions; an 'intermediate' is both an input and an output.

    RETURNS
    =======
    substrate_name_list : list of str
        List of substrate compounds
    intermediate_name_list : list of str
        List of intermediate compounds
    product_name_list : list of str
        List of product compounds
    """
    name_of = self.get_compound_dict_for_module(mnum)
    substrate_compounds, intermediate_compounds, product_compounds = self.get_kegg_module_compound_lists(mnum)

    return ([name_of[c] for c in substrate_compounds],
            [name_of[c] for c in intermediate_compounds],
            [name_of[c] for c in product_compounds])
######### MODULE DEFINITION UNROLLING FUNCTIONS #########
def get_top_level_steps_in_module_definition(self, mnum):
    """Return the top-level steps of a module's DEFINITION line as a list.

    A 'top-level' step is one obtained by splitting on spaces (but not spaces within
    parentheses) just once - ie, the 'first layer' when unrolling the module.
    """
    return self.split_by_delim_not_within_parens(self.get_kegg_module_definition(mnum), " ")
def unroll_module_definition(self, mnum, def_lines=None):
    """Unroll a module's DEFINITION into the list of all possible paths through the module.

    This is a driver for the recursive functions that do the actual unrolling of each
    definition line.

    PARAMETERS
    ==========
    mnum : str
        module number
    def_lines : list of str
        The DEFINITION lines for the module. Optional; if not passed, the module definition
        is looked up from the modules DB.
    """
    if not def_lines:
        def_lines = self.get_data_value_entries_for_module_by_data_name(mnum, "DEFINITION")

    # glue all definition lines into a single, whitespace-trimmed string
    combined_def_line = " ".join(line.strip() for line in def_lines)

    return self.recursive_definition_unroller(combined_def_line)
def split_by_delim_not_within_parens(self, d, delims, return_delims=False):
    """Takes a string, and splits it on the given delimiter(s) as long as the delimiter is not within parentheses.

    This function exists because regular expressions don't handle nested parentheses very well. It is used in the
    recursive module definition unrolling functions to split module steps, but it is generically written in case
    it could have other uses in the future.

    The function can also be used to determine if the parentheses in the string are unbalanced (it will return False
    instead of the list of splits in this situation)

    PARAMETERS
    ==========
    d : str
        string to split
    delims : str or list of str
        a single delimiter, or a list of delimiters, to split on
    return_delims : boolean
        if this is true then the list of delimiters found between each split is also returned

    RETURNS
    =======
    If parentheses are unbalanced in the string, this function returns False. Otherwise:
    splits : list
        strings that were split from d
    delim_list : list
        delimiters that were found between each split (only returned if return_delims is True)
    """
    parens_level = 0
    last_split_index = 0
    splits = []
    delim_list = []
    for i in range(len(d)):
        # only split if not within parentheses
        if d[i] in delims and parens_level == 0:
            splits.append(d[last_split_index:i])
            delim_list.append(d[i])
            last_split_index = i + 1  # we add 1 here to skip the delimiter itself
        elif d[i] == "(":
            parens_level += 1
        elif d[i] == ")":
            parens_level -= 1

        # if parentheses become unbalanced, return False to indicate this
        if parens_level < 0:
            return False

    # FIX: previously only an excess of closing parens (negative level) was detected; a string
    # with unmatched opening parens (e.g. '(A B') silently returned bogus splits even though
    # the documented contract promises False for any unbalanced input
    if parens_level != 0:
        return False

    splits.append(d[last_split_index:len(d)])

    if return_delims:
        return splits, delim_list
    return splits
def recursive_definition_unroller(self, step):
    """This function recursively splits a module definition into its components.

    First, the definition is split into its component steps (separated by spaces).
    Each step is either an atomic step (a single KO, module number, '--', or nonessential KO starting with '-'),
    a protein complex, or a compound step.

    Atomic steps are used to extend each path that has been found so far. Protein complexes are split into
    their respective components, which may be split further by the split_paths() function to find all possible
    alternative complexes, before being used to extend each path. Compound steps are split and recursively processed
    by the split_paths() function before the resulting downstream paths are used to extend each path.

    PARAMETERS
    ==========
    step : str
        step definition to split into component steps as necessary

    RETURNS
    =======
    paths_list : list
        all paths that the input step has been unrolled into
    """
    split_steps = self.split_by_delim_not_within_parens(step, " ")
    paths_list = [[]]  # list to save all paths, with initial empty path list to extend from

    for s in split_steps:
        # base case: step is a ko, mnum, non-essential step, or '--'
        # (KOs/modules are 6 chars like 'K00001'/'M00001'; non-essential KOs are 7 chars like '-K00001')
        if (len(s) == 6 and s[0] == "K") or (len(s) == 6 and s[0] == "M") or (s == "--") or (len(s) == 7 and s[0] == "-"):
            for p in paths_list:
                p.extend([s])
        else:
            if s[0] == "(" and s[-1] == ")":
                # here we try splitting to see if removing the outer parentheses will make the definition become unbalanced
                # (the only way to figure this out is to try it because regex cannot handle nested parentheses)
                comma_substeps = self.split_by_delim_not_within_parens(s[1:-1], ",")
                if not comma_substeps:  # if it doesn't work, try without removing surrounding parentheses
                    comma_substeps = self.split_by_delim_not_within_parens(s, ",")
                space_substeps = self.split_by_delim_not_within_parens(s[1:-1], " ")
                if not space_substeps:
                    space_substeps = self.split_by_delim_not_within_parens(s, " ")
            else:
                comma_substeps = self.split_by_delim_not_within_parens(s, ",")
                space_substeps = self.split_by_delim_not_within_parens(s, " ")

            # complex case: no commas OR spaces outside parentheses so this is a protein complex rather than a compound step
            if len(comma_substeps) == 1 and len(space_substeps) == 1:
                complex_components, delimiters = self.split_by_delim_not_within_parens(s, ["+","-"], return_delims=True)
                complex_strs = [""]

                # reconstruct the complex (and any alternate possible complexes) while keeping the +/- structure the same
                for i in range(len(complex_components)):
                    c = complex_components[i]
                    if c[0] == '(':
                        # a parenthesized component holds alternative KOs for this position in the complex;
                        # each alternative multiplies out the set of complex strings built so far
                        alts = self.split_path(c)
                        new_complex_strs = []
                        for a in alts:
                            if len(a) > 1:
                                raise ConfigError("Uh oh. recursive_definition_unroller() speaking. We found a protein complex with more "
                                                  "than one KO per alternative option here: %s" % s)
                            for cs in complex_strs:
                                extended_complex = cs + a[0]
                                new_complex_strs.append(extended_complex)
                        complex_strs = new_complex_strs
                    else:
                        # plain component: append it to every complex string built so far
                        for j in range(len(complex_strs)):
                            complex_strs[j] += c

                    # re-attach the '+' or '-' delimiter that followed this component (none after the last)
                    if i < len(delimiters):
                        for j in range(len(complex_strs)):
                            complex_strs[j] += delimiters[i]

                # extend every existing path with every reconstructed complex string (cross product)
                new_paths_list = []
                for cs in complex_strs:
                    for p in paths_list:
                        p_copy = copy.copy(p)
                        p_copy.extend([cs])
                        new_paths_list.append(p_copy)
                paths_list = new_paths_list

            # compound step case:
            else:
                # recursively unroll the alternatives and extend every existing path with each one
                alts = self.split_path(s)
                new_paths_list = []
                for a in alts:
                    for p in paths_list:
                        p_copy = copy.copy(p)
                        p_copy.extend(a)
                        new_paths_list.append(p_copy)
                paths_list = new_paths_list

    return paths_list
def split_path(self, step):
    """Split a compound step into the list of all alternative paths it can produce.

    The input step is first split into substeps on commas; since each substep may itself be a
    mini-definition, the definition unrolling function is invoked recursively on each one. The
    list of all alternative paths that can be made from this step is returned.
    """
    if step[0] == "(" and step[-1] == ")":
        substeps = self.split_by_delim_not_within_parens(step[1:-1], ",")
        # stripping the outer parens may have unbalanced the string; retry on the full step
        if not substeps:
            substeps = self.split_by_delim_not_within_parens(step, ",")
    else:
        substeps = self.split_by_delim_not_within_parens(step, ",")

    return [path
            for substep in substeps
            for path in self.recursive_definition_unroller(substep)]
class KeggModulesTable:
    """This class defines operations for creating the KEGG Modules table in Modules.db"""

    def __init__(self, mod_table_name=None):
        """Initialize an empty row buffer for the table with the given name."""
        self.db_entries = []
        self.total_entries = 0

        if not mod_table_name:
            raise ConfigError("Beep Beep. Warning. KeggModulesTable was initialized without knowing its own name.")
        self.module_table_name = mod_table_name

    def append_and_store(self, db, module_num, data_name, data_value, data_definition=None, line_num=None):
        """Collect db entries (as tuples) into a buffer, flushing them to the Modules table in batches.

        The db_entries list is cleared after each store so that future stores don't add duplicate
        entries to the table.
        """
        self.db_entries.append((module_num, data_name, data_value, data_definition, line_num))
        self.total_entries += 1

        # flush periodically so the in-memory buffer never grows unbounded
        if len(self.db_entries) >= 10000:
            self.store(db)
            self.db_entries = []

    def store(self, db):
        """Write any buffered rows to the database in a single batched INSERT."""
        if len(self.db_entries):
            placeholders = ','.join(['?'] * len(self.db_entries[0]))
            db._exec_many('''INSERT INTO %s VALUES (%s)''' % (self.module_table_name, placeholders), self.db_entries)

    def get_total_entries(self):
        """Return the total number of rows appended over this object's lifetime."""
        return self.total_entries
class KeggModuleEnrichment(KeggContext):
"""This class is a driver for anvi-script-enrichment-stats for modules input.
It takes in the modules mode output from anvi-estimate-metabolism, formats it for the enrichment script,
and runs the script.
==========
args: Namespace object
All the arguments supplied by user to anvi-compute-functional-enrichment
"""
def __init__(self, args, run=run, progress=progress):
self.args = args
self.run = run
self.progress = progress
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.modules_txt = A('modules_txt')
self.groups_txt = A('groups_txt')
self.sample_header_in_modules_txt = A('sample_header') or 'db_name'
self.module_completion_threshold = A('module_completion_threshold') or 0.75
self.output_file_path = A('output_file')
self.include_ungrouped = True if A('include_ungrouped') else False
self.include_missing = True if A('include_samples_missing_from_groups_txt') else False
# init the base class
KeggContext.__init__(self, self.args)
# if necessary, assign 0 completion threshold, which evaluates to False above
if A('module_completion_threshold') == 0:
self.module_completion_threshold = 0.0
if not self.quiet:
self.run.warning("Your completion threshold is set to 0, which will make the enrichment results MEANINGLESS. Why? Because "
"with a threshold this low, every module will be considered present in every single sample and therefore will "
"be equally present in every single group. So you should stop what you are doing RIGHT NOW.")
if not self.just_do_it:
raise ConfigError("We are stopping you right there because your completion threshold is 0 and that will make the enrichment results "
"meaningless (see the warnings above, if you haven't suppressed them with --quiet). But if you really really really "
"want to do it, you can run again with --just-do-it and then we won't stop you. You have the right to be without meaning.")
# sanity checkses my precious
if not self.modules_txt:
raise ConfigError("To compute module enrichment, you must provide a modules-txt file (aka modules mode output from "
"`anvi-estimate-metabolism`).")
if not self.groups_txt:
raise ConfigError("To compute module enrichment, you must provide a groups-txt file mapping each sample to a group.")
filesnpaths.is_file_exists(self.modules_txt)
filesnpaths.is_file_plain_text(self.modules_txt)
filesnpaths.is_file_exists(self.groups_txt)
filesnpaths.is_file_plain_text(self.groups_txt)
if filesnpaths.is_file_exists(self.output_file_path, dont_raise=True):
raise ConfigError(f"Whoops... we almost overwrote the existing output file {self.output_file_path}. But we stopped just in time. "
"If you really want us to replace the contents of that file with new enrichment results, then remove this "
"file before you run this program again.")
filesnpaths.is_output_file_writable(self.output_file_path)
if not self.quiet:
self.run.info("modules-txt input file", self.modules_txt)
self.run.info("groups-txt input file", self.groups_txt)
self.run.info("sample column in modules-txt", self.sample_header_in_modules_txt)
self.run.info("module completion threshold", self.module_completion_threshold)
def get_enrichment_input(self, output_file_path):
"""This function converts modules mode output into input for anvi-script-enrichment-stats
The input format for anvi-script-enrichment-stats is described in a comment at the top of that script, and here is
how we get the values for each column:
The first column, 'KEGG_MODULE', and second column 'accession', are already in the modules mode output as 'module_name'
and 'kegg_module', respectively.
The 'N_*' columns are the total number of samples in each group.
For each module, this function determines which samples the module is 'present' in according to the specified completion threshold.
This determines the list of samples for the 'sample_ids' column as well as the 'p_*' proportions for each group of samples.
Finally, the fourth column, 'associated_groups', is computed from the 'p_*' proportions and 'N_*' totals.
PARAMETERS
==========
output_file_path : str
a file path where we will store the (temporary) input file for the enrichment script
"""
filesnpaths.is_output_file_writable(output_file_path)
# read the files into dataframes
modules_df = pd.read_csv(self.modules_txt, sep='\t')
# make sure we have all the columns we need in modules mode output, since this output can be customized
required_modules_txt_headers = ['kegg_module', 'module_completeness', 'module_name']
missing_headers = []
for h in required_modules_txt_headers:
if h not in modules_df.columns:
missing_headers.append(h)
if missing_headers:
missing_string = ", ".join(missing_headers)
self.progress.reset()
raise ConfigError("We cannot go on! *dramatic sweep* We trust that you have provided us with "
"modules mode output, but unfortunately the modules-txt input does not contain "
f"the following required headers: {missing_string} Please re-generate your "
"modules-txt to include these before trying again.")
if 'unique_id' in modules_df.columns:
modules_df = modules_df.drop(columns=['unique_id'])
# samples column sanity check - this column will become the index
if self.sample_header_in_modules_txt not in modules_df.columns:
col_list = ", ".join(modules_df.columns)
self.progress.reset()
raise ConfigError(f"You have specified that your sample names are in the column with header '{self.sample_header_in_modules_txt}' "
"in the modules-txt file, but that column does not exist. :( Please figure out which column is right and submit "
"it using the --sample-header parameter. Just so you know, the columns in modules-txt that you can choose from "
f"are: {col_list}")
samples_to_groups_dict, groups_to_samples_dict = utils.get_groups_txt_file_as_dict(self.groups_txt)
# make sure the samples all have a group
samples_with_none_group = []
for s,g in samples_to_groups_dict.items():
if not g:
samples_with_none_group.append(s)
if self.include_ungrouped:
samples_to_groups_dict[s] = 'UNGROUPED'
if not self.include_ungrouped:
for s in samples_with_none_group:
samples_to_groups_dict.pop(s)
if samples_with_none_group:
self.progress.reset()
none_group_str = ", ".join(samples_with_none_group)
if self.include_ungrouped:
self.run.warning("Some samples in your groups-txt did not have a group, but since you elected to --include-ungrouped, "
"we will consider all of those samples to belong to one group called 'UNGROUPED'. Here are those "
f"UNGROUPED samples: {none_group_str}")
else:
self.run.warning("Some samples in your groups-txt did not have a group, and we will ignore those samples. If you "
"want them to be included in the analysis (but without assigning a group), you can simply re-run "
"this program with the --include-ungrouped flag. Now. Here are the samples we will be ignoring: "
f"{none_group_str}")
# sanity check for mismatch between modules-txt and groups-txt
sample_names_in_modules_txt = set(modules_df[self.sample_header_in_modules_txt].unique())
sample_names_in_groups_txt = set(samples_to_groups_dict.keys())
samples_missing_in_groups_txt = sample_names_in_modules_txt.difference(sample_names_in_groups_txt)
samples_missing_in_modules_txt = sample_names_in_groups_txt.difference(sample_names_in_modules_txt)
if anvio.DEBUG:
self.run.info("Samples in modules-txt", ", ".join(list(sample_names_in_modules_txt)))
self.run.info("Samples in groups-txt", ", ".join(list(sample_names_in_groups_txt)))
self.run.info("Missing samples from groups-txt", ", ".join(list(samples_missing_in_groups_txt)))
self.run.info("Missing samples from modules-txt", ", ".join(list(samples_missing_in_modules_txt)))
if samples_missing_in_groups_txt:
missing_samples_str = ", ".join(samples_missing_in_groups_txt)
if not self.include_missing:
self.progress.reset()
self.run.warning(f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
"column). Since you have not elected to --include-samples-missing-from-groups-txt, we are not going to take these samples into consideration at all. "
"Here are the samples that we will be ignoring: "
f"{missing_samples_str}")
# drop the samples that are not in groups-txt
modules_df = modules_df[~modules_df[self.sample_header_in_modules_txt].isin(list(samples_missing_in_groups_txt))]
if anvio.DEBUG:
self.run.info("Samples remaining in modules-txt dataframe after removing ungrouped", ", ".join(modules_df[self.sample_header_in_modules_txt].unique()))
else:
self.progress.reset()
self.run.warning(f"Your groups-txt file does not contain some samples present in your modules-txt ({self.sample_header_in_modules_txt} "
"column). Since you have chosen to --include-samples-missing-from-groups-txt, for the purposes of this analysis we will now consider all of "
"these samples to belong to one group called 'UNGROUPED'. If you wish to ignore these samples instead, please run again "
"without the --include-ungrouped parameter. "
"Here are the UNGROUPED samples that we will consider as one big happy family: "
f"{missing_samples_str}")
# add those samples to the UNGROUPED group
ungrouped_samples = list(samples_missing_in_groups_txt)
for s in ungrouped_samples:
samples_to_groups_dict[s] = 'UNGROUPED'
if samples_missing_in_modules_txt:
missing_samples_str = ", ".join(samples_missing_in_modules_txt)
if not self.just_do_it:
self.progress.reset()
raise ConfigError(f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
"are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
"not be included in the enrichment analysis because, well, you don't have any module information for them. "
"If all of the missing samples belong to groups you don't care about at all, then feel free to ignore this "
"message and re-run using --just-do-it. But if you do care about those groups, you'd better fix this because "
"the enrichment results for those groups will be wrong. Here are the samples in question: "
f"{missing_samples_str}")
else:
self.progress.reset()
self.run.warning(f"Your modules-txt file ({self.sample_header_in_modules_txt} column) does not contain some samples that "
"are present in your groups-txt. This is not necessarily a huge deal, it's just that those samples will "
"not be included in the enrichment analysis because, well, you don't have any module information for them. "
"Since you have used the --just-do-it parameter, we assume you don't care about this and are going to keep "
"going anyway. We hope you know what you are doing :) Here are the samples in question: "
f"{missing_samples_str}")
# drop the samples that are not in modules-txt
for s in list(samples_missing_in_modules_txt):
samples_to_groups_dict.pop(s)
if anvio.DEBUG:
self.run.info("Samples remaining in groups-txt dataframe after removing ungrouped", ", ".join(samples_to_groups_dict.keys()))
modules_df.set_index(self.sample_header_in_modules_txt, inplace=True)
sample_groups_df = pd.DataFrame.from_dict(samples_to_groups_dict, orient="index", columns=['group'])
# convert modules mode output to enrichment input
N_values = sample_groups_df['group'].value_counts()
group_list = N_values.keys()
module_list = modules_df['kegg_module'].unique()
output_dict = {}
header_list = ['KEGG_MODULE', 'accession', 'sample_ids', 'associated_groups']
for c in group_list:
header_list.append(f"p_{c}")
header_list.append(f"N_{c}")
for mod_num in module_list:
query_string = f"kegg_module == '{mod_num}' and module_completeness >= {self.module_completion_threshold}"
samples_with_mod_df = modules_df.query(query_string)
if samples_with_mod_df.shape[0] == 0:
continue
# if we are working with module data from metagenomes, we may have multiple complete copies of the module in
# the same sample. We drop these duplicates before proceeding.
duplicates = samples_with_mod_df.index.duplicated()
samples_with_mod_df = samples_with_mod_df[~duplicates]
# we need to explicitly ignore samples without a group here, because they were taken out of sample_groups_df
# and if only ungrouped samples end up having this module, we will get an index error
samples_with_mod_list = list(samples_with_mod_df.index)
if not self.include_ungrouped:
for s in samples_with_none_group:
if s in samples_with_mod_list:
samples_with_mod_list.remove(s)
if len(samples_with_mod_list) == 0:
continue
mod_name = samples_with_mod_df['module_name'][0]
output_dict[mod_name] = {}
output_dict[mod_name]['KEGG_MODULE'] = mod_name
output_dict[mod_name]['accession'] = mod_num
output_dict[mod_name]['sample_ids'] = ','.join(samples_with_mod_list)
sample_group_subset = sample_groups_df.loc[samples_with_mod_list]
p_values = sample_group_subset['group'].value_counts()
# we need the categories p and N values to be in the same order for finding associated groups
p_vector = np.array([])
N_vector = np.array([])
for c in group_list:
if c not in p_values.index:
p_values[c] = 0
p_vector = np.append(p_vector, p_values[c]/N_values[c])
N_vector = np.append(N_vector, N_values[c])
# compute associated groups for functional enrichment
enriched_groups_vector = utils.get_enriched_groups(p_vector, N_vector)
associated_groups = [c for i,c in enumerate(group_list) if enriched_groups_vector[i]]
output_dict[mod_name]['associated_groups'] = ','.join(associated_groups)
for c in group_list:
output_dict[mod_name]["p_%s" % c] = p_values[c]/N_values[c]
output_dict[mod_name]["N_%s" % c] = N_values[c]
utils.store_dict_as_TAB_delimited_file(output_dict, output_file_path, key_header='accession', headers=header_list)
def run_enrichment_stats(self):
    """Drive the functional enrichment analysis for modules-mode data.

    Converts the modules-mode output into a temporary input file, runs the
    enrichment script on it, and returns the resulting statistics.
    """
    self.progress.new('Enrichment analysis')
    self.progress.update('Converting modules mode output into input for enrichment script')

    temp_input_path = filesnpaths.get_temp_file_path()
    if anvio.DEBUG:
        # surface the temp file location so a developer can inspect it later
        self.progress.reset()
        self.run.info("Temporary input file for enrichment script", temp_input_path)

    self.get_enrichment_input(temp_input_path)
    self.progress.end()

    # run the enrichment analysis on the file we just generated
    return utils.run_functional_enrichment_stats(temp_input_path,
                                                 self.output_file_path,
                                                 run=self.run,
                                                 progress=self.progress)
|
meren/anvio
|
anvio/kegg.py
|
Python
|
gpl-3.0
| 303,026
|
[
"VisIt"
] |
9e72f147d44b269cf7467cb35c5803e0ce91a6885fb077918d9dc42d601560d2
|
#!/usr/bin/python
from __future__ import print_function

import argparse
import glob
import os
import sys
from array import array

import numpy as np
import ROOT
from scipy.stats import skew
from scipy.stats import kurtosis as sc_kurt

from dicom_tools.make_histo import make_histo
# from dicom_tools.read_files import read_files
from dicom_tools.FileReader import FileReader
from dicom_tools.myroi2roi import myroi2roi
from dicom_tools.info_file_parser import info_file_parser
from dicom_tools.timeflagconverter import timeflagconverter_string2int
# from dicom_tools.getEntropy import getEntropy
# from dicom_tools.getEntropy import getEntropyCircleMask
from dicom_tools.getLayerWithLargerROI import getLayerWithLargerROI
from dicom_tools.make_histo_entropy import make_histo_entropy
from dicom_tools.getEntropy import getEntropyCircleMask
from dicom_tools.intensity_cut import intensity_cut
from dicom_tools.gaussianlaplace import GaussianLaplaceFilter
# needed by the --norm option: roiFileHandler was referenced in the main loop
# but never imported, causing a NameError when --norm was used
from dicom_tools.roiFileHandler import roiFileHandler
# Default output file name; can be overridden with --outfile.
outfname = "out.root"

# Command-line interface.
# NOTE: the positional argument keeps its original (misspelled) dest name
# "inputdirecotry" because args.inputdirecotry is read below; only the
# user-facing help strings are corrected here.
parser = argparse.ArgumentParser()
parser.add_argument("inputdirecotry", help="path of the input directory")
parser.add_argument("-v", "--verbose", help="increase output verbosity",
                    action="store_true")
parser.add_argument("-o", "--outfile", help="define output file name (default out.root)")
parser.add_argument("-jo", "--justone", help="limit the analysis to one subdirectory")
parser.add_argument("-ex", "--exclude", help="exclude one subdirectory from the analysis")
parser.add_argument("-n", "--norm", help="normalize to the mean defined in a myroi file",
                    action="store_true")
parser.add_argument("-ic", "--icut", help="cut intensity > Imax*icut", default=0, type=float)
parser.add_argument("-f", "--filter", help="apply gaussian laplace filter sigma=2.5pixels",
                    action="store_true")
parser.add_argument("-s", "--sigma", help="sigma of image filter", default=2.5, type=float)
parser.add_argument("-sc", "--scala", help="normalize to [0,1]",
                    action="store_true")

args = parser.parse_args()

if args.outfile:
    outfname = args.outfile
inputdir = args.inputdirecotry

if args.verbose:
    print("Verbose dicom_make_histo_indir.py \n")
# Output ROOT file and the C-style buffers the TTree branches will point at.
# ROOT's TTree requires pre-allocated fixed-type buffers, hence the
# array('i'/'f', [0]) pattern for scalars and fixed-size arrays per-slice.
outfile = ROOT.TFile(outfname, "RECREATE")
patientID = bytearray(64)          # patient identifier, stored as a C string branch
timeflag = array('i', [0])         # acquisition time flag (converted from info file)
nVoxel = array('i', [0])           # voxels inside the ROI
ypT = array('i', [0])              # pathology stage (from info2.txt)
ypT2 = array('i', [0])
mean = array('f', [0])             # first-order statistics of the intensity histogram
stdDev = array('f', [0])
skewness = array('f', [0])
kurtosis = array('f', [0])

# Entropy statistics are computed for odd window sides in
# [minEntropySide, maxEntropySide]; the *FM variants hold the same
# quantities restricted to the layer with the largest ROI ("fetta massima").
minEntropySide = 3
maxEntropySide = 21
thisEntropySide = {}
meanEntropy = {}
stdDevEntropy = {}
maxEntropy = {}
minEntropy = {}
skewnessEntropy = {}
kurtosisEntropy = {}
thisEntropySideFM = {}
meanEntropyFM = {}
stdDevEntropyFM = {}
maxEntropyFM = {}
minEntropyFM = {}
skewnessEntropyFM = {}
kurtosisEntropyFM = {}
hEntropies = {}
# FIX: range() replaces the Python-2-only xrange() so the script also runs
# under Python 3 (consistent with the __future__ print_function import).
for i in range(minEntropySide, maxEntropySide+1, 2):
    thisEntropySide[i] = array('i', [0])
    meanEntropy[i] = array('f', [0])
    stdDevEntropy[i] = array('f', [0])
    maxEntropy[i] = array('f', [0])
    minEntropy[i] = array('f', [0])
    skewnessEntropy[i] = array('f', [0])
    kurtosisEntropy[i] = array('f', [0])
    thisEntropySideFM[i] = array('i', [0])
    meanEntropyFM[i] = array('f', [0])
    stdDevEntropyFM[i] = array('f', [0])
    maxEntropyFM[i] = array('f', [0])
    minEntropyFM[i] = array('f', [0])
    skewnessEntropyFM[i] = array('f', [0])
    kurtosisEntropyFM[i] = array('f', [0])

# Per-slice ("per fetta", PF) quantities, stored as fixed-size arrays of
# at most nmax slices; nFette holds the number of valid entries.
nmax = 100
nFette = array('i', [0])
nVoxelPF = array('i', nmax*[0])
meanPF = array('f', nmax*[0])
stdDevPF = array('f', nmax*[0])
skewnessPF = array('f', nmax*[0])
kurtosisPF = array('f', nmax*[0])
#CV AR
fractalPF = array('f', nmax*[0])
fractalCPF = array('f', nmax*[0])
#CV gray-level co-occurrence matrix (GLCM) features, per direction
# (H/V) and diagonal (PQ/MQ): dissimilarity, correlation, energy,
# contrast, homogeneity.
dissHPF = array('f', nmax*[0])
corrHPF = array('f', nmax*[0])
enerHPF = array('f', nmax*[0])
contHPF = array('f', nmax*[0])
homoHPF = array('f', nmax*[0])
dissVPF = array('f', nmax*[0])
corrVPF = array('f', nmax*[0])
enerVPF = array('f', nmax*[0])
contVPF = array('f', nmax*[0])
homoVPF = array('f', nmax*[0])
dissPQPF = array('f', nmax*[0])
corrPQPF = array('f', nmax*[0])
enerPQPF = array('f', nmax*[0])
contPQPF = array('f', nmax*[0])
homoPQPF = array('f', nmax*[0])
dissMQPF = array('f', nmax*[0])
corrMQPF = array('f', nmax*[0])
enerMQPF = array('f', nmax*[0])
contMQPF = array('f', nmax*[0])
homoMQPF = array('f', nmax*[0])
# Output TTree: one entry per analysis directory, branches bound to the
# buffers allocated above.
tree = ROOT.TTree("analisi_T2","analisi_T2")
tree.Branch("patientID",patientID,"patientID/C")
tree.Branch("timeflag",timeflag,"timeflag/I")
tree.Branch("nVoxel",nVoxel,"nVoxel/I")
tree.Branch("ypT",ypT,"ypT/I")
tree.Branch("ypT2",ypT2,"ypT2/I")
tree.Branch("mean",mean,"mean/F")
tree.Branch("stdDev",stdDev,"stdDev/F")
tree.Branch("skewness",skewness,"skewness/F")
tree.Branch("kurtosis",kurtosis,"kurtosis/F")
# FIX: range() replaces the Python-2-only xrange().
# NOTE(review): no branches are created for skewnessEntropy/kurtosisEntropy
# (and FM counterparts) although the buffers exist and are filled below --
# kept as-is to preserve the output file schema.
for i in range(minEntropySide, maxEntropySide+1, 2):
    if args.verbose:
        print("creating branch for entropy with side",i)
    tree.Branch("thisEntropySide" +str(i), thisEntropySide[i], "thisEntropySide" +str(i)+"/I" )
    tree.Branch("meanEntropy" +str(i), meanEntropy[i], "meanEntropy" +str(i)+"/F" )
    tree.Branch("stdDevEntropy" +str(i), stdDevEntropy[i], "stdDevEntropy" +str(i)+"/F" )
    tree.Branch("maxEntropy" +str(i), maxEntropy[i], "maxEntropy" +str(i)+"/F" )
    tree.Branch("minEntropy" +str(i), minEntropy[i], "minEntropy" +str(i)+"/F" )
    tree.Branch("thisEntropySideFM" +str(i), thisEntropySideFM[i], "thisEntropySideFM" +str(i)+"/I" )
    tree.Branch("meanEntropyFM" +str(i), meanEntropyFM[i], "meanEntropyFM" +str(i)+"/F" )
    tree.Branch("stdDevEntropyFM" +str(i), stdDevEntropyFM[i], "stdDevEntropyFM" +str(i)+"/F" )
    tree.Branch("maxEntropyFM" +str(i), maxEntropyFM[i], "maxEntropyFM" +str(i)+"/F" )
    tree.Branch("minEntropyFM" +str(i), minEntropyFM[i], "minEntropyFM" +str(i)+"/F" )
# Variable-length per-slice branches, sized at read time by nFette.
tree.Branch("nFette",nFette,"nFette/I")
tree.Branch("nVoxelPF",nVoxelPF,"nVoxelPF[nFette]/I")
tree.Branch("meanPF",meanPF,"meanPF[nFette]/F")
tree.Branch("stdDevPF",stdDevPF,"stdDevPF[nFette]/F")
tree.Branch("skewnessPF",skewnessPF,"skewnessPF[nFette]/F")
tree.Branch("kurtosisPF",kurtosisPF,"kurtosisPF[nFette]/F")
#CV AR
tree.Branch("fractalPF",fractalPF,"fractalPF[nFette]/F")
tree.Branch("fractalCPF",fractalCPF,"fractalCPF[nFette]/F")
#CV GLCM feature branches
tree.Branch("dissHPF",dissHPF,"dissHPF[nFette]/F")
tree.Branch("corrHPF",corrHPF,"corrHPF[nFette]/F")
tree.Branch("enerHPF",enerHPF,"enerHPF[nFette]/F")
tree.Branch("contHPF",contHPF,"contHPF[nFette]/F")
tree.Branch("homoHPF",homoHPF,"homoHPF[nFette]/F")
tree.Branch("dissVPF",dissVPF,"dissVPF[nFette]/F")
tree.Branch("corrVPF",corrVPF,"corrVPF[nFette]/F")
tree.Branch("enerVPF",enerVPF,"enerVPF[nFette]/F")
tree.Branch("contVPF",contVPF,"contVPF[nFette]/F")
tree.Branch("homoVPF",homoVPF,"homoVPF[nFette]/F")
tree.Branch("dissPQPF",dissPQPF,"dissPQPF[nFette]/F")
tree.Branch("corrPQPF",corrPQPF,"corrPQPF[nFette]/F")
tree.Branch("enerPQPF",enerPQPF,"enerPQPF[nFette]/F")
tree.Branch("contPQPF",contPQPF,"contPQPF[nFette]/F")
tree.Branch("homoPQPF",homoPQPF,"homoPQPF[nFette]/F")
tree.Branch("dissMQPF",dissMQPF,"dissMQPF[nFette]/F")
tree.Branch("corrMQPF",corrMQPF,"corrMQPF[nFette]/F")
tree.Branch("enerMQPF",enerMQPF,"enerMQPF[nFette]/F")
tree.Branch("contMQPF",contMQPF,"contMQPF[nFette]/F")
tree.Branch("homoMQPF",homoMQPF,"homoMQPF[nFette]/F")
# Main analysis loop: one iteration per patient directory, one TTree entry
# per analysis subdirectory containing T2/ and ROI/ data.
patientdirs= glob.glob(inputdir+"*/")
if args.justone:
print("Looking for dir",args.justone)
if args.exclude:
print("Excluding dir",args.exclude)
for patientdir in patientdirs:
print(patientdir)
# optional include/exclude filters on the directory name
if args.justone:
if not args.justone in patientdir: continue
if args.exclude:
if args.exclude in patientdir: continue
# one entropy histogram per window side, named after the patient directory
# NOTE(review): xrange is Python-2-only; this script cannot run under Python 3 as-is.
for i in xrange(minEntropySide, maxEntropySide+1, 2):
hEntropies[i] = ROOT.TH1F("hEntropies"+str(i)+patientdir,"Entropies"+str(i)+patientdir,100,-0.5,5.5)
analasisysdirs=glob.glob(patientdir+"*/")
for analasisysdir in analasisysdirs:
print("\t",analasisysdir)
pathT2 = analasisysdir + "T2/"
nFette[0] = 0
# derive the patient ID from the directory name (strip dots)
if patientdir[-1]=='/':
patID = patientdir.split('/')[-2].replace('.','')
else:
patID = patientdir.split('/')[-1].replace('.','')
# copy the ID into the fixed-size branch buffer
# NOTE(review): bytearray slice-assignment from str and the str+bytearray
# concatenation below are Python-2-isms; both need .encode() under Python 3.
patientID[:63] = patID
print("working on patient: "+patientID)
print("in the directory: "+pathT2)
if os.path.isdir(pathT2):
print("T2 dir found.")
pathROI = analasisysdir + "ROI/"
if os.path.isdir(pathROI):
print("ROI dir found.")
# per-analysis metadata (time flag and pathology stages)
infos = info_file_parser(analasisysdir + "info2.txt")
timeflag[0] = timeflagconverter_string2int(infos["time"])
ypT[0] = int(infos["ypT"])
ypT2[0] = int(infos["ypT2"])
# # data, ROI = read_files(pathT2, pathROI, args.verbose, True)
freader = FileReader(pathT2, pathROI, args.verbose)
try:
data, ROI = freader.read(raw=True)
except NotImplementedError:
# fall back to GDCM for compressed/unsupported DICOM files
# data = freader.readUsingGDCM(raw=True)
dataTMP = freader.readUsingGDCM(raw=False)
data = dataTMP[:,:,:,0]
ROI = freader.readROI()
except ValueError:
continue
roinorm=False
if args.verbose:
print("dicom file read")
if args.norm:
# NOTE(review): roiFileHandler is not imported at the top of this file;
# this path raises NameError unless the import is added.
myroifilename = analasisysdir + "roinormmuscle.myroi"
roireader = roiFileHandler(args.verbose)
myroisnorm, roisnormSetted = roireader.read(myroifilename)
roinorm = myroi2roi(myroisnorm, data.shape, args.verbose)
if args.verbose:
print("norm file read")
if args.verbose:
print("data mean:",data.mean(),"min:",data.min(),"max:",data.max(),"shape:",data.shape)
print("ROI mean:",ROI.mean(),"min:",ROI.min(),"max:",ROI.max(),"shape:",ROI.shape)
# NOTE(review): "is not" compares identity, not value; this only works for
# small ints via CPython's int cache and should be "!=".
if len(ROI) is not len(data):
print("skipping this analysis len(data)",len(data),"len(ROI)",len(ROI))
continue
if args.icut:
print("appliyng an intensity cut",str(args.icut))
data = intensity_cut(data, ROI, args.icut, args.verbose)
if args.filter:
print("applying a Gaussian Laplace filter with a sigma of:",args.sigma)
data = GaussianLaplaceFilter(data, args.sigma, args.verbose)
# global (whole-volume) intensity histogram and first-order statistics
patientsuffix = patID + infos["time"]
his, allhistos, histogiafatti, histogclm = make_histo(data,ROI,patientsuffix,args.verbose,roinorm,args.norm,args.scala)
nVoxel[0] = int(his.GetEntries())
mean[0] = his.GetMean()
stdDev[0] = his.GetStdDev()
skewness[0] = his.GetSkewness()
kurtosis[0] = his.GetKurtosis()
if args.verbose:
print(patientID, timeflag[0], nVoxel[0], ypT[0], mean[0], stdDev[0], skewness[0], kurtosis[0])
his.Write()
# per-slice statistics: firstL is the index of the first non-empty slice
nlayer=0 #CV
firstL=0 #CV
count=0 #CV
for thishisto in allhistos:
nlayer=nlayer+1
if thishisto.GetEntries() >0:
if count==0:
firstL=nlayer-1
count=1
nVoxelPF[nFette[0]] = int(thishisto.GetEntries())
meanPF[nFette[0]] = thishisto.GetMean()
stdDevPF[nFette[0]] = thishisto.GetStdDev()
skewnessPF[nFette[0]] = thishisto.GetSkewness()
kurtosisPF[nFette[0]] = thishisto.GetKurtosis()
nFette[0] +=1
thishisto.Write()
for thishisto in histogiafatti:
thishisto.Write()
#CV AR
# NOTE(review): the block below reuses "thishisto" AFTER the loop, i.e. it
# only inspects the LAST histogram of histogiafatti; if the fractal
# histograms should each be scanned, this looks like a missing loop.
fettaMax=nFette[0]
scale=0
for n in range(0,nlayer):
if n<(firstL) or n>(firstL+fettaMax-1):
continue
if 'hfra' in thishisto.GetName() or 'hCfra' in thishisto.GetName():
if thishisto.GetBinContent(n)==0:
fettaMax = fettaMax + 1
scale = scale + 1
continue
if 'hfra' in thishisto.GetName() : fractalPF[n-firstL-scale] = thishisto.GetBinContent(n)
if 'hCfra' in thishisto.GetName() : fractalCPF[n-firstL-scale] = thishisto.GetBinContent(n)
if args.verbose:
print(patientID, nFette[0])
# layerMaxROI = getLayerWithLargerROI(ROI, args.verbose)
# for i in xrange(minEntropySide, maxEntropySide+1, 2):
# hisEntropy, allHisEntropy = make_histo_entropy(data, ROI, patientsuffix, i, None, args.verbose, roinorm, args.norm)
# thisEntropySide[i][0] = i
# meanEntropy[i][0] = hisEntropy.GetMean()
# stdDevEntropy[i][0] = hisEntropy.GetStdDev()
# maxEntropy[i][0] = hisEntropy.GetMaximum()
# minEntropy[i][0] = hisEntropy.GetMinimum()
# skewnessEntropy[i][0] = hisEntropy.GetSkewness()
# kurtosisEntropy[i][0] = hisEntropy.GetKurtosis()
# hisEntropy, allHisEntropy = make_histo_entropy(data, ROI, patientsuffix, i, layerMaxROI, args.verbose, roinorm, args.norm)
# thisEntropySideFM[i][0] = i
# meanEntropyFM[i][0] = hisEntropy.GetMean()
# stdDevEntropyFM[i][0] = hisEntropy.GetStdDev()
# maxEntropyFM[i][0] = hisEntropy.GetMaximum()
# minEntropyFM[i][0] = hisEntropy.GetMinimum()
# skewnessEntropyFM[i][0] = hisEntropy.GetSkewness()
# kurtosisEntropyFM[i][0] = hisEntropy.GetKurtosis()
# entropy over all layers, one pass per window side
for layer in xrange(0, len(data)):
if args.verbose:
print("working on entropies from",minEntropySide,"to",maxEntropySide, "layer", layer)
for i in xrange(minEntropySide, maxEntropySide+1, 2):
entropyImg = getEntropyCircleMask(data[layer], ROI[layer], i)
nonZeroEntropy= entropyImg[np.nonzero( ROI[layer] )]
for val in nonZeroEntropy:
hEntropies[i].Fill(val)
outfile.cd()
hEntropies[i].Write()
if args.verbose:
print("hEntropies["+str(i)+"].Write()")
# NOTE(review): this rebinds the dict entry from the array('i') buffer to a
# plain int, so the TTree branch still points at the stale buffer; it was
# probably meant to be thisEntropySide[i][0] = i (as in the FM block and the
# commented-out code above).
thisEntropySide[i] = i
if nonZeroEntropy.any():
meanEntropy[i][0] = np.mean(nonZeroEntropy)
stdDevEntropy[i][0] = np.std(nonZeroEntropy)
maxEntropy[i][0] = np.max(nonZeroEntropy)
minEntropy[i][0] = np.min(nonZeroEntropy[np.nonzero(nonZeroEntropy)])
skewnessEntropy[i][0] = skew(nonZeroEntropy)
kurtosisEntropy[i][0] = sc_kurt(nonZeroEntropy)
if args.verbose:
print("entropy results:",np.mean(nonZeroEntropy),np.std(nonZeroEntropy),np.max(nonZeroEntropy),np.min(nonZeroEntropy))
print("data stored:",meanEntropy[i][0],stdDevEntropy[i][0],maxEntropy[i][0],minEntropy[i][0])
else:
# NOTE(review): suspected copy-paste bug -- this branch writes the *FM
# buffers (mean/stdDev/max/minEntropyFM) although the non-FM quantities
# are being computed here.
meanEntropyFM[i][0] = -1
stdDevEntropyFM[i][0] = -1
maxEntropyFM[i][0] = -1
minEntropyFM[i][0] = -1
skewnessEntropy[i][0] = -1
kurtosisEntropy[i][0] = -1
# same entropy statistics, restricted to the layer with the largest ROI (FM)
layerMaxROI = getLayerWithLargerROI(ROI, args.verbose)
if args.verbose:
print("working on entropies from",minEntropySide,"to",maxEntropySide)
for i in xrange(minEntropySide, maxEntropySide+1, 2):
entropyImg = getEntropyCircleMask(data[layerMaxROI], ROI[layerMaxROI], i)
nonZeroEntropy= entropyImg[np.nonzero( ROI[layerMaxROI] )]
# NOTE(review): same rebinding issue as above (should likely be [i][0]).
thisEntropySide[i] = i
if nonZeroEntropy.any():
meanEntropyFM[i][0] = np.mean(nonZeroEntropy)
stdDevEntropyFM[i][0] = np.std(nonZeroEntropy)
maxEntropyFM[i][0] = np.max(nonZeroEntropy)
minEntropyFM[i][0] = np.min(nonZeroEntropy[np.nonzero(nonZeroEntropy)])
skewnessEntropyFM[i][0] = skew(nonZeroEntropy)
kurtosisEntropyFM[i][0] = sc_kurt(nonZeroEntropy)
if args.verbose:
print("entropy results:",np.mean(nonZeroEntropy),np.std(nonZeroEntropy),np.max(nonZeroEntropy),np.min(nonZeroEntropy))
print("data stored:",meanEntropyFM[i][0],stdDevEntropyFM[i][0],maxEntropyFM[i][0],minEntropyFM[i][0])
else:
meanEntropyFM[i][0] = -1
stdDevEntropyFM[i][0] = -1
maxEntropyFM[i][0] = -1
minEntropyFM[i][0] = -1
skewnessEntropyFM[i][0] = -1
kurtosisEntropyFM[i][0] = -1
#CV gclm parameters
# copy GLCM feature histograms into the per-slice branch buffers
for k in range(0,len(histogclm)):
thishisto = histogclm[k]
thishisto.Write()
for n in range(0,nlayer):
if n<(firstL) or n>(firstL+nFette[0]-1):
continue
if 'dissH' in thishisto.GetName(): dissHPF[n-firstL] = thishisto.GetBinContent(n)
if 'corrH' in thishisto.GetName(): corrHPF[n-firstL] = thishisto.GetBinContent(n)
if 'enerH' in thishisto.GetName(): enerHPF[n-firstL] = thishisto.GetBinContent(n)
if 'contH' in thishisto.GetName(): contHPF[n-firstL] = thishisto.GetBinContent(n)
if 'homoH' in thishisto.GetName(): homoHPF[n-firstL] = thishisto.GetBinContent(n)
if 'dissV' in thishisto.GetName(): dissVPF[n-firstL] = thishisto.GetBinContent(n)
if 'corrV' in thishisto.GetName(): corrVPF[n-firstL] = thishisto.GetBinContent(n)
if 'enerV' in thishisto.GetName(): enerVPF[n-firstL] = thishisto.GetBinContent(n)
if 'contV' in thishisto.GetName(): contVPF[n-firstL] = thishisto.GetBinContent(n)
if 'homoV' in thishisto.GetName(): homoVPF[n-firstL] = thishisto.GetBinContent(n)
if 'dissPQ' in thishisto.GetName(): dissPQPF[n-firstL] = thishisto.GetBinContent(n)
if 'corrPQ' in thishisto.GetName(): corrPQPF[n-firstL] = thishisto.GetBinContent(n)
if 'enerPQ' in thishisto.GetName(): enerPQPF[n-firstL] = thishisto.GetBinContent(n)
if 'contPQ' in thishisto.GetName(): contPQPF[n-firstL] = thishisto.GetBinContent(n)
if 'homoPQ' in thishisto.GetName(): homoPQPF[n-firstL] = thishisto.GetBinContent(n)
if 'dissMQ' in thishisto.GetName(): dissMQPF[n-firstL] = thishisto.GetBinContent(n)
if 'corrMQ' in thishisto.GetName(): corrMQPF[n-firstL] = thishisto.GetBinContent(n)
if 'enerMQ' in thishisto.GetName(): enerMQPF[n-firstL] = thishisto.GetBinContent(n)
if 'contMQ' in thishisto.GetName(): contMQPF[n-firstL] = thishisto.GetBinContent(n)
if 'homoMQ' in thishisto.GetName(): homoMQPF[n-firstL] = thishisto.GetBinContent(n)
if args.verbose:
print("Filling the TTree")
tree.Fill()
if args.verbose:
print("Writing the TTree")
tree.Write()
# outfile.Write()
outfile.Close()
|
carlomt/dicom_tools
|
bin/dicom_make_histo_indir.py
|
Python
|
mit
| 19,988
|
[
"Gaussian"
] |
0cb4e039485d74630c17bc6f19f604996f709b82c1bce6d5ca6fe7a5609f5e3a
|
'''
Random Field Theory expectations and probabilities.
The core RFT computations are conducted inside **prob.rft**, and the
**RFTCalculator** class serves as a high-level interface to **prob.rft**
'''
# Copyright (C) 2015 Todd Pataky
# version: 0.1.3 (2015/12/27)
from math import pi,log,sqrt,exp
import numpy as np
from scipy import stats,optimize
from scipy.special import gammaln,gamma
import geom
# CONSTANTS:
FOUR_LOG2 = 4*log(2)         # 4*ln(2): relates FWHM to Gaussian kernel variance
SQRT_4LOG2 = sqrt(4*log(2))
SQRT_2 = sqrt(2)
TWO_PI = 2*pi
# FIX: np.float was deprecated in NumPy 1.20 and later removed; the builtin
# float is the exact equivalent (both are C double precision here).
eps = np.finfo(float).eps    # machine epsilon, used for numerical stability
def p_bonferroni(STAT, z, df, Q, n=1):
'''
Bonferroni correction.
When fields are very rough a Bonferroni correction might be less severe than
the RFT threshold. This function yields Bonferroni-corrected p values based
on the number of field nodes *Q*.
:Parameters:
*STAT* --- test statistic (one of: "Z", "T", "F", "X2", "T2")
*z* --- field height
*df* --- degrees of freedom [df{interest} df{error}]
*Q* --- number of field nodes (used for Bonferroni comparison)
*n* --- number of test statistic fields in conjunction
:Returns:
The probability of exceeding the specified height.
:Example:
>>> rft1d.prob.p_bonferroni('Z', 3.1, None, 101) #yields 0.098
'''
if STAT=='Z':
p = stats.norm.sf(z)
if STAT=='T':
p = stats.t.sf(z, df[1])
elif STAT=='F':
p = stats.f.sf(z, df[0], df[1])
elif STAT=='X2':
p = stats.chi2.sf(z, df[1])
elif STAT=='T2':
p,m = map(float,df)
v0,v1 = p, m - p + 1
zz = z * ( (m-p+1)/(p*m) )
p = stats.f.sf(zz, v0, v1)
p = Q * (p**n)
return min(p, 1)
def _replaceWithBonferroniIfPossible(STAT, P, c, csize, z, df, Q, n=1):
if (csize is None) or (z is None) or (Q is None) or (n is None):
return P
if (c==1) & (csize==0) :
Pbonf = p_bonferroni(STAT, z, df, Q, n)
P = min(P, Pbonf)
return P
def _replaceWith0DpValueIfPossible(STAT, P, c, csize, z, df, Q, n=1):
if (c>1) or (csize>0):
return P
p = p_bonferroni(STAT, z, df, 1, n)
if p>P:
return p
else:
return P
def ec_density_Z(z):
    """Expected Euler characteristic densities (0D and 1D) for a Gaussian field."""
    survival = 1 - stats.norm.cdf(z)                     # 0D: P(Z > z)
    density_1d = SQRT_4LOG2 / TWO_PI * exp(-0.5 * z**2)  # 1D EC density
    return [survival, density_1d]
def ec_density_T(z, df):
    '''
    Expected Euler characteristic densities (0D and 1D) for a t field.
    Reference: Worsley KJ et al. (1996) Hum Brain Mapp 4:58-73
    Reference: Worsley KJ et al. (2004) [Eqn.2 and Table 2]
    '''
    v = float(df[1])  # error degrees of freedom
    a = FOUR_LOG2
    # NOTE: the gamma-function ratio exp(gammaln((v+1)/2) - gammaln(v/2)) that
    # appears in higher-dimensional EC densities cancels in the 1D term, so the
    # previously computed (and unused) local has been removed.
    c = (1+z**2/v)**((1-v)/2)
    EC = []
    EC.append( 1 - stats.t.cdf(z,v) ) #dim: 0
    EC.append( a**0.5 / TWO_PI * c )  #dim: 1
    return EC
def ec_density_F(z, df):
    """Expected Euler characteristic densities (0D and 1D) for an F field."""
    if z < 0:
        # negative thresholds cannot occur for F; this bypasses warnings
        # during the critical-threshold root search
        return [1, np.inf]
    k, v = float(df[0]), float(df[1])
    k = max(k, 1.0)  # stats.f.cdf returns nan when k is less than 1
    a = FOUR_LOG2 / TWO_PI
    b = gammaln(v/2) + gammaln(k/2)
    ec0 = 1 - stats.f.cdf(z, k, v)
    ec1 = a**0.5 * np.exp(gammaln((v+k-1)/2)-b)*2**0.5 *(k*z/v)**(0.5*(k-1))*(1+k*z/v)**(-0.5*(v+k-2))
    return [ec0, ec1]
def ec_density_X2(z, df):
    """Expected Euler characteristic densities (0D and 1D) for a chi-squared field."""
    v = float(df[1])
    a = FOUR_LOG2 / TWO_PI
    b = z ** ((v-1)/2) * np.exp(-z/2 -gammaln(v/2)) / (2**((v-2)/2))
    ec0 = 1 - stats.chi2.cdf(z,v)  # 0D: survival function
    ec1 = a**0.5 * b               # 1D EC density
    return [ec0, ec1]
def ec_density(STAT, z, df):
    """Dispatch to the EC-density routine for the given test statistic."""
    if STAT == 'Z':
        return ec_density_Z(z)
    elif STAT == 'T':
        return ec_density_T(z, df)
    elif STAT == 'F':
        return ec_density_F(z, df)
    elif STAT == 'X2':
        return ec_density_X2(z, df)
    elif STAT == 'T2':
        # convert T2 to an equivalent F statistic, then reuse the F densities
        p, m = map(float, df)
        df_F = p, m - p + 1
        zz = z * ((m - p + 1) / (p * m))
        return ec_density_F(zz, df_F)
    raise ValueError('Statistic must be one of: ["Z", "T", "X2", "F", "T2"]')
def poisson_cdf(a, b):
    """Poisson CDF that returns 0 for non-positive rates (matches spm8)."""
    # stats.poisson.cdf is not defined for b<=0; spm8 treats that case as 0
    return 0.0 if b <= 0 else stats.poisson.cdf(a, b)
def rft(c, k, STAT, Z, df, R, n=1, Q=None, expectations_only=False, version='spm12'):
'''
Random Field Theory probabilities and expectations using unified Euler Characteristic (EC) theory.
This code is based on "spm_P_RF.m" and "spm_P.m" from the spm8 and spm12 Matlab packages
which are available from: http://www.fil.ion.ucl.ac.uk/spm/
:Parameters:
*c* --- number of clusters
*k* --- cluster extent (resels)
*STAT* --- test statistic (one of: "Z", "T", "F", "X2", "T2")
*Z* --- field height
*df* --- degrees of freedom [df{interest} df{error}]
*R* --- resel counts (0D counts, 1D counts) defining search volume
*n* --- number of test statistic fields in conjunction
*Q* --- number of field nodes (used for Bonferroni comparison)
*expectations_only* --- if True only expectations will be returned
*version* --- "spm8" or "spm12" (see below)
:Returns:
*P* --- corrected P value
*p* --- uncorrected P value
*Ec* --- expected number of upcrossings {c}
*Ek* --- expected resels per upcrossing {k}
*EN* --- expected excursion set resels
NOTE! If expectations_only==True, then only (Ec,Ek,EN) are returned.
:Examples:
>>> P,p,Ec,Ek,EN = rft1d.prob.rft(1, 0, 'T', 2.1, [1,8], [1,10])
:Notes:
1. The spm8 and spm12 Matlab functions on which this code is based were
developed by K.Friston and other members of the Wellcome Trust Centre for
Neuroimaging. This function makes minor modifications to those procedures
to take advantage of the simplicity of the 1D case.
2. Results for the spm8 and spm12 versions can be obtained via the
keyword "version". When expected ECs approach zero, the spm8 and spm12
results will diverge slightly, due to a minor modification in spm12:
In spm8: "EC = EC + eps".
In spm12: "EC = np.array([max(ec,eps) for ec in EC])"
3. Setting *c* and *k* in particular manners will yield important
probabilities. Consider these three cases:
(a) rft(1, 0, STAT, Z, R) --- field maximum
(b) rft(1, k, STAT, u, R) --- cluster-based inference
(c) rft(c, k, STAT, u, R) --- set-based inference
(a) is the probability that Gaussian fields will produce 1 upcrossing
with an extent of 0. Thus this pertains to the maximum of height of
Gaussian fields, and can be used, for example, for critical threshold
computations.
(b) is the probability that Gaussian fields, when thresholded at *u*,
will produce 1 upcrossing with an extent of *k*. This is used for
cluster-level inference (i.e. p values for individual upcrossings).
(c) is the probability that Gaussian fields, when thresholded at *u*,
will produce *c* upcrossings with a minimum extent of *k*. This is
used for set-level inference (i.e. p values for the entire result).
.. warning:: Set-based inference (c) is more powerful than cluster-based inference (b), but unlike (b) it has no localizing information; it is a global p value pertaining to the entire excursion set en masse. It will thus always be lower than (b).
4. If Q==None, then no Bonferroni check is made. If Q!=None, the RFT
correction will be compared to Bonferroni correction, and the less
severe correction will be returned. This will only have an effect
for very rough fields, for example: when then second resel count
approaches 0.5*Q.
:References:
1. Hasofer AM (1978) Upcrossings of random fields. Suppl Adv Appl
Prob 10:14-21.
2. Friston KJ et al (1994) Assessing the significance of focal
activations using their spatial extent. Human Brain Mapping 1:
210-220.
3. Worsley KJ et al (1996) A unified statistical approach for
determining significant signals in images of cerebral
activation. Human Brain Mapping 4:58-73.
'''
# coerce the numeric inputs to float (ndarrays kept as float arrays)
c = _as_float(c)
k = _as_float(k)
Z = _as_float(Z)
D = float(len(R)) #dimensionality
if R[1]==0: #infinitely smooth field
R = R[0], eps #to make the results numerically stable
R = np.asarray(R, dtype=float)
# EC densities for this statistic at height Z, clamped away from zero
# (spm8 and spm12 clamp differently; see Note 2 in the docstring)
EC = ec_density(STAT, Z, df)
if version=='spm8':
EC = EC + eps
elif version=='spm12':
EC = np.array([max(ec,eps) for ec in EC])
else:
raise( ValueError('rft1d error: unknown version "%s" (version must be "spm8" or "spm12")'%str(version)) )
if n==1: #take a shortcut (Edit TCP 2014.08.11) -- about 9 times faster than the fast version below
EM = R*EC
EN = EC[0]*R[-1]
else:
### SLOW CODE -- but useful for D>1 (following spm8)
# P = np.linalg.matrix_power( np.triu(linalg.toeplitz(EC*G)), n )
# P = P[0,]
### FASTER CODE (Edit TCP 2013.12.02) -- in 1D case this is about 25 times faster than using np.linalg.matrix_power
# a,b = EC*G
# P = a**n, n*b*a**(n-1)
G = sqrt(pi) / (gamma(0.5*np.arange(1,D+1)))
a,b = EC*G
P = a**n, n*b*a**(n-1)
EM = R/G*P
EN = P[0]*R[-1]
### expected maxima and resels per cluster:
Ec = EM.sum() #previously "Em"
Ek = EN/EM[-1] #previously "En"
if expectations_only:
return Ec,Ek,EN
### compute probabilities: first P{n>k}
# drop to the (D-1)-dimensional extent distribution for cluster extents
D -= 1
if (k==0) or (D==0):
p = 1.0
else:
beta = (gamma(0.5*D+1)/Ek) **(2/D)
p = np.exp( -beta*(k**(2/D)) )
#Poisson clumping heuristic (for multiple clusters)
if p==0:
P = 0
else:
P = 1 - poisson_cdf(c-1, (Ec + eps)*p)
#Non-implemented cases:
if version=='spm8': #non-implemented flags are removed in spm12; rft1d validates all cases (see ./rft1d/examples/val*)
if STAT in ['T','X2']:
if (k>0) and (n>1):
P,p = None, None
elif STAT=='F':
if k>0:
P,p = None, None
# optionally soften the correction via Bonferroni, and never report a
# corrected p smaller than the uncorrected 0D p value
P = _replaceWithBonferroniIfPossible(STAT, P, c, k, Z, df, Q, n)
P = _replaceWith0DpValueIfPossible(STAT, P, c, k, Z, df, Q, n)
return P, p, Ec, Ek, EN
################################
# Crtical threshold computations
################################
def _approx_threshold(STAT, alpha, df, resels, n):
# if two_tailed:
# alpha = 0.5*alpha
a = (alpha/sum(resels))**(1.0/n)
if STAT=='Z':
zstar = stats.norm.isf(a)
elif STAT=='T':
zstar = stats.t.isf(a, df[1])
elif STAT=='X2':
zstar = stats.chi2.isf(a, df[1])
elif STAT=='F':
zstar = stats.f.isf(a, df[0], df[1])
elif STAT=='T2':
p,m = map(float,df)
df_F = p, m - p + 1
fstar = stats.f.isf(a, df_F[0], df_F[1])
zstar = fstar / ( (m-p+1)/(p*m) )
else:
raise(ValueError, 'Statistic must be one of: "Z", "T", "X2", "F", "T2"')
return zstar
def isf(STAT, alpha, df, resels, n, Q=None, version='spm12'):
    '''
    Inverse survival function: critical threshold(s) for the given alpha(s).
    '''
    if isinstance(alpha, (int,float)):
        alpha = [alpha]

    def _solve_one(a):
        # minimize the squared difference between the RFT p value and alpha,
        # starting from the closed-form parametric approximation
        z0 = _approx_threshold(STAT, a, df, resels, n)
        objective = lambda x: (rft(1, 0, STAT, x[0], df, resels, n, Q, False, version)[0] - a)**2
        return optimize.fmin(objective, z0, xtol=1e-9, disp=0)[0]

    return np.asarray([_solve_one(a) for a in alpha])
################################
# Convienence classes
################################
def _as_float(x):
if isinstance(x, (int,float)):
x = float(x)
elif isinstance(x, np.ndarray):
x = np.asarray(x, dtype=float)
return x
def _float_if_possible(x):
if isinstance(x, np.ndarray):
if x.size==1:
return float(x)
else:
return x
else:
return x
class _Expected(object):
    """Namespace object giving an RFTCalculator access to RFT expectations."""

    def __init__(self, calc):
        self._calc = calc  # parent calculator (provides FWHM and _get_all)

    def nodes_per_upcrossing(self, u):
        '''
        Number of nodes expected for each upcrossing at threshold *u*.
        :Example:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.nodes_per_upcrossing(2.7)
        .. warning:: This is a node count, so is equivalent to: (FWHM x **resels_per_upcrossing**) + 1
        '''
        node_count = self._calc.FWHM * self.resels_per_upcrossing(u) + 1
        return _float_if_possible(node_count)

    def number_of_upcrossings(self, u):
        '''
        Number of upcrossings expected for threshold *u*.
        :Example:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.number_of_upcrossings(2.7)
        '''
        # column 0 of the expectations table holds Ec
        return _float_if_possible(self._calc._get_all(u, expectations_only=True)[:,0])

    def number_of_suprathreshold_nodes(self, u):
        '''
        Number of nodes expected in the entire excursion set at threshold *u*.
        These nodes can come from multiple upcrossings.
        :Example:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.number_of_suprathreshold_nodes(2.8)
        .. warning:: This is a node count, so is equivalent to: (FWHM x **number_of_suprathreshold_resels**) + **number_of_upcrossings**
        '''
        resels = self.number_of_suprathreshold_resels(u)
        return self._calc.FWHM * resels + self.number_of_upcrossings(u)

    def number_of_suprathreshold_resels(self, u):
        '''
        Number of resels expected in the entire excursion set at threshold *u*.
        These resels can come from multiple upcrossings.
        One resel contains (1 x FWHM) nodes.
        :Example:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.number_of_suprathreshold_resels(2.9)
        '''
        # column 2 of the expectations table holds EN
        return _float_if_possible(self._calc._get_all(u, expectations_only=True)[:,2])

    def resels_per_upcrossing(self, u):
        '''
        Number of resels expected for each upcrossing at threshold *u*.
        One resel contains (1 x FWHM) nodes.
        :Example:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.resels_per_upcrossing(3.0)
        '''
        # column 1 of the expectations table holds Ek
        return _float_if_possible(self._calc._get_all(u, expectations_only=True)[:,1])
class _Probability(object):
    """Namespace object giving an RFTCalculator access to RFT probabilities."""

    def __init__(self, calc):
        self._calc = calc  # parent calculator (holds STAT, df, resels, n, Q, version)

    def _corrected_p(self, c, k, u):
        # single funnel into the module-level rft() routine; returns the
        # corrected P value (first element of the rft tuple)
        calc = self._calc
        return rft(c, k, calc.STAT, u, calc.df, calc.resels, calc.n,
                   calc.Q, False, calc.version)[0]

    def cluster(self, k, u):
        '''
        Cluster-level inference.
        Probability that 1D Gaussian fields would produce an upcrossing of extent *k*
        when thresholded at *u*.
        .. warning:: The threshold *u* should generally be chosen objectively. One possibility is to calculate the *alpha*-based critical threshold using the inverse survival function: **RFTCalculator.isf**
        :Parameters:
        *k* -- cluster extent (resels)
        *u* -- threshold
        :Returns:
        Cluster-specific probability value.
        :Examples:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.p.cluster(0.1, 3.0)
        '''
        return self._corrected_p(1, k, u)

    def set(self, c, k, u):
        '''
        Set-level inference.
        Probability that 1D Gaussian fields would produce at least *c* upcrossings
        with a minimum extent of *k* when thresholded at *u*.
        This probability pertains to the entire excursion set.
        .. warning:: The threshold *u* should generally be chosen objectively. One possibility is to calculate the *alpha*-based critical threshold using the inverse survival function: **RFTCalculator.isf**
        :Parameters:
        *c* -- number of upcrossings
        *k* -- minimum cluster extent (resels)
        *u* -- threshold
        :Returns:
        Set-specific probability value.
        :Examples:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.p.set(2, 0.1, 2.7)
        '''
        return self._corrected_p(c, k, u)

    def upcrossing(self, u):
        '''
        Survival function (equivalent to **RFTCalculator.sf**)
        Probability that 1D Gaussian fields would produce a 1D statistic field whose maximum exceeds *u*.
        :Parameters:
        *u* -- threshold (int, float, or sequence of int or float)
        :Returns:
        The probability of exceeding the specified heights.
        :Examples:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.sf(3.5)
        '''
        return _float_if_possible(self._calc._get_all(u)[:,0])
class RFTCalculator(object):
    '''
    A convenience class for high-level access to RFT probabilities.

    :Parameters:
        *STAT* --- test statistic (one of: "Z", "T", "F", "X2", "T2")
        *df* --- degrees of freedom [df{interest} df{error}]
        *nodes* --- number of field nodes (int) OR a binary field (boolean array)
        *FWHM* --- field smoothness (float)
        *n* --- number of test statistic fields in conjunction
        *withBonf* --- use a Bonferroni correction if less severe than the RFT correction
        *version* --- "spm8" or "spm12" (see below)
    :Returns:
        An instance of the RFTCalculator class.
    :Attributes:
        *expected* --- access to RFT expectations
        *p* --- access to RFT probabilities
    :Methods:
        *isf* --- inverse survival function
        *sf* --- survival function
    :Examples:
        >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
        >>> calc.expected.number_of_upcrossings(1.0) #yields 1.343
        >>> calc.expected.number_of_upcrossings(4.5) #yields 0.0223
    '''
    def __init__(self, STAT='Z', df=None, nodes=101, FWHM=10.0, n=1, withBonf=False, version='spm12'):
        self.FWHM = None      #field smoothness; set by set_fwhm() below
        self.Q = None         #node count used for the Bonferroni correction (None if disabled)
        self.STAT = STAT
        self.df = df
        self.mask = None      #inverted boolean field (True marks excluded nodes); set when *nodes* is a mask
        self.nNodes = None
        self.n = n
        self.resels = None    #resel counts; set by set_fwhm() below
        #BUG FIX: the *version* argument was previously ignored (hard-coded to 'spm12')
        self.version = str(version)
        self.withBonf = None
        self._parse_nodes_argument(nodes)
        self.set_fwhm(FWHM)
        self.set_bonf(withBonf)
        self.expected = _Expected(self)
        self.p = _Probability(self)

    def __repr__(self):
        # Human-readable summary of the calculator configuration.
        s = ''
        s += 'RFT1D RFTCalculator object:\n'
        s += ' STAT : %s\n' %self.STAT
        s += ' df : %s\n' %str(self.df)
        s += ' nNodes : %d\n' %self.nNodes
        s += ' FWHM : %.1f\n' %self.FWHM
        s += ' withBonf : %s\n' %self.withBonf
        return s

    def _get_all(self, u, expectations_only=False):
        # Evaluate the low-level rft() routine for each threshold in *u*;
        # returns one row per threshold with the full set of rft() outputs.
        if isinstance(u, (int,float)):
            u = [u]
        return np.array([rft(1, 0, self.STAT, uu, self.df, self.resels, self.n, self.Q, expectations_only, self.version) for uu in u])

    def _parse_nodes_argument(self, nodes):
        # Accept an integer node count or a 1D boolean field; for a boolean
        # field, False entries mark nodes excluded from the analysis.
        if isinstance(nodes, int):
            self.nNodes = nodes
        elif np.ma.is_mask(nodes):
            if nodes.ndim != 1:
                #BUG FIX: this branch previously referenced an undefined name ("arg.ndim")
                raise( ValueError('RFT1D Error: the "nodes" argument must be a 1D boolean array. Received a %dD array'%nodes.ndim) )
            self.nNodes = nodes.size
            self.mask = np.logical_not(nodes)
        else:
            raise( ValueError('RFT1D Error: the "nodes" argument must be an integer or a 1D boolean array') )

    def isf(self, alpha):
        '''
        Inverse survival function.
        (see also the survival function: **RFTCalculator.sf**)

        :Parameters:
            *alpha* -- upper tail probability (float; 0 < alpha < 1)
        :Returns:
            Quantile corresponding to upper-tail probability alpha.
            Equivalently: critical threshold at a Type I error rate of alpha.
        :Examples:
            >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
            >>> calc.isf(0.05)
        '''
        x = isf(self.STAT, alpha, self.df, self.resels, self.n, self.Q, self.version)
        return _float_if_possible(x)

    def set_bonf(self, wBonf):
        # Enable/disable the Bonferroni correction; Q is the node count the
        # correction is based on (None disables it in rft()).
        self.withBonf = bool(wBonf)
        self.Q = float(self.nNodes) if self.withBonf else None

    def set_fwhm(self, w):
        # Update the smoothness and the derived resel counts.
        self.FWHM = float(w)
        if self.mask is None:
            self.resels = 1, (self.nNodes-1)/self.FWHM #field length is (nNodes - 1)
        else:
            self.resels = geom.resel_counts(self.mask, fwhm=self.FWHM)

    def sf(self, u):
        '''
        Survival function.
        (Equivalent to **RFTCalculator.p.upcrossing**)

        Probability that 1D Gaussian fields with a smoothness *FWHM* would produce a 1D statistic field whose maximum exceeds *u*.

        :Parameters:
            *u* -- threshold (int, float, or sequence of int or float)
        :Returns:
            The probability of exceeding the specified heights.
        :Examples:
            >>> calc = rft1d.prob.RFTCalculator('T', (1,8), 101, 15.0)
            >>> calc.sf(3.5)
        '''
        return _float_if_possible( self.p.upcrossing(u) )
class RFTCalculatorResels(RFTCalculator):
    '''
    A convenience class for high-level access to RFT probabilities (based on resel counts).

    :Parameters:
        *STAT* --- test statistic (one of: "Z", "T", "F", "X2", "T2")
        *df* --- degrees of freedom [df{interest} df{error}]
        *resels* --- resolution element counts
        *n* --- number of test statistic fields in conjunction
        *withBonf* --- use a Bonferroni correction if less severe than the RFT correction
        *nNodes* --- number of field nodes (int) (must be specified if "withBonf" is True)
        *version* --- "spm8" or "spm12" (see below)
    :Returns:
        An instance of the RFTCalculatorResels class.
    :Attributes:
        *expected* --- access to RFT expectations
        *p* --- access to RFT probabilities
    :Methods:
        *isf* --- inverse survival function
        *sf* --- survival function
    :Examples:
        >>> calc = rft1d.prob.RFTCalculatorResels('T', (1,8), [1, 6.667])
        >>> calc.expected.number_of_upcrossings(1.0) #yields 1.343
        >>> calc.expected.number_of_upcrossings(4.5) #yields 0.0223
    '''
    #NOTE: default *resels* changed from a mutable list [1,10] to the
    # equivalent immutable tuple (it was only ever read and tuple()'d).
    def __init__(self, STAT='Z', df=None, resels=(1,10), n=1, withBonf=False, nNodes=None, version='spm12'):
        self.FWHM = None      #not applicable when resel counts are given directly
        self.Q = None
        self.STAT = STAT
        self.df = df
        self.mask = None
        self.nNodes = nNodes
        self.n = n
        self.resels = tuple(resels)
        #BUG FIX: the *version* argument was previously ignored (hard-coded to 'spm12')
        self.version = str(version)
        self.withBonf = None
        self.set_bonf(withBonf)
        self.expected = _Expected(self)
        self.p = _Probability(self)

    def __repr__(self):
        s = ''
        s += 'RFT1D RFTCalculatorResels object:\n'
        s += ' STAT : %s\n' %self.STAT
        s += ' df : %s\n' %str(self.df)
        s += ' resels : (%d, %.3f)\n' %self.resels
        #BUG FIX: FWHM is always None for this class, so the original
        # '%.1f' format raised TypeError; use '%s' instead.
        s += ' FWHM : %s\n' %self.FWHM
        s += ' withBonf : %s\n' %self.withBonf
        return s

    def set_bonf(self, wBonf):
        # The Bonferroni correction needs the node count, which is optional
        # for this class; fail loudly if it is missing.
        self.withBonf = bool(wBonf)
        if self.withBonf and (self.nNodes is None):
            raise( ValueError('Must specify an integer value for "nNodes" when "withBonf" is True.') )
        self.Q = float(self.nNodes) if self.withBonf else None
# Module-level instances created solely so auto-documentation tools can
# introspect the attribute-access API; not intended for direct use.
rftcalc = RFTCalculator() #instantiated only for auto-doc generation
expected = _Expected(None) #instantiated only for auto-doc generation
p = _Probability(None) #instantiated only for auto-doc generation
|
0todd0000/rft1d
|
rft1d/prob.py
|
Python
|
gpl-3.0
| 21,872
|
[
"Gaussian"
] |
7c1e60957680e97e646b5d0d306994040ec14079225403c3865899ab2e0b28a8
|
"""
Tests for get_smiles_map.py.
"""
import shutil
import tempfile
import unittest
from rdkit import Chem
from vs_utils.scripts.get_smiles_map import main, parse_args
from vs_utils.utils import read_pickle
class TestGetSmilesMap(unittest.TestCase):
    """
    Tests for get_smiles_map.py.
    """
    def setUp(self):
        """
        Set up tests: a temp directory holding a tab-separated SMILES/ID
        input file and a placeholder output pickle.
        """
        self.temp_dir = tempfile.mkdtemp()
        self.smiles = [
            'CC(=O)OC1=CC=CC=C1C(=O)O', 'CC(C)CC1=CC=C(C=C1)C(C)C(=O)O',
            'CC1=CC=C(C=C1)C2=CC(=NN2C3=CC=C(C=C3)S(=O)(=O)N)C(F)(F)F']
        self.cids = [2244, 3672, 2662]
        _, self.input_filename = tempfile.mkstemp(dir=self.temp_dir,
                                                  suffix='.smi')
        _, self.output_filename = tempfile.mkstemp(dir=self.temp_dir,
                                                   suffix='.pkl')
        # write SMILES to file
        self._write_input()

    def tearDown(self):
        """
        Clean up tests.
        """
        shutil.rmtree(self.temp_dir)

    def _write_input(self):
        """Write the current SMILES/CID pairs to the input file."""
        with open(self.input_filename, 'wb') as f:
            for smile, cid in zip(self.smiles, self.cids):
                f.write('{}\t{}\n'.format(smile, cid))

    def _check_output(self):
        """Assert the output map holds the canonical SMILES for each CID."""
        data = read_pickle(self.output_filename)
        self.assertEqual(len(data), len(self.smiles))
        for smile, cid in zip(self.smiles, self.cids):
            self.assertEqual(
                data['CID{}'.format(cid)],
                Chem.MolToSmiles(Chem.MolFromSmiles(smile),
                                 isomericSmiles=True))

    def test_main(self):
        """
        Test main.
        """
        args = parse_args(['-i', self.input_filename, '-o',
                           self.output_filename, '-p', 'CID'])
        main(args.input, args.output, args.prefix)
        self._check_output()

    def test_failure_on_bare_id(self):
        """
        Test failure on bare IDs (no prefix given).
        """
        args = parse_args(['-i', self.input_filename, '-o',
                           self.output_filename])
        # idiomatic replacement for the try/raise-AssertionError pattern
        self.assertRaises(TypeError, main, args.input, args.output,
                          args.prefix)

    def test_update(self):
        """
        Test update existing map.
        """
        args = parse_args(['-i', self.input_filename, '-o',
                           self.output_filename, '-p', 'CID'])
        main(args.input, args.output, args.prefix, args.update)
        # add another molecule and rewrite the input file
        self.smiles.append('CC(=O)NC1=CC=C(C=C1)O')
        self.cids.append(1983)
        self._write_input()
        # update existing map
        main(args.input, args.output, args.prefix, True)
        self._check_output()
|
rbharath/vs-utils
|
vs_utils/scripts/tests/test_get_smiles_map.py
|
Python
|
gpl-3.0
| 2,991
|
[
"RDKit"
] |
823d7ec4b9e13445a4f0ba22f4a4f4ffd55e6c434636e41889f30be495cee37e
|
"""
Provides some visualization capabilities.
"""
# IMPORTS
try:
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
except ImportError as err:
# Could not import pyplot
# ... do some stuff here
raise err
# DEBUGGING
from IPython import embed as IPS
import numpy as np
import pysofe
from pysofe import utils
def show(obj, *args, **kwargs):
    """
    Wrapper function for the visualization of various pysofe objects.

    Selects the visualizer class matching the type of *obj* and calls its
    `show()` method. Note that for elements and finite element functions
    only keyword arguments are forwarded.

    Parameters
    ----------
    obj
        The pysofe object to visualize
    """
    if isinstance(obj, pysofe.elements.base.Element):
        ElementVisualizer().show(element=obj, **kwargs)
    elif isinstance(obj, pysofe.meshes.mesh.Mesh):
        MeshVisualizer().show(obj, *args, **kwargs)
    elif isinstance(obj, pysofe.quadrature.gaussian.GaussQuadSimp):
        QuadRuleVisualizer().show(obj, *args, **kwargs)
    elif isinstance(obj, pysofe.spaces.space.FESpace):
        FESpaceVisualizer().show(obj, *args, **kwargs)
    elif isinstance(obj, pysofe.spaces.functions.FEFunction):
        FunctionVisualizer().show(obj, **kwargs)
    else:
        raise NotImplementedError()
class Visualizer(object):
    """
    Abstract base class for all visualizers.

    Subclasses implement `_plot()` which returns a ``(figure, axes)``
    pair; `plot()` and `show()` build on that hook.
    """
    def plot(self, *args, **kwargs):
        # Delegate to the subclass hook and hand its (fig, axes) pair back.
        return self._plot(*args, **kwargs)

    def _plot(self, *args, **kwargs):
        # Subclass responsibility.
        raise NotImplementedError()

    def show(self, *args, **kwargs):
        # Build the plot, then display the resulting figure.
        fig, _ = self.plot(*args, **kwargs)
        fig.show()
class MeshVisualizer(Visualizer):
    """
    Visualizes the :py:class:`pysofe.meshes.Mesh` class.

    Positional flags (via ``*args``): ``'nodes'``, ``'edges'``, ``'cells'``,
    ``'local vertices'``, or ``'all'`` to annotate the corresponding entity
    numbers on the plot. Entity connectivity arrays are 1-based.
    """
    def _plot(self, mesh, *args, **kwargs):
        fontsize = kwargs.get('fontsize', 9)
        fig = plt.figure()
        ax = fig.add_subplot(111)
        if mesh.dimension == 1:
            # 1D mesh: draw nodes along the x-axis
            nodes = mesh.nodes[:,0]
            zeros = np.zeros_like(nodes)
            ax.plot(nodes, zeros, '-o')
        elif mesh.dimension == 2:
            # 2D mesh: triangle plot (connectivity is 1-based, hence the -1)
            cols = range(3)
            ax.triplot(mesh.nodes[:,0], mesh.nodes[:,1], np.asarray(mesh.cells[:,cols] - 1))
        else:
            raise NotImplementedError()
        # zoom out to make outer faces visible
        xlim = list(ax.get_xlim()); ylim = list(ax.get_ylim())
        xlim[0] -= 0.1; xlim[1] += 0.1
        ylim[0] -= 0.1; ylim[1] += 0.1
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        show_all = ('all' in args)
        # nodes: annotate each node with its 1-based number
        if 'nodes' in args or show_all:
            for i in xrange(mesh.nodes.shape[0]):
                if mesh.dimension == 1:
                    ax.text(x=mesh.nodes[i,0], y=0., s=i+1,
                            color='red', fontsize=fontsize)
                elif mesh.dimension == 2:
                    ax.text(x=mesh.nodes[i,0], y=mesh.nodes[i,1], s=i+1,
                            color='red', fontsize=fontsize)
                else:
                    raise NotImplementedError()
        # edges: annotate at the edge midpoints
        if 'edges' in args or show_all:
            edges = mesh.edges
            bary = 0.5 * mesh.nodes[edges - 1,:].sum(axis=1)
            for i in xrange(edges.shape[0]):
                if mesh.dimension == 1:
                    ax.text(x=bary[i,0], y=0, s=i+1,
                            color='green', fontsize=fontsize)
                elif mesh.dimension == 2:
                    ax.text(x=bary[i,0], y=bary[i,1], s=i+1,
                            color='green', fontsize=fontsize)
        # elements: annotate at the cell barycentres (2D only)
        if mesh.dimension > 1 and ('cells' in args or show_all):
            cells = mesh.cells
            bary = mesh.nodes[cells - 1,:].sum(axis=1) / 3.
            for i in xrange(cells.shape[0]):
                ax.text(x=bary[i,0], y=bary[i,1], s=i+1,
                        color='blue', fontsize=fontsize)
        if 'local vertices' in args:
            cells = mesh.cells
            cell_nodes = mesh.nodes.take(cells - 1, axis=0)
            bary = cell_nodes.sum(axis=1) / 3.
            nE = cells.shape[0]
            # calculate positions where to put the local vertex numbers
            # (40% of the way from each vertex towards the barycentre)
            local_1 = cell_nodes[:,0] + 0.4 * (bary - cell_nodes[:,0])
            local_2 = cell_nodes[:,1] + 0.4 * (bary - cell_nodes[:,1])
            local_3 = cell_nodes[:,2] + 0.4 * (bary - cell_nodes[:,2])
            for i in xrange(nE):
                ax.text(x=local_1[i,0], y=local_1[i,1], s=1, color='red', fontsize=fontsize)
                ax.text(x=local_2[i,0], y=local_2[i,1], s=2, color='red', fontsize=fontsize)
                ax.text(x=local_3[i,0], y=local_3[i,1], s=3, color='red', fontsize=fontsize)
        return fig, ax
class ElementVisualizer(Visualizer):
    """
    Visualizes :py:class:`pysofe.elements.base.Element` classes.
    """
    def _plot(self, element, **kwargs):
        """
        Plots the basis function or their derivatives of the given element.

        Parameters
        ----------
        element : pysofe.base.Element
            The finite element of which to plot the basis functions
        codim : int
            The codimension of the entity for which to plot the respective basis functions
        d : int
            The derivation order (only ``d == 0`` is supported)
        indices : array_like
            Specify certain (1-based) basis function indices to show
        resolution : int
            Resolution of the grid points for the plot
        typ : str
            The plotting type ('surface' or 'scatter')
        shadow : bool
            Whether to plot a shadow of the surface
        layout : tuple, optional
            Subplot grid as (rows, cols) or (cols,); defaults to two columns
        """
        # get arguments
        dim = kwargs.get('dim', element.dimension)
        d = kwargs.get('d', 0)
        indices = kwargs.get('indices', None)
        resolution = kwargs.get('resolution', 10*np.ceil(np.log(element.order+1)))
        typ = kwargs.get('typ', 'surface')
        shadow = kwargs.get('shadow', False)
        layout = kwargs.get('layout', None)
        # only plain basis functions of 1D/2D elements are supported
        if d != 0:
            raise NotImplementedError()
        if element.dimension > 2:
            raise NotImplementedError()
        codim = element.dimension - dim
        # use a 3d projection only for 2D elements plotted over cells
        if element.dimension == 1:
            project = None
        elif element.dimension == 2:
            if codim == 0:
                project = '3d'
            elif codim == 1:
                project = None
        # create grid points at which to evaluate the basis functions
        ls = np.linspace(0., 1., num=resolution)
        if element.dimension == 1:
            points = ls
        elif element.dimension == 2:
            if codim == 0:
                # keep only grid points inside the reference triangle (x+y <= 1)
                X,Y = np.meshgrid(ls, ls)
                XY = np.vstack([np.hstack(X), np.hstack(Y)])
                points = XY.compress(XY.sum(axis=0) <= 1., axis=1)
            elif codim == 1:
                points = ls
        # evaluate all basis function at all points
        basis = element.eval_basis(points, deriv=d) # nB x nP
        if indices is not None:
            # restrict to the requested basis functions (convert to 0-based)
            assert hasattr(indices, '__iter__')
            indices = np.asarray(indices, dtype=int) - 1
            assert indices.min() >= 0
            basis = basis.take(indices, axis=0)
        else:
            indices = np.arange(np.size(basis, axis=0))
        # create a subplot for each basis function
        nB = np.size(basis, axis=0)
        fig = plt.figure()
        if layout is None:
            # default layout: two columns, enough rows for all functions
            nB_2 = int(0.5*(nB+1))
            for i in xrange(1, nB_2+1):
                if codim == 0:
                    fig.add_subplot(nB_2,2,2*i-1, projection=project)
                    if 2*i <= nB:
                        fig.add_subplot(nB_2,2,2*i, projection=project)
                elif codim == 1:
                    fig.add_subplot(nB_2,2,2*i-1, projection=project)
                    if 2*i <= nB:
                        fig.add_subplot(nB_2,2,2*i, projection=project)
        else:
            # user-provided layout: (rows, cols), or (cols,) meaning one row
            assert 1 <= len(layout) <= 2
            if len(layout) == 1:
                layout = (1,layout[0])
            assert np.multiply(*layout) >= nB
            for j in xrange(nB):
                if codim == 0:
                    fig.add_subplot(layout[0], layout[1], j+1, projection=project)
                elif codim == 1:
                    fig.add_subplot(layout[0], layout[1], j+1, projection=project)
        if element.dimension == 1:
            for i in xrange(nB):
                fig.axes[i].plot(points.ravel(), basis[i].ravel())
                #fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(i+1), fontsize=32)
                fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(indices[i]+1), fontsize=32)
        elif element.dimension == 2:
            if codim == 0:
                for i in xrange(nB):
                    if typ == 'scatter':
                        fig.axes[i].scatter(points[0], points[1], basis[i])
                    elif typ == 'surface':
                        fig.axes[i].plot_trisurf(points[0], points[1], basis[i],
                                                 cmap=cm.jet, linewidth=0., antialiased=False)
                    if shadow:
                        # flat contour at z=0 acting as a drop shadow
                        c = fig.axes[i].tricontourf(points[0], points[1], basis[i],
                                                    zdir='z', offset=0., colors='gray')
                    fig.axes[i].autoscale_view(True,True,True)
                    #fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(i+1), fontsize=32)
                    fig.axes[i].set_title(r"$\varphi_{{ {} }}$".format(indices[i]+1), fontsize=32)
            elif codim == 1:
                for i in xrange(nB):
                    fig.axes[i].plot(points.ravel(), basis[i].ravel())
                    #fig.axes[i].set_title(r"$\psi_{{ {} }}$".format(i+1), fontsize=32)
                    fig.axes[i].set_title(r"$\psi_{{ {} }}$".format(indices[i]+1), fontsize=32)
        return fig, fig.axes
class FunctionVisualizer(Visualizer):
    '''
    Base class for visualizing functions.
    '''
    def _plot(self, fnc, **kwargs):
        '''
        Plots the function.

        Parameters
        ----------
        fnc
            The function to plot; its ``fe_space.mesh`` dimension selects
            the plotting mode (1D line plot, or a 2D tri-based mode from
            the ``mode`` keyword: 'trisurface', 'tripcolor'/'heatmap').
        '''
        self.fnc = fnc
        if fnc.fe_space.mesh.dimension == 1:
            mode = '1dplot'
        elif fnc.fe_space.mesh.dimension == 2:
            mode = kwargs.get('mode', 'trisurface')
        else:
            raise NotImplementedError()
        # get visualization data
        #----------------------------------------------------
        points, values, cells = self._get_visualizetion_data(mode, **kwargs)
        # set up figure and axes
        #----------------------------------------------------
        # get number of plots (one subplot per row of `values`)
        n_values = values.shape[0]
        layout = kwargs.get('layout', None)
        if layout is None:
            # default grid: 1, 2 or 3 columns depending on the plot count
            if n_values == 1:
                nrows = 1
                ncols = 1
            elif 1 < n_values < 9:
                nrows = int(np.ceil(n_values/2.))
                ncols = 2
            else:
                nrows = int(np.ceil(n_values/3.))
                ncols = 3
        else:
            nrows, ncols = layout
        # create figure and subplots (if neccessary)
        fig = kwargs.get('fig', None)
        axes = kwargs.get('ax', None)
        if axes is None:
            if mode in ('trisurface', 'surface', 'wireframe'):
                subplot_kw = {'projection' : '3d'}
            else:
                subplot_kw = {}
            fig, axes = plt.subplots(nrows, ncols, squeeze=False, subplot_kw=subplot_kw)
        else:
            # caller supplied axes: ensure the grid is big enough
            axes = np.atleast_2d(axes)
            assert axes.ndim == 2
            assert nrows <= axes.shape[0]
            assert ncols <= axes.shape[1]
        # called plotting routine specified by `mode`
        #----------------------------------------------------
        if mode == '1dplot':
            axes[0,0].plot(points[0], values[0])
        elif mode == 'trisurface':
            self._plot_trisurf(axes=axes, X=points[0], Y=points[1], triangles=cells,
                               Z=values, **kwargs)
        elif mode in ('tripcolor', 'heatmap'):
            self._plot_tripcolor(axes=axes, X=points[0], Y=points[1], triangles=cells,
                                 Z=values, **kwargs)
        return fig, axes

    def _get_visualizetion_data(self, mode, **kwargs):
        # NOTE: the (misspelled) method name is kept as-is for compatibility.
        # Assemble the (points, values, cells) triple for the given mode.
        if mode in ('1dplot',):
            # evaluate on a reference-interval grid and map to global coords
            local_points = np.linspace(0., 1., 10)[None,:]
            points = self.fnc.fe_space.mesh.ref_map.eval(local_points)
            # drop duplicate points shared by neighbouring cells
            _, I = np.unique(points.flat, return_index=True)
            points = points.ravel().take(I)[None,:]
            values = self.fnc(points=local_points, deriv=0).ravel().take(I)
            values = np.atleast_2d(values)
            # connectivity: consecutive point pairs form the 1D "cells"
            cells = np.arange(points.size, dtype='int').repeat(2)[1:-1].reshape((-1,2))
            return points, values, cells
        elif mode in ('trisurface', 'tripcolor', 'heatmap'):
            # get points, values and triangles for the plot
            return self._get_triangulation_data(**kwargs)
        else:
            msg = "Invalid visualization mode for functions! ({})"
            raise ValueError(msg.format(mode))

    def _get_triangulation_data(self, **kwargs):
        # generate local points for the function evaluation
        n_sub_grid = kwargs.get('n_sub_grid', self.fnc.order + 1)
        local_points = utils.lagrange_nodes(dimension=2, order=n_sub_grid)
        # project them to their global counterparts
        order = 'C'
        points = self.fnc.fe_space.mesh.ref_map.eval(points=local_points,
                                                     deriv=0)
        points = np.vstack([points[:,:,0].ravel(order=order), points[:,:,1].ravel(order=order)])
        # get unique points indices
        _, I = utils.unique_rows(points.T, return_index=True)
        points = points.take(I, axis=1)
        # evaluate the function w.r.t the unique points
        d = kwargs.get('d', 0)
        if 0:
            # NOTE: deliberately disabled branch kept for reference; it
            # distinguished FEFunction and MeshFunction evaluation.
            if isinstance(self.fnc, pysofe.spaces.functions.FEFunction):
                eval_local = kwargs.get('eval_local', True)
                if eval_local:
                    values = self.fnc(points=local_points, d=d, local=True)
                else:
                    values = self.fnc(points=points, d=d, local=False)
            elif isinstance(self.fnc, pysofe.spaces.functions.MeshFunction):
                values = self.fnc(points=points, d=d)
        else:
            fnc_args = kwargs.get('fnc_args', dict())
            if kwargs.get('eval_local', True):
                values = self.fnc(points=local_points, deriv=d, **fnc_args)
            else:
                values = self.fnc(points=points, d=d, local=False, **fnc_args)
        if d == 0:
            # scalar values: a single row
            values = values.ravel(order=order).take(I, axis=0)
        elif d == 1:
            # first derivatives: one row per component of the last axis
            values = np.asarray([values.take(i, axis=-1).ravel(order=order).take(I, axis=0) for i in xrange(values.shape[-1])])
        else:
            raise ValueError('Invalid derivation order for visualization! ({})'.format(d))
        # get cells corresponding to the unique points
        from scipy.spatial import Delaunay
        cells = Delaunay(points.T).simplices
        values = np.atleast_2d(values)
        return points, values, cells

    def _plot_trisurf(self, axes, X, Y, triangles, Z, **kwargs):
        '''
        Wrapper for the :py:meth:`plot_trisurf` method of
        the :py:class:`Axes3D` class.

        Parameters
        ----------
        X, Y : array_like
            1D arrays of the triangulation node coordinates
        triangles : array_like
            Connectivity array of the triangulation
        Z : array_like
            1D array of the values at the triangulation nodes
        '''
        # set default values
        cmap = kwargs.get('cmap', cm.jet)
        # get layout
        n_values = Z.shape[0]
        nrows, ncols = axes.shape
        # iterate over axes row-major and plot; surplus axes stay empty
        for i in xrange(nrows):
            for j in xrange(ncols):
                if i * ncols + j < n_values:
                    # call mpl_toolkit's plot_trisurf
                    axes[i,j].plot_trisurf(X, Y, triangles, Z[i * ncols + j],
                                           shade=True, cmap=cmap,
                                           linewidth=0., antialiased=False)

    def _plot_tripcolor(self, axes, X, Y, triangles, Z, **kwargs):
        '''
        Wrapper for the :py:meth:`pyplot.tripcolor` method.

        Parameters
        ----------
        X, Y : array_like
            1D arrays of the triangulation node coordinates
        triangles : array_like
            Connectivity array of the triangulation
        Z : array_like
            1D array of the values at the triangulation nodes
        '''
        # set default values
        shading = kwargs.get('shading', 'flat')
        cmap = kwargs.get('cmap', cm.jet)
        axis_off = kwargs.get('axis_off', True)
        # get layout
        n_values = Z.shape[0]
        nrows, ncols = axes.shape
        # iterate over axes row-major and plot; surplus axes stay empty
        for i in xrange(nrows):
            for j in xrange(ncols):
                if i * ncols + j < n_values:
                    # call matplotlib.pyplot's tripcolor
                    axes[i,j].tripcolor(X, Y, triangles, Z[i * ncols + j],
                                        shading=shading, cmap=cmap)
                    if axis_off:
                        # don't show axis
                        axes[i,j].set_axis_off()
class QuadRuleVisualizer(Visualizer):
    """
    Visualizes the numerical integration scheme by plotting the
    quadrature points.
    """
    def _plot(self, quad_rule, *args, **kwargs):
        assert isinstance(quad_rule, pysofe.quadrature.gaussian.GaussQuadSimp)
        # get entity dimension for which to plot points
        dim = kwargs.get('d', quad_rule.dimension)
        if not dim in (1, 2):
            msg = "Visualization not supported for this dimension, yet ({})"
            raise ValueError(msg.format(dim))
        # get quadrature points
        points = quad_rule.points[dim]
        # check if mesh is given
        mesh = kwargs.get('mesh', None)
        if mesh is not None and isinstance(mesh, pysofe.meshes.mesh.Mesh):
            # if so, plot points on whole mesh
            V = MeshVisualizer()
            fig, axes = V.plot(mesh)
            # transfer local points to global points on the mesh
            points = np.vstack(mesh.ref_map.eval(points)).T
            axes.plot(points[0], points[1], 'r.')
        else:
            # if not, plot points on reference domain
            # set up figure and axes
            fig = plt.figure()
            axes = fig.add_subplot(111)
            if dim == 1:
                # reference interval [0, 1]
                nodes = np.array([[0.], [1.]])
                cells = np.array([[1, 2]])
                axes.plot(nodes[:,0], np.zeros_like(nodes[:,0]))
                axes.plot(points[0], np.zeros_like(points[0]), 'r.')
            elif dim == 2:
                # reference triangle with vertices (0,0), (1,0), (0,1)
                nodes = np.array([[0., 0.], [1., 0.], [0., 1.]])
                cells = np.array([[1, 2, 3]])
                axes.triplot(nodes[:,0], nodes[:,1], cells-1)
                axes.plot(points[0], points[1], 'r.')
            # zoom out to make outer faces visible
            xlim = list(axes.get_xlim()); ylim = list(axes.get_ylim())
            xlim[0] -= 0.1; xlim[1] += 0.1
            ylim[0] -= 0.1; ylim[1] += 0.1
            axes.set_xlim(xlim)
            axes.set_ylim(ylim)
        return fig, axes
class FESpaceVisualizer(Visualizer):
    """
    Visualizes the finite element space by plotting
    its degrees of freedom.
    """
    def _plot(self, fe_space, *args, **kwargs):
        fontsize = kwargs.get('fontsize', 9)
        # first plot the mesh
        mesh = fe_space.mesh
        V = MeshVisualizer()
        fig, axes = V.plot(mesh)
        # get number of entities for each topological dimension
        n_entities = mesh.topology.n_entities
        dof_tuple = fe_space.element.dof_tuple
        n_dof_per_dim = np.asarray(n_entities) * dof_tuple
        dofs = np.arange(fe_space.n_dof) + 1
        # group global dof numbers by entity, one list per topological dim
        entity_dofs = [zip(*(arr.reshape((dof_tuple[i], -1))))
                       for i, arr in
                       enumerate(np.split(dofs, n_dof_per_dim.cumsum()[:-1]))]
        # plot dofs for each topological dimension
        # nodes
        for i in xrange(mesh.nodes.shape[0]):
            if mesh.dimension == 1:
                axes.text(x=mesh.nodes[i,0], y=0., s=entity_dofs[0][i],
                          color='red', fontsize=fontsize)
            elif mesh.dimension == 2:
                axes.text(x=mesh.nodes[i,0], y=mesh.nodes[i,1],
                          s=entity_dofs[0][i],
                          color='red', fontsize=fontsize)
            else:
                raise NotImplementedError()
        # edges (annotated at edge midpoints; connectivity is 1-based)
        edges = mesh.edges
        bary = 0.5 * mesh.nodes[edges - 1,:].sum(axis=1)
        for i in xrange(edges.shape[0]):
            if mesh.dimension == 1:
                axes.text(x=bary[i,0], y=0, s=entity_dofs[1][i],
                          color='red', fontsize=fontsize)
            elif mesh.dimension == 2:
                axes.text(x=bary[i,0], y=bary[i,1], s=entity_dofs[1][i],
                          color='red', fontsize=fontsize)
        # elements (annotated at cell barycentres; 2D only)
        if mesh.dimension > 1:
            cells = mesh.cells
            bary = mesh.nodes[cells - 1,:].sum(axis=1) / 3.
            for i in xrange(cells.shape[0]):
                axes.text(x=bary[i,0], y=bary[i,1], s=entity_dofs[2][i],
                          color='red', fontsize=fontsize)
        return fig, axes
|
pysofe/pysofe
|
pysofe/visualization.py
|
Python
|
bsd-3-clause
| 22,194
|
[
"Gaussian"
] |
e4468447f734bdbe0c4712c25c56e0c4cae9bf23d4f9402a39811abdbc6980c6
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
# OOo API documentation:
# http://api.openoffice.org/docs/common/ref/com/sun/star/presentation/XSlideShowController.html
# http://wiki.services.openoffice.org/wiki/Documentation/DevGuide/ProUNO/Basic/Getting_Information_about_UNO_Objects#Inspecting_interfaces_during_debugging
# http://docs.go-oo.org/sd/html/classsd_1_1SlideShow.html
# http://www.oooforum.org/forum/viewtopic.phtml?t=5252
# http://wiki.services.openoffice.org/wiki/Documentation/DevGuide/Working_with_Presentations
# http://mail.python.org/pipermail/python-win32/2008-January/006676.html
# http://www.linuxjournal.com/content/starting-stopping-and-connecting-openoffice-python
# http://nxsy.org/comparing-documents-with-openoffice-and-python
import logging
import os
import time
if os.name == u'nt':
from win32com.client import Dispatch
import pywintypes
# Declare an empty exception to match the exception imported from UNO
class ErrorCodeIOException(Exception):
pass
else:
try:
import uno
from com.sun.star.beans import PropertyValue
from com.sun.star.task import ErrorCodeIOException
uno_available = True
except ImportError:
uno_available = False
from PyQt4 import QtCore
from openlp.core.utils import delete_file, get_uno_command, get_uno_instance
from presentationcontroller import PresentationController, PresentationDocument
log = logging.getLogger(__name__)
class ImpressController(PresentationController):
    """
    Class to control interactions with Impress presentations.
    It creates the runtime environment, loads and closes the presentation as
    well as triggering the correct activities based on the users input
    """
    log.info(u'ImpressController loaded')

    def __init__(self, plugin):
        """
        Initialise the class
        """
        log.debug(u'Initialising')
        PresentationController.__init__(self, plugin, u'Impress', ImpressDocument)
        self.supports = [u'odp']
        self.alsosupports = [u'ppt', u'pps', u'pptx', u'ppsx']
        self.process = None
        self.desktop = None
        self.manager = None

    def check_available(self):
        """
        Return whether Impress is able to run on this machine.
        """
        log.debug(u'check_available')
        if os.name == u'nt':
            return self.get_com_servicemanager() is not None
        else:
            return uno_available

    def start_process(self):
        """
        Loads a running version of OpenOffice in the background.
        It is not displayed to the user but is available to the UNO interface
        when required.
        """
        log.debug(u'start process Openoffice')
        if os.name == u'nt':
            self.manager = self.get_com_servicemanager()
            self.manager._FlagAsMethod(u'Bridge_GetStruct')
            self.manager._FlagAsMethod(u'Bridge_GetValueObject')
        else:
            # -headless
            cmd = get_uno_command()
            self.process = QtCore.QProcess()
            self.process.startDetached(cmd)

    def get_uno_desktop(self):
        """
        On non-Windows platforms, use UNO. Get the OpenOffice desktop
        which will be used to manage Impress.

        Retries up to three times, starting a background OpenOffice
        process whenever no running instance can be found. Returns None
        on failure.
        """
        log.debug(u'get UNO Desktop Openoffice')
        uno_instance = None
        loop = 0
        log.debug(u'get UNO Desktop Openoffice - getComponentContext')
        context = uno.getComponentContext()
        log.debug(u'get UNO Desktop Openoffice - createInstaneWithContext - UnoUrlResolver')
        resolver = context.ServiceManager.createInstanceWithContext(u'com.sun.star.bridge.UnoUrlResolver', context)
        while uno_instance is None and loop < 3:
            try:
                uno_instance = get_uno_instance(resolver)
            except:
                log.warn(u'Unable to find running instance ')
                self.start_process()
                loop += 1
        try:
            self.manager = uno_instance.ServiceManager
            log.debug(u'get UNO Desktop Openoffice - createInstanceWithContext'
                u' - Desktop')
            desktop = self.manager.createInstanceWithContext("com.sun.star.frame.Desktop", uno_instance)
            return desktop
        except:
            log.warn(u'Failed to get UNO desktop')
            return None

    def get_com_desktop(self):
        """
        On Windows platforms, use COM. Return the desktop object which
        will be used to manage Impress, or None on failure.
        """
        log.debug(u'get COM Desktop OpenOffice')
        if not self.manager:
            return None
        desktop = None
        try:
            desktop = self.manager.createInstance(u'com.sun.star.frame.Desktop')
        except (AttributeError, pywintypes.com_error):
            log.warn(u'Failure to find desktop - Impress may have closed')
        return desktop if desktop else None

    def get_com_servicemanager(self):
        """
        Return the OOo service manager for windows, or None if it cannot
        be created (Impress support is then disabled).
        """
        log.debug(u'get_com_servicemanager openoffice')
        try:
            return Dispatch(u'com.sun.star.ServiceManager')
        except pywintypes.com_error:
            log.warn(u'Failed to get COM service manager. Impress Controller has been disabled')
            return None

    def kill(self):
        """
        Called at system exit to clean up any running presentations.

        Only terminates the OpenOffice desktop when no user documents
        (other than the start-centre backing window) remain open.
        """
        log.debug(u'Kill OpenOffice')
        while self.docs:
            self.docs[0].close_presentation()
        desktop = None
        try:
            if os.name != u'nt':
                desktop = self.get_uno_desktop()
            else:
                desktop = self.get_com_desktop()
        except:
            log.warn(u'Failed to find an OpenOffice desktop to terminate')
        if not desktop:
            return
        docs = desktop.getComponents()
        cnt = 0
        if docs.hasElements():
            # BUG FIX: loop variable renamed from "list", which shadowed the builtin
            components = docs.createEnumeration()
            while components.hasMoreElements():
                doc = components.nextElement()
                if doc.getImplementationName() != u'com.sun.star.comp.framework.BackingComp':
                    cnt += 1
        if cnt > 0:
            log.debug(u'OpenOffice not terminated as docs are still open')
        else:
            try:
                desktop.terminate()
                log.debug(u'OpenOffice killed')
            except:
                log.warn(u'Failed to terminate OpenOffice')
class ImpressDocument(PresentationDocument):
    """
    Class which holds information and controls a single presentation
    """
    def __init__(self, controller, presentation):
        """
        Constructor, store information about the file and initialise
        """
        log.debug(u'Init Presentation OpenOffice')
        PresentationDocument.__init__(self, controller, presentation)
        # UNO/COM document component; populated by load_presentation()
        self.document = None
        # object returned by document.getPresentation(); set on load
        self.presentation = None
        # slide-show controller handle; reset to None on each load
        self.control = None
def load_presentation(self):
"""
Called when a presentation is added to the SlideController.
It builds the environment, starts communcations with the background
OpenOffice task started earlier. If OpenOffice is not present is is
started. Once the environment is available the presentation is loaded
and started.
"""
log.debug(u'Load Presentation OpenOffice')
if os.name == u'nt':
desktop = self.controller.get_com_desktop()
if desktop is None:
self.controller.start_process()
desktop = self.controller.get_com_desktop()
url = u'file:///' + self.filepath.replace(u'\\', u'/').replace(u':', u'|').replace(u' ', u'%20')
else:
desktop = self.controller.get_uno_desktop()
url = uno.systemPathToFileUrl(self.filepath)
if desktop is None:
return False
self.desktop = desktop
properties = []
if os.name != u'nt':
# Recent versions of Impress on Windows won't start the presentation
# if it starts as minimized. It seems OK on Linux though.
properties.append(self.create_property(u'Minimized', True))
properties = tuple(properties)
try:
self.document = desktop.loadComponentFromURL(url, u'_blank',
0, properties)
except:
log.warn(u'Failed to load presentation %s' % url)
return False
if os.name == u'nt':
# As we can't start minimized the Impress window gets in the way.
# Either window.setPosSize(0, 0, 200, 400, 12) or .setVisible(False)
window = self.document.getCurrentController().getFrame().getContainerWindow()
window.setVisible(False)
self.presentation = self.document.getPresentation()
self.presentation.Display = self.controller.plugin.renderer.screens.current[u'number'] + 1
self.control = None
self.create_thumbnails()
return True
def create_thumbnails(self):
"""
Create thumbnail images for presentation
"""
log.debug(u'create thumbnails OpenOffice')
if self.check_thumbnails():
return
if os.name == u'nt':
thumbdirurl = u'file:///' + self.get_temp_folder().replace(u'\\', u'/') \
.replace(u':', u'|').replace(u' ', u'%20')
else:
thumbdirurl = uno.systemPathToFileUrl(self.get_temp_folder())
props = []
props.append(self.create_property(u'FilterName', u'impress_png_Export'))
props = tuple(props)
doc = self.document
pages = doc.getDrawPages()
if not pages:
return
if not os.path.isdir(self.get_temp_folder()):
os.makedirs(self.get_temp_folder())
for idx in range(pages.getCount()):
page = pages.getByIndex(idx)
doc.getCurrentController().setCurrentPage(page)
urlpath = u'%s/%s.png' % (thumbdirurl, unicode(idx + 1))
path = os.path.join(self.get_temp_folder(), unicode(idx + 1) + u'.png')
try:
doc.storeToURL(urlpath, props)
self.convert_thumbnail(path, idx + 1)
delete_file(path)
except ErrorCodeIOException, exception:
log.exception(u'ERROR! ErrorCodeIOException %d' % exception.ErrCode)
except:
log.exception(u'%s - Unable to store openoffice preview' % path)
def create_property(self, name, value):
"""
Create an OOo style property object which are passed into some
Uno methods
"""
log.debug(u'create property OpenOffice')
if os.name == u'nt':
prop = self.controller.manager.Bridge_GetStruct(u'com.sun.star.beans.PropertyValue')
else:
prop = PropertyValue()
prop.Name = name
prop.Value = value
return prop
def close_presentation(self):
"""
Close presentation and clean up objects
Triggered by new object being added to SlideController or OpenLP
being shutdown
"""
log.debug(u'close Presentation OpenOffice')
if self.document:
if self.presentation:
try:
self.presentation.end()
self.presentation = None
self.document.dispose()
except:
log.warn("Closing presentation failed")
self.document = None
self.controller.remove_doc(self)
def is_loaded(self):
"""
Returns true if a presentation is loaded
"""
log.debug(u'is loaded OpenOffice')
if self.presentation is None or self.document is None:
log.debug("is_loaded: no presentation or document")
return False
try:
if self.document.getPresentation() is None:
log.debug("getPresentation failed to find a presentation")
return False
except:
log.warn("getPresentation failed to find a presentation")
return False
return True
def is_active(self):
"""
Returns true if a presentation is active and running
"""
log.debug(u'is active OpenOffice')
if not self.is_loaded():
return False
return self.control.isRunning() if self.control else False
def unblank_screen(self):
"""
Unblanks the screen
"""
log.debug(u'unblank screen OpenOffice')
return self.control.resume()
def blank_screen(self):
"""
Blanks the screen
"""
log.debug(u'blank screen OpenOffice')
self.control.blankScreen(0)
def is_blank(self):
"""
Returns true if screen is blank
"""
log.debug(u'is blank OpenOffice')
if self.control and self.control.isRunning():
return self.control.isPaused()
else:
return False
def stop_presentation(self):
"""
Stop the presentation, remove from screen
"""
log.debug(u'stop presentation OpenOffice')
# deactivate should hide the screen according to docs, but doesn't
#self.control.deactivate()
self.presentation.end()
self.control = None
def start_presentation(self):
"""
Start the presentation from the beginning
"""
log.debug(u'start presentation OpenOffice')
if self.control is None or not self.control.isRunning():
self.presentation.start()
self.control = self.presentation.getController()
# start() returns before the Component is ready.
# Try for 15 seconds
i = 1
while not self.control and i < 150:
time.sleep(0.1)
i += 1
self.control = self.presentation.getController()
else:
self.control.activate()
self.goto_slide(1)
def get_slide_number(self):
"""
Return the current slide number on the screen, from 1
"""
return self.control.getCurrentSlideIndex() + 1
def get_slide_count(self):
"""
Return the total number of slides
"""
return self.document.getDrawPages().getCount()
def goto_slide(self, slideno):
"""
Go to a specific slide (from 1)
"""
self.control.gotoSlideIndex(slideno-1)
def next_step(self):
"""
Triggers the next effect of slide on the running presentation
"""
is_paused = self.control.isPaused()
self.control.gotoNextEffect()
time.sleep(0.1)
if not is_paused and self.control.isPaused():
self.control.gotoPreviousEffect()
def previous_step(self):
"""
Triggers the previous slide on the running presentation
"""
self.control.gotoPreviousSlide()
def get_slide_text(self, slide_no):
"""
Returns the text on the slide.
``slide_no``
The slide the text is required for, starting at 1
"""
return self.__get_text_from_page(slide_no)
def get_slide_notes(self, slide_no):
"""
Returns the text in the slide notes.
``slide_no``
The slide the notes are required for, starting at 1
"""
return self.__get_text_from_page(slide_no, True)
def __get_text_from_page(self, slide_no, notes=False):
"""
Return any text extracted from the presentation page.
``notes``
A boolean. If set the method searches the notes of the slide.
"""
text = ''
pages = self.document.getDrawPages()
page = pages.getByIndex(slide_no - 1)
if notes:
page = page.getNotesPage()
for idx in range(page.getCount()):
shape = page.getByIndex(idx)
if shape.supportsService("com.sun.star.drawing.Text"):
text += shape.getString() + '\n'
return text
|
marmyshev/transitions
|
openlp/plugins/presentations/lib/impresscontroller.py
|
Python
|
gpl-2.0
| 18,222
|
[
"Brian"
] |
b5e83a1300e43d023c86b189f4ba1d12a71aba940df079e39be7abe62b56265d
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.orca.data.pandas.preprocessing import read_csv
from zoo.orca.data.pandas.preprocessing import read_json
from zoo.orca.data.pandas.preprocessing import read_parquet
|
intel-analytics/analytics-zoo
|
pyzoo/zoo/orca/data/pandas/__init__.py
|
Python
|
apache-2.0
| 764
|
[
"ORCA"
] |
3828f185b59818105dd223099fb8042d7f28616369550744dfbbcbf8ff4159c8
|
import os
import logging
from shutil import copyfile
import time
from collections import OrderedDict
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
from CIP.logic import geometry_topology_data as gtd
#
# CIP_PointsLabelling
#
class CIP_PointsLabelling(ScriptedLoadableModule):
    """Hidden parent module that holds the metadata shared by the
    points-labelling child modules.

    Uses ScriptedLoadableModule base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        module = self.parent
        module.title = "Points labelling"
        module.categories = SlicerUtil.CIP_ModulesCategory
        module.dependencies = [SlicerUtil.CIP_ModuleName]
        module.contributors = ["Jorge Onieva (jonieva@bwh.harvard.edu)",
                               "Applied Chest Imaging Laboratory",
                               "Brigham and Women's Hospital"]
        module.helpText = """Training for a subtype of emphysema done quickly by an expert"""
        module.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
        # Hide the module. It just works as a parent for child modules
        module.hidden = True
#
# CIP_PointsLabellingWidget
#
class CIP_PointsLabellingWidget(ScriptedLoadableModuleWidget):
    """Base GUI widget for the points-labelling modules.

    Uses ScriptedLoadableModuleWidget base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """
    def __init__(self, parent):
        # Only lightweight state here; the GUI itself is built in setup()
        ScriptedLoadableModuleWidget.__init__(self, parent)
        from functools import partial
        def __onNodeAddedObserver__(self, caller, eventId, callData):
            """Node added to the Slicer scene"""
            if callData.GetClassName() == 'vtkMRMLScalarVolumeNode' \
                    and slicer.util.mainWindow().moduleSelector().selectedModule == self.moduleName:
                self.__onNewVolumeLoaded__(callData)
            # elif callData.GetClassName() == 'vtkMRMLLabelMapVolumeNode':
            #     self.__onNewLabelmapLoaded__(callData)

        # Bind the function above to this instance so it can be used as a
        # VTK observer callback (CallDataType tells VTK to pass the node)
        self.__onNodeAddedObserver__ = partial(__onNodeAddedObserver__, self)
        self.__onNodeAddedObserver__.CallDataType = vtk.VTK_OBJECT
        # Extra file types the case navigator should download with a case
        self.additionalFileTypes = OrderedDict()
        # Optional fixed name for the saved xml (default: derived from volume name)
        self.customFileName = None

    def _initLogic_(self):
        """Create a new logic object for the plugin"""
        self.logic = CIP_PointsLabellingLogic()

    def setup(self):
        """This is called one time when the module GUI is initialized
        """
        ScriptedLoadableModuleWidget.setup(self)
        # Create objects that can be used anywhere in the module. Example: in most cases there should be just one
        # object of the logic class
        self._initLogic_()
        self.currentVolumeLoaded = None
        self.blockNodeEvents = False

        ##########
        # Volume selection
        self.volumeSelectionCollapsibleButton = ctk.ctkCollapsibleButton()
        self.volumeSelectionCollapsibleButton.text = "Volume selection"
        self.layout.addWidget(self.volumeSelectionCollapsibleButton)
        self.volumeSelectionLayout = qt.QFormLayout(self.volumeSelectionCollapsibleButton)

        # Node selector
        # volumeLabel = qt.QLabel("Active volume: ")
        # volumeLabel.setStyleSheet("margin-left:5px")
        # self.mainLayout.addWidget(volumeLabel, 0, 0)
        self.volumeSelector = slicer.qMRMLNodeComboBox()
        self.volumeSelector.nodeTypes = ("vtkMRMLScalarVolumeNode", "")
        self.volumeSelector.selectNodeUponCreation = True
        self.volumeSelector.autoFillBackground = True
        self.volumeSelector.addEnabled = False
        self.volumeSelector.noneEnabled = False
        self.volumeSelector.removeEnabled = False
        self.volumeSelector.showHidden = False
        self.volumeSelector.showChildNodeTypes = False
        self.volumeSelector.setMRMLScene(slicer.mrmlScene)
        # self.volumeSelector.setFixedWidth(250)
        # self.volumeSelector.setStyleSheet("margin: 15px 0")
        # self.volumeSelector.selectNodeUponCreation = False
        #self.volumeSelectionLayout.addWidget(self.volumeSelector, 0, 1, 1, 3)
        self.volumeSelectionLayout.addRow("Active volume:", self.volumeSelector)
        self.volumeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.__onCurrentNodeChanged__)

        ##########
        # Main area
        self.mainAreaCollapsibleButton = ctk.ctkCollapsibleButton()
        self.mainAreaCollapsibleButton.text = "Main area"
        self.layout.addWidget(self.mainAreaCollapsibleButton, SlicerUtil.ALIGNMENT_VERTICAL_TOP)
        self.mainLayout = qt.QGridLayout(self.mainAreaCollapsibleButton)

        # Radio buttons frame. This will be filled by every child module
        self.radioButtonsFrame = qt.QFrame()
        self.mainLayout.addWidget(self.radioButtonsFrame, 2, 0, 1, 3, SlicerUtil.ALIGNMENT_VERTICAL_TOP)

        # Load caselist button
        self.loadButton = ctk.ctkPushButton()
        self.loadButton.text = "Load fiducials file"
        self.loadButton.setIcon(qt.QIcon("{0}/open_file.png".format(SlicerUtil.CIP_ICON_DIR)))
        self.loadButton.setIconSize(qt.QSize(20, 20))
        self.loadButton.setFixedWidth(135)
        self.mainLayout.addWidget(self.loadButton, 3, 0)
        self.loadButton.connect('clicked()', self.openFiducialsFile)

        # Remove fiducial button
        self.removeLastFiducialButton = ctk.ctkPushButton()
        self.removeLastFiducialButton.text = "Remove last fiducial"
        self.removeLastFiducialButton.toolTip = "Remove the last fiducial added"
        self.removeLastFiducialButton.setIcon(qt.QIcon("{0}/delete.png".format(SlicerUtil.CIP_ICON_DIR)))
        self.removeLastFiducialButton.setIconSize(qt.QSize(20, 20))
        self.removeLastFiducialButton.setFixedWidth(200)
        self.mainLayout.addWidget(self.removeLastFiducialButton, 3, 1)
        self.removeLastFiducialButton.connect('clicked()', self.__onRemoveLastFiducialButtonClicked__)

        # Save results button
        self.saveResultsButton = ctk.ctkPushButton()
        self.saveResultsButton.setText("Save markups")
        self.saveResultsButton.toolTip = "Save the markups in the specified directory"
        self.saveResultsButton.setIcon(qt.QIcon("{0}/Save.png".format(SlicerUtil.CIP_ICON_DIR)))
        self.saveResultsButton.setIconSize(qt.QSize(20,20))
        self.saveResultsButton.setFixedWidth(135)
        self.mainLayout.addWidget(self.saveResultsButton, 4, 0)
        self.saveResultsButton.connect('clicked()', self.__onSaveResultsButtonClicked__)

        # Save results directory button
        defaultPath = os.path.join(SlicerUtil.getSettingsDataFolder(self.moduleName), "results")  # Assign a default path for the results
        path = SlicerUtil.settingGetOrSetDefault(self.moduleName, "SaveResultsDirectory", defaultPath)
        self.saveResultsDirectoryButton = ctk.ctkDirectoryButton()
        self.saveResultsDirectoryButton.directory = path
        self.saveResultsDirectoryButton.setMaximumWidth(375)
        self.mainLayout.addWidget(self.saveResultsDirectoryButton, 4, 1, 1, 2)
        self.saveResultsDirectoryButton.connect("directoryChanged (QString)", self.__onSaveResultsDirectoryChanged__)

        #####
        # Case navigator (only available when the ACIL extension is present)
        self.caseNavigatorWidget = None
        if SlicerUtil.isSlicerACILLoaded():
            caseNavigatorAreaCollapsibleButton = ctk.ctkCollapsibleButton()
            caseNavigatorAreaCollapsibleButton.text = "Case navigator"
            self.layout.addWidget(caseNavigatorAreaCollapsibleButton, 0x0020)
            # caseNavigatorLayout = qt.QVBoxLayout(caseNavigatorAreaCollapsibleButton)
            # Add a case list navigator
            from ACIL.ui import CaseNavigatorWidget
            self.caseNavigatorWidget = CaseNavigatorWidget(self.moduleName, caseNavigatorAreaCollapsibleButton)
            for key,value in self.additionalFileTypes.items():
                self.caseNavigatorWidget.additionalFileTypes[key] = value
            self.caseNavigatorWidget.setup()
            # Listen for the event of loading a new labelmap
            # self.caseNavigatorWidget.addObservable(self.caseNavigatorWidget.EVENT_LABELMAP_LOADED, self.__onNewILDClassificationLabelmapLoaded__)
            self.caseNavigatorWidget.addObservable(self.caseNavigatorWidget.EVENT_BUNDLE_CASE_FINISHED, self._onFinishCaseBundleLoad_)

        self.layout.addStretch()

        # Extra Connections
        self._createSceneObservers_()

    def _createSceneObservers_(self):
        """
        Create the observers for the scene in this module
        """
        self.observers = []
        self.observers.append(
            slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.__onNodeAddedObserver__))
        self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.EndCloseEvent, self.__onSceneClosed__))

    def saveResultsCurrentNode(self):
        """ Get current active node and save the xml fiducials file
        """
        try:
            d = self.saveResultsDirectoryButton.directory
            if not os.path.isdir(d):
                # Ask the user if he wants to create the folder
                if qt.QMessageBox.question(slicer.util.mainWindow(), "Create directory?",
                        "The directory '{0}' does not exist. Do you want to create it?".format(d),
                        qt.QMessageBox.Yes|qt.QMessageBox.No) == qt.QMessageBox.Yes:
                    try:
                        os.makedirs(d)
                        # Make sure that everybody has write permissions (sometimes there are problems because of umask)
                        os.chmod(d, 0o777)
                    except:
                        qt.QMessageBox.warning(slicer.util.mainWindow(), 'Directory incorrect',
                            'The folder "{0}" could not be created. Please select a valid directory'.format(d))
                        return
                else:
                    # Abort process
                    SlicerUtil.logDevelop("Saving results process aborted", includePythonConsole=True)
                    return
                # self.logic.saveCurrentFiducials(d, self.caseNavigatorWidget, self.uploadFileResult)
                # qt.QMessageBox.information(slicer.util.mainWindow(), 'Results saved',
                #     "The results have been saved succesfully")
            # else:
            if SlicerUtil.isSlicerACILLoaded():
                question = qt.QMessageBox.question(slicer.util.mainWindow(), "Save results remotely?",
                    "Your results will be saved locally. Do you also want to save your results in your remote server? (MAD, etc.)",
                    qt.QMessageBox.Yes | qt.QMessageBox.No | qt.QMessageBox.Cancel)
                if question == qt.QMessageBox.Cancel:
                    return
                saveInRemoteRepo = question == qt.QMessageBox.Yes
            else:
                saveInRemoteRepo = False
            if not self.customFileName:
                fileName = self.currentVolumeLoaded.GetName() + Util.file_conventions_extensions[self.logic._xmlFileExtensionKey_]
            else:
                fileName = self.customFileName
            localFilePath = os.path.join(d, fileName)
            self.logic.saveCurrentFiducials(localFilePath, caseNavigatorWidget=self.caseNavigatorWidget,
                                            callbackFunction=self.uploadFileResult, saveInRemoteRepo=saveInRemoteRepo)
            qt.QMessageBox.information(slicer.util.mainWindow(), 'Results saved',
                "The results have been saved succesfully")
        except:
            Util.print_last_exception()
            qt.QMessageBox.critical(slicer.util.mainWindow(), "Error when saving the results",
                "Error when saving the results. Please review the console for additional info")

    def uploadFileResult(self, result):
        """Callback method that will be invoked by the CaseNavigator after uploading a file remotely"""
        if result != Util.OK:
            qt.QMessageBox.warning(slicer.util.mainWindow(), "Error when uploading fiducials",
                "There was an error when uploading the fiducials file. This doesn't mean that your file wasn't saved locally!\n" +
                "Please review the console for more information")

    def openFiducialsFile(self):
        # Ask the user for an existing xml fiducials file and load it for
        # the currently selected volume
        volumeNode = self.volumeSelector.currentNode()
        if volumeNode is None:
            qt.QMessageBox.warning(slicer.util.mainWindow(), 'Select a volume', 'Please load a volume first')
            return
        f = qt.QFileDialog.getOpenFileName()
        if f:
            self.logic.loadFiducialsXml(volumeNode, f)
            self.saveResultsDirectoryButton.directory = os.path.dirname(f)
            qt.QMessageBox.information(slicer.util.mainWindow(), "File loaded", "File loaded successfully")

    ## PROTECTED/PRIVATE METHODS
    def _checkNewVolume_(self, newVolumeNode):
        """ New volume loaded in the scene in some way.
        If it's really a new volume, try to save and close the current one
        @param newVolumeNode:
        """
        if self.blockNodeEvents:
            # "Semaphore" to avoid duplicated events
            return
        self.blockNodeEvents = True
        volume = self.currentVolumeLoaded
        if volume is not None and newVolumeNode is not None \
                and newVolumeNode.GetID() != volume.GetID() \
                and not self.logic.isVolumeSaved(volume.GetName()):
            # Ask the user if he wants to save the previously loaded volume
            if qt.QMessageBox.question(slicer.util.mainWindow(), "Save results?",
                    "The fiducials for the volume '{0}' have not been saved. Do you want to save them?"
                    .format(volume.GetName()),
                    qt.QMessageBox.Yes|qt.QMessageBox.No) == qt.QMessageBox.Yes:
                self.saveResultsCurrentNode()
        # Remove all the previously existing nodes
        if self.currentVolumeLoaded is not None and newVolumeNode != self.currentVolumeLoaded:
            # Remove previously existing node
            self.logic.removeMarkupsAndNode(self.currentVolumeLoaded)
        if newVolumeNode is not None:
            SlicerUtil.setActiveVolumeIds(newVolumeNode.GetID())
            SlicerUtil.setFiducialsCursorMode(True, True)
        self.currentVolumeLoaded = newVolumeNode
        # updateState is expected to be provided by the child widget classes
        self.updateState()
        self.blockNodeEvents = False

    def _getColorTable_(self):
        """ Color table for this module for a better labelmap visualization.
        This must be implemented by child classes"""
        raise NotImplementedError("This method should be implemented by child classes")

    ## EVENTS
    def enter(self):
        """This is invoked every time that we select this module as the active module in Slicer (not only the first time)"""
        self.blockNodeEvents = False
        # if len(self.observers) == 0:
        #     self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.__onNodeAddedObserver__))
        #     self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.EndCloseEvent, self.__onSceneClosed__))
        SlicerUtil.setFiducialsCursorMode(True, True)
        if self.volumeSelector.currentNodeId != "":
            SlicerUtil.setActiveVolumeIds(self.volumeSelector.currentNodeId)
            self.currentVolumeLoaded = slicer.mrmlScene.GetNodeByID(self.volumeSelector.currentNodeId)
            self.updateState()

    def exit(self):
        """This is invoked every time that we switch to another module (not only when Slicer is closed)."""
        try:
            self.blockNodeEvents = True
            SlicerUtil.setFiducialsCursorMode(False)
        except:
            pass

    def cleanup(self):
        """This is invoked as a destructor of the GUI when the module is no longer going to be used"""
        try:
            # NOTE(review): removing items from self.observers while iterating
            # over it can skip elements - confirm this is intentional
            for observer in self.observers:
                slicer.mrmlScene.RemoveObserver(observer)
                self.observers.remove(observer)
        except:
            pass

    def __onNewVolumeLoaded__(self, newVolumeNode):
        """ Added a new node in the scene
        :param newVolumeNode:
        :return:
        """
        # Filter the name of the volume to remove possible suffixes added by Slicer
        filteredName = SlicerUtil.filterVolumeName(newVolumeNode.GetName())
        newVolumeNode.SetName(filteredName)
        self._checkNewVolume_(newVolumeNode)
        self.blockNodeEvents = True
        self.volumeSelector.setCurrentNode(newVolumeNode)
        self.blockNodeEvents = False

    def __onCurrentNodeChanged__(self, volumeNode):
        # Selector changed; treat it the same way as a newly loaded volume
        self._checkNewVolume_(volumeNode)

    def _onFinishCaseBundleLoad_(self, result, id, ids, additionalFilePaths):
        """
        Event triggered after a volume and the additional files have been loaded for a case.
        In this case, it is important to load a previously existing xml file
        @param result:
        @param id:
        @param ids:
        @param additionalFilePaths:
        @return:
        """
        if result == Util.OK and additionalFilePaths and os.path.exists(additionalFilePaths[0]):
            # Try to load a previously existing fiducials file downloaded with the ACIL case navigator
            self.logic.loadFiducialsXml(SlicerUtil.getNode(id), additionalFilePaths[0])

    def __onRemoveLastFiducialButtonClicked__(self):
        self.logic.removeLastFiducial()

    def __onSaveResultsDirectoryChanged__(self, directory):
        # f = qt.QFileDialog.getExistingDirectory()
        # if f:
        #     self.saveResultsDirectoryText.setText(f)
        SlicerUtil.setSetting(self.moduleName, "SaveResultsDirectory", directory)

    def __onSaveResultsButtonClicked__(self):
        self.saveResultsCurrentNode()

    def __onSceneClosed__(self, arg1, arg2):
        # Scene wiped: drop the current volume and reset the logic object
        self.currentVolumeLoaded = None
        self._initLogic_()
#
# CIP_PointsLabellingLogic
#
class CIP_PointsLabellingLogic(ScriptedLoadableModuleLogic):
    """Base logic for the points-labelling modules.

    Tracks the active volume, the fiducials list nodes and the
    GeometryTopologyData object used to load/save the points as xml.
    Child classes implement the abstract members that define the concrete
    types hierarchy (region/type/artifact, etc.).
    """
    def __init__(self):
        ScriptedLoadableModuleLogic.__init__(self)
        self._params_ = None    # module parameters; initialized by a child class
        self.markupsLogic = slicer.modules.markups.logic()
        self.currentVolumeId = None     # MRML id of the active scalar volume
        self.currentTypesList = None    # active types-subtypes combination
        # Volume NAME --> bool (True when all markups for that volume are saved).
        # Keep the key a NAME everywhere (see saveCurrentFiducials / isVolumeSaved).
        self.savedVolumes = {}
        # GeometryTopologyData for the last loaded/saved xml (if any)
        self.currentGeometryTopologyData = None

    @property
    def _xmlFileExtensionKey_(self):
        """Key of the dictionary of file conventions that will be used in this module"""
        raise NotImplementedError("This method should be implemented by child classes")

    @property
    def params(self):
        """Module parameters object (must be initialized by a child class)"""
        if self._params_ is None:
            raise NotImplementedError("Object _params_ should be initialized in a child class")
        return self._params_

    def _createFiducialsListNode_(self, nodeName, typesList):
        """
        Create a fiducials node based on the types list specified.
        Depending on the child class, the number of types-subtypes will change, so every child class should
        have its own implementation
        :param nodeName:
        :param typesList: list of types
        :return: fiducials list node
        """
        raise NotImplementedError("This method should be implemented by a child class")

    def setActiveFiducialsListNode(self, volumeNode, typesList, createIfNotExists=True):
        """
        Create a fiducials list node corresponding to this volume and this type list.
        Depending on the child class, the number of types-subtypes will change, so every child class should
        have its own implementation
        :param volumeNode: Scalar volume node
        :param typesList: list of types-subtypes. It can be a region-type-artifact or any other combination
        :param createIfNotExists: create the fiducials node if it doesn't exist yet for this subtype list
        :return: fiducials volume node
        """
        raise NotImplementedError("This method should be implemented by a child class")

    def getMarkupLabel(self, typesList):
        """
        Get the text that will be displayed in the fiducial for the corresponding types-subtypes combination
        :param typesList: list of types-subtypes. It can be a region-type-artifact or any other combination
        :return: label string for this fiducial
        """
        raise NotImplementedError("This method should be implemented by a child class")

    def getTypesListFromXmlPoint(self, geometryTopologyDataPoint):
        """
        Get a list of types that the module will use to operate from a Point object in a GeometryTopologyData object
        :param geometryTopologyDataPoint: GeometryTopologyData.Point object
        :return: list of types
        """
        raise NotImplementedError("This method should be implemented by a child class")

    def loadFiducialsXml(self, volumeNode, fileName):
        """ Load from disk a list of fiducials for a particular volume node
        :param volumeNode: Volume (scalar node)
        :param fileName: full path of the file to load the fiducials from
        """
        with open(fileName, "r") as f:
            xml = f.read()
        self.currentGeometryTopologyData = gtd.GeometryTopologyData.from_xml(xml)
        for point in self.currentGeometryTopologyData.points:
            # Activate the current fiducials list based on the type list
            typesList = self.getTypesListFromXmlPoint(point)
            fidListNode = self.setActiveFiducialsListNode(volumeNode, typesList)
            # Check if the coordinate system is RAS (and make the corresponding transform otherwise)
            if self.currentGeometryTopologyData.coordinate_system == self.currentGeometryTopologyData.LPS:
                coord = Util.lps_to_ras(point.coordinate)
            elif self.currentGeometryTopologyData.coordinate_system == self.currentGeometryTopologyData.IJK:
                coord = Util.ijk_to_ras(volumeNode, point.coordinate)
            else:
                # Try default mode (RAS)
                coord = point.coordinate
            # Add the fiducial
            fidListNode.AddFiducial(coord[0], coord[1], coord[2], self.getMarkupLabel(typesList))

    def getPointMetadataFromFiducialDescription(self, description):
        """
        Get the main metadata for a GeometryTopologyObject Point object (region, type, feature, description) from a
        fiducial description
        :param description: fiducial description
        :return: (region, type, feature, description) tuple for a point initialization
        """
        raise NotImplementedError("This method should be implemented by a child class")

    def saveCurrentFiducials(self, localFilePath, caseNavigatorWidget=None, callbackFunction=None, saveInRemoteRepo=False):
        """ Save all the fiducials for the current volume.
        :param localFilePath: destination file (local)
        :param caseNavigatorWidget: case navigator widget (optional)
        :param callbackFunction: function to invoke when the file has been uploaded to the server (optional)
        :param saveInRemoteRepo: also upload the file through the case navigator
        """
        volume = slicer.mrmlScene.GetNodeByID(self.currentVolumeId)
        # If there is already a xml file in the results directory, make a copy.
        if os.path.isfile(localFilePath):
            # Make a copy of the file for history purposes
            copyfile(localFilePath, localFilePath + "." + time.strftime("%Y%m%d.%H%M%S"))

        # Iterate over all the fiducials list nodes
        pos = [0, 0, 0]
        geometryTopologyData = gtd.GeometryTopologyData()
        geometryTopologyData.coordinate_system = geometryTopologyData.LPS
        # Get the transformation matrix LPS-->IJK
        matrix = Util.get_lps_to_ijk_transformation_matrix(volume)
        geometryTopologyData.lps_to_ijk_transformation_matrix = Util.convert_vtk_matrix_to_list(matrix)
        # Save spacing and origin of the volume
        geometryTopologyData.origin = volume.GetOrigin()
        geometryTopologyData.spacing = volume.GetSpacing()
        geometryTopologyData.dimensions = volume.GetImageData().GetDimensions()

        # Get the hashtable and seed from previously loaded GeometryTopologyData object (if available)
        if self.currentGeometryTopologyData is None:
            hashTable = {}
        else:
            hashTable = self.currentGeometryTopologyData.get_hashtable()
            geometryTopologyData.seed_id = self.currentGeometryTopologyData.seed_id

        # Get a timestamp that will be used for all the points
        timestamp = gtd.GeometryTopologyData.get_timestamp()

        for fidListNode in slicer.util.getNodes("{0}_fiducials_*".format(volume.GetName())).values():
            # Get all the markups
            for i in range(fidListNode.GetNumberOfMarkups()):
                fidListNode.GetNthFiducialPosition(i, pos)
                # Get the type from the description (region will always be 0)
                desc = fidListNode.GetNthMarkupDescription(i)
                # Switch coordinates from RAS to LPS
                lps_coords = Util.ras_to_lps(list(pos))
                pointMetadata = self.getPointMetadataFromFiducialDescription(desc)
                p = gtd.Point(pointMetadata[0], pointMetadata[1], pointMetadata[2], lps_coords, description=pointMetadata[3])
                key = p.get_hash()
                if key in hashTable:
                    # Add previously existing point
                    geometryTopologyData.add_point(hashTable[key], fill_auto_fields=False)
                else:
                    # Add a new point with a precalculated timestamp (shared by
                    # every point saved in this session)
                    geometryTopologyData.add_point(p, fill_auto_fields=True)
                    p.timestamp = timestamp

        # Get the xml content file
        xml = geometryTopologyData.to_xml()
        # Save the file
        with open(localFilePath, 'w') as f:
            f.write(xml)

        # Use the new object as the current GeometryTopologyData
        self.currentGeometryTopologyData = geometryTopologyData

        # Upload to MAD if we are using the ACIL case navigator
        if saveInRemoteRepo:
            caseNavigatorWidget.uploadFile(localFilePath, callbackFunction=callbackFunction)
        # Mark the current volume as saved
        self.savedVolumes[volume.GetName()] = True

    def removeLastFiducial(self):
        """ Remove the last markup that was added to the scene. It will remove all the markups if the user wants
        """
        fiducialsList = slicer.mrmlScene.GetNodeByID(self.markupsLogic.GetActiveListID())
        if fiducialsList is not None:
            # Remove the last fiducial
            fiducialsList.RemoveMarkup(fiducialsList.GetNumberOfMarkups() - 1)
            # FIX: savedVolumes is keyed by volume NAME everywhere else
            # (saveCurrentFiducials / isVolumeSaved); the previous code used
            # the raw node id as key, so the "unsaved" flag was never seen.
            volume = slicer.mrmlScene.GetNodeByID(self.currentVolumeId) if self.currentVolumeId else None
            if volume is not None:
                self.savedVolumes[volume.GetName()] = False

    def isVolumeSaved(self, volumeName):
        """ True if there are no markups unsaved for this volume
        :param volumeName:
        :return:
        """
        if volumeName not in self.savedVolumes:
            raise Exception("Volume {0} is not in the list of managed volumes".format(volumeName))
        return self.savedVolumes[volumeName]

    def removeMarkupsAndNode(self, volume):
        """ Remove all the fiducials nodes associated to this volume and the volume itself
        :param volume: scalar volume node
        """
        nodes = slicer.util.getNodes(volume.GetName() + "_*")
        for node in nodes.values():
            slicer.mrmlScene.RemoveNode(node)
        slicer.mrmlScene.RemoveNode(volume)
        self.currentGeometryTopologyData = None
class CIP_PointsLabellingTest(ScriptedLoadableModuleTest):
    """Placeholder test case for the CIP_PointsLabelling module."""

    def setUp(self):
        """Reset the state before each test; clearing the scene is enough."""
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Run every test of this case after resetting the state."""
        self.setUp()
        for test in (self.test_CIP_PointsLabelling,):
            test()

    def test_CIP_PointsLabelling(self):
        # No real test has been written for this module yet
        self.fail("Test not implemented!")
|
acil-bwh/SlicerCIP
|
Scripted/CIP_PointsLabelling/CIP_PointsLabelling.py
|
Python
|
bsd-3-clause
| 28,331
|
[
"VTK"
] |
34de21726260dcdfa7384d861ca9079d0a38ab1fcfc391fd3be2e2d152e06e8c
|
#!/usr/bin/env python
"""
Kukkaisvoima a lightweight weblog system.
Copyright (C) 2006-2012 Petteri Klemola
Kukkaisvoima is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License version 3
as published by the Free Software Foundation.
Kukkaisvoima is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with Kukkaisvoima. If not, see
<http://www.gnu.org/licenses/>.
"""
import cgi
import pickle
import os
import sys
from urllib import quote_plus, unquote_plus
from time import localtime, strptime, strftime, time
from sets import Set
from datetime import datetime, timedelta
import cgitb; cgitb.enable()
import smtplib
from email.MIMEText import MIMEText
import re
import locale
import random
# kludge to get md5 hexdigest working on all python versions. Function
# md5fun should be used only to get ascii like this
# md5fun("kukkaisvoima").hexdigest()
try:
from hashlib import md5 as md5fun
except ImportError: # older python (older than 2.5) does not hashlib
import md5
md5fun = md5.new
####################################
from jinja2 import Environment, FileSystemLoader
sys.path.append("../")
import ghShared
import dbSession
import dbShared
import ghLists
import Cookie
linkappend = ''
logged_state = 0
currentUser = ''
emailAddr = ''
loginResult = ''
uiTheme = 'crafter'
####################################
# Config variables
# Url of the blog (without trailing /)
baseurl = 'http://yourdomain/blog/index.cgi'
# Use absolute url for this, like http://yourdomain/blog/kukka.css
stylesheet = 'kukka.css'
# Use absolute url for this, like http://yourdomain/blog/feed-icon-14x14.png
feedicon = 'feed-icon-14x14.png'
blogname = 'Kukkaisvoima'
slogan = 'Default installation'
description = "Jee"
encoding = 'iso-8859-15'
defaultauthor = 'You'
favicon = 'http://yourdomain/favicon.ico'
doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
# Email to send comments to
blogemail = 'you@yourdomain'
# Language for the feed
language = 'en'
# Number of entries per page
numberofentriesperpage = 10
# Directory which contains the blog entries
datadir = '.'
# Directory which contains the index and comments. Must be script
# writable directory
indexdir = 'temp'
# Maximum comments per entry. Use -1 for no comments and 0 for no
# restriction
maxcomments = 30
# answer to spamquestion (question variable is l_nospam_question)
nospamanswer = '5'
# This is admin password to manage comments. password should be
# something other than 'password'
passwd = 'password'
# New in version 10
sidebarcomments = True
# Gravatar support (picture in comments according to email), see
# http://gravatar.com for more information
gravatarsupport = True
# Entry and comment Date format
dateformat = "%F"
# Show only first paragraph when showing many entries
entrysummary = False
# Language variables
l_archives = 'Archives'
l_categories = 'Categories'
l_comments = 'Comments'
l_comments2 = 'Comments'
l_date = 'Date'
l_nextpage = 'Next page'
l_previouspage = 'Previous page'
l_leave_reply = 'Leave reply'
l_no_comments_allowed = 'No comments allowed'
l_no_comments = 'No comments'
l_name_needed = 'Name (needed)'
l_email_needed = 'Email (needed)'
l_webpage = 'Webpage'
l_no_html = 'No html allowed in reply'
l_nospam_question = 'What\'s 2 + 3?'
l_delete_comment = 'Delete comment'
l_passwd = 'Admin password'
l_admin = 'Admin'
l_admin_comments = 'Manage comments'
l_do_you_delete = 'Your about to delete comment this, are you sure you want to that?'
# new in version 8
l_search = "Search"
l_search2 = "No matches"
# new in version 10
l_recent_comments = "Recent comments"
l_notify_comments= "Notify me of follow-up comments via email."
# new in version 11
l_read_more = "<p>Continue reading...</p>"
# new in version 12
l_toggle = "Click year to show months"
# import user settings
from kukkaisvoima_settings import *
# version
version = '14'
# for date collisions
dates = {}
datenow = datetime.now()
datenow_date = datenow.date()
def timeAgo(date):
    """Return a relative-age suffix such as ", 2 years and 3 days ago".

    Returns the empty string for today's (or a future) date.  Uses the
    module-level datenow_date as "now".
    """
    entry_day = date.date()
    if entry_day >= datenow_date:
        return ""
    total_days = (datenow_date - entry_day).days
    if total_days <= 0:
        return ""
    suffix = ", "
    # Approximate years as 365-day blocks, like the original display code.
    years, days = divmod(total_days, 365)
    if years > 0:
        if years == 1:
            suffix += "1 year"
        else:
            suffix += "%d years" % years
        if days > 0:
            suffix += " and "
    if days == 1:
        suffix += "1 day"
    elif days > 1:
        suffix += "%d days" % days
    return suffix + " ago"
def dateToString(date):
    """Format a datetime with the configured dateformat plus a relative-age suffix."""
    formatted = strftime(dateformat, date.timetuple())
    return formatted + timeAgo(date)
def generateDate(fileName):
    """Derive an entry's datetime from its file name and modification time.

    Entry file names look like "name:YYYY-MM-DD:cat1,cat2.txt".  The date
    part of the name supplies the day while the file's mtime supplies the
    time of day.  When the name's date part cannot be parsed, the mtime
    alone is used.  Colliding timestamps get one second added repeatedly so
    every entry ends up with a unique datetime (recorded in the module-level
    ``dates`` map).

    :param fileName: path to the entry file
    :return: unique datetime for the entry
    """
    name, date, categories = fileName[:-4].split(':')
    mtime = os.stat(fileName).st_mtime
    filedate = datetime(*localtime(mtime)[0:6])
    date = "%s %s:%s:%s" % (date,
                            filedate.hour,
                            filedate.minute,
                            filedate.second)
    try:
        date = datetime(*strptime(
            date, '%Y-%m-%d %H:%M:%S')[0:6])
    except ValueError:
        # Malformed date part in the file name: fall back to the mtime.
        # (Was a bare except, which also swallowed SystemExit and friends.)
        date = filedate
    # If a date collision happens add seconds until the date is unique.
    if date in dates and not dates[date] == fileName:
        while date in dates:
            date += timedelta(seconds=1)
    dates[date] = fileName
    return date
def sendEmail(to, subject, message):
    """Send a plain-text notification mail through the local SMTP server.

    The body is wrapped with wrapEmail and encoded with the blog's
    configured encoding; the sender address is built from baseurl/blogemail.
    """
    mail = MIMEText(_text=wrapEmail(message), _charset='%s' % encoding)
    mail['subject'] = subject
    sender = 'Kukkaisvoima blog (%s) <%s>' % (baseurl, blogemail)
    server = smtplib.SMTP()
    server.connect()
    server.sendmail(sender, to, mail.as_string())
    server.close()
def wrapEmail(text):
    """Wrap some lines. Long words with no spaces are preserved."""
    wrapped = []
    for line in text.splitlines():
        if not line:
            # Keep empty lines (paragraph breaks) as-is.
            wrapped.append('')
            continue
        while line:
            if len(line) < 73:
                wrapped.append(line)
                break
            cut = line.rfind(' ', 0, 72)
            if cut == -1:
                # No space to break on: keep the long word intact.
                wrapped.append(line)
                break
            # Break just after the space, exactly like the original.
            wrapped.append(line[:cut + 1])
            line = line[cut + 1:]
    return '\n'.join(wrapped)
def removeHtmlTags(line):
    """Removes html tags from line, works also for partial tags, so
    all < > will be removed.
    """
    while line.find("<") > -1 or line.find(">") > -1:
        # There are still tag characters left to strip.
        start = line.find("<")
        end = line.find(">")
        if start > -1:
            # Found a start tag: look for its closing '>' after it.
            end = line.find(">", start + 1)
            if end > -1:
                # Found the end: drop everything from '<' to '>' inclusive.
                line = line[:start] + line[end + 1:]
            else:
                # Unterminated tag: drop from '<' to the end of the line.
                # (The original sliced with the stale 'end' index (-1),
                # chopping only one trailing char per loop iteration —
                # same final result, but quadratic.)
                line = line[:start]
        elif end > -1:
            # A '>' with no '<': the tag opened earlier; drop the prefix.
            line = line[end + 1:]
    return line
def search(pattern, lines):
    """Search compiled regex ``pattern`` in ``lines`` and return formatted hits.

    Each hit is rendered as '%04d: before<div id="hit">match</div>after\\n'
    with the context trimmed/padded to roughly 74 characters around the match.
    """
    matchlines = list()
    linenumber = 0
    for line in lines:
        m = pattern.search(line)
        # we don't want to process every line, so remove html
        # from only those lines that match our search
        if m:
            line = removeHtmlTags(line)
            # match again since the line has changed
            line = line.strip()
            m = pattern.search(line)
            if not m:
                # NOTE(review): this `continue` also skips the
                # `linenumber += 1` at the bottom of the loop, so reported
                # line numbers drift after such a line — confirm intended.
                continue
            # even the line out with ..starting match ending...
            linelength = 74
            startline = line[:m.start()]
            middleline = line[m.start():m.end()]
            endline = line[m.end():].rstrip()
            # Budget the remaining width evenly between the two sides
            # (Python 2 integer division).
            tokenlength = (linelength - len(middleline))/2
            if len(startline) >= tokenlength and len(endline) >= tokenlength:
                startline = startline[-tokenlength:]
                endline = endline[:tokenlength]
            elif len(startline) < tokenlength and len(endline) < tokenlength:
                pass
            elif len(startline) < tokenlength:
                # Short prefix: give its unused budget to the suffix.
                endline = endline[:tokenlength + (tokenlength - len(startline))]
            elif len(endline) < tokenlength:
                # Short suffix: give its unused budget to the prefix.
                actual_le = tokenlength + (tokenlength - len(endline))
                startline = startline[-actual_le:]
            # Ellipses are added unconditionally, even for untrimmed context.
            startline = "..." + startline.lstrip()
            endline = endline.rstrip() + "..."
            matchlines.append("%04d: %s<div id=\"hit\">%s</div>%s\n" %(
                linenumber,
                startline,
                middleline,
                endline))
        linenumber += 1
    return matchlines
class Comment:
    """One blog comment, pickled to disk alongside its entry."""

    # Matches plain-text URLs so getText can turn them into links.
    urlre = re.compile('(http|https|ftp)://([A-Za-z0-9/:@_%~#=&\.\-\?\+]+)')

    def __init__(self, author, email, url, comment, subscribe):
        self.author = author
        self.email = email
        self.url = url
        self.comment = comment
        self.date = datenow
        self.subscribe = subscribe
        random.seed()
        # Random id used in unsubscribe links.
        self.id = "%016x" % random.getrandbits(128)

    def getUrl(self):
        """Commenter's homepage url with a scheme guaranteed, or None.

        Bug fix: https:// and ftp:// urls used to be prefixed again with
        'http://', producing broken links like 'http://https://...'.
        """
        url = self.url
        if not url:
            return None
        if not url.startswith(('http://', 'https://', 'ftp://')):
            url = 'http://%s' % url
        return url

    def getAuthorLink(self):
        """Author name, rendered as a link when a homepage url was given."""
        url = self.getUrl()
        if url is None:
            return "%s" % self.author
        else:
            return "<a href=\"%s\" rel=\"external nofollow\">%s</a>"\
                % (url, self.author)

    def getText(self):
        """Comment body as html: CRLF becomes <br /> and urls become links."""
        # (Removed a dead `comment = str(self.comment)` that was immediately
        # overwritten in the original.)
        comment = self.comment.replace('\r\n', '<br />')
        comment = self.urlre.sub(r'<a href="\1://\2">\1://\2</a>',
                                 comment)
        return comment

    def getFirstLine(self):
        """First line of the html-formatted comment body."""
        return self.getText().split("<br />")[0]

    def getEmailMd5Sum(self):
        """md5 hexdigest of the lower-cased email, used for gravatar urls."""
        return md5fun(self.email.lower()).hexdigest()

    def getSubEmail(self):
        """Email to notify about follow-ups, or None when not subscribed."""
        try:
            if self.subscribe is None:
                return None
            else:
                return self.email
        except AttributeError:
            # Comments pickled by old versions lack the 'subscribe' attribute.
            return None

    def getId(self):
        """Comment id, or None for comments pickled by old versions."""
        try:
            return self.id
        except AttributeError:
            return None
def pickleComment(author, email, url, comment, filename, indexdir, subscribe):
    """Append a new Comment to the entry's comment pickle and return it.

    Also refreshes the recent-comments index.
    """
    # Strip path separators so a crafted entry name cannot escape indexdir.
    filename = filename.replace('/', '').replace('\\', '')
    filename = "%s.txt" % filename
    path = os.path.join(indexdir, 'comment-%s' % filename)
    # Load any existing comments; a missing or unreadable file means none.
    comments = list()
    try:
        infile = open(path, 'rb')
        comments = pickle.load(infile)
        infile.close()
    except:
        pass
    new_comment = Comment(author, email, url, comment, subscribe)
    comments.append(new_comment)
    outfile = open(path, 'wb')
    pickle.dump(comments, outfile)
    outfile.close()
    updateCommentList()
    return new_comment
def getComments(filename):
    """Return the list of pickled comments for an entry (empty when none)."""
    path = os.path.join(indexdir, 'comment-%s' % filename)
    comments = list()
    if not os.path.exists(path):
        return comments
    try:
        stored = open(path, 'rb')
        comments = pickle.load(stored)
        stored.close()
    except:
        # Unreadable/corrupt pickle: behave as if there were no comments.
        pass
    return comments
def deleteComment(filename, commentnum):
    """Delete comment number ``commentnum`` (1-based) from an entry.

    Rewrites the entry's comment pickle and refreshes the recent-comments
    index.
    """
    comments = getComments(filename)
    del comments[commentnum - 1]
    outfile = open(os.path.join(indexdir, 'comment-%s' % filename), 'wb')
    pickle.dump(comments, outfile)
    outfile.close()
    updateCommentList()
    return
def unsubscribeComments(filename, unsubscribe_id):
    """Clear the follow-up subscription of the comment matching ``unsubscribe_id``."""
    comments = getComments(filename)
    for existing in comments:
        if existing.getId() == unsubscribe_id:
            existing.subscribe = None
    outfile = open(os.path.join(indexdir, 'comment-%s' % filename), 'wb')
    pickle.dump(comments, outfile)
    outfile.close()
def getCommentList():
    """Gets list of comments from the comment index"""
    index_path = os.path.join(indexdir, 'recent_comments.index')
    # Generate the recent-comments index on first use.
    if not os.path.exists(index_path):
        updateCommentList()
    commentlist = list()
    try:
        comindex = open(index_path, 'rb')
        commentlist = pickle.load(comindex)
        comindex.close()
    except:
        pass
    return commentlist
def updateCommentList():
    """Rebuild the pickled index of the ten most recent comments."""
    all_comments = list()
    for cfile in os.listdir(indexdir):
        if not cfile.startswith("comment-"):
            continue
        entry_file = cfile.replace("comment-", "", 1)
        # Remember each comment with its 1-based position inside the entry.
        for position, cm in enumerate(getComments(entry_file)):
            all_comments.append((entry_file, cm, position + 1))
    # Newest first; keep only the ten latest.
    all_comments.sort(key=lambda item: item[1].date, reverse=True)
    recent = list()
    for entry_file, cm, position in all_comments[:10]:
        # Fetch the headline of the commented entry for display.
        entry = Entry(entry_file, datadir)
        recent.append({"authorlink": cm.getAuthorLink(),
                       "file": entry_file,
                       "num": position,
                       "author": cm.author,
                       "subject": entry.headline})
    outfile = open(os.path.join(indexdir, 'recent_comments.index'), 'wb')
    pickle.dump(recent, outfile)
    outfile.close()
def getSubscribedEmails(comments):
    """Map each subscribed email address to the id of its latest comment.

    Later comments overwrite earlier ones, so there is at most one entry
    per address (no duplicate notification emails).
    """
    emails = dict()
    for com in comments:
        address = com.getSubEmail()
        if address:
            emails[address] = com.getId()
    return emails
def handleIncomingComment(fs):
    """Handles incoming comment and returns redirect location when
    successful and None in case of an error

    ``fs`` is a cgi.FieldStorage with the posted comment form fields.
    Side effects: pickles the comment, refreshes the comment index and
    sends notification emails (best effort).
    """
    author = fs.getvalue('author')
    email = fs.getvalue('email')
    url = fs.getvalue('url')
    comment = fs.getvalue('comment')
    name = fs.getvalue('name')
    commentnum = fs.getvalue('commentnum')
    headline = fs.getvalue('headline')
    nospam = fs.getvalue('nospam')
    subscribe = fs.getvalue('subscribe')
    filename = "%s.txt" % name
    comments_for_entry = getComments(filename)
    # validate comment
    if not author:
        return None
    if not email:
        return None
    if not comment:
        return None
    if not name:
        return None
    if maxcomments == -1: # no comments allowed
        return None
    # NOTE(review): the config comment says maxcomments == 0 means "no
    # restriction", but len(...) >= 0 is always true, so 0 actually blocks
    # every comment — confirm intended behavior.
    if len(comments_for_entry) >= maxcomments: # no more comments allowed
        return None
    if nospam != nospamanswer: # wrong answer
        return None
    # remove html tags
    comment = comment.replace('<','[')
    comment = comment.replace('>',']')
    # only one subscription per email address
    if subscribe and \
       email in getSubscribedEmails(comments_for_entry).keys():
        subscribe = None
    new_comment = \
        pickleComment(author, email, url, comment, name, indexdir, subscribe)
    comm_url = new_comment.getUrl()
    if comm_url:
        comm_url = "\nWebsite: %s" % comm_url
    else:
        comm_url = ""
    email_subject = 'New comment in %s' % headline
    email_body = 'Name: %s%s\n\n%s\n\nlink:\n%s/%s#comment-%s' \
        % (author, comm_url, comment, baseurl, name, commentnum)
    # notify blog owner and comment subscribers about the
    # new comment. Email sending may fail for some reason,
    # so try sending one email at time.
    try:
        email_body_admin = email_body
        if subscribe:
            email_body_admin += \
                "\n\nNote: commenter subscribed (%s) to follow-up comments" \
                % email
        sendEmail(blogemail, email_subject, email_body_admin)
    except:
        pass # TODO log errors, for now just fail silently
    # add disclaimer for subscribers
    email_body += \
        "\n\n******\nYou are receiving this because you have signed up for email notifications. "
    # Fan out to subscribers of this entry (one try per address so a single
    # failure does not stop the rest).
    email_and_id = getSubscribedEmails(comments_for_entry)
    for subscribe_email in email_and_id.iterkeys():
        comm_id = email_and_id[subscribe_email]
        try:
            email_body_comment = email_body
            email_body_comment += \
                "Click here to unsubscribe instantly: %s/%s?unsubscribe=%s\n" \
                % (baseurl, name, comm_id)
            sendEmail(subscribe_email, email_subject, email_body_comment)
        except:
            pass # TODO log errors, for now just fail silently
    # redirect
    return 'Location: %s/%s#comment-%s\n' % (baseurl, name, commentnum)
class Entry:
    """A blog entry loaded from a "name:YYYY-MM-DD:cat1,cat2.txt" file.

    The first non-comment line of the file is the headline; the remainder
    is the html body.  Lines starting with '#' are ignored.
    """

    # First paragraph (or up to the next <p>) of the entry body.
    firstpre = re.compile("<p.*?(</p>|<p>)", re.DOTALL | re.IGNORECASE)
    whitespacere = re.compile("\s")

    def __init__(self, fileName, datadir):
        self.fileName = fileName
        self.fullPath = os.path.join(datadir, fileName)
        # Close the handle explicitly (the original leaked it until GC).
        infile = open(self.fullPath)
        self.text = infile.readlines()
        infile.close()
        self.text = [line for line in self.text if not line.startswith('#')]
        self.headline = self.text[0]
        self.text = self.text[1:]
        self.author = defaultauthor
        name, date, categories = fileName[:-4].split(':')
        self.cat = categories.split(',')
        self.date = generateDate(self.fullPath)
        self.comments = getComments(self.fileName)
        self.url = "%s/%s" % (baseurl,
                              quote_plus(self.fileName[:-4]))

    def getFirstParagraph(self):
        """Return the first paragraph as a list of lines, appending a
        "continue reading" link when the entry has more content."""
        # look for <p>
        text_as_str = "".join(self.text)
        m = self.firstpre.search(text_as_str)
        if m is not None:
            there_is_more = False
            there_is_more_str = "<a href=\"%s\">%s</a>" % \
                (self.url, l_read_more)
            first_paragraph = m.group().split("\n")
            # Compare whitespace-free lengths to detect extra content.
            text_as_str = re.sub(self.whitespacere, '', text_as_str)
            first_paragraph_as_string = re.sub(self.whitespacere, '', m.group())
            lastline = first_paragraph.pop()
            # Bug fix: the original called lastline.strip() and discarded
            # the result, so a trailing '\r' (CRLF files) defeated the
            # endswith("</p>") check below.
            lastline = lastline.strip()
            if len(text_as_str) > len(first_paragraph_as_string):
                there_is_more = True
            if there_is_more and lastline.lower().endswith("</p>"):
                lastline = lastline[:-4]  # removes the </p>
                there_is_more_str = there_is_more_str + "</p>"
            if there_is_more:  # and tip that there is still more to this entry
                lastline = lastline + there_is_more_str
            first_paragraph.append(lastline)
            return first_paragraph
        else:
            return self.text

    def getText(self, summary=False):
        """Full body, or just the first paragraph when summary is true."""
        if summary == True:
            return self.getFirstParagraph()
        else:
            return self.text
class Entries:
    """Access to blog entries through the pickled per-category index files."""

    def __init__(self, indexdir):
        self.date = {}
        self.categories = {}
        self.indexdir = indexdir

    def add(self, entry):
        """Register an entry under its date and under each of its categories."""
        self.date[entry.date] = entry
        for cat in entry.cat:
            self.categories.setdefault(cat, {})[entry.date] = entry

    def getOne(self, name):
        """Load a single entry by file name, returned as a one-element list."""
        return [Entry(name, datadir)]

    def _loadIndex(self, cat):
        # Read the pickled date->filename index for the main page or a category.
        indexfile = 'main.index'
        if cat is not None:
            indexfile = '%s.index' % cat
        infile = open(os.path.join(self.indexdir, indexfile), 'rb')
        index = pickle.load(infile)
        infile.close()
        return index

    def getMany(self, pagenumber=0, cat=None, numberofentriesperpage=10):
        """Load the entries for one page, newest first; pagenumber -1 means all."""
        index = self._loadIndex(cat)
        ordered = sorted(index.keys(), reverse=True)
        if pagenumber == -1:
            pass  # no limit
        elif pagenumber > 0:
            first = numberofentriesperpage * pagenumber
            ordered = ordered[first:first + numberofentriesperpage]
        else:
            ordered = ordered[:numberofentriesperpage]
        return [Entry(index[key], datadir) for key in ordered]

    def getTotal(self, cat=None):
        """Total number of entries, overall or for one category."""
        return len(self._loadIndex(cat))
def renderHtmlFooter():
    """Print the page footer and close the html document."""
    print "<div id=\"footer\">Powered by <a href=\"http://23.fi/kukkaisvoima\">Kukkaisvoima</a> version %s</div>" % version
    print "</div>" # content1
    ################################
    # Galaxy Harvester site-wide footer (host-site integration block).
    print "</div>"
    print '<div class="footer">'
    print '<a href="/" title="Galaxy Harvester home page">Home</a> | <a href="{{ BASE_SCRIPT_URL }}help.py" title="Get more information about the Galaxy Harvester project and contributors">About</a> | <a href="http://www.ioscode.net:8000/cgi-bin/code" title="Open Source Code for Galaxy Harvester Application">Source</a> | <a href="#" onclick="contactMessage();">Contact</a> | <a href="/help.py" title="Get help using this site">Help</a> | <a href="/feedback.py" title="Submit and rate on suggestions for the site.">Suggestions</a>'
    print '<p style="padding: 4px;font-weight:normal;">Galaxy Harvester is a tool for tracking resources spawning in galaxies of the MMO game Star Wars Galaxies Emulator. Serving the SWGEmu Community Since 2010.</p>'
    print '</div>'
    ################################
    print "</body>"
    print "</html>"
def renderHtmlHeader(title=None, links=[]):
    """Print the html head, comment-validation javascript and page header.

    Also resolves the Galaxy Harvester user session from cookies / form
    values and renders the host site's template banner.
    ``links`` is a list of extra <link> tags (e.g. category RSS feeds).
    """
    #################################################3
    # Galaxy Harvester session handling (host-site integration block).
    form = cgi.FieldStorage()
    # Get Cookies
    useCookies = 1
    emailAddr = ''
    linkappend = ''
    logged_state = 0
    currentUser = ''
    loginResult = ''
    uiTheme = ''
    cookies = Cookie.SimpleCookie()
    try:
        cookies.load(os.environ['HTTP_COOKIE'])
    except KeyError:
        useCookies = 0
    if useCookies:
        try:
            currentUser = cookies['userID'].value
        except KeyError:
            currentUser = ''
        try:
            loginResult = cookies['loginAttempt'].value
        except KeyError:
            loginResult = 'success'
        try:
            sid = cookies['gh_sid'].value
        except KeyError:
            sid = form.getfirst('gh_sid', '')
        try:
            uiTheme = cookies['uiTheme'].value
        except KeyError:
            uiTheme = ''
    else:
        # No cookies: fall back to form parameters.
        currentUser = ''
        loginResult = form.getfirst('loginAttempt', '')
        sid = form.getfirst('gh_sid', '')
    # Get a session
    # escape input to prevent sql injection
    sid = dbShared.dbInsertSafe(sid)
    if loginResult == None:
        loginResult = 'success'
    sess = dbSession.getSession(sid, 2592000)
    if (sess != ''):
        # Valid session: trust the server-side user id, not the cookie.
        logged_state = 1
        currentUser = sess
        if (uiTheme == ''):
            uiTheme = dbShared.getUserAttr(currentUser, 'themeName')
        if (useCookies == 0):
            linkappend = 'gh_sid=' + sid
        conn = dbShared.ghConn()
        cursor = conn.cursor()
        # NOTE(review): string-built SQL; currentUser comes from the session
        # store here, but parameterized queries would still be safer.
        cursor.execute('SELECT userID, emailAddress FROM tUsers WHERE userID="' + currentUser + '"')
        row = cursor.fetchone()
        if (row != None):
            emailAddr = row[1]
        cursor.close()
        conn.close()
    else:
        if (uiTheme == ''):
            uiTheme = 'crafter'
    pictureName = dbShared.getUserAttr(currentUser, 'pictureName')
    env = Environment(loader=FileSystemLoader('templates'))
    env.globals['BASE_SCRIPT_URL'] = ghShared.BASE_SCRIPT_URL
    env.globals['MOBILE_PLATFORM'] = ghShared.getMobilePlatform(os.environ['HTTP_USER_AGENT'])
    template = env.get_template('blog.html')
    #################################################
    # Standard html head.
    print "Content-Type: text/html; charset=%s\n" % encoding
    print doctype
    print "<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"%(lang)s\" lang=\"%(lang)s\">" % {'lang':language}
    print "<head>"
    if title:
        print "<title>%s | %s - %s</title>" % (title, blogname, slogan)
    else:
        print "<title>%s - %s </title>" % (blogname, slogan)
    print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=%s\" />" % encoding
    print "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\" />" % stylesheet
    print "<link rel=\"shortcut icon\" href=\"%s\"/>" % favicon
    print "<link rel=\"alternate\" type=\"application/rss+xml\" title=\"%s RSS Feed\" href=\"%s/feed/\" />" % (blogname, baseurl)
    # print additional links
    for i in links:
        print i
    # Javascript. Used to validate comment form, nice eh :P
    print """
<script type="text/javascript">
/* <![CDATA[ */
function toggle_years(id)
{
var elem = document.getElementById(id);
if (elem.style.display != 'none')
{
elem.style.display = 'none';
}
else
{
elem.style.display = '';
}
}
function toggle_categories(classname)
{
var elems = document.getElementsByClassName(classname);
for (var i = 0; i < elems.length; i++)
{
var elem = elems[i];
if (elem.style.display != 'none')
{
elem.style.display = 'none';
}
else
{
elem.style.display = '';
}
}
}
function validate_not_null(field, msg)
{
if (field.value == null || field.value == "")
{
alert(msg);
return false;
}
return true;
}
function validate_email(field, msg)
{
at_index = field.value.indexOf("@");
if ((at_index > 1) && (field.value.lastIndexOf(".") > at_index))
{
return true;
}
alert(msg);
return false;
}
function validate_nospam(field, msg)
{
if (field.value == "%s")
{
return true;
}
alert(msg);
return false;
}
function validate_form(thisform)
{
with (thisform)
{
if (validate_not_null(author, "Name must be filled in") == false)
{
author.focus();
return false;
}
if (validate_email(email,"Email must be filled in and must be valid!") == false)
{
email.focus();
return false;
}
if (validate_nospam(nospam, "Wrong answer!") == false)
{
nospam.focus();
return false;
}
if (validate_not_null(comment, "Comment cannot be empty") == false)
{
comment.focus();
return false;
}
}
}
/* ]]> */
</script>
""" % (nospamanswer)
    print "</head>"
    print "<body>"
    ##################################
    # Host-site banner rendered through the jinja2 template.
    print template.render(uiTheme=uiTheme, loggedin=logged_state, currentUser=currentUser, loginResult=loginResult, linkappend=linkappend, url='/blog.py', pictureName=pictureName, imgNum=ghShared.imgNum, galaxyList=ghLists.getGalaxyList())
    print "<div id=\"blogBox\" class=\"ghWidgetBox\">"
    #############################################
    # Blog page header.
    print "<div id=\"content1\">"
    print "<div id=\"header\">"
    print "<h1><a href=\"%s\">%s</a></h1>" % (baseurl, blogname)
    print "<div id=\"slogan\">%s</div>" % slogan
    print "</div>" #header
def renderComment(entry, comment, numofcomment,
                  admin=False, pretext=False):
    """Print one comment as an <li> element.

    ``numofcomment`` is the 1-based position used in anchors; ``admin``
    adds a delete link; ``pretext`` replaces the comment body (used by
    search results).
    """
    print "<li>"
    if gravatarsupport:
        # Gravatar image keyed by the commenter's email md5.
        print "<img style=\"padding-right:5px;\""
        print "src=\"http://gravatar.com/avatar/%s?s=40&d=identicon\" align=\"left\"/>" % (
            comment.getEmailMd5Sum())
    print "<cite>%s</cite>:" % comment.getAuthorLink()
    print "<br />"
    delcom = ""
    if admin:
        delcom = "<a href=\"%s/%s/?delcomment=%s\">(%s)</a>" % \
            (baseurl,
             quote_plus(entry.fileName[:-4]),
             numofcomment,
             l_delete_comment)
    print "<small><a name =\"comment-%s\" href=\"%s#comment-%s\">%s</a> %s </small>" % \
        (numofcomment,
         entry.url,
         numofcomment,
         dateToString(comment.date),
         delcom)
    if pretext:
        print pretext
    else:
        print "<p>%s</p>" % comment.getText()
    print "</li>"
def renderEntryLinks(entries, text=None, comment_tuple_list=None):
    # renders also some comments for search results
    """Print each entry as a list item with categories and date.

    ``text`` adds a <pre> block after the link (search context);
    ``comment_tuple_list`` is (comment, text, author_matches) tuples to
    render under each entry.
    """
    for entry in entries:
        link = "<li><a href=\"%s\">%s</a>" % (
            entry.url, entry.headline)
        index = 1
        # Comma-separated category list.
        for cat in entry.cat:
            link += "%s" % cat
            if index != len(entry.cat):
                link += ", "
            index += 1
        link += " (%s)" % entry.date
        if text:
            link += "<br /><pre>%s</pre>" % text
        print link
        if comment_tuple_list:
            print "<ol style=\"list-style-type:none;\">"
            numofcomment = 0
            for comment, ctext, author in comment_tuple_list:
                numofcomment = numofcomment +1
                if len(ctext) == 0 and len(author) == 0:
                    continue
                if len(ctext) == 0 and len(author) > 0:
                    # Author matched: show the first line of the comment,
                    # truncated to 60 characters.
                    comm_text = comment.getFirstLine()
                    comm_text = removeHtmlTags(comm_text)
                    three_dots = ""
                    if len(comm_text) > (60):
                        three_dots = "..."
                    ctext = "<p>%s%s</p>" % (comm_text[:60], three_dots)
                elif len(ctext) > 0:
                    ctext = "<pre>%s</pre>" % ctext
                renderComment(entry, comment, numofcomment, False, ctext)
            print "</ol>"
        print "</li>"
def renderCategories(catelist, ent, path):
    """Render the category overview page or one category's entry list.

    ``path`` is the split request path: ["categories"] lists everything,
    ["categories", <name>]-style two-element paths show a single category.
    """
    renderHtmlHeader("archive")
    print "<div id=\"content3\">"
    if len(path) == 1 and path[0] == "categories":
        sortedcat = catelist.keys()
        try:
            # Locale-aware ordering when available.
            sortedcat.sort(key=locale.strxfrm)
        except: # python < 2.4 fails
            sortedcat.sort()
        print "<h2>%s</h2>" % l_categories
        print "<ul>"
        for cat in sortedcat:
            print "<li><a href=\"%s/%s\">%s</a> (%s)</li>" % (
                baseurl, quote_plus(cat), cat, len(catelist[cat]))
            print "<ul>"
            renderEntryLinks(ent.getMany(-1, cat))
            print "</ul>"
        print "</ul>"
    elif len(path) == 2 and path[1] in catelist.keys():
        print "<h2>%s</h2>" % path[1]
        renderEntryLinks(ent.getMany(-1, path[1]))
    print "</div>" # content3
    renderHtmlFooter()
    return
def renderArchive(ent):
    """Render the archive page listing every entry with its date."""
    entries = ent.getMany(-1)
    renderHtmlHeader(l_archives)
    print "<div id=\"content3\">"
    print "<h2>%s (%d)</h2>" % (l_archives, len(entries))
    print "<ul>"
    renderEntryLinks(entries)
    print "</ul>"
    print "</div>" # content3
    renderHtmlFooter()
    return
def renderSearch(entries, searchstring):
    """Render search results over entry bodies, headlines and comments."""
    renderHtmlHeader(l_search)
    print "<div id=\"content3\">"
    # Remove some special character so that one don't exhaust the web
    # host with stupid .*? searches
    for i in ".^$*+?{[]\|()":
        searchstring = searchstring.replace(i,"")
    pattern = re.compile(searchstring, re.IGNORECASE)
    matchedfiles = {}
    for entry in entries:
        # first search in the entry
        matchedfiles[entry] = dict()
        matchedfiles[entry]["lines"] = search(pattern, entry.getText())
        matchedfiles[entry]["headline"] = search(pattern, [entry.headline])
        # then from the entry's comments
        matchedfiles[entry]["comments"] = dict()
        comments_matches = False
        for comment in entry.comments:
            mlines = search(pattern, comment.comment.splitlines())
            author = search(pattern, comment.author.splitlines())
            if len(mlines) > 0 or len(author) > 0:
                comments_matches = True
                matchedfiles[entry]["comments"][comment] = dict()
                matchedfiles[entry]["comments"][comment]["lines"] = mlines
                matchedfiles[entry]["comments"][comment]["author"] = author
        if len(matchedfiles[entry]["lines"]) == 0 and \
           len(matchedfiles[entry]["headline"]) == 0 and \
           comments_matches == False:
            # remove entries with no matches in text or in comments
            del(matchedfiles[entry])
    # Render each matching entry with its matching comment snippets.
    for entry in matchedfiles.iterkeys():
        com_list = list()
        for comment in matchedfiles[entry]["comments"].iterkeys():
            pline = ""
            for line in matchedfiles[entry]["comments"][comment]["lines"]:
                pline += line
            com_list.append((comment, pline,
                             matchedfiles[entry]["comments"][comment]["author"]))
        pline = ""
        for line in matchedfiles[entry]["lines"]:
            pline += line
        renderEntryLinks([entry], pline, com_list)
    if len(matchedfiles) == 0: # no matches
        print l_search2
    print "</div>" # content3
    renderHtmlFooter()
    return
def renderDeleteComments(entry, commentnum):
    """Render the admin confirmation page for deleting one comment.

    Shows the comment (1-based ``commentnum``) and a password-protected
    form that posts to the ?deletecomment handler.
    """
    renderHtmlHeader("comments")
    print "<div id=\"content3\">"
    comments = entry.comments
    if len(comments) < commentnum:
        # Out-of-range comment number: nothing to delete.
        print "<p>No comment</p>"
        print "</body></html>"
        return
    comment = comments[commentnum-1]
    print "<ol>"
    print "<li>"
    print "<cite>%s</cite>:" % comment.getAuthorLink()
    print "<br />"
    print "<small>%s</small>" % (dateToString(comment.date))
    print "<p>%s</p>" % comment.getText()
    print "</li>"
    print "</ol>"
    print "<p>%s</p>" % l_do_you_delete
    print "<form action=\"%s/%s/?deletecomment\" method=\"post\" id=\"deleteform\">" % (baseurl,
                                                                                       quote_plus(entry.fileName[:-4]))
    print "<input type=\"hidden\" name=\"commentnum\" id=\"commentnum\" value=\"%s\"/>" % (commentnum)
    print "<input type=\"hidden\" name=\"name\" id=\"name\" value=\"%s\"/>" % entry.fileName[:-4]
    print "<p><input type=\"password\" name=\"password\" id=\"password\" size=\"22\" tabindex=\"1\" />"
    print "<label for=\"password\"><small>%s</small></label></p>" % l_passwd
    print "<p><input name=\"submit\" type=\"submit\" id=\"submit\" tabindex=\"5\" value=\"Submit\" />"
    print "</p></form>"
    print "</div>" # content3
    renderHtmlFooter()
    return
def renderSidebarCategories(catelist, rss_categories):
    """Print the sidebar category list.

    With more than five categories only the five largest (plus any in
    ``rss_categories``) are shown; the rest are hidden behind a
    "Show more categories" javascript toggle.
    """
    categories = catelist.keys()
    try:
        categories.sort(key=locale.strxfrm)
    except: # python < 2.4 fails
        categories.sort()
    print "<h2><a href=\"%s/categories\">%s</a></h2>" % (baseurl, l_categories)
    hide_cat_str = ""
    topcategories = list()
    if len(categories) > 5:
        print "<a href=\"#\" onclick=\"toggle_categories('tcategory'); return false;\">Show more categories</a>"
        # Keep the five categories with the most entries always visible.
        topcategories = categories[:]
        topcategories.sort(key=lambda cat: len(catelist[cat]), reverse=True)
        topcategories = topcategories[:5]
        for cat in rss_categories:
            if cat not in topcategories:
                topcategories.append(cat)
        hide_cat_str = " class=\"tcategory\" style=\"display:none;\""
    print "<ul>"
    for cat in categories:
        add_str = ""
        if len(topcategories) > 0 and cat not in topcategories:
            add_str = hide_cat_str
        print "<li%s><a href=\"%s/%s\">%s</a> (%s)" % (
            add_str, baseurl, quote_plus(cat), cat, len(catelist[cat]))
        if cat in rss_categories:
            # Per-category RSS feed icon.
            print "<a href=\"%s/%s/feed\"><img alt=\"RSS Feed Icon\" src=\"%s\" style=\"vertical-align:top; border:none;\"/></a>" % \
                (baseurl, cat, feedicon)
        print "</li>"
    print "</ul>"
def renderSidebarSearch():
    """Print the sidebar search form (GET to the blog base url)."""
    print "<h2>%s</h2>" % l_search
    print "<form action=\"%s\" method=\"get\" id=\"searchform\">" % baseurl
    print "<input type=\"text\" name=\"search\" id=\"search\" size=\"15\" /><br />"
    print "<input type=\"submit\" value=\"%s\" />" % l_search
    print "</form>"
def renderSidebarCommments():
    """Print the recent-comments sidebar box (when sidebarcomments is on)."""
    if sidebarcomments:
        print "<h2>%s</h2>" % l_recent_comments
        comlist = getCommentList()
        if len(comlist) == 0:
            print "No comments yet"
        else:
            print "<ul>"
            for com in comlist:
                print "<li>%s on <a href=\"%s/%s#comment-%d\">%s</a>"\
                    % (com["author"], baseurl,
                       quote_plus(com["file"][:-4]), com["num"], com["subject"])
                print "</li>"
            print "</ul>"
def renderSidebarArchive(arclist):
    """Print the sidebar archive: years that expand into month lists.

    ``arclist`` maps "YYYY-MM" strings to entry lists.
    """
    print "<h2><a href=\"%s/archive\">%s</a> (%d)</h2>" % \
        (baseurl, l_archives,
         # total number of entries
         sum([len(l) for l in [i for i in arclist.itervalues()]]))
    print l_toggle
    print "<ul>"
    sortedarc = arclist.keys()
    sortedarc.sort()
    sortedarc.reverse()
    # get years from archive and sort them
    years = dict()
    for d in sortedarc:
        year = d.split("-", 1)[0]
        if years.has_key(year) is False:
            years[year] = list()
        years[year].append(d)
    years_keys = years.keys()
    years_keys.sort()
    years_keys.reverse()
    # display each year at top lovel and if visiability is toggled
    # then show months
    for year in years_keys:
        print "<li><a href=\"#\" onclick=\"toggle_years('con-year-%s'); return false;\">%s</a> (%d)" %\
            (year, year,
             # number of entries per year
             sum([len(arclist[dat]) for dat in years[year]]))
        print "<ul id=\"con-year-%s\" style=\"display:none;\">" % year
        for dat in years[year]:
            print "<li><a href=\"%s/%s\">%s</a> (%s)</li>" % (
                baseurl, dat, dat, len(arclist[dat]))
        print "</ul></li>"
    print "</ul>"
def renderSidebarAdmin(entries):
    """Print the admin sidebar box (only on single-entry pages)."""
    if len(entries) == 1:
        print "<h2>%s</h2>" % l_admin
        print "<ul>"
        print "<li><a href=\"%s/%s/?admin\" rel=\"nofollow\">%s</a>" % \
            (baseurl,
             quote_plus(entries[0].fileName[:-4]),
             l_admin_comments)
        print "</ul>"
def renderHtml(entries, path, catelist, arclist, admin, page, mode):
"""Render the blog. Some template stuff might be nice :D"""
categories = list()
if len(path) >= 1 and path[0] in catelist.keys():
categories.append(path[0])
elif len(entries) == 1:
categories = entries[0].cat
# title
title = None
summary = False
if len(entries) == 1:
title = entries[0].headline
elif len(categories) == 1:
title = categories[0]
if len(entries) > 1:
summary = entrysummary
rss = list()
# additional rss feeds
for cat in categories:
rss.append("<link rel=\"alternate\" type=\"application/rss+xml\" title=\"%s: %s RSS Feed\" href=\"%s/%s/feed/\" />" % \
(blogname,cat,baseurl,quote_plus(cat)))
if mode == "standalone":
print "Content-Type: text/html; charset=%s\n" % encoding
print doctype
print "<html>"
print "<head>"
if title:
print "<title>%s | %s - %s</title>" % (title, blogname, slogan)
else:
print "<title>%s - %s </title>" % (blogname, slogan)
print "<meta http-equiv=\"Content-Type\" content=\"text/html; charset=%s\" />" % encoding
print "<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\" />" % stylesheet
print "<link rel=\"shortcut icon\" href=\"%s\"/>" % favicon
print "</head><body>"
else:
renderHtmlHeader(title, rss)
print "<div id=\"content2\">"
for entry in entries:
print "<h2><a href=\"%s\">%s</a></h2>" % (
entry.url,
entry.headline)
print "<div class=\"post\">"
for line in entry.getText(summary):
print line,
print "</div>"
if len(entries) > 1 and maxcomments > -1:
nc = len(entry.comments)
if nc > 0:
print "<div class=\"comlink\">%s <a href=\"%s#comments\">%s</a></div>" % (
nc,
entry.url,
l_comments2)
else:
print "<div class=\"comlink\"><a href=\"%s#leave_acomment\">%s</a></div>" % (
entry.url,
l_no_comments)
print "<div class=\"categories\">%s:" % l_categories
num = 0
for cat in entry.cat:
num=num+1
comma = ''
if len(entry.cat) > num:
comma = ', '
print "<a href=\"%s/%s\">%s</a>%s" % (baseurl, quote_plus(cat), cat, comma)
print "</div>"
print "<div class=\"date\">%s: %s</div>" % \
(l_date, dateToString(entry.date))
# comments
if len(entries) == 1:
numofcomment = 0
if len(entry.comments) > 0 and maxcomments > -1:
print "<h3><a name=\"comments\"></a>%s</h3>" % l_comments
print "<ol style=\"list-style-type:none;\">"
for comment in entry.comments:
numofcomment = numofcomment +1
renderComment(entry, comment, numofcomment, admin)
print "</ol>"
if maxcomments == -1 or len(entry.comments) >= maxcomments:
print "<h3>%s</h3>" % l_no_comments_allowed
else:
print "<h3><a name=\"leave_acomment\"></a>%s</h3>" % l_leave_reply
print "<form action=\"%s/%s/?postcomment\" method=\"post\"" % (
baseurl,
entry.fileName[:-4])
print "id=\"commentform\" onsubmit=\"return validate_form(this)\">" # form
print "<input type=\"hidden\" name=\"name\" id=\"name\" value=\"%s\"/>" % entry.fileName[:-4]
print "<input type=\"hidden\" name=\"headline\" id=\"headline\" value=\"%s\"/>" % entry.headline
print "<input type=\"hidden\" name=\"commentnum\" id=\"commentnum\" value=\"%s\"/>" % (numofcomment+1)
print "<p><input type=\"text\" name=\"author\" id=\"author\" size=\"22\" tabindex=\"1\" />"
print "<label for=\"author\"><small>%s</small></label></p>" % l_name_needed
print "<p><input type=\"text\" name=\"email\" id=\"email\" size=\"22\" tabindex=\"2\" />"
print "<label for=\"email\"><small>%s</small></label></p>" % l_email_needed
print "<p><input type=\"text\" name=\"url\" id=\"url\" size=\"22\" tabindex=\"3\" />"
print "<label for=\"url\"><small>%s</small></label></p>" % l_webpage
print "<p><input type=\"text\" name=\"nospam\" id=\"nospam\" size=\"22\" tabindex=\"4\" />"
print "<label for=\"nospam\"><small>%s</small></label></p>" % l_nospam_question
print "<p>%s</p>" % l_no_html
print "<p><textarea name=\"comment\" id=\"comment\" cols=\"40\" rows=\"7\" tabindex=\"4\"></textarea></p>"
print "<p><input name=\"submit\" type=\"submit\" id=\"submit\" tabindex=\"5\" value=\"Submit\" />"
print "<input type=\"hidden\" name=\"comment_post_ID\" value=\"11\" />"
print "</p>"
print "<p><input type=\"checkbox\" name=\"subscribe\" id=\"subscribe\" tabindex=\"6\" value=\"subscribe\">%s</label></p>" % l_notify_comments
print "</form>"
if len(entries) > 1:
print "<div class=\"navi\">"
if page > 0:
print "<a href=\"%s/%s?page=%s\">%s</a>" % (
baseurl,
'/'.join(path),
page-1,
l_previouspage
)
if len(entries) == numberofentriesperpage:
print "<a href=\"%s/%s?page=%s\">%s</a>" % (
baseurl,
'/'.join(path),
page+1,
l_nextpage
)
print "</div>"
print "</div>" # content2
# sidebar
if mode == "standalone":
print "</body></html"
else:
print "<div id=\"sidebar\">"
#######################################print "<a href=\"%s/feed\">Subscribe <img alt=\"RSS Feed Icon\" src=\"%s\" style=\"vertical-align:top; border:none;\"/></a>" % (baseurl, feedicon)
print ' <p><a href="http://feeds.feedburner.com/GalaxyHarvesterNews" title="Subscribe to my feed" rel="alternate" type="application/rss+xml"><img src="http://www.feedburner.com/fb/images/pub/feed-icon32x32.png" alt="" style="border:0"/></a><a href="http://feeds.feedburner.com/GalaxyHarvesterNews" title="Subscribe to my feed" rel="alternate" type="application/rss+xml">Subscribe in a reader</a></p>'
#########################################
renderSidebarCategories(catelist, categories)
renderSidebarSearch()
renderSidebarCommments()
renderSidebarArchive(arclist)
renderSidebarAdmin(entries)
print "</div>" # sidebar
renderHtmlFooter()
def renderFeed(entries, path, categorieslist, totalCount):
    """Render the RSS 2.0 feed for the given entries.

    If path names a known category, the channel title includes it.
    totalCount is advertised in the non-standard channel "items"
    attribute; <serverTime> is likewise a non-standard extension.

    NOTE(review): assumes *entries* is non-empty -- entries[0] supplies
    the channel pubDate/lastBuildDate; verify callers never pass [].
    """
    rfc822time = "%a, %d %b %Y %H:%M:%S -0800"
    print "Content-Type: text/xml; charset=%s\n" % encoding
    print "<?xml version=\"1.0\" encoding=\"%s\"?>" % encoding
    print "<!-- generator=\"Kukkaisvoima version %s\" -->" % version
    print "<rss version=\"2.0\""
    print "xmlns:content=\"http://purl.org/rss/1.0/modules/content/\""
    print "xmlns:wfw=\"http://wellformedweb.org/CommentAPI/\""
    print "xmlns:dc=\"http://purl.org/dc/elements/1.1/\""
    print ">"
    print "<channel items=\"%s\">" % totalCount
    if len(path) >= 1 and path[0] in categorieslist.keys():
        print "<title>%s: %s</title>" % (blogname, path[0])
    else:
        print "<title>%s</title>" % blogname
    print "<link>%s</link>" % baseurl
    print "<description>%s</description>" % description
    print "<pubDate>%s</pubDate>" % strftime(rfc822time, entries[0].date.timetuple())
    print "<lastBuildDate>%s</lastBuildDate>" % strftime(rfc822time, entries[0].date.timetuple())
    print "<serverTime>%s</serverTime>" % datetime.fromtimestamp(time()).strftime("%Y-%m-%d %H:%M:%S")
    print "<generator>http://23.fi/kukkaisvoima/</generator>"
    print "<language>%s</language>" % language
    # print one <item> per entry
    for entry in entries:
        print "<item>"
        print "<title>%s</title>" % entry.headline
        print "<link>%s</link>" % entry.url
        print "<comments>%s#comments</comments>" % entry.url
        print "<pubDate>%s</pubDate>" % strftime(rfc822time, entry.date.timetuple())
        print "<dc:creator>%s</dc:creator>" % entry.author
        for cat in entry.cat:
            print "<category>%s</category>" % cat
        print "<guid isPermaLink=\"false\">%s/</guid>" % entry.url
        # First paragraph as the summary, full text as content:encoded.
        print "<description><![CDATA[ %s [...]]]></description>" % entry.text[0]
        print "<content:encoded><![CDATA["
        for line in entry.text:
            print line,
        print "]]></content:encoded>"
        print "<wfw:commentRss>%s/feed/</wfw:commentRss>" % entry.url
        print "</item>"
    print "</channel>"
    print "</rss>"
###############################
def renderSummary(entries, path, categorieslist):
shorttime = "%d %b %Y %H:%M"
print "Content-Type: text/html; charset=%s\n" % encoding
# print entries
for entry in entries:
print "<div class=\"newsItem\">"
print "<a href=\"%s\">" % entry.url
print "<h3>%s</h3></a>" % entry.headline
print "<span>%s</span>" % strftime(shorttime, entry.date.timetuple())
print "</div>"
################################
# main program starts here
def main():
    """CGI entry point: parse the request, refresh the pickle indexes on
    disk when new articles appear, then dispatch to the appropriate
    renderer (HTML page, RSS feed, summary, admin actions...)."""
    # Split PATH_INFO into non-empty path components.
    path = ['']
    if os.environ.has_key('PATH_INFO'):
        path = os.environ['PATH_INFO'].split('/')[1:]
        path = [p for p in path if p != '']
    # Request flags, filled in from the query string below.
    page = 0
    admin = False
    delcomment = 0
    postcomment = False
    deletecomment = False
    search = False
    searchstring = ""
    unsubscribe = False
    unsubscribe_id = ""
    mode = "normal"
    # Only a single "key=value" (or bare "key") query is recognized.
    if os.environ.has_key('QUERY_STRING'):
        querystr = os.environ['QUERY_STRING'].split('=')
        if len(querystr) == 2 and querystr[0] == 'page':
            try:
                page = int(querystr[1])
            except:
                page = 0
        elif querystr[0] == 'admin':
            admin = True
        elif querystr[0] == 'postcomment':
            postcomment = True
        elif querystr[0] == 'deletecomment':
            deletecomment = True
        elif len(querystr) == 2 and querystr[0] == 'delcomment':
            try:
                delcomment = int(querystr[1])
            except:
                delcomment = 0
        elif len(querystr) == 2 and querystr[0] == 'search':
            search = True
            searchstring = querystr[1]
        elif len(querystr) == 2 and querystr[0] == 'unsubscribe':
            unsubscribe = True
            unsubscribe_id = querystr[1]
        elif len(querystr) == 2 and querystr[0] == 'mode':
            mode = querystr[1]
    files = os.listdir(datadir)
    # read and validate the txt files (expected name: title:date:cats.txt)
    entries = list()
    for entry in files:
        if not entry.endswith(".txt"):
            continue
        if not len(entry.split(":")) == 3:
            continue
        try:
            year, month, day = entry.split(":")[1].split("-")
            if int(year) == 0 or \
                   (int(month) < 1 or int(month) > 12) or \
                   (int(day) < 1 or int(day) > 31):
                continue
        except:
            continue
        entries.append(entry)
    # Map each entry's full date (from file mtime/name) to its file name.
    filelist = {}
    for file in entries:
        filelist[generateDate(os.path.join(datadir,file))] = file
    # Read the main index
    indexold = list()
    try:
        indexoldfile = open(os.path.join(indexdir,'main.index'), 'rb')
        indexoldd = pickle.load(indexoldfile)
        indexoldfile.close()
        indexold = indexoldd.values()
    except:
        pass
    # generate categorieslist and archivelist
    categorieslist = {}
    archivelist = {}
    for file in filelist:
        name, date, categories = filelist[file][:-4].split(':')
        adate = date[:7]
        # zero-pad single-digit months, e.g. "2010-1" -> "2010-01"
        if adate.endswith('-'):
            adate = "%s-0%s" % (adate[:4], adate[5])
        date = file
        categories = categories.split(',')
        for cat in categories:
            if categorieslist.has_key(cat):
                categorieslist[cat][date] = filelist[file]
            else:
                categorieslist[cat] = {}
                categorieslist[cat][date] = filelist[file]
        if archivelist.has_key(adate):
            archivelist[adate][date] = filelist[file]
        else:
            archivelist[adate] = {}
            archivelist[adate][date] = filelist[file]
    # Compare the index; rewrite pickles only when the set of articles changed.
    newarticles = Set(entries)^Set(indexold)
    if len(newarticles) > 0:
        # Pickle the categories
        for cat in categorieslist.keys():
            oldcategorieslist = None
            try:
                oldcatindex = open(os.path.join(indexdir,'%s.index' %cat), 'rb')
                oldcategorieslist = pickle.load(oldcatindex)
                oldcatindex.close()
            except:
                pass # :P
            # No old index or new articles in category, update the index
            if not oldcategorieslist or \
                   (oldcategorieslist and \
                    len(Set(oldcategorieslist.values())\
                        ^Set(categorieslist[cat].values())) > 0):
                catindex = open(os.path.join(indexdir,'%s.index' %cat), 'wb')
                pickle.dump(categorieslist[cat], catindex)
                catindex.close()
        # Pickle the date archives
        for arc in archivelist.keys():
            oldarchivelist = None
            try:
                oldarcindex = open(os.path.join(indexdir,'%s.index' %arc), 'rb')
                oldarchivelist = pickle.load(oldarcindex)
                oldarcindex.close()
            except:
                pass # :P
            if not oldarchivelist or \
                   (oldarchivelist and \
                    len(Set(oldarchivelist.values())\
                        ^Set(archivelist[arc].values())) > 0):
                arcindex = open(os.path.join(indexdir,'%s.index' %arc), 'wb')
                pickle.dump(archivelist[arc], arcindex)
                arcindex.close()
        # Pickle the main index
        index = open(os.path.join(indexdir,'main.index'), 'wb')
        pickle.dump(filelist, index)
        index.close()
    # Output-mode flags derived from the last path component.
    feed = False
    numberofentriesperpage = 10 # default
    if len(path) > 0 and path[len(path)-1] == 'feed':
        feed = True
        numberofentriesperpage = 10 # feed always has 10
    #####################################
    smy = False
    if len(path) > 0 and path[len(path)-1] == 'smy':
        smy = True
        numberofentriesperpage = 5 # summary always has 5
        page = 0
    ######################################
    ent = Entries(indexdir)
    # Dispatch on the request path / flags to pick the entries to show.
    if len(path) == 1 and path[0] == "archive":
        return renderArchive(ent)
    if len(path) >= 1 and path[0] == "categories":
        return renderCategories(categorieslist, ent, path)
    elif len(path) == 1 and search == True and searchstring != "":
        return renderSearch(ent.getMany(-1), unquote_plus(searchstring))
    elif len(path) >= 1 and path[0] in categorieslist.keys():
        try:
            entries = ent.getMany(page, path[0])
        except:
            entries = ent.getMany(page)
    elif len(path) == 1 and path[0] in archivelist.keys():
        try:
            entries = ent.getMany(page, path[0])
        except:
            entries = ent.getMany(page)
    elif len(path) == 1 and postcomment:
        try:
            redirect = handleIncomingComment(cgi.FieldStorage(keep_blank_values=1))
            if redirect:
                print redirect
                return
            else:
                entries = ent.getOne("%s.txt" % unquote_plus(path[0]))
        except:
            entries = ent.getMany(page)
    elif len(path) == 1 and deletecomment:
        # check if this is incoming comment
        fs = cgi.FieldStorage(keep_blank_values=1)
        commentnum = int(fs.getvalue('commentnum'))
        password = fs.getvalue('password')
        name = fs.getvalue('name')
        filename = "%s.txt" % name
        # Refuse deletion when the admin password is unset ('password').
        if commentnum and name and password == passwd and passwd != 'password':
            deleteComment(filename, commentnum)
        print 'Location: %s/%s\n' % (baseurl, name)
    elif len(path) == 1 and unsubscribe and unsubscribe_id:
        name = unquote_plus(path[0])
        filename = "%s.txt" % name
        unsubscribeComments(filename, unsubscribe_id)
        print 'Location: %s/%s#comments\n' % (baseurl, name)
    elif len(path) == 1:
        try:
            entries = ent.getOne("%s.txt" % unquote_plus(path[0]))
        except:
            entries = ent.getMany(page, None, numberofentriesperpage)
    else:
        entries = ent.getMany(page, None, numberofentriesperpage)
    # Choose the output renderer.
    if delcomment > 0 and len(entries) == 1:
        renderDeleteComments(entries[0], delcomment)
    elif feed:
        renderFeed(entries, path, categorieslist, ent.getTotal())
    ###########################################
    elif smy:
        renderSummary(entries, path, categorieslist)
    ###########################################
    else:
        renderHtml(entries, path, categorieslist, archivelist, admin, page, mode)
# CGI entry point: render the requested page when run as a script.
if __name__ == "__main__":
    main()
|
druss316/G-Harvestor
|
html/blog.py
|
Python
|
gpl-3.0
| 57,034
|
[
"Galaxy"
] |
b91619528dad9945d3bd7451501e992f7fe3c28d1ef67e9fec3fcea5258270ec
|
"""
Explore integration of rotationally symmetric shapes
"""
from __future__ import print_function, division
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import numpy as np
from numpy import pi, sin, cos, sqrt, exp, expm1, degrees, log10
from numpy.polynomial.legendre import leggauss
from scipy.integrate import dblquad, simps, romb, romberg
import pylab
from sasmodels.special import square
from sasmodels.special import Gauss20Wt, Gauss20Z
from sasmodels.special import Gauss76Wt, Gauss76Z
from sasmodels.special import Gauss150Wt, Gauss150Z
from sasmodels.special import sas_2J1x_x, sas_sinx_x, sas_3j1x_x
# Scattering length densities for the particle and the solvent; only
# their difference (the contrast) enters the intensity calculation.
SLD = 3.0
SLD_SOLVENT = 6
CONTRAST = SLD - SLD_SOLVENT
def make_cylinder(radius, length):
    """Return (norm, kernel) for a right circular cylinder.

    The kernel gives the form-factor amplitude as a function of the
    q components perpendicular (qab) and parallel (qc) to the axis.
    """
    volume = pi*radius**2*length
    norm = CONTRAST**2*volume/10000
    def cylinder(qab, qc):
        # Cross-section amplitude times the axial (sinc) amplitude.
        return sas_2J1x_x(qab*radius) * sas_sinx_x(qc*0.5*length)
    cylinder.__doc__ = "cylinder radius=%g, length=%g"%(radius, length)
    return norm, cylinder
def make_long_cylinder(radius, length):
    """Return I(q) for the infinite-cylinder limiting form, used as the
    reference curve against the numerically integrated results."""
    volume = pi*radius**2*length
    norm = CONTRAST**2*volume/10000*pi/length
    def long_cylinder(q):
        return norm/q * sas_2J1x_x(q*radius)**2
    long_cylinder.__doc__ = "long cylinder radius=%g, length=%g"%(radius, length)
    return long_cylinder
def make_sphere(radius):
    """Return (norm, kernel) for a sphere of the given radius.

    The sphere amplitude depends only on |q|, which the kernel rebuilds
    from the (qab, qc) components so it matches the cylinder interface.
    """
    volume = 4*pi*radius**3/3
    norm = CONTRAST**2*volume/10000
    def sphere(qab, qc):
        q_mag = sqrt(qab**2 + qc**2)
        return sas_3j1x_x(q_mag*radius)
    sphere.__doc__ = "sphere radius=%g"%(radius,)
    return norm, sphere
# Integration limits for the polar angle theta and overall scale factor
# applied to the gridded results.
THETA_LOW, THETA_HIGH = 0, pi/2
SCALE = 1
def kernel_1d(q, theta):
    """Squared form-factor amplitude at (q, theta), scaled by the module
    level NORM; KERNEL is selected at module setup time."""
    qab, qc = q*sin(theta), q*cos(theta)
    return NORM*KERNEL(qab, qc)**2
def gauss_quad_1d(q, n=150):
    """Integrate the theta kernel by Gauss-Legendre quadrature with *n*
    points (leggauss supports any order, not just 20/76/150)."""
    z, w = leggauss(n)
    # Map the nodes from [-1, 1] onto [THETA_LOW, THETA_HIGH].
    theta = (THETA_HIGH-THETA_LOW)*(z + 1)/2 + THETA_LOW
    jacobian = abs(sin(theta))
    Zq = kernel_1d(q=q, theta=theta)
    return np.sum(Zq*w*jacobian)*(THETA_HIGH-THETA_LOW)/2
def gridded_1d(q, n=300):
    """
    Print the integral computed on a regular theta grid with the
    rectangular, trapezoidal, Simpson's and Romberg rules.  Romberg
    requires the grid size to be n = 2**k + 1.
    """
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    Zq = abs(sin(theta))*kernel_1d(q=q, theta=theta)
    dx = theta[1]-theta[0]
    results = (
        ("rect", np.sum(Zq)*dx*SCALE),
        ("trapz", np.trapz(Zq, dx=dx)*SCALE),
        ("simpson", simps(Zq, dx=dx)*SCALE),
        ("romb", romb(Zq, dx=dx)*SCALE),
    )
    for label, value in results:
        print("%s-%d"%(label, n), value)
def scipy_romberg_1d(q):
    """
    Print the integral computed by scipy's adaptive Romberg rule along
    with the number of integrand evaluations.  Extremely slow; accuracy
    has not been verified.
    """
    call_count = [0]
    def integrand(theta):
        call_count[0] += 1
        return kernel_1d(q, theta=theta)*abs(sin(theta))
    result = romberg(integrand, THETA_LOW, THETA_HIGH, divmax=100)*SCALE
    print("scipy romberg", call_count[0], result)
def plot_1d(q, n=300):
    """
    Plot the theta integrand for I(q) at one q value on an *n*-point
    grid, on a log intensity scale (floored at 1e-6 for display).
    """
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    integrand = kernel_1d(q=q, theta=theta)*abs(sin(theta))
    pylab.semilogy(degrees(theta), np.fmax(integrand, 1.e-6), label="Q=%g"%q)
    pylab.title("%s I(q, theta) sin(theta)" % (KERNEL.__doc__,))
    pylab.xlabel("theta (degrees)")
    pylab.ylabel("Iq 1/cm")
def Iq_trapz(q, n):
    """Trapezoid-rule I(q) on an *n*-point theta grid."""
    theta = np.linspace(THETA_LOW, THETA_HIGH, n)
    integrand = kernel_1d(q=q, theta=theta)*abs(sin(theta))
    return np.trapz(integrand, dx=theta[1]-theta[0])*SCALE
def plot_Iq(q, n, form="trapz"):
    """
    Plot and return I(q) over the vector *q* using *n* integration
    points per q value.

    *form* selects the integrator: "trapz" or "gauss".  Raises
    ValueError for an unrecognized *form* (previously this fell through
    and crashed with an opaque NameError on the unbound Iq).
    """
    if form == "trapz":
        Iq = np.array([Iq_trapz(qk, n) for qk in q])
    elif form == "gauss":
        Iq = np.array([gauss_quad_1d(qk, n) for qk in q])
    else:
        raise ValueError("unknown integration form %r; use 'trapz' or 'gauss'"
                         % (form,))
    pylab.loglog(q, Iq, label="%s, n=%d"%(form, n))
    pylab.xlabel("q (1/A)")
    pylab.ylabel("Iq (1/cm)")
    pylab.title(KERNEL.__doc__ + " I(q) circular average")
    return Iq
# Model under test: a cylinder long enough to approach the infinite
# cylinder limit, so the exact limiting form can serve as a reference.
radius = 10.
length = 1e5
NORM, KERNEL = make_cylinder(radius=radius, length=length)
long_cyl = make_long_cylinder(radius=radius, length=length)
#NORM, KERNEL = make_sphere(radius=50.)
if __name__ == "__main__":
    # Convergence study at a single q: Gauss-Legendre at increasing
    # order vs gridded rules on n = 2**k + 1 points (required by romb).
    Q = 0.386
    for n in (20, 76, 150, 300, 1000): #, 10000, 30000):
        print("gauss-%d"%n, gauss_quad_1d(Q, n=n))
    for k in (8, 10, 13, 16, 19):
        gridded_1d(Q, n=2**k+1)
    #print("inf cyl", 0, long_cyl(Q))
    #scipy_romberg(Q)
    # Shape of the theta integrand at a few q values.
    plot_1d(0.386, n=2000)
    plot_1d(0.5, n=2000)
    plot_1d(0.8, n=2000)
    pylab.legend()
    pylab.figure()
    # I(q) curves compared against the infinite-cylinder limiting form.
    q = np.logspace(-3, 0, 400)
    I1 = long_cyl(q)
    I2 = plot_Iq(q, n=2**19+1, form="trapz")
    #plot_Iq(q, n=2**16+1, form="trapz")
    #plot_Iq(q, n=2**10+1, form="trapz")
    plot_Iq(q, n=1024, form="gauss")
    #plot_Iq(q, n=300, form="gauss")
    #plot_Iq(q, n=150, form="gauss")
    pylab.loglog(q, long_cyl(q), label="limit")
    pylab.legend()
    pylab.figure()
    # Relative error of the trapezoid result against the limiting form.
    pylab.semilogx(q, (I2 - I1)/I1)
    pylab.show()
|
SasView/sasmodels
|
explore/symint.py
|
Python
|
bsd-3-clause
| 5,187
|
[
"Gaussian"
] |
424fc51bd5c786a5ce37000243547130af721e5c1a4fc0455048eece5622879f
|
"""
Views for user API
"""
from completion.exceptions import UnavailableCompletionData
from completion.utilities import get_key_to_last_completed_block
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.contrib.auth.signals import user_logged_in
from django.db import transaction
from django.shortcuts import redirect
from django.utils import dateparse
from django.utils.decorators import method_decorator
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import UsageKey
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.permissions import SAFE_METHODS
from rest_framework.response import Response
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from common.djangoapps.student.models import CourseEnrollment, User # lint-amnesty, pylint: disable=reimported
from lms.djangoapps.courseware.access import is_mobile_available_for_user
from lms.djangoapps.courseware.access_utils import ACCESS_GRANTED
from lms.djangoapps.courseware.courses import get_current_child
from lms.djangoapps.courseware.model_data import FieldDataCache
from lms.djangoapps.courseware.module_render import get_module_for_descriptor
from lms.djangoapps.courseware.views.index import save_positions_recursively_up
from lms.djangoapps.mobile_api.utils import API_V1, API_V05
from openedx.features.course_duration_limits.access import check_course_expired
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
from xmodule.modulestore.exceptions import ItemNotFoundError # lint-amnesty, pylint: disable=wrong-import-order
from .. import errors
from ..decorators import mobile_course_access, mobile_view
from .serializers import CourseEnrollmentSerializer, CourseEnrollmentSerializerv05, UserSerializer
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**
    Get information about the specified user and access other resources
    the user has permissions for.
    Users are redirected to this endpoint after they sign in.
    You can use the **course_enrollments** value in the response to get a
    list of courses the user is enrolled in.
    **Example Request**
    GET /api/mobile/{version}/users/{username}
    **Response Values**
    If the request is successful, the request returns an HTTP 200 "OK" response.
    The HTTP 200 response has the following values.
    * course_enrollments: The URI to list the courses the currently signed
    in user is enrolled in.
    * email: The email address of the currently signed in user.
    * id: The ID of the user.
    * name: The full name of the currently signed in user.
    * username: The username of the currently signed in user.
    """
    # select_related avoids a second query for the profile during
    # serialization.
    queryset = (
        User.objects.all().select_related('profile')
    )
    serializer_class = UserSerializer
    # Look up by username from the URL rather than by primary key.
    lookup_field = 'username'
    def get_serializer_context(self):
        # Pass the requested API version through to the serializer so it
        # can shape its output per version.
        context = super().get_serializer_context()
        context['api_version'] = self.kwargs.get('api_version')
        return context
@mobile_view(is_user=True)
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class UserCourseStatus(views.APIView):
    """
    **Use Cases**
    Get or update the ID of the module that the specified user last
    visited in the specified course.
    Get ID of the last completed block in case of version v1
    **Example Requests**
    GET /api/mobile/{version}/users/{username}/course_status_info/{course_id}
    PATCH /api/mobile/{version}/users/{username}/course_status_info/{course_id}
    **PATCH Parameters**
    The body of the PATCH request can include the following parameters.
    * last_visited_module_id={module_id}
    * modification_date={date}
    The modification_date parameter is optional. If it is present, the
    update will only take effect if the modification_date in the
    request is later than the modification_date saved on the server.
    **Response Values**
    If the request is successful, the request returns an HTTP 200 "OK" response.
    The HTTP 200 response has the following values.
    * last_visited_module_id: The ID of the last module that the user
    visited in the course.
    * last_visited_module_path: The ID of the modules in the path from the
    last visited module to the course module.
    For version v1 GET request response includes the following values.
    * last_visited_block_id: ID of the last completed block.
    """
    http_method_names = ["get", "patch"]
    def dispatch(self, request, *args, **kwargs):
        # Requests are non-atomic at the view level (decorator above);
        # only writes (PATCH) are wrapped in an explicit transaction.
        if request.method in SAFE_METHODS:
            return super().dispatch(request, *args, **kwargs)
        else:
            with transaction.atomic():
                return super().dispatch(request, *args, **kwargs)
    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        course_module = get_module_for_descriptor(
            request.user, request, course, field_data_cache, course.id, course=course
        )
        # Walk course -> chapter -> section, stopping wherever the tree
        # runs out; the list is built top-down, then reversed so the
        # deepest (last visited) module comes first.
        path = [course_module] if course_module else []
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)
        path.reverse()
        return path
    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        path_ids = [str(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })
    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(
            request.user, request, module_descriptor, field_data_cache, course.id, course=course
        )
        if modification_date:
            # Compare against the stored 'position' field's last-modified
            # time; skip the update when the client's date is older.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name='position'
            )
            original_store_date = field_data_cache.last_modified(key)
            if original_store_date is not None and modification_date < original_store_date:
                # old modification date so skip update
                return self._get_course_info(request, course)
        save_positions_recursively_up(request.user, request, field_data_cache, module, course=course)
        return self._get_course_info(request, course)
    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # lint-amnesty, pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        user_course_status = self._get_course_info(request, course)
        api_version = self.kwargs.get("api_version")
        if api_version == API_V1:
            # Get ID of the block that the specified user last visited in the specified course.
            try:
                block_id = str(get_key_to_last_completed_block(request.user, course.id))
            except UnavailableCompletionData:
                # No completion recorded yet for this user/course pair.
                block_id = ""
            user_course_status.data["last_visited_block_id"] = block_id
        return user_course_status
    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # lint-amnesty, pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.data.get("last_visited_module_id")
        modification_date_string = request.data.get("modification_date")
        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Reject unparseable or timezone-naive dates.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)
        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**
    Get information about the courses that the currently signed in user is
    enrolled in.
    v1 differs from v0.5 version by returning ALL enrollments for
    a user rather than only the enrollments the user has access to (that haven't expired).
    An additional attribute "expiration" has been added to the response, which lists the date
    when access to the course will expire or null if it doesn't expire.
    **Example Request**
    GET /api/mobile/v1/users/{username}/course_enrollments/
    **Response Values**
    If the request for information about the user is successful, the
    request returns an HTTP 200 "OK" response.
    The HTTP 200 response has the following values.
    * expiration: The course expiration date for given user course pair
    or null if the course does not expire.
    * certificate: Information about the user's earned certificate in the
    course.
    * course: A collection of the following data about the course.
    * courseware_access: A JSON representation with access information for the course,
    including any access errors.
    * course_about: The URL to the course about page.
    * course_sharing_utm_parameters: Encoded UTM parameters to be included in course sharing url
    * course_handouts: The URI to get data for course handouts.
    * course_image: The path to the course image.
    * course_updates: The URI to get data for course updates.
    * discussion_url: The URI to access data for course discussions if
    it is enabled, otherwise null.
    * end: The end date of the course.
    * id: The unique ID of the course.
    * name: The name of the course.
    * number: The course number.
    * org: The organization that created the course.
    * start: The date and time when the course starts.
    * start_display:
    If start_type is a string, then the advertised_start date for the course.
    If start_type is a timestamp, then a formatted date for the start of the course.
    If start_type is empty, then the value is None and it indicates that the course has not yet started.
    * start_type: One of either "string", "timestamp", or "empty"
    * subscription_id: A unique "clean" (alphanumeric with '_') ID of
    the course.
    * video_outline: The URI to get the list of all videos that the user
    can access in the course.
    * created: The date the course was created.
    * is_active: Whether the course is currently active. Possible values
    are true or false.
    * mode: The type of certificate registration for this course (honor or
    certified).
    * url: URL to the downloadable version of the certificate, if exists.
    """
    queryset = CourseEnrollment.objects.all()
    lookup_field = 'username'
    # In Django Rest Framework v3, there is a default pagination
    # class that transmutes the response data into a dictionary
    # with pagination information. The original response data (a list)
    # is stored in a "results" value of the dictionary.
    # For backwards compatibility with the existing API, we disable
    # the default behavior by setting the pagination_class to None.
    pagination_class = None
    def is_org(self, check_org, course_org):
        """
        Check course org matches request org param or no param provided
        """
        return check_org is None or (check_org.lower() == course_org.lower())
    def get_serializer_context(self):
        # Expose the requested API version so the serializer can adapt
        # its output per version.
        context = super().get_serializer_context()
        context['api_version'] = self.kwargs.get('api_version')
        return context
    def get_serializer_class(self):
        # v0.5 keeps its legacy serializer; everything newer uses the
        # current one.
        api_version = self.kwargs.get('api_version')
        if api_version == API_V05:
            return CourseEnrollmentSerializerv05
        return CourseEnrollmentSerializer
    def get_queryset(self):
        api_version = self.kwargs.get('api_version')
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        # Lazy generator pipeline: org filter -> mobile availability ->
        # course-duration (expiration) filter.  Only the stages actually
        # consumed below are evaluated.
        org = self.request.query_params.get('org', None)
        same_org = (
            enrollment for enrollment in enrollments
            if enrollment.course_overview and self.is_org(org, enrollment.course_overview.org)
        )
        mobile_available = (
            enrollment for enrollment in same_org
            if is_mobile_available_for_user(self.request.user, enrollment.course_overview)
        )
        not_duration_limited = (
            enrollment for enrollment in mobile_available
            if check_course_expired(self.request.user, enrollment.course) == ACCESS_GRANTED
        )
        if api_version == API_V05:
            # for v0.5 don't return expired courses
            return list(not_duration_limited)
        else:
            # return all courses, with associated expiration
            return list(mobile_available)
@api_view(["GET"])
@mobile_view()
def my_user_info(request, api_version):
    """
    Redirect to the currently-logged-in user's info page
    """
    username = request.user.username
    # Record the login time from here because updating it inside the
    # OAuth2-related code would be far more complex.
    user_logged_in.send(sender=User, user=request.user, request=request)
    return redirect("user-detail", api_version=api_version, username=username)
|
eduNEXT/edx-platform
|
lms/djangoapps/mobile_api/users/views.py
|
Python
|
agpl-3.0
| 15,353
|
[
"VisIt"
] |
647d7b19af18947bbeda79fb01b576f4aa56995a31a47ee3a975842fa4b8f8d8
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------
# fx2_tube_cartridge_adapter
# --------------------------
# by Phillip Pearson
# Adapter board to make it easy to connect an LCSoft Mini board to either an
# Acorn Electron via a Plus 1 cartridge slot, or a BBC Micro or BBC Master via
# the Tube connector, to run Sigrok and David Banks' decode6502 software.
# This also allows connecting a Raspberry Pi running PiTubeDirect to a BBC or
# Electron. When connected to an Electron, it drives the /TUBE line low on the
# Pi when &FCEx addresses are being accessed (i.e. nINFC='0' and A7:4 = x"E").
# When connected to a BBC, it just buffers the /TUBE line from the Tube
# connector.
# TODO(r2) move the jumpers over a little because they block the Pi power cable.
# TODO(r2) Consider allowing the Pi to be powered from the Electron (safe?) or
# from a micro USB socket in a more convenient location. Right now the 5V pins
# on the Pi are disconnected; add a 5V_PI net or just wire it up to 5V.
# TODO(r2) add pullups on elk_nINFC and bbc_nTUBE to make it easier to
# autodetect what we're plugged into.
# TODO(r2) add silkscreen or BOM notes to explain that TUBE, PI1, FX2 are male,
# female, female respectively and that TUBE and PI1 go on top and FX2 goes
# underneath the board.
# DONE(r1) add a second tube header so BBC folks can pass signals through to a 5V
# tube device and debug the Tube connection. DECISION: skip this, because a
# two-drive IDE cable will allow putting the board inline with a Tube device
# anyway.
# DONE(r1) hook up both bbc_nTUBE and tube_nTUBE to the CPLD, to allow using a Pi on
# a BBC or Master.
# DONE(r1) add jumpers from 3V3_PI and 3V3_FX2 to 3V3, to allow not populating the
# regulator
# DONE(r1) think about whether we can autodetect stuff (by averaging over a few
# clocks...). if elk is connected, we'll get cpu_CLK and elk_16MHz, but if a
# beeb is connected, we'll only get cpu_CLK. counting elk_16MHz crossings per
# cpu_CLK cycle might let us detect elk or bbc. in fact we can safely say an
# elk is connected if we get *one* edge on elk_16MHz. so no need for a series
# resistor or jumper on beeb_nTUBE.
# DONE(r1) remove /RESET from the CPLD and use a schottky diode + pullup to convert
# that down to 3v3. It's a slow signal (generated by the keyboard) so it
# doesn't need to go through the CPLD.
# DONE(r1) figure out if it's easier to put the tube connector on the same side of
# the board as the cartridge interface -- if it makes the wiring easier or
# harder. it probably looks nicest up top. DECISION: next to the cartridge
# interface; otherwise this gets way too complicated.
# DONE(r1) Trivial, possibly pointless addition: This can also double as a very
# simple Tube adapter for the Electron, when logic is fitted to match the Tube
# addresses and generate the /TUBE signal). DECISION: going a bit further and
# making it a PiTubeDirect adapter too.
# DONE(r1) check AP6 thread on stardot to see if external tube devices require a
# buffered clock. If so, there's no point doing this simple option. DECISION:
# adding a buffered clock because the logic analyzer needs it on the Electron.
# ARCHIVE(r1): not doing this any more; just using a CPLD:
# nTUBE <= '0' when nINFC = '0' and A(7 downto 4) = x"E" else '1'
# i.e. nTUBE = !(!nINFC and A7 and A6 and A5 and !A4)
# = (((nINFC nand 1) and (A4 nand 1)) and (A7 and A6)) nand (A5)
# A 74HCT00 for the nands and a 74HCT08 for the ands should do this nicely.
# nand0 = nINFC nand 1 (= !nINFC)
# nand1 = A4 nand 1 (= !A4)
# and0 = nand0 and nand1 (= !nINFC and !A4)
# and1 = A7 and A6
# and2 = and0 and and1 (= !nINFC and A7 and A6 and !A4)
# nTUBE = nand2 = and2 nand A5 (= !(!nINFC and A7 and A6 and A5 and !A4))
# - 74hct00 + capacitor
# - 74hct08 + capacitor
# - 330R/1k output resistor on nTUBE to limit current if plugged into both
# Electron and BBC for some reason.
import sys, os
# Make the repo's bundled copy of myelin_kicad_pcb importable, relative to
# the location of this script rather than the current working directory.
here = os.path.dirname(sys.argv[0])
sys.path.insert(0, os.path.join(here, "../../third_party/myelin-kicad.pretty"))
import myelin_kicad_pcb
Pin = myelin_kicad_pcb.Pin  # shorthand: every component below declares a Pin list
# Cartridge connector
# Acorn Electron Plus 1 cartridge edge connector.  Third Pin argument (where
# present) is the net name; pins without one are left unconnected.
cart_front = myelin_kicad_pcb.Component(
    footprint="myelin-kicad:acorn_electron_cartridge_edge_connector",
    identifier="CART",
    value="edge connector",
    pins=[
        # front of cartridge / bottom layer of PCB
        Pin( "B1", "5V", "5V"),
        Pin( "B2", "A10"),
        Pin( "B3", "D3", "cpu_D3"),
        Pin( "B4", "A11"),
        Pin( "B5", "A9"),
        Pin( "B6", "D7", "cpu_D7"),
        Pin( "B7", "D6", "cpu_D6"),
        Pin( "B8", "D5", "cpu_D5"),
        Pin( "B9", "D4", "cpu_D4"),
        Pin("B10", "nOE2"),
        Pin("B11", "BA7", "cpu_A7"),
        Pin("B12", "BA6", "cpu_A6"),
        Pin("B13", "BA5", "cpu_A5"),
        Pin("B14", "BA4", "cpu_A4"),
        Pin("B15", "BA3", "cpu_A3"),
        Pin("B16", "BA2", "cpu_A2"),
        Pin("B17", "BA1", "cpu_A1"),
        Pin("B18", "BA0", "cpu_A0"),
        Pin("B19", "D0", "cpu_D0"),
        Pin("B20", "D2", "cpu_D2"),
        Pin("B21", "D1", "cpu_D1"),
        Pin("B22", "GND", "GND"),
        # rear of cartridge / top layer of PCB
        Pin( "A1", "5V", "5V"),
        Pin( "A2", "nOE"),
        Pin( "A3", "nRST", "cpu_nRST"),
        # A4 is RnW on the Elk, and on the master for &FCxx, so it's safe to use
        # this for the Tube, but not for the logic analyzer.
        Pin( "A4", "RnW", "cpu_RnW"),
        Pin( "A5", "A8"),
        Pin( "A6", "A13"),
        Pin( "A7", "A12"),
        Pin( "A8", "PHI0", "cpu_CLK"),
        Pin( "A9", "-5V"),
        Pin("A10", "NC"),
        Pin("A11", "READY", "elk_READY_master_RnW"),
        Pin("A12", "nNMI", "cpu_nNMI"),
        Pin("A13", "nIRQ", "cpu_nIRQ"),
        Pin("A14", "nINFC", "elk_nINFC"),
        Pin("A15", "nINFD"),
        Pin("A16", "ROMQA"),
        Pin("A17", "16MHZ", "elk_16MHz"),
        Pin("A18", "nROMSTB"),
        Pin("A19", "ADOUT"),
        Pin("A20", "ADGND"),
        Pin("A21", "ADIN"),
        Pin("A22", "GND", "GND"),
    ],
)
# TODO(r2) add a visible pin 1 indicator for TUBE
# BBC Micro/Master Tube connector: 2x20 header; odd pins are mostly 0V,
# high-numbered odd pins carry +5V.
tube = myelin_kicad_pcb.Component(
    footprint="myelin-kicad:header_2x20_100mil",
    identifier="TUBE",
    value="tube",
    pins=[
        Pin( 1, "0V", "GND"),
        Pin( 2, "RnW", "cpu_RnW"),
        Pin( 3, "0V", "GND"),
        Pin( 4, "2MHzE", "cpu_CLK"),
        Pin( 5, "0V", "GND"),
        Pin( 6, "/IRQ", "cpu_nIRQ"),
        Pin( 7, "0V", "GND"),
        Pin( 8, "/TUBE", "bbc_nTUBE"),
        Pin( 9, "0V", "GND"),
        Pin(10, "/RST", "cpu_nRST"),
        Pin(11, "0V", "GND"),
        Pin(12, "D0", "cpu_D0"),
        Pin(13, "0V", "GND"),
        Pin(14, "D1", "cpu_D1"),
        Pin(15, "0V", "GND"),
        Pin(16, "D2", "cpu_D2"),
        Pin(17, "0V", "GND"),
        Pin(18, "D3", "cpu_D3"),
        Pin(19, "0V", "GND"),
        Pin(20, "D4", "cpu_D4"),
        Pin(21, "0V", "GND"),
        Pin(22, "D5", "cpu_D5"),
        Pin(23, "0V", "GND"),
        Pin(24, "D6", "cpu_D6"),
        Pin(25, "0V", "GND"),
        Pin(26, "D7", "cpu_D7"),
        Pin(27, "0V", "GND"),
        Pin(28, "A0", "cpu_A0"),
        Pin(29, "0V", "GND"),
        Pin(30, "A1", "cpu_A1"),
        Pin(31, "+5V", "5V"),
        Pin(32, "A2", "cpu_A2"),
        Pin(33, "+5V", "5V"),
        Pin(34, "A3", "cpu_A3"),
        Pin(35, "+5V", "5V"),
        Pin(36, "A4", "cpu_A4"),
        Pin(37, "+5V", "5V"),
        Pin(38, "A5", "cpu_A5"),
        Pin(39, "+5V", "5V"),
        Pin(40, "A6", "cpu_A6"),
    ],
)
# 25 pins from the cartridge + tube port
# minus IRQ, NMI, READY, /RESET, /TUBE = 20
# 14 pins to the Raspberry Pi
# total 34, which is exactly what we have
# XC9572XL CPLD: bridges the 5V-side cpu_* bus to the 3V3-side tube_* bus.
cpld = myelin_kicad_pcb.Component(
    footprint="myelin-kicad:xilinx_vqg44",
    identifier="PL1",  # TODO(r2) rename to CPLD
    value="XC9572XL",
    buses=["cpu_D", "tube_D"],
    pins=[
        Pin(39, "P1.2", "cpu_D4"),
        Pin(40, "P1.5", "cpu_D5"),
        Pin(41, "P1.6", "cpu_D6"),
        Pin(42, "P1.8", "elk_nINFC"),
        Pin(43, "P1.9-GCK1", "cpu_D7"),
        Pin(44, "P1.11-GCK2", "cpu_A0"),
        Pin( 1, "P1.14-GCK3", "elk_16MHz"),
        Pin( 2, "P1.15", "cpu_A1"),
        Pin( 3, "P1.17", "cpu_A2"),
        Pin( 4, "GND", "GND"),
        Pin( 5, "P3.2", "cpu_A4"),
        Pin( 6, "P3.5", "cpu_A5"),
        Pin( 7, "P3.8", "cpu_A6"),
        Pin( 8, "P3.9", "tube_CLK"),
        Pin( 9, "TDI", "cpld_TDI"),
        Pin(10, "TMS", "cpld_TMS"),
        Pin(11, "TCK", "cpld_TCK"),
        Pin(12, "P3.11", "tube_D0"),
        Pin(13, "P3.14", "tube_D3"),
        Pin(14, "P3.15", "tube_D1"),
        Pin(15, "VCCINT_3V3", "3V3"),
        Pin(16, "P3.17", "tube_D7"),
        Pin(17, "GND", "GND"),
        Pin(18, "P3.16", "tube_D2"),
        Pin(19, "P4.2", "tube_D6"),
        Pin(20, "P4.5", "tube_D4"),
        Pin(21, "P4.8", "tube_D5"),
        Pin(22, "P4.11", "tube_A0"),
        Pin(23, "P4.14", "tube_nTUBE"),
        Pin(24, "TDO", "cpld_TDO"),
        Pin(25, "GND", "GND"),
        Pin(26, "VCCIO_2V5_3V3", "3V3"),
        Pin(27, "P4.15", "tube_RnW"),
        Pin(28, "P4.17", "tube_A2"),
        Pin(29, "P2.2", "tube_A1"),
        Pin(30, "P2.5", "cpu_RnW"),
        Pin(31, "P2.6", "cpu_CLK"),
        Pin(32, "P2.8", "bbc_nTUBE"),
        Pin(33, "P2.9-GSR", "cpu_D0"),
        Pin(34, "P2.11-GTS2", "cpu_D1"),
        Pin(35, "VCCINT_3V3", "3V3"),
        Pin(36, "P2.14-GTS1", "cpu_A7"),
        Pin(37, "P2.15", "cpu_D2"),
        Pin(38, "P2.17", "cpu_D3"),
    ],
)
# Decoupling for the CPLD supply rails.
cpld_cap1 = myelin_kicad_pcb.C0805("100n", "3V3", "GND", ref="C1", handsoldering=False)
cpld_cap2 = myelin_kicad_pcb.C0805("100n", "3V3", "GND", ref="C2", handsoldering=False)
cpld_cap3 = myelin_kicad_pcb.C0805("1u", "3V3", "GND", ref="C3", handsoldering=False)
# Regenerate the CPLD project's .ucf pin constraints from the netlist above.
myelin_kicad_pcb.update_xilinx_constraints(cpld, os.path.join(here, "../cpld/constraints.ucf"))
# 3.3V LDO regulator, fed from the cartridge/tube 5V rail.
regulator = myelin_kicad_pcb.Component(
    footprint="TO_SOT_Packages_SMD:SOT-89-3",
    identifier="U1",  # TODO(r2) rename to REG
    value="MCP1700T-3302E/MB",
    pins=[
        Pin(2, "VIN", ["5V"]),
        Pin(3, "VOUT", ["3V3"]),
        Pin(1, "GND", ["GND"]),
    ],
)
reg_in_cap = myelin_kicad_pcb.C0805("1u", "GND", "5V", ref="C4")
reg_out_cap = myelin_kicad_pcb.C0805("1u", "3V3", "GND", ref="C5")
# TODO(r2) explain the purpose of PIPWR and FX2PWR; note that neither should
# be fitted if U1 is fitted.
# Jumper: power the 3V3 rail from the Pi's 3V3 output instead of U1.
pi_power_jumper = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_1x02_Pitch2.54mm",
    identifier="PIPWR",
    value="Power from Pi",
    pins=[
        Pin(1, "", "3V3_PI"),
        Pin(2, "", "3V3"),
    ],
)
# Jumper: power the 3V3 rail from the FX2 board's 3V3 output instead of U1.
fx2_power_jumper = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_1x02_Pitch2.54mm",
    identifier="FX2PWR",
    value="Power from FX2",
    pins=[
        Pin(1, "", "3V3_FX2"),
        Pin(2, "", "3V3"),
    ],
)
# Raspberry Pi Zero GPIO header (first 26 pins), wired for PiTubeDirect.
pi_zero = myelin_kicad_pcb.Component(
    footprint="myelin-kicad:raspberry_pi_zero_flipped",
    identifier="PI1",  # TODO(r2) rename to PI
    value="Raspberry Pi Zero",
    pins=[
        Pin(1, "3V3", "3V3_PI"),  # for when we want the Pi to power the CPLD
        Pin(2, "5V"),
        Pin(3, "GPIO0-2", ["tube_A1"]),
        Pin(4, "5V"),
        Pin(5, "GPIO1-3", ["tube_A2"]),
        Pin(6, "ser_GND", ["pi_serial_GND"]),
        Pin(7, "GPIO4", ["tube_nRST"]),
        Pin(8, "ser_TX", ["pi_serial_TX"]),
        Pin(9, "GND", ["GND"]),
        Pin(10, "ser_RX", ["pi_serial_RX"]),
        Pin(11, "GPIO17", ["tube_nTUBE"]),
        Pin(12, "GPIO18", ["tube_RnW"]),
        Pin(13, "GPIO21-27", ["tube_A0"]),
        Pin(14, "GND", ["GND"]),
        Pin(15, "GPIO22", ["tube_D4"]),
        Pin(16, "GPIO23", ["tube_D5"]),
        Pin(17, "3V3"),
        Pin(18, "GPIO24", ["tube_D6"]),
        Pin(19, "GPIO10", ["tube_D2"]),
        Pin(20, "GND", ["GND"]),
        Pin(21, "GPIO9", ["tube_D1"]),
        Pin(22, "GPIO25", ["tube_D7"]),
        Pin(23, "GPIO11", ["tube_D3"]),
        Pin(24, "GPIO8", ["tube_D0"]),
        Pin(25, "GND", ["GND"]),
        Pin(26, "GPIO7", ["tube_CLK"]),
    ],
)
# Breakout header for the Pi's serial console pins.
serial_port = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_1x03_Pitch2.54mm",
    identifier="SERIAL1",  # TODO(r2) rename to SERIAL
    value="Pi Serial",
    pins=[
        Pin(1, "GND", ["pi_serial_GND"]),
        Pin(2, "TX", ["pi_serial_TX"]),
        Pin(3, "RX", ["pi_serial_RX"]),
    ],
)
# TODO(r2) explain pinout; maybe add to silkscreen
# altera jtag header, like in the lc-electronics xc9572xl board
# left column: tck tdo tms nc tdi
# right column: gnd vcc nc nc gnd
cpld_jtag = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_2x05_Pitch2.54mm",
    identifier="JTAG1",  # TODO(r2) rename to JTAG
    value="jtag",
    pins=[
        Pin(1, "TCK", ["cpld_TCK"]),  # top left
        Pin(2, "GND", ["GND"]),  # top right
        Pin(3, "TDO", ["cpld_TDO"]),
        Pin(4, "3V3", ["3V3"]),
        Pin(5, "TMS", ["cpld_TMS"]),
        Pin(6, "NC"),
        Pin(7, "NC"),
        Pin(8, "NC"),
        Pin(9, "TDI", ["cpld_TDI"]),
        Pin(10, "GND", ["GND"]),
    ],
)
# Reset level conversion using diode + pullup
# (5V cpu_nRST pulls tube_nRST low through the diode; R5 pulls it to 3V3_PI
# when released, keeping the Pi's GPIO within its 3.3V limit)
reset_3v3_pullup = myelin_kicad_pcb.R0805("10k", "3V3_PI", "tube_nRST", ref="R5")
reset_3v3_diode = myelin_kicad_pcb.DSOD323("BAT54", "cpu_nRST", "tube_nRST", ref="D1")
# DONE(r1) lcsoft mini footprint. need to flip it to look like this, so the lcsoft
# mini board can plug in to the top of the cartridge.
# R2 R1 L2 L1
# R4 R3 L4 L3
# ... ...
# R20 R19 L20 L19
# DONE(r1) double check against real lcsoft PCB
# LCSoft Mini (Cypress FX2) logic-analyzer board header.
analyzer = myelin_kicad_pcb.Component(
    footprint="myelin-kicad:lcsoft_mini_flipped",
    identifier="FX2",
    value="lcsoft mini",
    pins=[
        # left side, top to bottom, left to right, with board face up
        # and USB socket upward
        Pin( "L1", "PD5", "cpu_nNMI"),  # DONE add header for BBC test clip
        Pin( "L2", "PD6", "cpu_nRST"),
        Pin( "L3", "PD7", "tube_A0"),
        Pin( "L4", "GND", "GND"),
        Pin( "L5", "CLK"),
        Pin( "L6", "GND", "GND"),
        Pin( "L7", "RDY0", "analyzer_RDY0"),  # tie to 3V3 via 1k resistor
        Pin( "L8", "RDY1", "tube_CLK_jumper"),  # via jumper to tube_CLK
        Pin( "L9", "GND", "GND"),
        Pin("L10", "GND", "GND"),
        Pin("L11", "GND", "GND"),
        Pin("L12", "FCLK"),
        Pin("L13", "SCL"),
        Pin("L14", "SDA"),
        Pin("L15", "PB0", "tube_D0"),
        Pin("L16", "PB1", "tube_D1"),
        Pin("L17", "PB2", "tube_D2"),
        Pin("L18", "PB3", "tube_D3"),
        Pin("L19", "3V3", "3V3_FX2"),  # generated from USB
        Pin("L20", "3V3", "3V3_FX2"),  # generated from USB
        # right side, top to bottom, left to right
        Pin( "R1", "PD4", "cpu_nIRQ"),
        Pin( "R2", "PD3", "tube_CLK"),
        Pin( "R3", "PD2", "analyzer_PD2_READY"),  # DONE add header for BBC test clip
        Pin( "R4", "PD1", "cpu_SYNC"),  # DONE add header for test clip
        Pin( "R5", "PD0", "analyzer_PD0_RnW"),  # DONE add header to switch Elk pin A4/Master pin A11
        Pin( "R6", "PA7"),
        Pin( "R7", "PA6", "analyzer_PA6"),  # tie to 3V3 via 1k resistor
        Pin( "R8", "PA5", "analyzer_PA5"),  # tie to 3V3 via 1k resistor
        Pin( "R9", "PA4", "analyzer_PA4"),  # tie to GND via 1k resistor
        Pin("R10", "PA3"),
        Pin("R11", "PA2", "analyzer_PA2"),  # tie to 3V3 via 1k resistor
        Pin("R12", "PA1"),
        Pin("R13", "PA0"),
        Pin("R14", "CTL2"),
        Pin("R15", "CTL1"),
        Pin("R16", "CTL0"),
        Pin("R17", "PB7", "tube_D7"),
        Pin("R18", "PB6", "tube_D6"),
        Pin("R19", "PB5", "tube_D5"),
        Pin("R20", "PB4", "tube_D4"),
    ],
)
# Pull resistors for the FX2 strapping/config pins noted above.
pull_resistors = [
    myelin_kicad_pcb.R0805("1k", "analyzer_PA6", "3V3_FX2", ref="R1"),
    myelin_kicad_pcb.R0805("1k", "analyzer_PA5", "3V3_FX2", ref="R2"),
    myelin_kicad_pcb.R0805("1k", "analyzer_PA4", "GND", ref="R3"),
    myelin_kicad_pcb.R0805("1k", "analyzer_PA2", "3V3_FX2", ref="R4"),
    myelin_kicad_pcb.R0805("1k", "analyzer_RDY0", "3V3_FX2", ref="R6"),
]
# DONE add headers and jumpers to select READY/RnW/nNMI/SYNC everywhere.
# Elk: elk_READY_master_RnW, tube_RnW, cpu_nNMI, test clip for SYNC
# Beeb: test clip for READY, tube_RnW, test clip for nNMI, test clip for SYNC
# Master: test clip for READY, elk_READY_master_RnW, cpu_nNMI, test clip for SYNC
# L8: capture clock
# L1/PD5/nNMI: 1-pin header (already connected for Elk/Master)
# R4/PD1/SYNC: 1-pin header
# R3/PD2/READY 2-pin header: bridge for Elk, add lead for everyone else
# [PD2] [elk_READY_master_RnW]
# R5/PD0/RnW: 3-pin header: bridge left for Elk/Beeb, up for Master
# [tube_RnW] [PD0]
# Options header implementing the jumper/test-clip choices listed above.
sync_clk_jumper = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_2x04_Pitch2.54mm",
    identifier="LEADS",  # TODO(r2) rename to OPTS
    value="Options",
    pins=[
        Pin(1, "", "tube_CLK"),
        Pin(2, "", "tube_CLK_jumper"),
        Pin(3, "", "cpu_SYNC"),
        Pin(4, "", "cpu_nNMI"),
        Pin(5, "", "analyzer_PD2_READY"),
        Pin(6, "", "elk_READY_master_RnW"),
        Pin(7, "", "tube_RnW"),
        Pin(8, "", "analyzer_PD0_RnW"),
    ],
)
# Just in case we want to connect up an ext PSU for CPLD programming
ext_power = myelin_kicad_pcb.Component(
    footprint="Pin_Headers:Pin_Header_Straight_1x03_Pitch2.54mm",
    identifier="EXTPWR",
    value="ext pwr",
    pins=[
        Pin(1, "A", ["GND"]),
        Pin(2, "B", ["3V3"]),
        Pin(3, "C", ["5V"]),
    ],
)
# 32 grounded via "staples" to stitch the ground planes together.
for n in range(32):
    single_staple = myelin_kicad_pcb.Component(
        footprint="myelin-kicad:via_single",
        identifier="staple_single%d" % (n+1),
        value="",
        pins=[Pin(1, "GND", ["GND"])],
    )
# Write out the final netlist for the PCB layout tool.
myelin_kicad_pcb.dump_netlist("fx2_tube_cartridge_adapter.net")
|
google/myelin-acorn-electron-hardware
|
fx2_tube_cartridge_adapter/pcb/fx2_tube_cartridge_adapter.py
|
Python
|
apache-2.0
| 18,242
|
[
"Elk"
] |
8e7d724819f7e3a5df281609b6cfd7429f2f52ab12caa90d629f036f97fe5747
|
"""
Test functions for multivariate normal distributions.
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_array_almost_equal, assert_equal,
assert_raises, run_module_suite, TestCase)
from test_continuous_basic import check_distribution_rvs
import numpy
import numpy as np
import scipy.linalg
from scipy.stats._multivariate import _PSD, _lnB
from scipy.stats import multivariate_normal
from scipy.stats import dirichlet, beta
from scipy.stats import wishart, invwishart, chi2, invgamma
from scipy.stats import norm
from scipy.integrate import romb
from common_tests import check_random_state_property
class TestMultivariateNormal(TestCase):
    """Tests for scipy.stats.multivariate_normal and its _PSD helper."""
    def test_input_shape(self):
        # pdf must reject quantiles whose length disagrees with mean/cov.
        mu = np.arange(3)
        cov = np.identity(2)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1), mu, cov)
        assert_raises(ValueError, multivariate_normal.pdf, (0, 1, 2), mu, cov)
    def test_scalar_values(self):
        np.random.seed(1234)
        # When evaluated on scalar data, the pdf should return a scalar
        x, mean, cov = 1.5, 1.7, 2.5
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
        # When evaluated on a single vector, the pdf should return a scalar
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))  # Diagonal values for cov. matrix
        pdf = multivariate_normal.pdf(x, mean, cov)
        assert_equal(pdf.ndim, 0)
    def test_logpdf(self):
        # Check that the log of the pdf is in fact the logpdf
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        d1 = multivariate_normal.logpdf(x, mean, cov)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, np.log(d2))
    def test_rank(self):
        # Check that the rank is detected correctly.
        np.random.seed(1234)
        n = 4
        mean = np.random.randn(n)
        for expected_rank in range(1, n + 1):
            # s has expected_rank columns, so s @ s.T has that rank.
            s = np.random.randn(n, expected_rank)
            cov = np.dot(s, s.T)
            distn = multivariate_normal(mean, cov, allow_singular=True)
            assert_equal(distn.cov_info.rank, expected_rank)
    def test_degenerate_distributions(self):
        def _sample_orthonormal_matrix(n):
            # Left singular vectors of a random matrix form an orthonormal basis.
            M = np.random.randn(n, n)
            u, s, v = scipy.linalg.svd(M)
            return u
        for n in range(1, 5):
            x = np.random.randn(n)
            for k in range(1, n + 1):
                # Sample a small covariance matrix.
                s = np.random.randn(k, k)
                cov_kk = np.dot(s, s.T)
                # Embed the small covariance matrix into a larger low rank matrix.
                cov_nn = np.zeros((n, n))
                cov_nn[:k, :k] = cov_kk
                # Define a rotation of the larger low rank matrix.
                u = _sample_orthonormal_matrix(n)
                cov_rr = np.dot(u, np.dot(cov_nn, u.T))
                y = np.dot(u, x)
                # Check some identities.
                distn_kk = multivariate_normal(np.zeros(k), cov_kk,
                                               allow_singular=True)
                distn_nn = multivariate_normal(np.zeros(n), cov_nn,
                                               allow_singular=True)
                distn_rr = multivariate_normal(np.zeros(n), cov_rr,
                                               allow_singular=True)
                assert_equal(distn_kk.cov_info.rank, k)
                assert_equal(distn_nn.cov_info.rank, k)
                assert_equal(distn_rr.cov_info.rank, k)
                pdf_kk = distn_kk.pdf(x[:k])
                pdf_nn = distn_nn.pdf(x)
                pdf_rr = distn_rr.pdf(y)
                assert_allclose(pdf_kk, pdf_nn)
                assert_allclose(pdf_kk, pdf_rr)
                logpdf_kk = distn_kk.logpdf(x[:k])
                logpdf_nn = distn_nn.logpdf(x)
                logpdf_rr = distn_rr.logpdf(y)
                assert_allclose(logpdf_kk, logpdf_nn)
                assert_allclose(logpdf_kk, logpdf_rr)
    def test_large_pseudo_determinant(self):
        # Check that large pseudo-determinants are handled appropriately.
        # Construct a singular diagonal covariance matrix
        # whose pseudo determinant overflows double precision.
        large_total_log = 1000.0
        npos = 100
        nzero = 2
        large_entry = np.exp(large_total_log / npos)
        n = npos + nzero
        cov = np.zeros((n, n), dtype=float)
        np.fill_diagonal(cov, large_entry)
        cov[-nzero:, -nzero:] = 0
        # Check some determinants.
        assert_equal(scipy.linalg.det(cov), 0)
        assert_equal(scipy.linalg.det(cov[:npos, :npos]), np.inf)
        assert_allclose(np.linalg.slogdet(cov[:npos, :npos]),
                        (1, large_total_log))
        # Check the pseudo-determinant.
        psd = _PSD(cov)
        assert_allclose(psd.log_pdet, large_total_log)
    def test_broadcasting(self):
        np.random.seed(1234)
        n = 4
        # Construct a random covariance matrix.
        data = np.random.randn(n, n)
        cov = np.dot(data, data.T)
        mean = np.random.randn(n)
        # Construct an ndarray which can be interpreted as
        # a 2x3 array whose elements are random data vectors.
        X = np.random.randn(2, 3, n)
        # Check that multiple data points can be evaluated at once.
        for i in range(2):
            for j in range(3):
                actual = multivariate_normal.pdf(X[i, j], mean, cov)
                desired = multivariate_normal.pdf(X, mean, cov)[i, j]
                assert_allclose(actual, desired)
    def test_normal_1D(self):
        # The probability density function for a 1D normal variable should
        # agree with the standard normal distribution in scipy.stats.distributions
        x = np.linspace(0, 2, 10)
        mean, cov = 1.2, 0.9
        scale = cov**0.5
        d1 = norm.pdf(x, mean, scale)
        d2 = multivariate_normal.pdf(x, mean, cov)
        assert_allclose(d1, d2)
    def test_marginalization(self):
        # Integrating out one of the variables of a 2D Gaussian should
        # yield a 1D Gaussian
        mean = np.array([2.5, 3.5])
        cov = np.array([[.5, 0.2], [0.2, .6]])
        n = 2 ** 8 + 1  # Number of samples
        delta = 6 / (n - 1)  # Grid spacing
        v = np.linspace(0, 6, n)
        xv, yv = np.meshgrid(v, v)
        pos = np.empty((n, n, 2))
        pos[:, :, 0] = xv
        pos[:, :, 1] = yv
        pdf = multivariate_normal.pdf(pos, mean, cov)
        # Marginalize over x and y axis
        margin_x = romb(pdf, delta, axis=0)
        margin_y = romb(pdf, delta, axis=1)
        # Compare with standard normal distribution
        gauss_x = norm.pdf(v, loc=mean[0], scale=cov[0, 0] ** 0.5)
        gauss_y = norm.pdf(v, loc=mean[1], scale=cov[1, 1] ** 0.5)
        assert_allclose(margin_x, gauss_x, rtol=1e-2, atol=1e-2)
        assert_allclose(margin_y, gauss_y, rtol=1e-2, atol=1e-2)
    def test_frozen(self):
        # The frozen distribution should agree with the regular one
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.abs(np.random.randn(5))
        norm_frozen = multivariate_normal(mean, cov)
        assert_allclose(norm_frozen.pdf(x), multivariate_normal.pdf(x, mean, cov))
        assert_allclose(norm_frozen.logpdf(x),
                        multivariate_normal.logpdf(x, mean, cov))
    def test_pseudodet_pinv(self):
        # Make sure that pseudo-inverse and pseudo-det agree on cutoff
        # Assemble random covariance matrix with large and small eigenvalues
        np.random.seed(1234)
        n = 7
        x = np.random.randn(n, n)
        cov = np.dot(x, x.T)
        s, u = scipy.linalg.eigh(cov)
        s = 0.5 * np.ones(n)
        s[0] = 1.0
        s[-1] = 1e-7
        cov = np.dot(u, np.dot(np.diag(s), u.T))
        # Set cond so that the lowest eigenvalue is below the cutoff
        cond = 1e-5
        psd = _PSD(cov, cond=cond)
        psd_pinv = _PSD(psd.pinv, cond=cond)
        # Check that the log pseudo-determinant agrees with the sum
        # of the logs of all but the smallest eigenvalue
        assert_allclose(psd.log_pdet, np.sum(np.log(s[:-1])))
        # Check that the pseudo-determinant of the pseudo-inverse
        # agrees with 1 / pseudo-determinant
        assert_allclose(-psd.log_pdet, psd_pinv.log_pdet)
    def test_exception_nonsquare_cov(self):
        cov = [[1, 2, 3], [4, 5, 6]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_nonfinite_cov(self):
        cov_nan = [[1, 0], [0, np.nan]]
        assert_raises(ValueError, _PSD, cov_nan)
        cov_inf = [[1, 0], [0, np.inf]]
        assert_raises(ValueError, _PSD, cov_inf)
    def test_exception_non_psd_cov(self):
        cov = [[1, 0], [0, -1]]
        assert_raises(ValueError, _PSD, cov)
    def test_exception_singular_cov(self):
        # Singular cov must raise unless allow_singular=True is passed.
        np.random.seed(1234)
        x = np.random.randn(5)
        mean = np.random.randn(5)
        cov = np.ones((5, 5))
        e = np.linalg.LinAlgError
        assert_raises(e, multivariate_normal, mean, cov)
        assert_raises(e, multivariate_normal.pdf, x, mean, cov)
        assert_raises(e, multivariate_normal.logpdf, x, mean, cov)
    def test_R_values(self):
        # Compare the multivariate pdf with some values precomputed
        # in R version 3.0.1 (2013-05-16) on Mac OS X 10.6.
        # The values below were generated by the following R-script:
        # > library(mnormt)
        # > x <- seq(0, 2, length=5)
        # > y <- 3*x - 2
        # > z <- x + cos(y)
        # > mu <- c(1, 3, 2)
        # > Sigma <- matrix(c(1,2,0,2,5,0.5,0,0.5,3), 3, 3)
        # > r_pdf <- dmnorm(cbind(x,y,z), mu, Sigma)
        r_pdf = np.array([0.0002214706, 0.0013819953, 0.0049138692,
                          0.0103803050, 0.0140250800])
        x = np.linspace(0, 2, 5)
        y = 3 * x - 2
        z = x + np.cos(y)
        r = np.array([x, y, z]).T
        mean = np.array([1, 3, 2], 'd')
        cov = np.array([[1, 2, 0], [2, 5, .5], [0, .5, 3]], 'd')
        pdf = multivariate_normal.pdf(r, mean, cov)
        assert_allclose(pdf, r_pdf, atol=1e-10)
    def test_multivariate_normal_rvs_zero_covariance(self):
        # A zero covariance collapses all samples onto the mean.
        mean = np.zeros(2)
        covariance = np.zeros((2, 2))
        model = multivariate_normal(mean, covariance, allow_singular=True)
        sample = model.rvs()
        assert_equal(sample, [0, 0])
    def test_rvs_shape(self):
        # Check that rvs parses the mean and covariance correctly, and returns
        # an array of the right shape
        N = 300
        d = 4
        sample = multivariate_normal.rvs(mean=np.zeros(d), cov=1, size=N)
        assert_equal(sample.shape, (N, d))
        sample = multivariate_normal.rvs(mean=None,
                                         cov=np.array([[2, .1], [.1, 1]]),
                                         size=N)
        assert_equal(sample.shape, (N, 2))
        u = multivariate_normal(mean=0, cov=1)
        sample = u.rvs(N)
        assert_equal(sample.shape, (N, ))
    def test_large_sample(self):
        # Generate large sample and compare sample mean and sample covariance
        # with mean and covariance matrix.
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        size = 5000
        sample = multivariate_normal.rvs(mean, cov, size)
        assert_allclose(numpy.cov(sample.T), cov, rtol=1e-1)
        assert_allclose(sample.mean(0), mean, rtol=1e-1)
    def test_entropy(self):
        np.random.seed(2846)
        n = 3
        mean = np.random.randn(n)
        M = np.random.randn(n, n)
        cov = np.dot(M, M.T)
        rv = multivariate_normal(mean, cov)
        # Check that frozen distribution agrees with entropy function
        assert_almost_equal(rv.entropy(), multivariate_normal.entropy(mean, cov))
        # Compare entropy with manually computed expression involving
        # the sum of the logs of the eigenvalues of the covariance matrix
        eigs = np.linalg.eig(cov)[0]
        desired = 1 / 2 * (n * (np.log(2 * np.pi) + 1) + np.sum(np.log(eigs)))
        assert_almost_equal(desired, rv.entropy())
    def test_lnB(self):
        alpha = np.array([1, 1, 1])
        desired = .5  # e^lnB = 1/2 for [1, 1, 1]
        assert_almost_equal(np.exp(_lnB(alpha)), desired)
class TestDirichlet(TestCase):
    """Tests for scipy.stats.dirichlet: input validation and known identities."""
    def test_frozen_dirichlet(self):
        # Frozen distribution must agree with the direct function calls.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        assert_equal(d.var(), dirichlet.var(alpha))
        assert_equal(d.mean(), dirichlet.mean(alpha))
        assert_equal(d.entropy(), dirichlet.entropy(alpha))
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_equal(d.pdf(x[:-1]), dirichlet.pdf(x[:-1], alpha))
            assert_equal(d.logpdf(x[:-1]), dirichlet.logpdf(x[:-1], alpha))
    def test_numpy_rvs_shape_compatibility(self):
        # Samples from numpy come row-wise; dirichlet.pdf wants column-wise.
        np.random.seed(2846)
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.random.dirichlet(alpha, size=7)
        assert_equal(x.shape, (7, 3))
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
        dirichlet.pdf(x.T, alpha)
        dirichlet.pdf(x.T[:-1], alpha)
        dirichlet.logpdf(x.T, alpha)
        dirichlet.logpdf(x.T[:-1], alpha)
    def test_alpha_with_zeros(self):
        np.random.seed(2846)
        alpha = [1.0, 0.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_with_negative_entries(self):
        np.random.seed(2846)
        alpha = [1.0, -2.0, 3.0]
        x = np.random.dirichlet(alpha, size=7).T
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_zeros(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 0.0, 0.2, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_negative_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, -0.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_with_too_large_entries(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.array([0.1, 1.1, 0.3, 0.7])
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_too_deep_c(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((2, 7, 7)) / 14
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_too_deep(self):
        alpha = np.array([[1.0, 2.0], [3.0, 4.0]])
        x = np.ones((2, 2, 7)) / 4
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_alpha_correct_depth(self):
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 3
        dirichlet.pdf(x, alpha)
        dirichlet.logpdf(x, alpha)
    def test_non_simplex_data(self):
        # Columns must sum to one; these sum to 1.5.
        alpha = np.array([1.0, 2.0, 3.0])
        x = np.ones((3, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_short(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((2, 7)) / 2
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_data_vector_too_long(self):
        alpha = np.array([1.0, 2.0, 3.0, 4.0])
        x = np.ones((5, 7)) / 5
        assert_raises(ValueError, dirichlet.pdf, x, alpha)
        assert_raises(ValueError, dirichlet.logpdf, x, alpha)
    def test_simple_values(self):
        # Dirichlet([1, 1]) is the uniform distribution, i.e. Beta(1, 1).
        alpha = np.array([1, 1])
        d = dirichlet(alpha)
        assert_almost_equal(d.mean(), 0.5)
        assert_almost_equal(d.var(), 1. / 12.)
        b = beta(1, 1)
        assert_almost_equal(d.mean(), b.mean())
        assert_almost_equal(d.var(), b.var())
    def test_K_and_K_minus_1_calls_equal(self):
        # Test that calls with K and K-1 entries yield the same results.
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, n)
            x /= np.sum(x)
            assert_almost_equal(d.pdf(x[:-1]), d.pdf(x))
    def test_multiple_entry_calls(self):
        # Test that calls with multiple x vectors as matrix work
        np.random.seed(2846)
        n = np.random.randint(1, 32)
        alpha = np.random.uniform(10e-10, 100, n)
        d = dirichlet(alpha)
        num_tests = 10
        num_multiple = 5
        xm = None
        for i in range(num_tests):
            for m in range(num_multiple):
                x = np.random.uniform(10e-10, 100, n)
                x /= np.sum(x)
                if xm is not None:
                    xm = np.vstack((xm, x))
                else:
                    xm = x
            rm = d.pdf(xm.T)
            rs = None
            for xs in xm:
                r = d.pdf(xs)
                if rs is not None:
                    rs = np.append(rs, r)
                else:
                    rs = r
            assert_array_almost_equal(rm, rs)
    def test_2D_dirichlet_is_beta(self):
        np.random.seed(2846)
        alpha = np.random.uniform(10e-10, 100, 2)
        d = dirichlet(alpha)
        b = beta(alpha[0], alpha[1])
        num_tests = 10
        for i in range(num_tests):
            x = np.random.uniform(10e-10, 100, 2)
            x /= np.sum(x)
            assert_almost_equal(b.pdf(x), d.pdf([x]))
        assert_almost_equal(b.mean(), d.mean()[0])
        assert_almost_equal(b.var(), d.var()[0])
def test_multivariate_normal_dimensions_mismatch():
    """Regression test for GH #3493.

    A mean of length M with an (N, N) covariance where M != N must raise a
    ValueError carrying an informative message.
    """
    mu = np.array([0.0, 0.0])
    sigma = np.array([[1.0]])
    assert_raises(ValueError, multivariate_normal, mu, sigma)
    # Check only the leading part of the message; matching it word for word
    # would make the test needlessly fragile.
    expected_prefix = "Dimension mismatch"
    try:
        multivariate_normal(mu, sigma)
    except ValueError as err:
        assert_equal(str(err)[:len(expected_prefix)], expected_prefix)
class TestWishart(TestCase):
    """Tests for the Wishart distribution (``scipy.stats.wishart``)."""

    def test_scale_dimensions(self):
        # Test that we can call the Wishart with various scale dimensions

        # Test case: dim=1, scale=1
        true_scale = np.array(1, ndmin=2)
        scales = [
            1,                    # scalar
            [1],                  # iterable
            np.array(1),          # 0-dim
            np.r_[1],             # 1-dim
            np.array(1, ndmin=2)  # 2-dim
        ]
        for scale in scales:
            w = wishart(1, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # Test case: dim=2, scale=[[1,0]
        #                          [0,2]
        true_scale = np.array([[1,0],
                               [0,2]])
        scales = [
            [1,2],             # iterable
            np.r_[1,2],        # 1-dim
            np.array([[1,0],   # 2-dim
                      [0,2]])
        ]
        for scale in scales:
            w = wishart(2, scale)
            assert_equal(w.scale, true_scale)
            assert_equal(w.scale.shape, true_scale.shape)

        # We cannot call with a df < dim
        assert_raises(ValueError, wishart, 1, np.eye(2))

        # We cannot call with a 3-dimension array
        scale = np.array(1, ndmin=3)
        assert_raises(ValueError, wishart, 1, scale)

    def test_quantile_dimensions(self):
        # Test that we can call the Wishart rvs with various quantile dimensions

        # If dim == 1, consider x.shape = [1,1,1]
        X = [
            1,                      # scalar
            [1],                    # iterable
            np.array(1),            # 0-dim
            np.r_[1],               # 1-dim
            np.array(1, ndmin=2),   # 2-dim
            np.array([1], ndmin=3)  # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array(1, ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 1, consider x.shape = [1,1,*]
        X = [
            [1,2,3],                    # iterable
            np.r_[1,2,3],               # 1-dim
            np.array([1,2,3], ndmin=3)  # 3-dim
        ]
        w = wishart(1,1)
        density = w.pdf(np.array([1,2,3], ndmin=3))
        for x in X:
            assert_equal(w.pdf(x), density)

        # If dim == 2, consider x.shape = [2,2,1]
        # where x[:,:,*] = np.eye(1)*2
        X = [
            2,                 # scalar
            [2,2],             # iterable
            np.array(2),       # 0-dim
            np.r_[2,2],        # 1-dim
            np.array([[2,0],
                      [0,2]]),                 # 2-dim
            np.array([[2,0],
                      [0,2]])[:,:,np.newaxis]  # 3-dim
        ]
        w = wishart(2,np.eye(2))
        density = w.pdf(np.array([[2,0],
                                  [0,2]])[:,:,np.newaxis])
        for x in X:
            assert_equal(w.pdf(x), density)

    def test_frozen(self):
        # Test that the frozen and non-frozen Wishart gives the same answers

        # Construct an arbitrary positive definite scale matrix
        dim = 4
        scale = np.diag(np.arange(dim)+1)
        scale[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
        scale = np.dot(scale.T, scale)

        # Construct a collection of positive definite matrices to test the PDF
        X = []
        for i in range(5):
            x = np.diag(np.arange(dim)+(i+1)**2)
            x[np.tril_indices(dim, k=-1)] = np.arange(dim * (dim-1) // 2)
            x = np.dot(x.T, x)
            X.append(x)
        X = np.array(X).T

        # Construct a 1D and 2D set of parameters
        parameters = [
            (10, 1, np.linspace(0.1, 10, 5)),  # 1D case
            (10, scale, X)
        ]

        # Frozen methods must agree with the distribution-level calls.
        for (df, scale, x) in parameters:
            w = wishart(df, scale)
            assert_equal(w.var(), wishart.var(df, scale))
            assert_equal(w.mean(), wishart.mean(df, scale))
            assert_equal(w.mode(), wishart.mode(df, scale))
            assert_equal(w.entropy(), wishart.entropy(df, scale))
            assert_equal(w.pdf(x), wishart.pdf(x, df, scale))

    def test_1D_is_chisquared(self):
        # The 1-dimensional Wishart with an identity scale matrix is just a
        # chi-squared distribution.
        # Test variance, mean, entropy, pdf
        # Kolgomorov-Smirnov test for rvs
        np.random.seed(482974)

        sn = 500  # number of random variates drawn for the KS test
        dim = 1
        scale = np.eye(dim)

        df_range = np.arange(1, 10, 2, dtype=float)
        X = np.linspace(0.1,10,num=10)
        for df in df_range:
            w = wishart(df, scale)
            c = chi2(df)

            # Statistics
            assert_allclose(w.var(), c.var())
            assert_allclose(w.mean(), c.mean())
            assert_allclose(w.entropy(), c.entropy())

            # PDF
            assert_allclose(w.pdf(X), c.pdf(X))

            # rvs
            rvs = w.rvs(size=sn)
            args = (df,)
            alpha = 0.01  # KS-test significance level
            check_distribution_rvs('chi2', args, alpha, rvs)

    def test_is_scaled_chisquared(self):
        # The 2-dimensional Wishart with an arbitrary scale matrix can be
        # transformed to a scaled chi-squared distribution.
        # For :math:`S \sim W_p(V,n)` and :math:`\lambda \in \mathbb{R}^p` we have
        # :math:`\lambda' S \lambda \sim \lambda' V \lambda \times \chi^2(n)`
        np.random.seed(482974)

        sn = 500
        df = 10
        dim = 4
        # Construct an arbitrary positive definite matrix
        scale = np.diag(np.arange(4)+1)
        scale[np.tril_indices(4, k=-1)] = np.arange(6)
        scale = np.dot(scale.T, scale)
        # Use :math:`\lambda = [1, \dots, 1]'`
        lamda = np.ones((dim,1))
        sigma_lamda = lamda.T.dot(scale).dot(lamda).squeeze()
        w = wishart(df, sigma_lamda)
        c = chi2(df, scale=sigma_lamda)

        # Statistics
        assert_allclose(w.var(), c.var())
        assert_allclose(w.mean(), c.mean())
        assert_allclose(w.entropy(), c.entropy())

        # PDF
        X = np.linspace(0.1,10,num=10)
        assert_allclose(w.pdf(X), c.pdf(X))

        # rvs
        rvs = w.rvs(size=sn)
        args = (df,0,sigma_lamda)
        alpha = 0.01
        check_distribution_rvs('chi2', args, alpha, rvs)
class TestInvwishart(TestCase):
def test_frozen(self):
# Test that the frozen and non-frozen inverse Wishart gives the same
# answers
# Construct an arbitrary positive definite scale matrix
dim = 4
scale = np.diag(np.arange(dim)+1)
scale[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
scale = np.dot(scale.T, scale)
# Construct a collection of positive definite matrices to test the PDF
X = []
for i in range(5):
x = np.diag(np.arange(dim)+(i+1)**2)
x[np.tril_indices(dim, k=-1)] = np.arange(dim*(dim-1)/2)
x = np.dot(x.T, x)
X.append(x)
X = np.array(X).T
# Construct a 1D and 2D set of parameters
parameters = [
(10, 1, np.linspace(0.1, 10, 5)), # 1D case
(10, scale, X)
]
for (df, scale, x) in parameters:
iw = invwishart(df, scale)
assert_equal(iw.var(), invwishart.var(df, scale))
assert_equal(iw.mean(), invwishart.mean(df, scale))
assert_equal(iw.mode(), invwishart.mode(df, scale))
assert_allclose(iw.pdf(x), invwishart.pdf(x, df, scale))
def test_1D_is_invgamma(self):
# The 1-dimensional inverse Wishart with an identity scale matrix is
# just an inverse gamma distribution.
# Test variance, mean, pdf
# Kolgomorov-Smirnov test for rvs
np.random.seed(482974)
sn = 500
dim = 1
scale = np.eye(dim)
df_range = np.arange(5, 20, 2, dtype=float)
X = np.linspace(0.1,10,num=10)
for df in df_range:
iw = invwishart(df, scale)
ig = invgamma(df/2, scale=1./2)
# Statistics
assert_allclose(iw.var(), ig.var())
assert_allclose(iw.mean(), ig.mean())
# PDF
assert_allclose(iw.pdf(X), ig.pdf(X))
# rvs
rvs = iw.rvs(size=sn)
args = (df/2, 0, 1./2)
alpha = 0.01
check_distribution_rvs('invgamma', args, alpha, rvs)
def test_wishart_invwishart_2D_rvs(self):
dim = 3
df = 10
# Construct a simple non-diagonal positive definite matrix
scale = np.eye(dim)
scale[0,1] = 0.5
scale[1,0] = 0.5
# Construct frozen Wishart and inverse Wishart random variables
w = wishart(df, scale)
iw = invwishart(df, scale)
# Get the generated random variables from a known seed
np.random.seed(248042)
w_rvs = wishart.rvs(df, scale)
np.random.seed(248042)
frozen_w_rvs = w.rvs()
np.random.seed(248042)
iw_rvs = invwishart.rvs(df, scale)
np.random.seed(248042)
frozen_iw_rvs = iw.rvs()
# Manually calculate what it should be, based on the Bartlett (1933)
# decomposition of a Wishart into D A A' D', where D is the Cholesky
# factorization of the scale matrix and A is the lower triangular matrix
# with the square root of chi^2 variates on the diagonal and N(0,1)
# variates in the lower triangle.
np.random.seed(248042)
covariances = np.random.normal(size=3)
variances = np.r_[
np.random.chisquare(df),
np.random.chisquare(df-1),
np.random.chisquare(df-2),
]**0.5
# Construct the lower-triangular A matrix
A = np.diag(variances)
A[np.tril_indices(dim, k=-1)] = covariances
# Wishart random variate
D = np.linalg.cholesky(scale)
DA = D.dot(A)
manual_w_rvs = np.dot(DA, DA.T)
# inverse Wishart random variate
# Supposing that the inverse wishart has scale matrix `scale`, then the
# random variate is the inverse of a random variate drawn from a Wishart
# distribution with scale matrix `inv_scale = np.linalg.inv(scale)`
iD = np.linalg.cholesky(np.linalg.inv(scale))
iDA = iD.dot(A)
manual_iw_rvs = np.linalg.inv(np.dot(iDA, iDA.T))
# Test for equality
assert_allclose(w_rvs, manual_w_rvs)
assert_allclose(frozen_w_rvs, manual_w_rvs)
assert_allclose(iw_rvs, manual_iw_rvs)
assert_allclose(frozen_iw_rvs, manual_iw_rvs)
def test_random_state_property():
    """Each multivariate distribution honors the random_state machinery."""
    scale = np.eye(3)
    scale[0, 1] = scale[1, 0] = 0.5
    cases = [
        (multivariate_normal, ()),
        (dirichlet, (np.array([1.]),)),
        (wishart, (10, scale)),
        (invwishart, (10, scale)),
    ]
    for distfn, args in cases:
        check_random_state_property(distfn, args)
if __name__ == "__main__":
    # Run this module's tests directly (legacy numpy.testing entry point).
    run_module_suite()
|
ales-erjavec/scipy
|
scipy/stats/tests/test_multivariate.py
|
Python
|
bsd-3-clause
| 30,527
|
[
"Gaussian"
] |
10506ab6d578bee9f6d2b35f504e95892c3e5d0e39a7d2898788cdece96c6ff3
|
"""
Calculate driving_force due to ZPF tielines.
The general approach is similar to the PanOptimizer rough search method.
1. With all phases active, calculate the chemical potentials of the tieline
endpoints via ``equilibrium`` calls. Done in ``estimate_hyperplane``.
2. Calculate the target chemical potentials, which are the average chemical
potentials of all of the current chemical potentials at the tieline endpoints.
3. Calculate the current chemical potentials of the desired single phases
4. The error is the difference between these chemical potentials
There's some special handling for tieline endpoints where we do not know the
composition conditions to calculate chemical potentials at.
"""
import logging
from dataclasses import dataclass
from collections import OrderedDict
from typing import Sequence, Dict, Any, Union, List, Tuple, Type, Optional
import numpy as np
from numpy.typing import ArrayLike
from scipy.stats import norm
import tinydb
from pycalphad import Database, Model, variables as v
from pycalphad.codegen.callables import build_phase_records
from pycalphad.core.utils import instantiate_models, filter_phases, unpack_components
from pycalphad.core.phase_rec import PhaseRecord
from espei.utils import PickleableTinyDB
from espei.shadow_functions import equilibrium_, calculate_, no_op_equilibrium_, update_phase_record_parameters, constrained_equilibrium
from pycalphad.core.calculate import _sample_phase_constitution
from pycalphad.core.utils import point_sample
_log = logging.getLogger(__name__)
@dataclass
class RegionVertex:
    """A single-phase vertex (tie-line endpoint) of a ZPF phase region."""
    phase_name: str
    composition: ArrayLike  # 1D of size (number nonvacant pure elements)
    comp_conds: Dict[v.X, float]
    # Candidate internal degree-of-freedom points near the target composition,
    # or None when the composition is unknown or the phase is stoichiometric
    # (see get_zpf_data).
    points: ArrayLike
    phase_records: Dict[str, PhaseRecord]
    is_disordered: bool
    has_missing_comp_cond: bool
@dataclass
class PhaseRegion:
    """A set of phases in equilibrium along one tie-line, together with the
    conditions, species, and models needed to evaluate them."""
    vertices: Sequence[RegionVertex]
    potential_conds: Dict[v.StateVariable, float]
    species: Sequence[v.Species]
    phases: Sequence[str]
    models: Dict[str, Model]

    def eq_str(self):
        """Return a short human-readable summary of this equilibrium."""
        vertex_strs = [f'{vtx.phase_name}: {vtx.comp_conds}' for vtx in self.vertices]
        return f"conds: ({self.potential_conds}), comps: ({', '.join(vertex_strs)})"
def _extract_pot_conds(all_conditions: Dict[v.StateVariable, np.ndarray], idx: int) -> Dict[v.StateVariable, float]:
    """Build the potential (e.g. P, T) conditions for one phase region.

    Dataset conditions are either scalars or 1d arrays spanning the whole
    dataset; ``idx`` selects the entry belonging to the current region.
    """
    extracted = {}  # e.g. {v.P: ..., v.T: ...}
    for cond_key, cond_val in all_conditions.items():
        values = np.atleast_1d(np.asarray(cond_val))
        # A length-1 array is a scalar broadcast over all regions; otherwise
        # pick the element for this region.
        selected = values[idx] if len(values) > 1 else values
        extracted[getattr(v, cond_key)] = float(selected)
    return extracted
def _extract_phases_comps(vertex):
    """Return (phase name, composition conditions, disordered flag) for one
    tie-line vertex entry from a ZPF dataset.

    Raises
    ------
    ValueError
        If the vertex does not have 3 or 4 entries.
    """
    if len(vertex) == 4:
        # The optional fourth entry is a phase flag.
        phase_name, components, compositions, flag = vertex
        disordered_flag = (flag == "disordered")
    elif len(vertex) == 3:
        phase_name, components, compositions = vertex
        disordered_flag = False
    else:
        raise ValueError("Wrong number of data in tie-line point")
    comp_conds = {v.X(comp.upper()): amt for comp, amt in zip(components, compositions)}
    return phase_name, comp_conds, disordered_flag
def _phase_is_stoichiometric(mod):
return all(len(subl) == 1 for subl in mod.constituents)
def _compute_vertex_composition(comps: Sequence[str], comp_conds: Dict[str, float]):
    """Compute the overall composition in a vertex assuming an N=1 normalization condition.

    Parameters
    ----------
    comps :
        Active component names; 'VA' is excluded from the balance.
    comp_conds :
        Mapping of ``v.X`` composition conditions to mole fractions.

    Returns
    -------
    np.ndarray
        Mole fractions ordered by sorted pure element name. Entries remain NaN
        when more than one composition is unknown; callers detect this and mark
        the vertex as having missing composition conditions.
    """
    pure_elements = sorted(c for c in comps if c != 'VA')
    # np.float64 rather than the np.float_ alias, which was removed in NumPy 2.0.
    vertex_composition = np.empty(len(pure_elements), dtype=np.float64)
    unknown_indices = []
    for idx, el in enumerate(pure_elements):
        amt = comp_conds.get(v.X(el), None)
        if amt is None:
            unknown_indices.append(idx)
            vertex_composition[idx] = np.nan
        else:
            vertex_composition[idx] = amt
    if len(unknown_indices) == 1:
        # Exactly one unknown: determine the dependent component by mass balance.
        vertex_composition[unknown_indices[0]] = 1 - np.nansum(vertex_composition)
    return vertex_composition
def _subsample_phase_points(phase_record, phase_points, target_composition, additional_distance_radius=0.02):
    """Select candidate points whose composition lies near the target.

    Keeps every point whose mean absolute composition residual is within
    ``additional_distance_radius`` of the best residual found.
    """
    num_points = phase_points.shape[0]
    num_components = target_composition.size
    # Compute the mole fractions of each candidate point.
    # TODO: potential bug here if the composition has dependence (even piecewise
    # dependence) in the state variables. The compositions may be nan in this case.
    phase_compositions = np.zeros((num_points, num_components), order='F')
    statevar_placeholder = np.ones((num_points, phase_record.num_statevars))
    dof = np.hstack((statevar_placeholder, phase_points))
    for el_idx in range(num_components):
        phase_record.mass_obj_2d(phase_compositions[:, el_idx], dof, el_idx)
    # Mean absolute deviation of each point from the target composition.
    distances = np.mean(np.abs(phase_compositions - target_composition), axis=1)
    # Keep the indices within the radius of the minimum distance.
    keep_indices = np.nonzero(distances < (distances.min() + additional_distance_radius))[0]
    return phase_points[keep_indices]
def get_zpf_data(dbf: Database, comps: Sequence[str], phases: Sequence[str], datasets: PickleableTinyDB, parameters: Dict[str, float], model: Optional[Dict[str, Type[Model]]] = None):
    """
    Return the ZPF data used in the calculation of ZPF error

    Parameters
    ----------
    dbf : Database
        Thermodynamic database containing the relevant parameters.
    comps : list
        List of active component names
    phases : list
        List of phases to consider
    datasets : espei.utils.PickleableTinyDB
        Datasets that contain single phase data
    parameters : dict
        Dictionary mapping symbols to optimize to their initial values
    model : Optional[Dict[str, Type[Model]]]
        Dictionary phase names to pycalphad Model classes.

    Returns
    -------
    list
        List of data dictionaries with keys ``weight``, ``phase_regions`` and ``dataset_references``.

    """
    # Select ZPF datasets whose components are a subset of the active
    # components and which share at least one phase with `phases`.
    desired_data = datasets.search((tinydb.where('output') == 'ZPF') &
                                   (tinydb.where('components').test(lambda x: set(x).issubset(comps))) &
                                   (tinydb.where('phases').test(lambda x: len(set(phases).intersection(x)) > 0)))

    zpf_data = []  # 1:1 correspondence with each dataset
    for data in desired_data:
        data_comps = list(set(data['components']).union({'VA'}))
        species = sorted(unpack_components(dbf, data_comps), key=str)
        data_phases = filter_phases(dbf, species, candidate_phases=phases)
        models = instantiate_models(dbf, species, data_phases, model=model, parameters=parameters)
        # assumed N, P, T state variables
        phase_recs = build_phase_records(dbf, species, data_phases, {v.N, v.P, v.T}, models, parameters=parameters, build_gradients=True, build_hessians=True)
        all_phase_points = {phase_name: _sample_phase_constitution(models[phase_name], point_sample, True, 50) for phase_name in data_phases}
        all_regions = data['values']
        conditions = data['conditions']
        phase_regions = []
        # Each phase_region is one set of phases in equilibrium (on a tie-line),
        # e.g. [["ALPHA", ["B"], [0.25]], ["BETA", ["B"], [0.5]]]
        for idx, phase_region in enumerate(all_regions):
            # Extract the conditions for entire phase region
            pot_conds = _extract_pot_conds(conditions, idx)
            pot_conds.setdefault(v.N, 1.0)  # Add v.N condition, if missing
            # Extract all the phases and compositions from the tie-line points
            vertices = []
            for vertex in phase_region:
                phase_name, comp_conds, disordered_flag = _extract_phases_comps(vertex)
                # Construct single-phase points satisfying the conditions for each phase in the region
                mod = models[phase_name]
                composition = _compute_vertex_composition(data_comps, comp_conds)
                if np.any(np.isnan(composition)):
                    # We can't construct points because we don't have a known composition
                    has_missing_comp_cond = True
                    phase_points = None
                elif _phase_is_stoichiometric(mod):
                    # Stoichiometric phases have no internal degrees of freedom to sample.
                    has_missing_comp_cond = False
                    phase_points = None
                else:
                    has_missing_comp_cond = False
                    # Only sample points that have an average mass residual within tol
                    tol = 0.02
                    phase_points = _subsample_phase_points(phase_recs[phase_name], all_phase_points[phase_name], composition, tol)
                    assert phase_points.shape[0] > 0, f"phase {phase_name} must have at least one set of points within the target tolerance {pot_conds} {comp_conds}"
                vtx = RegionVertex(phase_name, composition, comp_conds, phase_points, phase_recs, disordered_flag, has_missing_comp_cond)
                vertices.append(vtx)
            region = PhaseRegion(vertices, pot_conds, species, data_phases, models)
            phase_regions.append(region)

        data_dict = {
            'weight': data.get('weight', 1.0),
            'phase_regions': phase_regions,
            'dataset_reference': data['reference']
        }
        zpf_data.append(data_dict)
    return zpf_data
def estimate_hyperplane(phase_region: PhaseRegion, parameters: np.ndarray, approximate_equilibrium: bool = False) -> np.ndarray:
    """
    Calculate the chemical potentials for the target hyperplane, one vertex at a time

    Parameters
    ----------
    phase_region : PhaseRegion
        Phase region defining the phases in equilibrium and their conditions.
    parameters : np.ndarray
        Values to substitute for the fit parameters in the phase records.
    approximate_equilibrium : bool
        If True, use the faster ``no_op_equilibrium_`` that does not refine
        the solution from the starting point.

    Returns
    -------
    np.ndarray
        Mean chemical potentials over the usable tie-line vertices; entries
        may be NaN if no vertex produced usable chemical potentials.

    Notes
    -----
    This takes just *one* set of phase equilibria, a phase region, e.g. a dataset point of
    [['FCC_A1', ['CU'], [0.1]], ['LAVES_C15', ['CU'], [0.3]]]
    and calculates the chemical potentials given all the phases possible at the
    given compositions. Then the average chemical potentials of each end point
    are taken as the target hyperplane for the given equilibria.

    """
    if approximate_equilibrium:
        _equilibrium = no_op_equilibrium_
    else:
        _equilibrium = equilibrium_

    target_hyperplane_chempots = []
    target_hyperplane_phases = []
    species = phase_region.species
    phases = phase_region.phases
    models = phase_region.models
    for vertex in phase_region.vertices:
        phase_records = vertex.phase_records
        update_phase_record_parameters(phase_records, parameters)
        cond_dict = {**vertex.comp_conds, **phase_region.potential_conds}
        if vertex.has_missing_comp_cond:
            # This composition is unknown -- it doesn't contribute to hyperplane estimation
            pass
        else:
            # Extract chemical potential hyperplane from multi-phase calculation
            # Note that we consider all phases in the system, not just ones in this tie region
            str_statevar_dict = OrderedDict([(str(key), cond_dict[key]) for key in sorted(phase_region.potential_conds.keys(), key=str)])
            grid = calculate_(species, phases, str_statevar_dict, models, phase_records, pdens=50, fake_points=True)
            multi_eqdata = _equilibrium(phase_records, cond_dict, grid)
            target_hyperplane_phases.append(multi_eqdata.Phase.squeeze())
            # Does there exist only a single phase in the result with zero internal degrees of freedom?
            # We should exclude those chemical potentials from the average because they are meaningless.
            num_phases = np.sum(multi_eqdata.Phase.squeeze() != '')
            Y_values = multi_eqdata.Y.squeeze()
            no_internal_dof = np.all((np.isclose(Y_values, 1.)) | np.isnan(Y_values))
            MU_values = multi_eqdata.MU.squeeze()
            if (num_phases == 1) and no_internal_dof:
                # NaNs are excluded from the average by np.nanmean below.
                target_hyperplane_chempots.append(np.full_like(MU_values, np.nan))
            else:
                target_hyperplane_chempots.append(MU_values)
    target_hyperplane_mean_chempots = np.nanmean(target_hyperplane_chempots, axis=0, dtype=np.float_)
    return target_hyperplane_mean_chempots
def driving_force_to_hyperplane(target_hyperplane_chempots: np.ndarray,
                                phase_region: PhaseRegion, vertex: RegionVertex,
                                parameters: np.ndarray, approximate_equilibrium: bool = False) -> float:
    """Calculate the integrated driving force between the current hyperplane and target hyperplane.

    Parameters
    ----------
    target_hyperplane_chempots : np.ndarray
        Chemical potentials of the target hyperplane (see ``estimate_hyperplane``).
    phase_region : PhaseRegion
        Phase region the vertex belongs to.
    vertex : RegionVertex
        The single-phase vertex to compute the driving force for.
    parameters : np.ndarray
        Values to substitute for the fit parameters in the phase records.
    approximate_equilibrium : bool
        Accepted for API symmetry with ``estimate_hyperplane`` but currently
        unused in this function (see the TODO below).

    Returns
    -------
    float
        The driving force; ``np.inf`` when the constrained equilibrium fails
        to converge, ``0`` when a disordered configuration cannot be built.
    """
    species = phase_region.species
    models = phase_region.models
    current_phase = vertex.phase_name
    cond_dict = {**phase_region.potential_conds, **vertex.comp_conds}
    str_statevar_dict = OrderedDict([(str(key),cond_dict[key]) for key in sorted(phase_region.potential_conds.keys(), key=str)])
    phase_points = vertex.points
    phase_records = vertex.phase_records
    update_phase_record_parameters(phase_records, parameters)
    if phase_points is None:
        # We don't have the phase composition here, so we estimate the driving force.
        # Can happen if one of the composition conditions is unknown or if the phase is
        # stoichiometric and the user did not specify a valid phase composition.
        single_eqdata = calculate_(species, [current_phase], str_statevar_dict, models, phase_records, pdens=50)
        df = np.multiply(target_hyperplane_chempots, single_eqdata.X).sum(axis=-1) - single_eqdata.GM
        driving_force = float(df.max())
    elif vertex.is_disordered:
        # Construct disordered sublattice configuration from composition dict
        # Compute energy
        # Compute residual driving force
        # TODO: Check that it actually makes sense to declare this phase 'disordered'
        num_dof = sum([len(subl) for subl in models[current_phase].constituents])
        desired_sitefracs = np.ones(num_dof, dtype=np.float_)
        dof_idx = 0
        for subl in models[current_phase].constituents:
            dof = sorted(subl, key=str)
            num_subl_dof = len(subl)
            if v.Species("VA") in dof:
                if num_subl_dof == 1:
                    # A pure-vacancy sublattice cannot be represented here.
                    _log.debug('Cannot predict the site fraction of vacancies in the disordered configuration %s of %s. Returning driving force of zero.', subl, current_phase)
                    return 0
                else:
                    sitefracs_to_add = [1.0]
            else:
                sitefracs_to_add = np.array([cond_dict.get(v.X(d)) for d in dof], dtype=np.float_)
                # Fix composition of dependent component
                sitefracs_to_add[np.isnan(sitefracs_to_add)] = 1 - np.nansum(sitefracs_to_add)
            desired_sitefracs[dof_idx:dof_idx + num_subl_dof] = sitefracs_to_add
            dof_idx += num_subl_dof
        single_eqdata = calculate_(species, [current_phase], str_statevar_dict, models, phase_records, points=np.asarray([desired_sitefracs]))
        driving_force = np.multiply(target_hyperplane_chempots, single_eqdata.X).sum(axis=-1) - single_eqdata.GM
        driving_force = float(np.squeeze(driving_force))
    else:
        # Extract energies from single-phase calculations
        grid = calculate_(species, [current_phase], str_statevar_dict, models, phase_records, points=phase_points, pdens=50, fake_points=True)
        # TODO: consider enabling approximate for this?
        converged, energy = constrained_equilibrium(phase_records, cond_dict, grid)
        if not converged:
            _log.debug('Calculation failure: constrained equilibrium not converged for %s, conditions: %s, parameters %s', current_phase, cond_dict, parameters)
            return np.inf
        driving_force = float(np.dot(target_hyperplane_chempots, vertex.composition) - float(energy))
    return driving_force
def calculate_zpf_driving_forces(zpf_data: Sequence[Dict[str, Any]],
                                 parameters: ArrayLike = None,
                                 approximate_equilibrium: bool = False,
                                 short_circuit: bool = False
                                 ) -> Tuple[List[List[float]], List[List[float]]]:
    """
    Calculate error due to phase equilibria data

    Parameters
    ----------
    zpf_data : Sequence[Dict[str, Any]]
        Datasets that contain single phase data
    parameters : ArrayLike
        Array of parameters to calculate the error with.
    approximate_equilibrium : bool
        Whether or not to use an approximate version of equilibrium that does
        not refine the solution and uses ``starting_point`` instead.
    short_circuit: bool
        If True, immediately return a size 1 array with a driving force of
        ``np.nan`` (failed hyperplane) or ``np.inf`` (failed driving force).
        Can save computational time if the caller will aggregate driving forces.

    Returns
    -------
    Tuple[List[List[float]], List[List[float]]]
        Driving forces and weights as ragged 2D arrays with shape
        ``(len(zpf_data), len(vertices in each zpf_data))``

    Notes
    -----
    The physical picture of the standard deviation is that we've measured a ZPF
    line. That line corresponds to some equilibrium chemical potentials. The
    standard deviation is the standard deviation of those 'measured' chemical
    potentials.

    """
    if parameters is None:
        parameters = np.array([])
    driving_forces = []
    weights = []
    for data in zpf_data:
        data_driving_forces = []
        data_weights = []
        weight = data['weight']
        dataset_ref = data['dataset_reference']
        # for the set of phases and corresponding tie-line verticies in equilibrium
        for phase_region in data['phase_regions']:
            # 1. Calculate the average multiphase hyperplane
            eq_str = phase_region.eq_str()
            target_hyperplane = estimate_hyperplane(phase_region, parameters, approximate_equilibrium=approximate_equilibrium)
            if np.any(np.isnan(target_hyperplane)):
                # No usable chemical potentials: this region contributes zero
                # driving force rather than failing outright.
                _log.debug('NaN target hyperplane. Equilibria: (%s), driving force: 0.0, reference: %s.', eq_str, dataset_ref)
                data_driving_forces.extend([0]*len(phase_region.vertices))
                data_weights.extend([weight]*len(phase_region.vertices))
                continue
            # 2. Calculate the driving force to that hyperplane for each vertex
            for vertex in phase_region.vertices:
                driving_force = driving_force_to_hyperplane(target_hyperplane, phase_region, vertex, parameters,
                                                            approximate_equilibrium=approximate_equilibrium,
                                                            )
                if np.isinf(driving_force) and short_circuit:
                    _log.debug('Equilibria: (%s), current phase: %s, hyperplane: %s, driving force: %s, reference: %s. Short circuiting.', eq_str, vertex.phase_name, target_hyperplane, driving_force, dataset_ref)
                    return [[np.inf]], [[np.inf]]
                data_driving_forces.append(driving_force)
                data_weights.append(weight)
                _log.debug('Equilibria: (%s), current phase: %s, hyperplane: %s, driving force: %s, reference: %s', eq_str, vertex.phase_name, target_hyperplane, driving_force, dataset_ref)
        driving_forces.append(data_driving_forces)
        weights.append(data_weights)
    return driving_forces, weights
def calculate_zpf_error(zpf_data: Sequence[Dict[str, Any]],
                        parameters: np.ndarray = None,
                        data_weight: int = 1.0,
                        approximate_equilibrium: bool = False) -> float:
    """
    Calculate the log-likelihood contribution of phase equilibria data.

    For detailed documentation, see ``calculate_zpf_driving_forces``.

    Returns
    -------
    float
        Log probability of ZPF driving forces
    """
    if len(zpf_data) == 0:
        return 0.0
    driving_forces, weights = calculate_zpf_driving_forces(zpf_data, parameters, approximate_equilibrium, short_circuit=True)
    # Flatten the ragged per-dataset results into flat 1D arrays.
    driving_forces = np.concatenate(driving_forces)
    weights = np.concatenate(weights)
    # Any non-finite driving force means a failed calculation: reject outright.
    if not np.all(np.isfinite(driving_forces)):
        return -np.inf
    log_probabilities = norm.logpdf(driving_forces, loc=0, scale=1000/data_weight/weights)
    _log.debug('Data weight: %s, driving forces: %s, weights: %s, probabilities: %s', data_weight, driving_forces, weights, log_probabilities)
    return np.sum(log_probabilities)
|
PhasesResearchLab/ESPEI
|
espei/error_functions/zpf_error.py
|
Python
|
mit
| 21,071
|
[
"pycalphad"
] |
8b48aac3431f6b934ac3da5a5e7e29c535d51652e8be7c6e013ae5e419deb323
|
#!/usr/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Parameter handlers for the SMIRNOFF force field engine
This file contains standard parameter handlers for the SMIRNOFF force field engine.
These classes implement the object model for self-contained parameter assignment.
New pluggable handlers can be created by creating subclasses of :class:`ParameterHandler`.
"""
# Public API of this module.
# NOTE(review): several exception classes defined below (e.g.
# UnassignedProperTorsionParameterException, UnassignedMoleculeChargeException,
# NonintegralMoleculeChargeException, DuplicateParameterError) are not listed
# here -- confirm whether that omission is intentional.
__all__ = [
    "SMIRNOFFSpecError",
    "SMIRNOFFSpecUnimplementedError",
    "IncompatibleParameterError",
    "FractionalBondOrderInterpolationMethodUnsupportedError",
    "NotEnoughPointsForInterpolationError",
    "UnassignedValenceParameterException",
    "UnassignedBondParameterException",
    "UnassignedAngleParameterException",
    "DuplicateVirtualSiteTypeException",
    "ParameterLookupError",
    "NonbondedMethod",
    "ParameterList",
    "ParameterType",
    "ParameterHandler",
    "ParameterAttribute",
    "MappedParameterAttribute",
    "IndexedParameterAttribute",
    "IndexedMappedParameterAttribute",
    "ConstraintHandler",
    "BondHandler",
    "AngleHandler",
    "ProperTorsionHandler",
    "ImproperTorsionHandler",
    "vdWHandler",
    "GBSAHandler",
    "ToolkitAM1BCCHandler",
    "VirtualSiteHandler",
]
# =============================================================================================
# GLOBAL IMPORTS
# =============================================================================================
import abc
import copy
import functools
import inspect
import logging
import re
from collections import OrderedDict, defaultdict
from enum import Enum
from itertools import combinations
from simtk import openmm, unit
from openff.toolkit.topology import ImproperDict, SortedDict, Topology, ValenceDict
from openff.toolkit.topology.molecule import Molecule
from openff.toolkit.typing.chemistry import ChemicalEnvironment
from openff.toolkit.utils.collections import ValidatedDict, ValidatedList
from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY
from openff.toolkit.utils.utils import (
IncompatibleUnitError,
MessageException,
all_subclasses,
attach_units,
extract_serialized_units_from_dict,
object_to_quantity,
)
# =============================================================================================
# CONFIGURE LOGGER
# =============================================================================================
logger = logging.getLogger(__name__)
# ======================================================================
# CUSTOM EXCEPTIONS
# ======================================================================
class SMIRNOFFSpecError(MessageException):
    """Raised when data does not comply with the SMIRNOFF data specification."""
class SMIRNOFFSpecUnimplementedError(MessageException):
    """
    Exception for when a portion of the SMIRNOFF specification is not yet implemented.
    """
    # Distinct from SMIRNOFFSpecError: the input may be spec-compliant but
    # exercises a part of the specification this toolkit does not yet support.
class FractionalBondOrderInterpolationMethodUnsupportedError(MessageException):
    """Raised when an unsupported fractional bond order interpolation
    assignment method is requested."""
class NotEnoughPointsForInterpolationError(MessageException):
    """Raised when fewer than two points are provided for interpolation."""
class IncompatibleParameterError(MessageException):
    """Raised when a set of parameters is scientifically or technically
    incompatible with another."""
class UnassignedValenceParameterException(Exception):
    """Raised when a ParameterHandler cannot find parameters for some
    valence terms."""
class UnassignedBondParameterException(UnassignedValenceParameterException):
    """Raised when a ParameterHandler cannot find parameters for some
    bond terms."""
class UnassignedAngleParameterException(UnassignedValenceParameterException):
    """Raised when a ParameterHandler cannot find parameters for some
    angle terms."""
class UnassignedProperTorsionParameterException(UnassignedValenceParameterException):
    """Raised when a ParameterHandler cannot find parameters for some
    proper torsion terms."""
class UnassignedMoleculeChargeException(Exception):
    """Raised when no charge method is able to assign charges to a molecule."""
class NonintegralMoleculeChargeException(Exception):
    """Raised when a molecule's partial charges do not sum to its formal charge."""
class DuplicateParameterError(MessageException):
    """Exception raised when trying to add a ParameterType that already exists"""
    # NOTE(review): presumably raised by the parameter-list/handler add paths
    # defined later in this file -- confirm before documenting further.
class ParameterLookupError(MessageException):
    """Exception raised when something goes wrong in a parameter lookup in
    ParameterHandler.__getitem__"""
class DuplicateVirtualSiteTypeException(Exception):
    """Raised when trying to register two different virtual site classes with the same 'type'."""
# ======================================================================
# ENUM TYPES
# ======================================================================
class NonbondedMethod(Enum):
    """Enumeration of the supported nonbonded methods."""

    # Values are stable identifiers; do not renumber.
    NoCutoff = 0
    CutoffPeriodic = 1
    CutoffNonPeriodic = 2
    Ewald = 3
    PME = 4
# ======================================================================
# UTILITY FUNCTIONS
# ======================================================================
def _linear_inter_or_extrapolate(points_dict, x_query):
"""
Linearly interpolate or extrapolate based on a piecewise linear function defined by a set of points.
This function is designed to work with key:value pairs where the value may be a simtk.unit.Quantity.
Parameters
----------
points_dict : dict{float: float or float-valued simtk.unit.Quantity}
A dictionary with each item representing a point, where the key is the X value and the value is the Y value.
x_query : float
The X value of the point to interpolate or extrapolate.
Returns
-------
y_value : float or float-valued simtk.unit.Quantity
The result of interpolation/extrapolation.
"""
# pre-empt case where no interpolation is necessary
if x_query in points_dict:
return points_dict[x_query]
if len(points_dict) < 2:
raise NotEnoughPointsForInterpolationError(
f"Unable to perform interpolation with less than two points. "
f"points_dict: {points_dict} x_query: {x_query}"
)
# TODO: error out for nonsensical fractional bond orders
# find the nearest point beneath our queried x value
try:
below = max(bo for bo in points_dict if bo < x_query)
except ValueError:
below = None
# find the nearest point above our queried x value
try:
above = min(bo for bo in points_dict if bo > x_query)
except ValueError:
above = None
# handle case where we can clearly interpolate
if (above is not None) and (below is not None):
return points_dict[below] + (points_dict[above] - points_dict[below]) * (
(x_query - below) / (above - below)
)
# error if we can't hope to interpolate at all
elif (above is None) and (below is None):
raise NotImplementedError(
f"Failed to find interpolation references for "
f"`x_query` '{x_query}', "
f"with `points_dict` '{points_dict}'"
)
# extrapolate for fractional bond orders below our lowest defined bond order
elif below is None:
bond_orders = sorted(points_dict)
k = points_dict[bond_orders[0]] - (
(points_dict[bond_orders[1]] - points_dict[bond_orders[0]])
/ (bond_orders[1] - bond_orders[0])
) * (bond_orders[0] - x_query)
return k
# extrapolate for fractional bond orders above our highest defined bond order
elif above is None:
bond_orders = sorted(points_dict)
k = points_dict[bond_orders[-1]] + (
(points_dict[bond_orders[-1]] - points_dict[bond_orders[-2]])
/ (bond_orders[-1] - bond_orders[-2])
) * (x_query - bond_orders[-1])
return k
# TODO: This is technically a validator, not a converter, but ParameterAttribute doesn't support them yet (it'll be easy if we switch to use the attrs library).
def _allow_only(allowed_values):
"""A converter that checks the new value is only in a set."""
allowed_values = frozenset(allowed_values)
def _value_checker(instance, attr, new_value):
# This statement means that, in the "SMIRNOFF Data Dict" format, the string "None"
# and the Python None are the same thing
if new_value == "None":
new_value = None
# Ensure that the new value is in the list of allowed values
if new_value not in allowed_values:
err_msg = (
f"Attempted to set {instance.__class__.__name__}.{attr.name} "
f"to {new_value}. Currently, only the following values "
f"are supported: {sorted(allowed_values)}."
)
raise SMIRNOFFSpecError(err_msg)
return new_value
return _value_checker
# ======================================================================
# PARAMETER ATTRIBUTES
# ======================================================================
# TODO: Think about adding attrs to the dependencies and inherit from attr.ib
class ParameterAttribute:
    """A descriptor for ``ParameterType`` attributes.

    The descriptors allows associating to the parameter a default value,
    which makes the attribute optional, a unit, and a custom converter.

    Because we may want to have ``None`` as a default value, required
    attributes have the ``default`` set to the special type ``UNDEFINED``.

    Converters can be both static or instance functions/methods with
    respective signatures::

        converter(value): -> converted_value
        converter(instance, parameter_attribute, value): -> converted_value

    A decorator syntax is available (see example below).

    Parameters
    ----------
    default : object, optional
        When specified, the descriptor makes this attribute optional by
        attaching a default value to it.
    unit : simtk.unit.Quantity, optional
        When specified, only quantities with compatible units are allowed
        to be set, and string expressions are automatically parsed into a
        ``Quantity``.
    converter : callable, optional
        An optional function that can be used to convert values before
        setting the attribute.

    See Also
    --------
    IndexedParameterAttribute
        A parameter attribute with multiple terms.

    Examples
    --------
    Create a parameter type with an optional and a required attribute.

    >>> class MyParameter:
    ...     attr_required = ParameterAttribute()
    ...     attr_optional = ParameterAttribute(default=2)
    ...
    >>> my_par = MyParameter()

    Even without explicit assignment, the default value is returned.

    >>> my_par.attr_optional
    2

    If you try to access an attribute without setting it first, an
    exception is raised.

    >>> my_par.attr_required
    Traceback (most recent call last):
    ...
    AttributeError: 'MyParameter' object has no attribute '_attr_required'

    The attribute allow automatic conversion and validation of units.

    >>> from simtk import unit
    >>> class MyParameter:
    ...     attr_quantity = ParameterAttribute(unit=unit.angstrom)
    ...
    >>> my_par = MyParameter()
    >>> my_par.attr_quantity = '1.0 * nanometer'
    >>> my_par.attr_quantity
    Quantity(value=1.0, unit=nanometer)
    >>> my_par.attr_quantity = 3.0
    Traceback (most recent call last):
    ...
    openff.toolkit.utils.utils.IncompatibleUnitError: attr_quantity=3.0 dimensionless should have units of angstrom

    You can attach a custom converter to an attribute.

    >>> class MyParameter:
    ...     # Both strings and integers convert nicely to floats with float().
    ...     attr_all_to_float = ParameterAttribute(converter=float)
    ...     attr_int_to_float = ParameterAttribute()
    ...     @attr_int_to_float.converter
    ...     def attr_int_to_float(self, attr, value):
    ...         # This converter converts only integers to float
    ...         # and raise an exception for the other types.
    ...         if isinstance(value, int):
    ...             return float(value)
    ...         elif not isinstance(value, float):
    ...             raise TypeError(f"Cannot convert '{value}' to float")
    ...         return value
    ...
    >>> my_par = MyParameter()

    attr_all_to_float accepts and convert to float both strings and integers

    >>> my_par.attr_all_to_float = 1
    >>> my_par.attr_all_to_float
    1.0
    >>> my_par.attr_all_to_float = '2.0'
    >>> my_par.attr_all_to_float
    2.0

    The custom converter associated to attr_int_to_float converts only integers instead.

    >>> my_par.attr_int_to_float = 3
    >>> my_par.attr_int_to_float
    3.0
    >>> my_par.attr_int_to_float = '4.0'
    Traceback (most recent call last):
    ...
    TypeError: Cannot convert '4.0' to float
    """

    class UNDEFINED:
        """Custom type used by ``ParameterAttribute`` to differentiate between ``None`` and undeclared default."""

        pass

    def __init__(self, default=UNDEFINED, unit=None, converter=None, docstring=""):
        self.default = default
        self._unit = unit
        self._converter = converter
        self.__doc__ = docstring

    def __set_name__(self, owner, name):
        # Called by Python when the descriptor is assigned in a class body.
        # The value is stored on each instance under the name "_<name>".
        self._name = "_" + name

    @property
    def name(self):
        # Get rid of the initial underscore.
        return self._name[1:]

    def __get__(self, instance, owner):
        if instance is None:
            # This is called from the class. Return the descriptor object.
            return self
        try:
            return getattr(instance, self._name)
        except AttributeError:
            # The attribute has not initialized. Check if there's a default.
            if self.default is ParameterAttribute.UNDEFINED:
                raise
            return self.default

    def __set__(self, instance, value):
        # Convert and validate the value.
        value = self._convert_and_validate(instance, value)
        setattr(instance, self._name, value)

    def converter(self, converter):
        """Create a new ParameterAttribute with an associated converter.

        This is meant to be used as a decorator (see main examples).
        """
        # Propagate the unit and docstring as well as the default: previously
        # only the default was carried over, so decorating an attribute with a
        # converter silently dropped its unit validation and documentation.
        return self.__class__(
            default=self.default,
            unit=self._unit,
            converter=converter,
            docstring=self.__doc__,
        )

    def _convert_and_validate(self, instance, value):
        """Convert to Quantity, validate units, and call custom converter."""
        # The default value is always allowed.
        if self._is_valid_default(value):
            return value
        # Convert and validate units.
        value = self._validate_units(value)
        # Call the custom converter before setting the value.
        value = self._call_converter(value, instance)
        return value

    def _is_valid_default(self, value):
        """Return True if this is a defined default value."""
        return (
            self.default is not ParameterAttribute.UNDEFINED and value == self.default
        )

    def _validate_units(self, value):
        """Convert strings expressions to Quantity and validate the units if requested."""
        if self._unit is not None:
            # Convert eventual strings to Quantity objects.
            value = object_to_quantity(value)
            # Check if units are compatible.
            try:
                if not self._unit.is_compatible(value.unit):
                    raise IncompatibleUnitError(
                        f"{self.name}={value} should have units of {self._unit}"
                    )
            except AttributeError:
                # This is not a Quantity object.
                raise IncompatibleUnitError(
                    f"{self.name}={value} should have units of {self._unit}"
                )
        return value

    def _call_converter(self, value, instance):
        """Correctly calls static and instance converters."""
        if self._converter is not None:
            try:
                # Static function.
                return self._converter(value)
            except TypeError:
                # Instance method.
                # NOTE(review): a TypeError raised *inside* a one-argument
                # converter is also retried here as an instance-method call,
                # which can produce a confusing secondary error.
                return self._converter(instance, self, value)
        return value
class IndexedParameterAttribute(ParameterAttribute):
    """The attribute of a parameter with an unspecified number of terms.

    Some parameters can be associated to multiple terms, For example,
    torsions have parameters such as k1, k2, ..., and ``IndexedParameterAttribute``
    can be used to encapsulate the sequence of terms.

    The only substantial difference with ``ParameterAttribute`` is that
    only sequences are supported as values and converters and units are
    checked on each element of the sequence.

    Currently, the descriptor makes the sequence immutable. This is to
    avoid that an element of the sequence could be set without being
    properly validated. In the future, the data could be wrapped in a
    safe list that would safely allow mutability.

    Parameters
    ----------
    default : object, optional
        When specified, the descriptor makes this attribute optional by
        attaching a default value to it.
    unit : simtk.unit.Quantity, optional
        When specified, only sequences of quantities with compatible units
        are allowed to be set.
    converter : callable, optional
        An optional function that can be used to validate and cast each
        element of the sequence before setting the attribute.

    See Also
    --------
    ParameterAttribute
        A simple parameter attribute.
    MappedParameterAttribute
        A parameter attribute representing a mapping.
    IndexedMappedParameterAttribute
        A parameter attribute representing a sequence, each term of which is a mapping.

    Examples
    --------
    Create an optional indexed attribute with unit of angstrom.

    >>> from simtk import unit
    >>> class MyParameter:
    ...     length = IndexedParameterAttribute(default=None, unit=unit.angstrom)
    ...
    >>> my_par = MyParameter()
    >>> my_par.length is None
    True

    Strings are parsed into Quantity objects.

    >>> my_par.length = ['1 * angstrom', 0.5 * unit.nanometer]
    >>> my_par.length[0]
    Quantity(value=1, unit=angstrom)

    Similarly, custom converters work as with ``ParameterAttribute``, but
    they are used to validate each value in the sequence.

    >>> class MyParameter:
    ...     attr_indexed = IndexedParameterAttribute(converter=float)
    ...
    >>> my_par = MyParameter()
    >>> my_par.attr_indexed = [1, '1.0', '1e-2', 4.0]
    >>> my_par.attr_indexed
    [1.0, 1.0, 0.01, 4.0]
    """

    def _convert_and_validate(self, instance, value):
        """Overwrite ParameterAttribute._convert_and_validate to make the value a ValidatedList."""
        # The default value is always allowed.
        if self._is_valid_default(value):
            return value
        # We push the converters into a ValidatedList so that we can make
        # sure that elements are validated correctly when they are modified
        # after their initialization.
        # ValidatedList expects converters that take the value as a single
        # argument so we create a partial function with the instance assigned.
        static_converter = functools.partial(self._call_converter, instance=instance)
        # Each element passes first through unit validation, then the custom converter.
        value = ValidatedList(value, converter=[self._validate_units, static_converter])
        return value
class MappedParameterAttribute(ParameterAttribute):
    """The attribute of a parameter in which each term is a mapping.

    The substantial difference with ``IndexedParameterAttribute`` is that, unlike
    indexing, the mapping can be based on arbitrary references, like indices, but
    which can start at non-zero values and include non-adjacent keys.

    Parameters
    ----------
    default : object, optional
        When specified, the descriptor makes this attribute optional by
        attaching a default value to it.
    unit : simtk.unit.Quantity, optional
        When specified, only sequences of mappings where values are quantities with
        compatible units are allowed to be set.
    converter : callable, optional
        An optional function that can be used to validate and cast each
        component of each element of the sequence before setting the attribute.

    See Also
    --------
    IndexedParameterAttribute
        A parameter attribute representing a sequence.
    IndexedMappedParameterAttribute
        A parameter attribute representing a sequence, each term of which is a mapping.

    Examples
    --------
    Create an optional indexed attribute with unit of angstrom.

    >>> from simtk import unit
    >>> class MyParameter:
    ...     length = MappedParameterAttribute(default=None, unit=unit.angstrom)
    ...
    >>> my_par = MyParameter()
    >>> my_par.length is None
    True

    Like other ParameterAttribute objects, strings are parsed into Quantity objects.

    >>> my_par.length = {1:'1.5 * angstrom', 2: '1.4 * angstrom'}
    >>> my_par.length[1]
    Quantity(value=1.5, unit=angstrom)

    Unlike other ParameterAttribute objects, the reference points do not need to be
    zero-indexed and can be non-adjacent, such as defining a bond parameter for
    interpolation by defining reference values at bond orders 2 and 3:

    >>> my_par.length = {2:'1.42 * angstrom', 3: '1.35 * angstrom'}
    >>> my_par.length[2]
    Quantity(value=1.42, unit=angstrom)
    """

    def _convert_and_validate(self, instance, value):
        """Wrap the mapping in a ValidatedDict so every value passes through
        unit validation and the custom converter."""
        # The default value is always allowed.
        if self._is_valid_default(value):
            return value
        # ValidatedDict expects converters that take the value as a single
        # argument, so bind the instance ahead of time.
        static_converter = functools.partial(self._call_converter, instance=instance)
        value = ValidatedDict(value, converter=[self._validate_units, static_converter])
        return value
class IndexedMappedParameterAttribute(ParameterAttribute):
    """The attribute of a parameter with an unspecified number of terms, where
    each term is a mapping.

    Some parameters can be associated to multiple terms,
    where those terms have multiple components.
    For example, torsions with fractional bond orders have parameters such as
    k1_bondorder1, k1_bondorder2, k2_bondorder1, k2_bondorder2, ..., and
    ``IndexedMappedParameterAttribute`` can be used to encapsulate the sequence of
    terms as mappings (typically, ``dict``\ s) of their components.

    The only substantial difference with ``IndexedParameterAttribute`` is that
    only sequences of mappings are supported as values and converters and units are
    checked on each component of each element in the sequence.

    Currently, the descriptor makes the sequence immutable. This is to
    avoid that an element of the sequence could be set without being
    properly validated. In the future, the data could be wrapped in a
    safe list that would safely allow mutability.

    Parameters
    ----------
    default : object, optional
        When specified, the descriptor makes this attribute optional by
        attaching a default value to it.
    unit : simtk.unit.Quantity, optional
        When specified, only sequences of mappings where values are quantities with
        compatible units are allowed to be set.
    converter : callable, optional
        An optional function that can be used to validate and cast each
        component of each element of the sequence before setting the attribute.

    See Also
    --------
    IndexedParameterAttribute
        A parameter attribute representing a sequence.
    MappedParameterAttribute
        A parameter attribute representing a mapping.

    Examples
    --------
    Create an optional indexed attribute with unit of angstrom.

    >>> from simtk import unit
    >>> class MyParameter:
    ...     length = IndexedMappedParameterAttribute(default=None, unit=unit.angstrom)
    ...
    >>> my_par = MyParameter()
    >>> my_par.length is None
    True

    Strings are parsed into Quantity objects.

    >>> my_par.length = [{1:'1 * angstrom'}, {1: 0.5 * unit.nanometer}]
    >>> my_par.length[0]
    {1: Quantity(value=1, unit=angstrom)}

    Similarly, custom converters work as with ``ParameterAttribute``, but
    they are used to validate each value in the sequence.

    >>> class MyParameter:
    ...     attr_indexed = IndexedMappedParameterAttribute(converter=float)
    ...
    >>> my_par = MyParameter()
    >>> my_par.attr_indexed = [{1: 1}, {2: '1.0', 3: '1e-2'}, {4: 4.0}]
    >>> my_par.attr_indexed
    [{1: 1.0}, {2: 1.0, 3: 0.01}, {4: 4.0}]
    """

    def _convert_and_validate(self, instance, value):
        """Overwrite ParameterAttribute._convert_and_validate to make the value a ValidatedList."""
        # The default value is always allowed.
        if self._is_valid_default(value):
            return value
        # We push the converters into a ValidatedListMapping so that we can make
        # sure that elements are validated correctly when they are modified
        # after their initialization.
        # ValidatedListMapping expects converters that take the value as a single
        # argument so we create a partial function with the instance assigned.
        static_converter = functools.partial(self._call_converter, instance=instance)
        # Outer list of inner dicts: each inner value passes through unit
        # validation and the custom converter.
        value = ValidatedList(
            [
                ValidatedDict(
                    element, converter=[self._validate_units, static_converter]
                )
                for element in value
            ],
            converter=self._index_converter,
        )
        return value

    @staticmethod
    def _index_converter(x):
        # Converter applied to elements of the outer ValidatedList: ensures
        # each top-level element is a ValidatedDict.
        return ValidatedDict(x)
class _ParameterAttributeHandler:
"""A base class for ``ParameterType`` and ``ParameterHandler`` objects.
Encapsulate shared code of ``ParameterType`` and ``ParameterHandler``.
In particular, this base class provides an ``__init__`` method that
automatically initialize the attributes defined through the ``ParameterAttribute``
and ``IndexedParameterAttribute`` descriptors, as well as handling
cosmetic attributes.
See Also
--------
ParameterAttribute
A simple parameter attribute.
IndexedParameterAttribute
A parameter attribute with multiple terms.
Examples
--------
This base class was design to encapsulate shared code between ``ParameterType``
and ``ParameterHandler``, which both need to deal with parameter and cosmetic
attributes.
To create a new type/handler, you can use the ``ParameterAttribute`` descriptors.
>>> class ParameterTypeOrHandler(_ParameterAttributeHandler):
... length = ParameterAttribute(unit=unit.angstrom)
... k = ParameterAttribute(unit=unit.kilocalorie_per_mole / unit.angstrom**2)
...
``_ParameterAttributeHandler`` and the descriptors take care of performing
sanity checks on initialization and assignment of the single attributes. Because
we attached units to the parameters, we need to pass them with compatible units.
>>> my_par = ParameterTypeOrHandler(
... length='1.01 * angstrom',
... k=5 * unit.kilocalorie_per_mole / unit.angstrom**2
... )
Note that ``_ParameterAttributeHandler`` took care of implementing
a constructor, and that unit parameters support string assignments.
These are automatically converted to ``Quantity`` objects.
>>> my_par.length
Quantity(value=1.01, unit=angstrom)
While assigning incompatible units is forbidden.
>>> my_par.k = 3.0 * unit.gram
Traceback (most recent call last):
...
openff.toolkit.utils.utils.IncompatibleUnitError: k=3.0 g should have units of kilocalorie/(angstrom**2*mole)
On top of type checking, the constructor implemented in ``_ParameterAttributeHandler``
checks if some required parameters are not given.
>>> ParameterTypeOrHandler(length=3.0*unit.nanometer)
Traceback (most recent call last):
...
openff.toolkit.typing.engines.smirnoff.parameters.SMIRNOFFSpecError: <class 'openff.toolkit.typing.engines.smirnoff.parameters.ParameterTypeOrHandler'> require the following missing parameters: ['k']. Defined kwargs are ['length']
Each attribute can be made optional by specifying a default value,
and you can attach a converter function by passing a callable as an
argument or through the decorator syntax.
>>> class ParameterTypeOrHandler(_ParameterAttributeHandler):
... attr_optional = ParameterAttribute(default=2)
... attr_all_to_float = ParameterAttribute(converter=float)
... attr_int_to_float = ParameterAttribute()
...
... @attr_int_to_float.converter
... def attr_int_to_float(self, attr, value):
... # This converter converts only integers to floats
... # and raise an exception for the other types.
... if isinstance(value, int):
... return float(value)
... elif not isinstance(value, float):
... raise TypeError(f"Cannot convert '{value}' to float")
... return value
...
>>> my_par = ParameterTypeOrHandler(attr_all_to_float='3.0', attr_int_to_float=1)
>>> my_par.attr_optional
2
>>> my_par.attr_all_to_float
3.0
>>> my_par.attr_int_to_float
1.0
The float() function can convert strings to integers, but our custom
converter forbids it
>>> my_par.attr_all_to_float = '2.0'
>>> my_par.attr_int_to_float = '4.0'
Traceback (most recent call last):
...
TypeError: Cannot convert '4.0' to float
Parameter attributes that can be indexed can be handled with the
``IndexedParameterAttribute``. These support unit validation and
converters exactly as ``ParameterAttribute``s, but the validation/conversion
is performed for each indexed attribute.
>>> class MyTorsionType(_ParameterAttributeHandler):
... periodicity = IndexedParameterAttribute(converter=int)
... k = IndexedParameterAttribute(unit=unit.kilocalorie_per_mole)
...
>>> my_par = MyTorsionType(
... periodicity1=2,
... k1=5 * unit.kilocalorie_per_mole,
... periodicity2='3',
... k2=6 * unit.kilocalorie_per_mole,
... )
>>> my_par.periodicity
[2, 3]
Indexed attributes, can be accessed both as a list or as their indexed
parameter name.
>>> my_par.periodicity2 = 6
>>> my_par.periodicity[0] = 1
>>> my_par.periodicity
[1, 6]
"""
    def __init__(self, allow_cosmetic_attributes=False, **kwargs):
        """
        Initialize parameter and cosmetic attributes.

        Parameters
        ----------
        allow_cosmetic_attributes : bool optional. Default = False
            Whether to permit non-spec kwargs ("cosmetic attributes").
            If True, non-spec kwargs will be stored as an attribute of
            this parameter which can be accessed and written out. Otherwise,
            an exception will be raised.

        Raises
        ------
        SMIRNOFFSpecError
            If a required parameter attribute is missing from ``kwargs``, or
            if an unknown kwarg is given while ``allow_cosmetic_attributes``
            is False.
        """
        # A list that may be populated to record the cosmetic attributes
        # read from a SMIRNOFF data source.
        self._cosmetic_attribs = []
        # Do not modify the original data.
        smirnoff_data = copy.deepcopy(kwargs)
        # Collapse flattened kwargs (e.g. "k1_bondorder1", "k2", "length3")
        # into the list/dict containers the descriptors expect. Indexed+mapped
        # attributes are processed first because their lengths feed the
        # indexed-attribute length check below.
        (
            smirnoff_data,
            indexed_mapped_attr_lengths,
        ) = self._process_indexed_mapped_attributes(smirnoff_data)
        smirnoff_data = self._process_indexed_attributes(
            smirnoff_data, indexed_mapped_attr_lengths
        )
        smirnoff_data = self._process_mapped_attributes(smirnoff_data)
        # Check for missing required arguments.
        given_attributes = set(smirnoff_data.keys())
        required_attributes = set(self._get_required_parameter_attributes().keys())
        missing_attributes = required_attributes.difference(given_attributes)
        if len(missing_attributes) != 0:
            msg = (
                f"{self.__class__} require the following missing parameters: {sorted(missing_attributes)}."
                f" Defined kwargs are {sorted(smirnoff_data.keys())}"
            )
            raise SMIRNOFFSpecError(msg)
        # Finally, set attributes of this ParameterType and handle cosmetic attributes.
        allowed_attributes = set(self._get_parameter_attributes().keys())
        for key, val in smirnoff_data.items():
            if key in allowed_attributes:
                setattr(self, key, val)
            # Handle all unknown kwargs as cosmetic so we can write them back out
            elif allow_cosmetic_attributes:
                self.add_cosmetic_attribute(key, val)
            else:
                msg = (
                    f"Unexpected kwarg ({key}: {val}) passed to {self.__class__} constructor. "
                    "If this is a desired cosmetic attribute, consider setting "
                    "'allow_cosmetic_attributes=True'"
                )
                raise SMIRNOFFSpecError(msg)
def _process_mapped_attributes(self, smirnoff_data):
kwargs = list(smirnoff_data.keys())
for kwarg in kwargs:
attr_name, key = self._split_attribute_mapping(kwarg)
# Check if this is a mapped attribute
if key is not None and attr_name in self._get_mapped_parameter_attributes():
if attr_name not in smirnoff_data:
smirnoff_data[attr_name] = dict()
smirnoff_data[attr_name][key] = smirnoff_data[kwarg]
del smirnoff_data[kwarg]
return smirnoff_data
    def _process_indexed_mapped_attributes(self, smirnoff_data):
        """Collect kwargs of the form ``<attr><index>_<map><key>`` (e.g.
        ``k1_bondorder2``) into a list-of-dicts stored under the bare
        attribute name, and return the updated data plus a dict of the
        resulting list lengths (used downstream to cross-check indexed
        attribute lengths).
        """
        # TODO: construct data structure for holding indexed_mapped attrs, which
        # will get fed into setattr
        indexed_mapped_attr_lengths = {}
        reindex = set()
        reverse = defaultdict(dict)
        kwargs = list(smirnoff_data.keys())
        for kwarg in kwargs:
            attr_name, index, key = self._split_attribute_index_mapping(kwarg)
            # Check if this is an indexed_mapped attribute.
            if (
                (key is not None)
                and (index is not None)
                and attr_name in self._get_indexed_mapped_parameter_attributes()
            ):
                # we start with a dict because have no guarantee of order
                # in which we will see each kwarg
                # we'll switch this to a list later
                if attr_name not in smirnoff_data:
                    smirnoff_data[attr_name] = dict()
                    reindex.add(attr_name)
                if index not in smirnoff_data[attr_name]:
                    smirnoff_data[attr_name][index] = dict()
                smirnoff_data[attr_name][index][key] = smirnoff_data[kwarg]
                del smirnoff_data[kwarg]
                # build reverse mapping; needed for contiguity check below
                if index not in reverse[attr_name]:
                    reverse[attr_name][index] = dict()
                reverse[attr_name][index][key] = kwarg
        # turn all our top-level dicts into lists
        # catch cases where we skip an index,
        # e.g. k1_bondorder*, k3_bondorder* defined, but not k2_bondorder*
        for attr_name in reindex:
            indexed_mapping = []
            j = 0
            for i in sorted(smirnoff_data[attr_name].keys()):
                # A gap in the indices means a term was skipped, which we
                # treat as an unexpected kwarg rather than silently reindexing.
                if int(i) == j:
                    indexed_mapping.append(smirnoff_data[attr_name][i])
                    j += 1
                else:
                    # any key will do; we are sensitive only to top-level index
                    key = sorted(reverse[attr_name][i].keys())[0]
                    kwarg = reverse[attr_name][i][key]
                    val = smirnoff_data[attr_name][i][key]
                    msg = (
                        f"Unexpected kwarg ({kwarg}: {val}) passed to {self.__class__} constructor. "
                        "If this is a desired cosmetic attribute, consider setting "
                        "'allow_cosmetic_attributes=True'"
                    )
                    raise SMIRNOFFSpecError(msg)
            smirnoff_data[attr_name] = indexed_mapping
            # keep track of lengths; used downstream for checking against other
            # indexed attributes
            indexed_mapped_attr_lengths[attr_name] = len(smirnoff_data[attr_name])
        return smirnoff_data, indexed_mapped_attr_lengths
    def _process_indexed_attributes(self, smirnoff_data, indexed_attr_lengths=None):
        """Stack kwargs of the form ``<attr><index>`` (e.g. ``k1``, ``k2``) into
        a list stored under the bare attribute name, and check that all indexed
        attributes (including any lengths passed in via ``indexed_attr_lengths``)
        have the same number of terms.
        """
        # Check for indexed attributes and stack them into a list.
        # Keep track of how many indexed attribute we find to make sure they all have the same length.
        # TODO: REFACTOR ME; try looping over contents of `smirnoff_data`, using
        # `split_attribute_index` to extract values
        if indexed_attr_lengths is None:
            indexed_attr_lengths = {}
        for attrib_basename in self._get_indexed_parameter_attributes().keys():
            # Indexed kwargs are 1-based: k1, k2, ...
            index = 1
            while True:
                attrib_w_index = "{}{}".format(attrib_basename, index)
                # Exit the while loop if the indexed attribute is not given.
                # this is the stop condition
                try:
                    attrib_w_index_value = smirnoff_data[attrib_w_index]
                except KeyError:
                    break
                # Check if this is the first iteration.
                if index == 1:
                    # Check if this attribute has been specified with and without index.
                    if attrib_basename in smirnoff_data:
                        err_msg = (
                            f"The attribute '{attrib_basename}' has been specified "
                            f"with and without index: '{attrib_w_index}'"
                        )
                        raise TypeError(err_msg)
                    # Otherwise create the list object.
                    smirnoff_data[attrib_basename] = list()
                # Append the new value to the list.
                smirnoff_data[attrib_basename].append(attrib_w_index_value)
                # Remove the indexed attribute from the kwargs as it will
                # be exposed only as an element of the list.
                del smirnoff_data[attrib_w_index]
                index += 1
            # Update the lengths with this attribute (if it was found).
            if index > 1:
                indexed_attr_lengths[attrib_basename] = len(
                    smirnoff_data[attrib_basename]
                )
        # Raise an error if we there are different indexed
        # attributes with a different number of terms.
        if len(set(indexed_attr_lengths.values())) > 1:
            raise TypeError(
                "The following indexed attributes have "
                f"different lengths: {indexed_attr_lengths}"
            )
        return smirnoff_data
    def to_dict(self, discard_cosmetic_attributes=False, duplicate_attributes=None):
        """
        Convert this object to dict format.

        The returning dictionary contains all the ``ParameterAttribute``
        and ``IndexedParameterAttribute`` as well as cosmetic attributes
        if ``discard_cosmetic_attributes`` is ``False``.

        Parameters
        ----------
        discard_cosmetic_attributes : bool, optional. Default = False
            Whether to discard non-spec attributes of this object
        duplicate_attributes : list of string, optional. Default = None
            A list of names of attributes that redundantly describe
            data and should be discarded during serialization

        Returns
        -------
        smirnoff_dict : dict
            The SMIRNOFF-compliant dict representation of this object.
        """
        # Make a list of all attribs that should be included in the
        # returned dict (call list() to make a copy). We discard
        # optional attributes that are set to None defaults.
        attribs_to_return = list(self._get_defined_parameter_attributes().keys())
        if duplicate_attributes is not None:
            for duplicate in duplicate_attributes:
                # Equivalent to list.remove(); raises ValueError if the name
                # is not among the defined attributes.
                attribs_to_return.pop(attribs_to_return.index(duplicate))
        # Start populating a dict of the attribs.
        indexed_attribs = set(self._get_indexed_parameter_attributes().keys())
        mapped_attribs = set(self._get_mapped_parameter_attributes().keys())
        indexed_mapped_attribs = set(
            self._get_indexed_mapped_parameter_attributes().keys()
        )
        smirnoff_dict = OrderedDict()
        # If attribs_to_return is ordered here, that will effectively be an informal output ordering
        for attrib_name in attribs_to_return:
            attrib_value = getattr(self, attrib_name)
            if attrib_name in indexed_mapped_attribs:
                # Flatten the list-of-dicts back into keys like "k1_bondorder2"
                # (1-based outer index).
                # NOTE(review): the tuple unpack assumes the attribute name
                # contains exactly one underscore (e.g. "k_bondorder") —
                # confirm when adding new indexed+mapped attributes.
                for idx, mapping in enumerate(attrib_value):
                    for key, val in mapping.items():
                        attrib_name_indexed, attrib_name_mapped = attrib_name.split("_")
                        smirnoff_dict[
                            f"{attrib_name_indexed}{str(idx+1)}_{attrib_name_mapped}{key}"
                        ] = val
            elif attrib_name in indexed_attribs:
                # Flatten the list back into 1-based keys like "k1", "k2", ...
                for idx, val in enumerate(attrib_value):
                    smirnoff_dict[attrib_name + str(idx + 1)] = val
            elif attrib_name in mapped_attribs:
                for key, val in attrib_value.items():
                    smirnoff_dict[f"{attrib_name}{str(key)}"] = val
            else:
                smirnoff_dict[attrib_name] = attrib_value
        # Serialize cosmetic attributes.
        if not (discard_cosmetic_attributes):
            for cosmetic_attrib in self._cosmetic_attribs:
                smirnoff_dict[cosmetic_attrib] = getattr(self, "_" + cosmetic_attrib)
        return smirnoff_dict
    def __getattr__(self, item):
        """Take care of mapping indexed attributes to their respective list elements."""
        # Try matching the case where there are two indices
        # this indicates a index_mapped parameter
        attr_name, index, key = self._split_attribute_index_mapping(item)
        # Check if this is an indexed_mapped attribute.
        if (
            (key is not None)
            and (index is not None)
            and attr_name in self._get_indexed_mapped_parameter_attributes()
        ):
            indexed_mapped_attr_value = getattr(self, attr_name)
            try:
                return indexed_mapped_attr_value[index][key]
            except (IndexError, KeyError) as err:
                # Append context to the exception args rather than replacing
                # them, so the original failure detail is preserved.
                if not err.args:
                    err.args = ("",)
                err.args = err.args + (
                    f"'{item}' is out of bound for indexed attribute '{attr_name}'",
                )
                raise
        # Otherwise, try indexed attribute
        # Separate the indexed attribute name from the list index.
        attr_name, index = self._split_attribute_index(item)
        # Check if this is an indexed attribute.
        if (
            index is not None
        ) and attr_name in self._get_indexed_parameter_attributes():
            indexed_attr_value = getattr(self, attr_name)
            try:
                return indexed_attr_value[index]
            except IndexError:
                raise IndexError(
                    f"'{item}' is out of bound for indexed attribute '{attr_name}'"
                )
        # Otherwise, forward the search to the next class in the MRO.
        try:
            return super().__getattr__(item)
        except AttributeError as e:
            # If this fails because the next classes in the MRO do not
            # implement __getattr__(), then raise the standard Attribute error.
            if "__getattr__" in str(e):
                raise AttributeError(
                    f"{self.__class__} object has no attribute '{item}'"
                )
            # Otherwise, re-raise the error from the class in the MRO.
            raise
    def __setattr__(self, key, value):
        """Route assignments to indexed/indexed-mapped names into their containers.

        Mirrors ``__getattr__``: ``k2 = v`` writes ``k[1]``, and
        ``k1_bondorder2 = v`` writes ``k_bondorder[0][2]``. Any other name
        is assigned normally via the next ``__setattr__`` in the MRO.
        """
        # Try matching the case where there are two indices
        # this indicates a index_mapped parameter
        attr_name, index, mapkey = self._split_attribute_index_mapping(key)
        # Check if this is an index_mapped attribute. avoiding an infinite
        # recursion by calling getattr() with non-existing keys.
        if (
            (mapkey is not None)
            and (index is not None)
            and attr_name in self._get_indexed_mapped_parameter_attributes()
        ):
            indexed_mapped_attr_value = getattr(self, attr_name)
            try:
                indexed_mapped_attr_value[index][mapkey] = value
                return
            except (IndexError, KeyError) as err:
                # Append context to the original exception rather than
                # replacing it, so the underlying cause stays visible.
                if not err.args:
                    err.args = ("",)
                err.args = err.args + (
                    f"'{key}' is out of bound for indexed attribute '{attr_name}'",
                )
                raise
        # Otherwise, try indexed attribute
        # Separate the indexed attribute name from the list index.
        attr_name, index = self._split_attribute_index(key)
        # Check if this is an indexed attribute. avoiding an infinite
        # recursion by calling getattr() with non-existing keys.
        if (index is not None) and (
            attr_name in self._get_indexed_parameter_attributes()
        ):
            indexed_attr_value = getattr(self, attr_name)
            try:
                indexed_attr_value[index] = value
                return
            except IndexError:
                raise IndexError(
                    f"'{key}' is out of bound for indexed attribute '{attr_name}'"
                )
        # Forward the request to the next class in the MRO.
        super().__setattr__(key, value)
def add_cosmetic_attribute(self, attr_name, attr_value):
"""
Add a cosmetic attribute to this object.
This attribute will not have a functional effect on the object
in the Open Force Field Toolkit, but can be written out during
output.
.. warning :: The API for modifying cosmetic attributes is experimental
and may change in the future (see issue #338).
Parameters
----------
attr_name : str
Name of the attribute to define for this object.
attr_value : str
The value of the attribute to define for this object.
"""
setattr(self, "_" + attr_name, attr_value)
self._cosmetic_attribs.append(attr_name)
def delete_cosmetic_attribute(self, attr_name):
"""
Delete a cosmetic attribute from this object.
.. warning :: The API for modifying cosmetic attributes is experimental
and may change in the future (see issue #338).
Parameters
----------
attr_name : str
Name of the cosmetic attribute to delete.
"""
# TODO: Can we handle this by overriding __delattr__ instead?
# Would we also need to override __del__ as well to cover both deletation methods?
delattr(self, "_" + attr_name)
self._cosmetic_attribs.remove(attr_name)
    def attribute_is_cosmetic(self, attr_name) -> bool:
        """
        Determine whether an attribute of this object is cosmetic.

        Membership is checked against the registry maintained by
        ``add_cosmetic_attribute``/``delete_cosmetic_attribute``.

        .. warning :: The API for modifying cosmetic attributes is experimental
           and may change in the future (see issue #338).

        Parameters
        ----------
        attr_name : str
            The attribute name to check

        Returns
        -------
        is_cosmetic : bool
            Returns True if the attribute is defined and is cosmetic. Returns False otherwise.
        """
        return attr_name in self._cosmetic_attribs
@staticmethod
def _split_attribute_index(item):
"""Split the attribute name from the final index.
For example, the method takes 'k2' and returns the tuple ('k', 1).
If attribute_name doesn't end with an integer, it returns (item, None).
"""
# Match any number (\d+) at the end of the string ($).
match = re.search(r"\d+$", item)
if match is None:
return item, None
index = match.group() # This is a str.
attr_name = item[: -len(index)]
index = int(match.group()) - 1
return attr_name, index
@staticmethod
def _split_attribute_index_mapping(item):
"""Split the attribute name from the final index.
For example, the method takes 'k1_bondorder2' and returns the tuple ('k_bondorder', 0, 2).
If attribute_name doesn't end with an integer, it returns (item, None, None).
"""
# Match items of the form <item><index>_<mapping><key>
# where <index> and <key> always integers
match = re.search(r"\d+_[A-z]+\d+$", item)
if match is None:
return item, None, None
# Match any number (\d+) at the end of the string ($).
i_match = r"\d+$"
indexed, mapped = item.split("_")
# process indexed component
match_indexed = re.search(i_match, indexed)
index = match_indexed.group() # This is a str.
attr_name = indexed[: -len(index)]
index = int(index) - 1
# process mapped component
match_mapping = re.search(i_match, mapped)
key = match_mapping.group() # This is a str.
attr_name = f"{attr_name}_{mapped[:-len(key)]}"
key = int(key) # we don't subtract 1 here, because these are keys, not indices
return attr_name, index, key
@staticmethod
def _split_attribute_mapping(item):
"""Split the attribute name from the and its mapping.
For example, the method takes 'k_foo2' and returns the tuple ('k_foo', 2).
If attribute_name doesn't end with an integer, it returns (item, None).
"""
# TODO: Can these three splitting functions be collapsed down into one?
# Match any number (\d+) at the end of the string ($).
map_match = r"\d+$"
match_mapping = re.search(map_match, item)
if match_mapping is None:
return item, None
key = match_mapping.group()
attr_name = item[: -len(key)]
key = int(key)
return attr_name, key
@classmethod
def _get_parameter_attributes(cls, filter=None):
"""Return all the attributes of the parameters.
This is constructed dynamically by introspection gathering all
the descriptors that are instances of the ParameterAttribute class.
Parent classes of the parameter types are inspected as well.
Note that since Python 3.6 the order of the class attribute definition
is preserved (see PEP 520) so this function will return the attribute
in their declaration order.
Parameters
----------
filter : Callable, optional
An optional function with signature filter(ParameterAttribute) -> bool.
If specified, only attributes for which this functions returns
True are returned.
Returns
-------
parameter_attributes : Dict[str, ParameterAttribute]
A map from the name of the controlled parameter to the
ParameterAttribute descriptor handling it.
Examples
--------
>>> parameter_attributes = ParameterType._get_parameter_attributes()
>>> sorted(parameter_attributes.keys())
['id', 'parent_id', 'smirks']
>>> isinstance(parameter_attributes['id'], ParameterAttribute)
True
"""
# If no filter is specified, get all the parameters.
if filter is None:
filter = lambda x: True
# Go through MRO and retrieve also parents descriptors. The function
# inspect.getmembers() automatically resolves the MRO, but it also
# sorts the attribute alphabetically by name. Here we want the order
# to be the same as the declaration order, which is guaranteed by PEP 520,
# starting from the parent class.
parameter_attributes = OrderedDict(
(name, descriptor)
for c in reversed(inspect.getmro(cls))
for name, descriptor in c.__dict__.items()
if isinstance(descriptor, ParameterAttribute) and filter(descriptor)
)
return parameter_attributes
    @classmethod
    def _get_indexed_mapped_parameter_attributes(cls):
        """Shortcut to retrieve only the IndexedMappedParameterAttribute descriptors
        (attributes addressable as e.g. ``k1_bondorder2``)."""
        return cls._get_parameter_attributes(
            filter=lambda x: isinstance(x, IndexedMappedParameterAttribute)
        )
    @classmethod
    def _get_indexed_parameter_attributes(cls):
        """Shortcut to retrieve only the IndexedParameterAttribute descriptors
        (attributes addressable as e.g. ``k2``)."""
        return cls._get_parameter_attributes(
            filter=lambda x: isinstance(x, IndexedParameterAttribute)
        )
    @classmethod
    def _get_mapped_parameter_attributes(cls):
        """Shortcut to retrieve only MappedParameterAttributes."""
        return cls._get_parameter_attributes(
            filter=lambda x: isinstance(x, MappedParameterAttribute)
        )
    @classmethod
    def _get_required_parameter_attributes(cls):
        """Shortcut to retrieve only required ParameterAttributes
        (those whose default is left UNDEFINED)."""
        return cls._get_parameter_attributes(filter=lambda x: x.default is x.UNDEFINED)
    @classmethod
    def _get_optional_parameter_attributes(cls):
        """Shortcut to retrieve only optional ParameterAttributes
        (those that declare a default value)."""
        return cls._get_parameter_attributes(
            filter=lambda x: x.default is not x.UNDEFINED
        )
def _get_defined_parameter_attributes(self):
"""Returns all the attributes except for the optional attributes that have None default value.
This returns first the required attributes and then the defined optional
attribute in their respective declaration order.
"""
required = self._get_required_parameter_attributes()
optional = self._get_optional_parameter_attributes()
# Filter the optional parameters that are set to their default.
optional = OrderedDict(
(name, descriptor)
for name, descriptor in optional.items()
if not (
descriptor.default is None and getattr(self, name) == descriptor.default
)
)
required.update(optional)
return required
# ======================================================================
# PARAMETER TYPE/LIST
# ======================================================================
# We can't actually make this derive from dict, because it's possible for the user to change SMIRKS
# of parameters already in the list, which would cause the ParameterType object's SMIRKS and
# the dictionary key's SMIRKS to be out of sync.
class ParameterList(list):
    """
    Parameter list that also supports accessing items by SMARTS string.

    .. warning :: This API is experimental and subject to change.
    """

    # TODO: Make this faster by caching SMARTS -> index lookup?
    # TODO: Override __del__ to make sure we don't remove root atom type
    # TODO: Allow retrieval by `id` as well
    def __init__(self, input_parameter_list=None):
        """
        Initialize a new ParameterList, optionally providing a list of ParameterType objects
        to initially populate it.

        Parameters
        ----------
        input_parameter_list: list[ParameterType], default=None
            A pre-existing list of ParameterType-based objects. If None, this ParameterList
            will be initialized empty.
        """
        super().__init__()
        input_parameter_list = input_parameter_list or []
        # TODO: Should a ParameterList only contain a single kind of ParameterType?
        for input_parameter in input_parameter_list:
            self.append(input_parameter)

    def append(self, parameter):
        """
        Add a ParameterType object to the end of the ParameterList

        Parameters
        ----------
        parameter : a ParameterType object
        """
        # TODO: Ensure that newly added parameter is the same type as existing?
        super().append(parameter)

    def extend(self, other):
        """
        Add a ParameterList object to the end of the ParameterList

        Parameters
        ----------
        other : a ParameterList

        Raises
        ------
        TypeError if other is not a ParameterList.
        """
        if not isinstance(other, ParameterList):
            msg = (
                "ParameterList.extend(other) expected instance of ParameterList, "
                "but received {} (type {}) instead".format(other, type(other))
            )
            raise TypeError(msg)
        # TODO: Check if other ParameterList contains the same ParameterTypes?
        super().extend(other)

    def index(self, item):
        """
        Get the numerical index of a ParameterType object or SMIRKS in this ParameterList.
        Raises ParameterLookupError if the item is not found.

        Parameters
        ----------
        item : ParameterType object or str
            The parameter or SMIRKS to look up in this ParameterList

        Returns
        -------
        index : int
            The index of the found item

        Raises
        ------
        ParameterLookupError if SMIRKS pattern is passed in but not found
        """
        if isinstance(item, ParameterType):
            return super().index(item)
        # Single scan instead of a find-then-recurse double scan.
        for found_index, parameter in enumerate(self):
            if parameter.smirks == item:
                return found_index
        raise ParameterLookupError(
            "SMIRKS {item} not found in ParameterList".format(item=item)
        )

    def insert(self, index, parameter):
        """
        Add a ParameterType object as if this were a list

        Parameters
        ----------
        index : int
            The numerical position to insert the parameter at
        parameter : a ParameterType object
            The parameter to insert
        """
        # TODO: Ensure that newly added parameter is the same type as existing?
        super().insert(index, parameter)

    def __delitem__(self, item):
        """
        Delete item by index or SMIRKS.

        Parameters
        ----------
        item : str or int
            SMIRKS or numerical index of item in this ParameterList
        """
        if type(item) is int:
            index = item
        else:
            # Try to find by SMIRKS
            index = self.index(item)
        super().__delitem__(index)

    def __getitem__(self, item):
        """
        Retrieve item by index, slice, or SMIRKS.

        Parameters
        ----------
        item : str, int, or slice
            SMIRKS or numerical index of item in this ParameterList

        Raises
        ------
        ParameterLookupError if a ParameterType (or its class) is passed in.
        TypeError for any other unsupported key type. (Previously an
        unsupported key raised UnboundLocalError.)
        """
        if type(item) is int or type(item) is slice:
            index = item
        elif isinstance(item, str):
            index = self.index(item)
        elif isinstance(item, ParameterType) or (
            isinstance(item, type) and issubclass(item, ParameterType)
        ):
            raise ParameterLookupError("Lookup by instance is not supported")
        else:
            raise TypeError(
                f"ParameterList indices must be int, slice, or SMIRKS str, "
                f"not {type(item)}"
            )
        return super().__getitem__(index)

    # TODO: Override __setitem__ and __del__ to ensure we can slice by SMIRKS as well
    # This is needed for pickling. See https://github.com/openforcefield/openff-toolkit/issues/411
    # for more details.
    # TODO: Is there a cleaner way (getstate/setstate perhaps?) to allow FFs to be
    #  pickled?
    def __reduce__(self):
        return (__class__, (list(self),), self.__dict__)

    def __contains__(self, item):
        """Check to see if either Parameter or SMIRKS is contained in parameter list.

        Parameters
        ----------
        item : str
            SMIRKS of item in this ParameterList
        """
        if isinstance(item, str):
            # Special case for SMIRKS strings
            if item in [result.smirks for result in self]:
                return True
        # Fall back to traditional access
        return list.__contains__(self, item)

    def to_list(self, discard_cosmetic_attributes=True):
        """
        Render this ParameterList to a normal list, serializing each ParameterType object in it to dict.

        Parameters
        ----------
        discard_cosmetic_attributes : bool, optional. Default = True
            Whether to discard non-spec attributes of each ParameterType object.

        Returns
        -------
        parameter_list : List[dict]
            A serialized representation of a ParameterList, with each ParameterType it contains converted to dict.
        """
        parameter_list = list()
        for parameter in self:
            parameter_dict = parameter.to_dict(
                discard_cosmetic_attributes=discard_cosmetic_attributes
            )
            parameter_list.append(parameter_dict)
        return parameter_list
# TODO: Rename to better reflect role as parameter base class?
class ParameterType(_ParameterAttributeHandler):
    """
    Base class for SMIRNOFF parameter types.
    This base class provides utilities to create new parameter types. See
    the below for examples of how to do this.
    .. warning :: This API is experimental and subject to change.
    Attributes
    ----------
    smirks : str
        The SMIRKS pattern that this parameter matches.
    id : str or None
        An optional identifier for the parameter.
    parent_id : str or None
        Optionally, the identifier of the parameter of which this parameter
        is a specialization.
    See Also
    --------
    ParameterAttribute
    IndexedParameterAttribute
    Examples
    --------
    This class allows to define new parameter types by just listing its
    attributes. In the example below, ``_VALENCE_TYPE`` AND ``_ELEMENT_NAME``
    are used for the validation of the SMIRKS pattern associated to the
    parameter and the automatic serialization/deserialization into a ``dict``.
    >>> class MyBondParameter(ParameterType):
    ...     _VALENCE_TYPE = 'Bond'
    ...     _ELEMENT_NAME = 'Bond'
    ...     length = ParameterAttribute(unit=unit.angstrom)
    ...     k = ParameterAttribute(unit=unit.kilocalorie_per_mole / unit.angstrom**2)
    ...
    The parameter automatically inherits the required smirks attribute
    from ``ParameterType``. Associating a ``unit`` to a ``ParameterAttribute``
    cause the attribute to accept only values in compatible units and to
    parse string expressions.
    >>> my_par = MyBondParameter(
    ...     smirks='[*:1]-[*:2]',
    ...     length='1.01 * angstrom',
    ...     k=5 * unit.kilocalorie_per_mole / unit.angstrom**2
    ... )
    >>> my_par.length
    Quantity(value=1.01, unit=angstrom)
    >>> my_par.k = 3.0 * unit.gram
    Traceback (most recent call last):
    ...
    openff.toolkit.utils.utils.IncompatibleUnitError: k=3.0 g should have units of kilocalorie/(angstrom**2*mole)
    Each attribute can be made optional by specifying a default value,
    and you can attach a converter function by passing a callable as an
    argument or through the decorator syntax.
    >>> class MyParameterType(ParameterType):
    ...     _VALENCE_TYPE = 'Atom'
    ...     _ELEMENT_NAME = 'Atom'
    ...
    ...     attr_optional = ParameterAttribute(default=2)
    ...     attr_all_to_float = ParameterAttribute(converter=float)
    ...     attr_int_to_float = ParameterAttribute()
    ...
    ...     @attr_int_to_float.converter
    ...     def attr_int_to_float(self, attr, value):
    ...         # This converter converts only integers to floats
    ...         # and raise an exception for the other types.
    ...         if isinstance(value, int):
    ...             return float(value)
    ...         elif not isinstance(value, float):
    ...             raise TypeError(f"Cannot convert '{value}' to float")
    ...         return value
    ...
    >>> my_par = MyParameterType(smirks='[*:1]', attr_all_to_float='3.0', attr_int_to_float=1)
    >>> my_par.attr_optional
    2
    >>> my_par.attr_all_to_float
    3.0
    >>> my_par.attr_int_to_float
    1.0
    The float() function can convert strings to integers, but our custom
    converter forbids it
    >>> my_par.attr_all_to_float = '2.0'
    >>> my_par.attr_int_to_float = '4.0'
    Traceback (most recent call last):
    ...
    TypeError: Cannot convert '4.0' to float
    Parameter attributes that can be indexed can be handled with the
    ``IndexedParameterAttribute``. These support unit validation and
    converters exactly as ``ParameterAttribute``\ s, but the validation/conversion
    is performed for each indexed attribute.
    >>> class MyTorsionType(ParameterType):
    ...     _VALENCE_TYPE = 'ProperTorsion'
    ...     _ELEMENT_NAME = 'Proper'
    ...     periodicity = IndexedParameterAttribute(converter=int)
    ...     k = IndexedParameterAttribute(unit=unit.kilocalorie_per_mole)
    ...
    >>> my_par = MyTorsionType(
    ...     smirks='[*:1]-[*:2]-[*:3]-[*:4]',
    ...     periodicity1=2,
    ...     k1=5 * unit.kilocalorie_per_mole,
    ...     periodicity2='3',
    ...     k2=6 * unit.kilocalorie_per_mole,
    ... )
    >>> my_par.periodicity
    [2, 3]
    Indexed attributes, can be accessed both as a list or as their indexed
    parameter name.
    >>> my_par.periodicity2 = 6
    >>> my_par.periodicity[0] = 1
    >>> my_par.periodicity
    [1, 6]
    """

    # ChemicalEnvironment valence type string expected by SMARTS string for this Handler
    _VALENCE_TYPE = None
    # The string mapping to this ParameterType in a SMIRNOFF data source
    _ELEMENT_NAME = None
    # Parameter attributes shared among all parameter types.
    # NOTE: declaration order matters — _get_parameter_attributes() returns
    # descriptors in declaration order (PEP 520), which drives serialization.
    smirks = ParameterAttribute()
    id = ParameterAttribute(default=None)
    parent_id = ParameterAttribute(default=None)

    @smirks.converter
    def smirks(self, attr, smirks):
        # Validate the SMIRKS string to ensure it matches the expected
        # parameter type, raising an exception if it is invalid or doesn't
        # tag a valid set of atoms.
        # TODO: Add check to make sure we can't make tree non-hierarchical
        #  This would require parameter type knows which ParameterList it belongs to
        ChemicalEnvironment.validate_smirks(smirks, validate_valence_type=True)
        return smirks

    def __init__(self, smirks, allow_cosmetic_attributes=False, **kwargs):
        """
        Create a ParameterType.

        Parameters
        ----------
        smirks : str
            The SMIRKS match for the provided parameter type.
        allow_cosmetic_attributes : bool optional. Default = False
            Whether to permit non-spec kwargs ("cosmetic attributes"). If True, non-spec kwargs will be stored as
            an attribute of this parameter which can be accessed and written out. Otherwise an exception will
            be raised.
        """
        # This is just to make smirks a required positional argument.
        kwargs["smirks"] = smirks
        super().__init__(allow_cosmetic_attributes=allow_cosmetic_attributes, **kwargs)

    def __repr__(self):
        # Render every serializable attribute into a single human-readable line.
        ret_str = "<{} with ".format(self.__class__.__name__)
        for attr, val in self.to_dict().items():
            ret_str += f"{attr}: {val}  "
        ret_str += ">"
        return ret_str
# ======================================================================
# PARAMETER HANDLERS
#
# The following classes are Handlers that know how to create Force
# subclasses and add them to an OpenMM System that is being created. Each
# Handler class must define three methods:
# 1) a constructor which takes as input hierarchical dictionaries of data
# conformant to the SMIRNOFF spec;
# 2) a create_force() method that constructs the Force object and adds it
# to the System; and
# 3) a labelForce() method that provides access to which terms are applied
# to which atoms in specified mols.
# ======================================================================
# TODO: Should we have a parameter handler registry?
class ParameterHandler(_ParameterAttributeHandler):
"""Base class for parameter handlers.
Parameter handlers are configured with some global parameters for a
given section. They may also contain a :class:`ParameterList` populated
with :class:`ParameterType` objects if they are responsible for assigning
SMIRKS-based parameters.
.. warning
Parameter handler objects can only belong to a single :class:`ForceField` object.
If you need to create a copy to attach to a different :class:`ForceField` object,
use ``create_copy()``.
.. warning :: This API is experimental and subject to change.
"""
_TAGNAME = None # str of section type handled by this ParameterHandler (XML element name for SMIRNOFF XML representation)
_INFOTYPE = None # container class with type information that will be stored in self._parameters
_OPENMMTYPE = None # OpenMM Force class (or None if no equivalent)
_DEPENDENCIES = (
None # list of ParameterHandler classes that must precede this, or None
)
_KWARGS = [] # Kwargs to catch when create_force is called
_SMIRNOFF_VERSION_INTRODUCED = (
0.0 # the earliest version of SMIRNOFF spec that supports this ParameterHandler
)
_SMIRNOFF_VERSION_DEPRECATED = (
None # if deprecated, the first SMIRNOFF version number it is no longer used
)
_MIN_SUPPORTED_SECTION_VERSION = 0.3
_MAX_SUPPORTED_SECTION_VERSION = 0.3
version = ParameterAttribute()
    @version.converter
    def version(self, attr, new_version):
        """
        Raise a parsing exception if the given section version is unsupported.

        The value is compared against _MIN_SUPPORTED_SECTION_VERSION and
        _MAX_SUPPORTED_SECTION_VERSION using PEP 440 version semantics.

        Raises
        ------
        SMIRNOFFVersionError if an incompatible version is passed in.
        """
        import packaging.version

        from openff.toolkit.typing.engines.smirnoff import SMIRNOFFVersionError

        # Use PEP-440 compliant version number comparison, if requested
        if (
            packaging.version.parse(str(new_version))
            > packaging.version.parse(str(self._MAX_SUPPORTED_SECTION_VERSION))
        ) or (
            packaging.version.parse(str(new_version))
            < packaging.version.parse(str(self._MIN_SUPPORTED_SECTION_VERSION))
        ):
            raise SMIRNOFFVersionError(
                f"SMIRNOFF offxml file was written with version {new_version}, but this version "
                f"of ForceField only supports version {self._MIN_SUPPORTED_SECTION_VERSION} "
                f"to version {self._MAX_SUPPORTED_SECTION_VERSION}"
            )
        return new_version
    def __init__(
        self, allow_cosmetic_attributes=False, skip_version_check=False, **kwargs
    ):
        """
        Initialize a ParameterHandler, optionally with a list of parameters and other kwargs.

        Parameters
        ----------
        allow_cosmetic_attributes : bool, optional. Default = False
            Whether to permit non-spec kwargs. If True, non-spec kwargs will be stored as attributes of this object
            and can be accessed and modified. Otherwise an exception will be raised.
        skip_version_check: bool, optional. Default = False
            If True and no ``version`` kwarg is supplied, the SMIRNOFF section version
            will not be checked, and the ParameterHandler will be initialized
            with version set to _MAX_SUPPORTED_SECTION_VERSION.
        **kwargs : dict
            The dict representation of the SMIRNOFF data source

        Raises
        ------
        SMIRNOFFSpecError if no version is supplied and skip_version_check is False.
        """
        # Skip version check if requested.
        if "version" not in kwargs:
            if skip_version_check:
                kwargs["version"] = self._MAX_SUPPORTED_SECTION_VERSION
            else:
                raise SMIRNOFFSpecError(
                    f"Missing version while trying to construct {self.__class__}. "
                    f"0.3 SMIRNOFF spec requires each parameter section to have its own version."
                )
        # List of ParameterType objects (also behaves like an OrderedDict where keys are SMARTS).
        self._parameters = ParameterList()
        # Initialize ParameterAttributes and cosmetic attributes.
        super().__init__(allow_cosmetic_attributes=allow_cosmetic_attributes, **kwargs)
    def _add_parameters(self, section_dict, allow_cosmetic_attributes=False):
        """
        Extend the ParameterList in this ParameterHandler using a SMIRNOFF data source.

        Parameters
        ----------
        section_dict : dict
            The dict representation of a SMIRNOFF data source containing parameters to add to this ParameterHandler
        allow_cosmetic_attributes : bool, optional. Default = False
            Whether to allow non-spec fields in section_dict. If True, non-spec kwargs will be stored as an
            attribute of the parameter. If False, non-spec kwargs will raise an exception.
        """
        unitless_kwargs, attached_units = extract_serialized_units_from_dict(
            section_dict
        )
        smirnoff_data = attach_units(unitless_kwargs, attached_units)
        for key, val in smirnoff_data.items():
            if self._INFOTYPE is not None:
                element_name = self._INFOTYPE._ELEMENT_NAME
                # Skip sections that aren't the parameter list
                # NOTE(review): `break` aborts the whole loop on the first
                # non-matching key, so any parameter-list key appearing after
                # a non-matching key would be silently ignored — looks like
                # this should be `continue` to match the comment above;
                # confirm intent before changing.
                if key != element_name:
                    break
            # If there are multiple parameters, this will be a list. If there's just one, make it a list
            if not (isinstance(val, list)):
                val = [val]
            # If we're reading the parameter list, iterate through and attach units to
            # each parameter_dict, then use it to initialize a ParameterType
            for unitless_param_dict in val:
                param_dict = attach_units(unitless_param_dict, attached_units)
                new_parameter = self._INFOTYPE(
                    **param_dict, allow_cosmetic_attributes=allow_cosmetic_attributes
                )
                self._parameters.append(new_parameter)
    @property
    def parameters(self):
        """The ParameterList that holds this ParameterHandler's parameter objects.

        Note: the internal list itself is returned, not a copy."""
        return self._parameters
    @property
    def TAGNAME(self):
        """
        The name of this ParameterHandler corresponding to the SMIRNOFF tag name.

        Set via the ``_TAGNAME`` class attribute on each concrete subclass.

        Returns
        -------
        handler_name : str
            The name of this parameter handler
        """
        return self._TAGNAME
    # TODO: Do we need to return these, or can we handle this internally
    @property
    def known_kwargs(self):
        """Set of kwargs (from the ``_KWARGS`` class attribute) that can be parsed by the function."""
        # TODO: Should we use introspection to inspect the function signature instead?
        return set(self._KWARGS)
    def check_handler_compatibility(self, handler_kwargs):
        """
        Checks if a set of kwargs used to create a ParameterHandler are compatible with this ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        The base implementation accepts everything; subclasses override this
        to enforce their own compatibility rules.

        Parameters
        ----------
        handler_kwargs : dict
            The kwargs that would be used to construct

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        pass
# TODO: Can we ensure SMIRKS and other parameters remain valid after manipulation?
def add_parameter(
self, parameter_kwargs=None, parameter=None, after=None, before=None
):
"""Add a parameter to the force field, ensuring all parameters are valid.
Parameters
----------
parameter_kwargs: dict, optional
The kwargs to pass to the ParameterHandler.INFOTYPE (a ParameterType) constructor
parameter: ParameterType, optional
A ParameterType to add to the ParameterHandler
after : str or int, optional
The SMIRKS pattern (if str) or index (if int) of the parameter directly before where
the new parameter will be added
before : str, optional
The SMIRKS pattern (if str) or index (if int) of the parameter directly after where
the new parameter will be added
Note that one of (parameter_kwargs, parameter) must be specified
Note that when `before` and `after` are both None, the new parameter will be appended
to the END of the parameter list.
Note that when `before` and `after` are both specified, the new parameter
will be added immediately after the parameter matching the `after` pattern or index.
Examples
--------
Add a ParameterType to an existing ParameterList at a specified position.
Given an existing parameter handler and a new parameter to add to it:
>>> from simtk import unit
>>> bh = BondHandler(skip_version_check=True)
>>> length = 1.5 * unit.angstrom
>>> k = 100 * unit.kilocalorie_per_mole / unit.angstrom ** 2
>>> bh.add_parameter({'smirks': '[*:1]-[*:2]', 'length': length, 'k': k, 'id': 'b1'})
>>> bh.add_parameter({'smirks': '[*:1]=[*:2]', 'length': length, 'k': k, 'id': 'b2'})
>>> bh.add_parameter({'smirks': '[*:1]#[*:2]', 'length': length, 'k': k, 'id': 'b3'})
>>> [p.id for p in bh.parameters]
['b1', 'b2', 'b3']
>>> param = {'smirks': '[#1:1]-[#6:2]', 'length': length, 'k': k, 'id': 'b4'}
Add a new parameter immediately after the parameter with the smirks '[*:1]=[*:2]'
>>> bh.add_parameter(param, after='[*:1]=[*:2]')
>>> [p.id for p in bh.parameters]
['b1', 'b2', 'b4', 'b3']
"""
for val in [before, after]:
if val and not isinstance(val, (str, int)):
raise TypeError
# If a dict was passed, construct it; if a ParameterType was passed, do nothing
if parameter_kwargs:
new_parameter = self._INFOTYPE(**parameter_kwargs)
elif parameter:
new_parameter = parameter
else:
raise ValueError("One of (parameter, parameter_kwargs) must be specified")
if new_parameter.smirks in [p.smirks for p in self._parameters]:
msg = f"A parameter SMIRKS pattern {new_parameter.smirks} already exists."
raise DuplicateParameterError(msg)
if before is not None:
if isinstance(before, str):
before_index = self._parameters.index(before)
elif isinstance(before, int):
before_index = before
if after is not None:
if isinstance(after, str):
after_index = self._parameters.index(after)
elif isinstance(after, int):
after_index = after
if None not in (before, after):
if after_index > before_index:
raise ValueError("before arg must be before after arg")
if after is not None:
self._parameters.insert(after_index + 1, new_parameter)
elif before is not None:
self._parameters.insert(before_index, new_parameter)
else:
self._parameters.append(new_parameter)
def get_parameter(self, parameter_attrs):
"""
Return the parameters in this ParameterHandler that match the parameter_attrs argument.
When multiple attrs are passed, parameters that have any (not all) matching attributes
are returned.
Parameters
----------
parameter_attrs : dict of {attr: value}
The attrs mapped to desired values (for example {"smirks": "[*:1]~[#16:2]=,:[#6:3]~[*:4]", "id": "t105"} )
Returns
-------
params : list of ParameterType objects
A list of matching ParameterType objects
Examples
--------
Create a parameter handler and populate it with some data.
>>> from simtk import unit
>>> handler = BondHandler(skip_version_check=True)
>>> handler.add_parameter(
... {
... 'smirks': '[*:1]-[*:2]',
... 'length': 1*unit.angstrom,
... 'k': 10*unit.kilocalorie_per_mole/unit.angstrom**2,
... }
... )
Look up, from this handler, all parameters matching some SMIRKS pattern
>>> handler.get_parameter({'smirks': '[*:1]-[*:2]'})
[<BondType with smirks: [*:1]-[*:2] length: 1 A k: 10 kcal/(A**2 mol) >]
"""
params = list()
for attr, value in parameter_attrs.items():
for param in self.parameters:
if param in params:
continue
# TODO: Cleaner accessing of cosmetic attributes
# See issue #338
if param.attribute_is_cosmetic(attr):
attr = "_" + attr
if hasattr(param, attr):
if getattr(param, attr) == value:
params.append(param)
return params
class _Match:
"""Represents a ParameterType which has been matched to
a given chemical environment.
"""
@property
def parameter_type(self):
"""ParameterType: The matched parameter type."""
return self._parameter_type
@property
def environment_match(self):
"""Topology._ChemicalEnvironmentMatch: The environment which matched the type."""
return self._environment_match
def __init__(self, parameter_type, environment_match):
"""Constructs a new ParameterHandlerMatch object.
Parameters
----------
parameter_type: ParameterType
The matched parameter type.
environment_match: Topology._ChemicalEnvironmentMatch
The environment which matched the type.
"""
self._parameter_type = parameter_type
self._environment_match = environment_match
def find_matches(self, entity):
"""Find the elements of the topology/molecule matched by a parameter type.
Parameters
----------
entity : openff.toolkit.topology.Topology
Topology to search.
Returns
---------
matches : ValenceDict[Tuple[int], ParameterHandler._Match]
``matches[particle_indices]`` is the ``ParameterType`` object
matching the tuple of particle indices in ``entity``.
"""
# TODO: Right now, this method is only ever called with an entity that is a Topology.
# Should we reduce its scope and have a check here to make sure entity is a Topology?
return self._find_matches(entity)
def _find_matches(self, entity, transformed_dict_cls=ValenceDict):
"""Implement find_matches() and allow using a difference valence dictionary.
Parameters
----------
entity : openff.toolkit.topology.Topology
Topology to search.
transformed_dict_cls: class
The type of dictionary to store the matches in. This
will determine how groups of atom indices are stored
and accessed (e.g for angles indices should be 0-1-2
and not 2-1-0).
Returns
---------
matches : `transformed_dict_cls` of ParameterHandlerMatch
``matches[particle_indices]`` is the ``ParameterType`` object
matching the tuple of particle indices in ``entity``.
"""
logger.debug("Finding matches for {}".format(self.__class__.__name__))
matches = transformed_dict_cls()
# TODO: There are probably performance gains to be had here
# by performing this loop in reverse order, and breaking early once
# all environments have been matched.
for parameter_type in self._parameters:
matches_for_this_type = {}
for environment_match in entity.chemical_environment_matches(
parameter_type.smirks
):
# Update the matches for this parameter type.
handler_match = self._Match(parameter_type, environment_match)
matches_for_this_type[
environment_match.topology_atom_indices
] = handler_match
# Update matches of all parameter types.
matches.update(matches_for_this_type)
logger.debug(
"{:64} : {:8} matches".format(
parameter_type.smirks, len(matches_for_this_type)
)
)
logger.debug("{} matches identified".format(len(matches)))
return matches
@staticmethod
def _assert_correct_connectivity(match, expected_connectivity=None):
"""A more performant version of the `topology.assert_bonded` method
to ensure that the results of `_find_matches` are valid.
Raises
------
ValueError
Raise an exception when the atoms in the match don't have
the correct connectivity.
Parameters
----------
match: ParameterHandler._Match
The match found by `_find_matches`
connectivity: list of tuple of int, optional
The expected connectivity of the match (e.g. for a torsion
expected_connectivity=[(0, 1), (1, 2), (2, 3)]). If `None`,
a connectivity of [(0, 1), ... (n - 1, n)] is assumed.
"""
# I'm not 100% sure this is really necessary... but this should do
# the same checks as the more costly assert_bonded method in the
# ParameterHandler.create_force methods.
if expected_connectivity is None:
return
reference_molecule = match.environment_match.reference_molecule
for connectivity in expected_connectivity:
atom_i = match.environment_match.reference_atom_indices[connectivity[0]]
atom_j = match.environment_match.reference_atom_indices[connectivity[1]]
reference_molecule.get_bond_between(atom_i, atom_j)
    def assign_parameters(self, topology, system):
        """Assign parameters for the given Topology to the specified OpenMM ``System`` object.

        The base-class implementation is a no-op placeholder.

        Parameters
        ----------
        topology : openff.toolkit.topology.Topology
            The Topology for which parameters are to be assigned.
            Either a new Force will be created or parameters will be appended to an existing Force.
        system : simtk.openmm.System
            The OpenMM System object to add the Force (or append new parameters) to.
        """
        pass
    def postprocess_system(self, topology, system, **kwargs):
        """Allow the force to perform a final post-processing pass on the OpenMM ``System`` following parameter assignment, if needed.

        The base-class implementation is a no-op placeholder.

        Parameters
        ----------
        topology : openff.toolkit.topology.Topology
            The Topology for which parameters are to be assigned.
            Either a new Force will be created or parameters will be appended to an existing Force.
        system : simtk.openmm.System
            The OpenMM System object to add the Force (or append new parameters) to.
        """
        pass
def to_dict(self, discard_cosmetic_attributes=False):
"""
Convert this ParameterHandler to an OrderedDict, compliant with the SMIRNOFF data spec.
Parameters
----------
discard_cosmetic_attributes : bool, optional. Default = False.
Whether to discard non-spec parameter and header attributes in this ParameterHandler.
Returns
-------
smirnoff_data : OrderedDict
SMIRNOFF-spec compliant representation of this ParameterHandler and its internal ParameterList.
"""
smirnoff_data = OrderedDict()
# Populate parameter list
parameter_list = self._parameters.to_list(
discard_cosmetic_attributes=discard_cosmetic_attributes
)
# NOTE: This assumes that a ParameterHandler will have just one homogenous ParameterList under it
if self._INFOTYPE is not None:
# smirnoff_data[self._INFOTYPE._ELEMENT_NAME] = unitless_parameter_list
smirnoff_data[self._INFOTYPE._ELEMENT_NAME] = parameter_list
# Collect parameter and cosmetic attributes.
header_attribute_dict = super().to_dict(
discard_cosmetic_attributes=discard_cosmetic_attributes
)
smirnoff_data.update(header_attribute_dict)
return smirnoff_data
# -------------------------------
# Utilities for children classes.
# -------------------------------
@classmethod
def _check_all_valence_terms_assigned(
cls,
assigned_terms,
valence_terms,
exception_cls=UnassignedValenceParameterException,
):
"""Check that all valence terms have been assigned and print a user-friendly error message.
Parameters
----------
assigned_terms : ValenceDict
Atom index tuples defining added valence terms.
valence_terms : Iterable[TopologyAtom] or Iterable[Iterable[TopologyAtom]]
Atom or atom tuples defining topological valence terms.
exception_cls : UnassignedValenceParameterException
A specific exception class to raise to allow catching only specific
types of errors.
"""
from openff.toolkit.topology import TopologyAtom
# Provided there are no duplicates in either list,
# or something weird like a bond has been added to
# a torsions list - this should work just fine I think.
# If we expect either of those assumptions to be incorrect,
# (i.e len(not_found_terms) > 0) we have bigger issues
# in the code and should be catching those cases elsewhere!
# The fact that we graph match all topol molecules to ref
# molecules should avoid the len(not_found_terms) > 0 case.
if len(assigned_terms) == len(valence_terms):
return
# Convert the valence term to a valence dictionary to make sure
# the order of atom indices doesn't matter for comparison.
valence_terms_dict = assigned_terms.__class__()
for atoms in valence_terms:
try:
# valence_terms is a list of TopologyAtom tuples.
atom_indices = (a.topology_particle_index for a in atoms)
except TypeError:
# valence_terms is a list of TopologyAtom.
atom_indices = (atoms.topology_particle_index,)
valence_terms_dict[atom_indices] = atoms
# Check that both valence dictionaries have the same keys (i.e. terms).
assigned_terms_set = set(assigned_terms.keys())
valence_terms_set = set(valence_terms_dict.keys())
unassigned_terms = valence_terms_set.difference(assigned_terms_set)
not_found_terms = assigned_terms_set.difference(valence_terms_set)
# Raise an error if there are unassigned terms.
err_msg = ""
if len(unassigned_terms) > 0:
unassigned_topology_atom_tuples = []
# Gain access to the relevant topology
if type(valence_terms[0]) is TopologyAtom:
topology = valence_terms[0].topology_molecule.topology
else:
topology = valence_terms[0][0].topology_molecule.topology
unassigned_str = ""
for unassigned_tuple in unassigned_terms:
unassigned_str += "\n- Topology indices " + str(unassigned_tuple)
unassigned_str += ": names and elements "
unassigned_topology_atoms = []
# Pull and add additional helpful info on missing terms
for atom_idx in unassigned_tuple:
topology_atom = topology.atom(atom_idx)
unassigned_topology_atoms.append(topology_atom)
unassigned_str += f"({topology_atom.atom.name} {topology_atom.atom.element.symbol}), "
unassigned_topology_atom_tuples.append(tuple(unassigned_topology_atoms))
err_msg += (
"{parameter_handler} was not able to find parameters for the following valence terms:\n"
"{unassigned_str}"
).format(parameter_handler=cls.__name__, unassigned_str=unassigned_str)
if len(not_found_terms) > 0:
if err_msg != "":
err_msg += "\n"
not_found_str = "\n- ".join([str(x) for x in not_found_terms])
err_msg += (
"{parameter_handler} assigned terms that were not found in the topology:\n"
"- {not_found_str}"
).format(parameter_handler=cls.__name__, not_found_str=not_found_str)
if err_msg != "":
err_msg += "\n"
exception = exception_cls(err_msg)
exception.unassigned_topology_atom_tuples = unassigned_topology_atom_tuples
exception.handler_class = cls
raise exception
def _check_attributes_are_equal(
self, other, identical_attrs=(), tolerance_attrs=(), tolerance=1e-6
):
"""Utility function to check that the given attributes of the two handlers are equal.
Parameters
----------
identical_attrs : List[str]
Names of the parameters that must be checked with the equality operator.
tolerance_attrs : List[str]
Names of the parameters that must be equal up to a tolerance.
tolerance : float
The absolute tolerance used to compare the parameters.
"""
def get_unitless_values(attr):
this_val = getattr(self, attr)
other_val = getattr(other, attr)
# Strip quantities of their units before comparison.
try:
u = this_val.unit
except AttributeError:
return this_val, other_val
return this_val / u, other_val / u
for attr in identical_attrs:
this_val, other_val = get_unitless_values(attr)
if this_val != other_val:
raise IncompatibleParameterError(
"{} values are not identical. "
"(handler value: {}, incompatible value: {}".format(
attr, this_val, other_val
)
)
for attr in tolerance_attrs:
this_val, other_val = get_unitless_values(attr)
if abs(this_val - other_val) > tolerance:
raise IncompatibleParameterError(
"Difference between '{}' values is beyond allowed tolerance {}. "
"(handler value: {}, incompatible value: {}".format(
attr, tolerance, this_val, other_val
)
)
@staticmethod
def check_partial_bond_orders_from_molecules_duplicates(pb_mols):
if len(set(map(Molecule.to_smiles, pb_mols))) < len(pb_mols):
raise ValueError(
"At least two user-provided fractional bond order "
"molecules are isomorphic"
)
    @staticmethod
    def assign_partial_bond_orders_from_molecules(topology, pbo_mols):
        """Copy user-provided fractional bond orders onto matching reference molecules.

        Parameters
        ----------
        topology : openff.toolkit.topology.Topology
            Topology whose reference molecules receive the bond orders (mutated
            in place).
        pbo_mols : iterable of Molecule
            User-provided molecules carrying ``fractional_bond_order`` on every
            bond.

        Raises
        ------
        ValueError
            If a matched ``pbo_mol`` has any bond lacking a
            ``fractional_bond_order``.
        """
        # for each reference molecule in our topology, we'll walk through the provided partial bond order molecules
        # if we find a match, we'll apply the partial bond orders and skip to the next molecule
        for ref_mol in topology.reference_molecules:
            for pbo_mol in pbo_mols:
                # we are as stringent as we are in the ElectrostaticsHandler
                # TODO: figure out whether bond order matching is redundant with aromatic matching
                isomorphic, topology_atom_map = Molecule.are_isomorphic(
                    ref_mol,
                    pbo_mol,
                    return_atom_map=True,
                    aromatic_matching=True,
                    formal_charge_matching=True,
                    bond_order_matching=True,
                    atom_stereochemistry_matching=True,
                    bond_stereochemistry_matching=True,
                )
                # if matching, assign bond orders and skip to next molecule
                # first match wins
                if isomorphic:
                    # walk through bonds on reference molecule
                    for bond in ref_mol.bonds:
                        # use atom mapping to translate to pbo_molecule bond
                        pbo_bond = pbo_mol.get_bond_between(
                            topology_atom_map[bond.atom1_index],
                            topology_atom_map[bond.atom2_index],
                        )
                        # extract fractional bond order
                        # assign fractional bond order to reference molecule bond
                        if pbo_bond.fractional_bond_order is None:
                            raise ValueError(
                                f"Molecule '{ref_mol}' was requested to be parameterized "
                                f"with user-provided fractional bond orders from '{pbo_mol}', but not "
                                "all bonds were provided with `fractional_bond_order` specified"
                            )
                        bond.fractional_bond_order = pbo_bond.fractional_bond_order
                    # done with this reference molecule; move to the next one
                    break
                # not necessary, but explicit: no match for this pbo_mol,
                # try the next candidate
                else:
                    continue
    def __getitem__(self, val):
        """
        Syntax sugar for looking up a ParameterType in a ParameterHandler
        based on its SMIRKS.
        """
        return self.parameters[val]
# =============================================================================================
class ConstraintHandler(ParameterHandler):
    """Handle SMIRNOFF ``<Constraints>`` tags

    ``ConstraintHandler`` must be applied before ``BondHandler`` and ``AngleHandler``,
    since those classes add constraints for which equilibrium geometries are needed from those tags.

    .. warning :: This API is experimental and subject to change.
    """

    class ConstraintType(ParameterType):
        """A SMIRNOFF constraint type

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = "Bond"
        _ELEMENT_NAME = "Constraint"

        # Optional: when None, the equilibrium bond length supplies the distance later.
        distance = ParameterAttribute(default=None, unit=unit.angstrom)

    _TAGNAME = "Constraints"
    _INFOTYPE = ConstraintType
    _OPENMMTYPE = None  # don't create a corresponding OpenMM Force class

    def create_force(self, system, topology, **kwargs):
        """Record matched constraints on the topology and, when an explicit
        distance is present, add them to the OpenMM ``System`` immediately."""
        for atoms, constraint_match in self.find_matches(topology).items():
            constraint = constraint_match.parameter_type
            distance = constraint.distance
            if distance is None:
                # No distance given: just flag the pair as constrained; the
                # equilibrium bond length will be used to constrain the atoms
                # in HarmonicBondHandler.
                topology.add_constraint(*atoms, True)
            else:
                # Explicit distance: constrain in the System now and record
                # the distance on the topology.
                system.addConstraint(*atoms, distance)
                topology.add_constraint(*atoms, distance)
# =============================================================================================
class BondHandler(ParameterHandler):
    """Handle SMIRNOFF ``<Bonds>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class BondType(ParameterType):
        """A SMIRNOFF bond type

        .. warning :: This API is experimental and subject to change.
        """

        # ChemicalEnvironment valence type string expected by SMARTS string for this Handler
        _VALENCE_TYPE = "Bond"
        _ELEMENT_NAME = "Bond"

        length = ParameterAttribute(default=None, unit=unit.angstrom)
        k = ParameterAttribute(
            default=None, unit=unit.kilocalorie_per_mole / unit.angstrom ** 2
        )

        # fractional bond order params
        length_bondorder = MappedParameterAttribute(default=None, unit=unit.angstrom)
        k_bondorder = MappedParameterAttribute(
            default=None, unit=unit.kilocalorie_per_mole / unit.angstrom ** 2
        )

        def __init__(self, **kwargs):
            """Create a BondType, enforcing mutually-exclusive attribute pairs.

            Exactly one of ``k``/``k_bondorder*`` and exactly one of
            ``length``/``length_bondorder*`` must be supplied.

            Raises
            ------
            SMIRNOFFSpecError
                If both, or neither, of a mutually-exclusive pair is given.
            """
            # these checks enforce mutually-exclusive parameterattribute specifications
            has_k = "k" in kwargs.keys()
            has_k_bondorder = any(["k_bondorder" in key for key in kwargs.keys()])
            has_length = "length" in kwargs.keys()
            has_length_bondorder = any(
                ["length_bondorder" in key for key in kwargs.keys()]
            )

            # Are these errors too general? What about ParametersMissingError/ParametersOverspecifiedError?
            if has_k:
                if has_k_bondorder:
                    raise SMIRNOFFSpecError(
                        "BOTH k and k_bondorder* cannot be specified simultaneously."
                    )
            else:
                if not has_k_bondorder:
                    raise SMIRNOFFSpecError(
                        "Either k or k_bondorder* must be specified."
                    )
            if has_length:
                if has_length_bondorder:
                    raise SMIRNOFFSpecError(
                        "BOTH length and length_bondorder* cannot be specified simultaneously."
                    )
            else:
                if not has_length_bondorder:
                    raise SMIRNOFFSpecError(
                        "Either length or length_bondorder* must be specified."
                    )

            super().__init__(**kwargs)

    _TAGNAME = "Bonds"  # SMIRNOFF tag name to process
    _INFOTYPE = BondType  # class to hold force type info
    _OPENMMTYPE = openmm.HarmonicBondForce  # OpenMM force class to create
    _DEPENDENCIES = [ConstraintHandler]  # ConstraintHandler must be executed first
    _MAX_SUPPORTED_SECTION_VERSION = 0.4

    # Use the _allow_only filter here because this class's implementation contains all the information about supported
    # potentials for this handler.
    potential = ParameterAttribute(
        default="overridden in init",
        converter=_allow_only(["harmonic", "(k/2)*(r-length)^2"]),
    )
    # The default value for fractional_bondorder_method depends on the section version and is overwritten in __init__.
    # Do not use the allow_only filter here since ToolkitWrappers may be imported that support additional fractional
    # bondorder methods.
    fractional_bondorder_method = ParameterAttribute(default="overridden in init")
    # Use the _allow_only filter here because this class's implementation contains all the information about supported
    # interpolation types.
    fractional_bondorder_interpolation = ParameterAttribute(
        default="linear", converter=_allow_only(["linear"])
    )

    def __init__(self, **kwargs):
        """Initialize the handler, filling version-dependent defaults for
        ``fractional_bondorder_method`` and ``potential`` when the user did
        not supply them."""
        super().__init__(**kwargs)

        # Default value for fractional_bondorder_method depends on section version.
        # BUGFIX: the kwargs key tested here must be "fractional_bondorder_method"
        # (it previously tested "fractional_bondorder_interpolation", which both
        # clobbered an explicitly supplied method with the default and let a
        # supplied interpolation suppress the method default).
        if self.version == 0.3 and "fractional_bondorder_method" not in kwargs:
            self.fractional_bondorder_method = "none"
        elif self.version == 0.4 and "fractional_bondorder_method" not in kwargs:
            self.fractional_bondorder_method = "AM1-Wiberg"
        # Default value for potential depends on section version
        if self.version == 0.3 and "potential" not in kwargs:
            self.potential = "harmonic"
        elif self.version == 0.4 and "potential" not in kwargs:
            self.potential = "(k/2)*(r-length)^2"

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        string_attrs_to_compare = [
            "fractional_bondorder_method",
            "fractional_bondorder_interpolation",
        ]
        self._check_attributes_are_equal(
            other_handler, identical_attrs=string_attrs_to_compare
        )

        # potential="harmonic" and potential="(k/2)*(r-length)^2" should be considered identical
        self_has_harmonic_potential = (
            self.potential == "harmonic" or self.potential == "(k/2)*(r-length)^2"
        )
        other_has_harmonic_potential = (
            other_handler.potential == "harmonic"
            or other_handler.potential == "(k/2)*(r-length)^2"
        )
        if not (self_has_harmonic_potential and other_has_harmonic_potential):
            if self.potential != other_handler.potential:
                raise IncompatibleParameterError(
                    f"potential values are not identical. "
                    f"(handler value: {self.potential}, incompatible value: {other_handler.potential}"
                )

    def create_force(self, system, topology, **kwargs):
        """Add matched bond terms (or constraints) for ``topology`` to ``system``.

        Parameters
        ----------
        system : simtk.openmm.System
            The System to add a HarmonicBondForce (or constraints) to.
        topology : openff.toolkit.topology.Topology
            The Topology whose bonds are parameterized.
        **kwargs
            May contain ``partial_bond_orders_from_molecules`` and
            ``toolkit_registry``.
        """
        # Create or retrieve existing OpenMM Force object
        # TODO: The commented line below should replace the system.getForce search
        # force = super(BondHandler, self).create_force(system, topology, **kwargs)
        existing = [system.getForce(i) for i in range(system.getNumForces())]
        existing = [f for f in existing if type(f) == self._OPENMMTYPE]
        if len(existing) == 0:
            force = self._OPENMMTYPE()
            system.addForce(force)
        else:
            force = existing[0]

        # Do not trust previously-calculated partial bond orders, since we don't know
        # what method was used to assign them
        # TODO: Jeff tried implementing a way to mark how bond orders were assigned on the
        #  topology, but realized that there's already a hierarchy of assignment
        #  methods. That is, if a molecule was assigned using PBOs_from_mols, then
        #  a different fractional bondorder method SHOULD NOT attempt
        #  recalculation, whereas if the previous method was simply DIFFERENT,
        #  then the old results should be erased/cached and overwritten with the
        #  new ones. It will be easier to handle this at the level of caching
        #  the results of molecule.assign_fractional_bond_orders
        for top_bond in topology.topology_bonds:
            top_bond.bond.fractional_bond_order = None

        # check whether any of the reference molecules in the topology
        # are in the partial_bond_orders_from_molecules list
        if "partial_bond_orders_from_molecules" in kwargs:
            # check whether molecules in the partial_bond_orders_from_molecules
            # list have any duplicates
            self.check_partial_bond_orders_from_molecules_duplicates(
                kwargs["partial_bond_orders_from_molecules"]
            )
            self.assign_partial_bond_orders_from_molecules(
                topology, kwargs["partial_bond_orders_from_molecules"]
            )

        # Add all bonds to the system.
        bond_matches = self.find_matches(topology)

        skipped_constrained_bonds = (
            0  # keep track of how many bonds were constrained (and hence skipped)
        )
        for (topology_atom_indices, bond_match) in bond_matches.items():
            # Get corresponding particle indices in Topology
            # particle_indices = tuple([ atom.particle_index for atom in atoms ])

            # Ensure atoms are actually bonded correct pattern in Topology
            self._assert_correct_connectivity(bond_match)
            # topology.assert_bonded(atoms[0], atoms[1])
            bond_params = bond_match.parameter_type
            match = bond_match.environment_match

            # Compute equilibrium bond length and spring constant.
            bond = match.reference_molecule.get_bond_between(
                *match.reference_atom_indices
            )

            length_requires_interpolation = (
                getattr(bond_params, "length_bondorder", None) is not None
            )
            k_requires_interpolation = (
                getattr(bond_params, "k_bondorder", None) is not None
            )

            # Calculate fractional bond orders for this molecule only if needed.
            if (
                length_requires_interpolation or k_requires_interpolation
            ) and bond.fractional_bond_order is None:
                toolkit_registry = kwargs.get(
                    "toolkit_registry", GLOBAL_TOOLKIT_REGISTRY
                )
                match.reference_molecule.assign_fractional_bond_orders(
                    toolkit_registry=toolkit_registry,
                    bond_order_model=self.fractional_bondorder_method.lower(),
                )

            if not length_requires_interpolation:
                length = bond_params.length
            else:
                # Interpolate length using fractional bond orders
                bond_order = bond.fractional_bond_order
                if self.fractional_bondorder_interpolation == "linear":
                    if len(bond_params.length_bondorder) < 2:
                        raise SMIRNOFFSpecError(
                            "In order to use bond order interpolation, 2 or more parameters "
                            f"must be present. Found {len(bond_params.length_bondorder)} parameters."
                        )
                    length = _linear_inter_or_extrapolate(
                        points_dict=bond_params.length_bondorder,
                        x_query=bond_order,
                    )
                else:
                    # TODO: This code is effectively unreachable due to the _allow_only converter used in this
                    #  ParameterAttribute's definition, which only allows "linear". Remove?
                    raise FractionalBondOrderInterpolationMethodUnsupportedError(
                        "Fractional bondorder interpolation method {} is not implemented.".format(
                            self.fractional_bondorder_interpolation
                        )
                    )

            if not k_requires_interpolation:
                k = bond_params.k
            else:
                # Interpolate k using fractional bond orders
                bond_order = bond.fractional_bond_order
                if self.fractional_bondorder_interpolation == "linear":
                    if len(bond_params.k_bondorder) < 2:
                        raise SMIRNOFFSpecError(
                            "In order to use bond order interpolation, 2 or more parameters "
                            f"must be present. Found {len(bond_params.k_bondorder)} parameters."
                        )
                    k = _linear_inter_or_extrapolate(
                        points_dict=bond_params.k_bondorder,
                        x_query=bond_order,
                    )
                else:
                    # TODO: This code is effectively unreachable due to the _allow_only converter used in this
                    #  ParameterAttribute's definition, which only allows "linear". Remove?
                    raise FractionalBondOrderInterpolationMethodUnsupportedError(
                        "Fractional bondorder interpolation method {} is not implemented.".format(
                            self.fractional_bondorder_interpolation
                        )
                    )

            # If this pair of atoms is subject to a constraint, only use the length
            is_constrained = topology.is_constrained(*topology_atom_indices)
            if not is_constrained:
                # Add harmonic bond to HarmonicBondForce
                force.addBond(*topology_atom_indices, length, k)
            else:
                # Handle constraints.
                # Atom pair is constrained; we don't need to add a bond term.
                skipped_constrained_bonds += 1
                # Check if we need to add the constraint here to the equilibrium bond length.
                if is_constrained is True:
                    # Mark that we have now assigned a specific constraint distance to this constraint.
                    topology.add_constraint(*topology_atom_indices, length)
                    # Add the constraint to the System.
                    system.addConstraint(*topology_atom_indices, length)
                    # system.addConstraint(*particle_indices, length)

        logger.info(
            "{} bonds added ({} skipped due to constraints)".format(
                len(bond_matches) - skipped_constrained_bonds, skipped_constrained_bonds
            )
        )

        # Check that no topological bonds are missing force parameters.
        valence_terms = [list(b.atoms) for b in topology.topology_bonds]
        self._check_all_valence_terms_assigned(
            assigned_terms=bond_matches,
            valence_terms=valence_terms,
            exception_cls=UnassignedBondParameterException,
        )
# =============================================================================================
class AngleHandler(ParameterHandler):
    """Handle SMIRNOFF ``<AngleForce>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class AngleType(ParameterType):
        """A SMIRNOFF angle type.

        .. warning :: This API is experimental and subject to change.
        """

        # ChemicalEnvironment valence type string expected by SMARTS string for this Handler
        _VALENCE_TYPE = "Angle"
        _ELEMENT_NAME = "Angle"

        angle = ParameterAttribute(unit=unit.degree)
        k = ParameterAttribute(unit=unit.kilocalorie_per_mole / unit.degree ** 2)

    _TAGNAME = "Angles"  # SMIRNOFF tag name to process
    _INFOTYPE = AngleType  # class to hold force type info
    _OPENMMTYPE = openmm.HarmonicAngleForce  # OpenMM force class to create
    _DEPENDENCIES = [ConstraintHandler]  # ConstraintHandler must be executed first

    potential = ParameterAttribute(default="harmonic")

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        # Only the functional form needs to agree between handlers.
        self._check_attributes_are_equal(
            other_handler, identical_attrs=["potential"]
        )

    def create_force(self, system, topology, **kwargs):
        """Add a HarmonicAngleForce term for every matched, unconstrained angle."""
        # Reuse a HarmonicAngleForce already present on the System, if any.
        # force = super(AngleHandler, self).create_force(system, topology, **kwargs)
        candidates = [
            f
            for f in (system.getForce(i) for i in range(system.getNumForces()))
            if type(f) == self._OPENMMTYPE
        ]
        if candidates:
            force = candidates[0]
        else:
            force = self._OPENMMTYPE()
            system.addForce(force)

        # Add all angles to the system.
        angle_matches = self.find_matches(topology)

        # keep track of how many angles were constrained (and hence skipped)
        skipped_constrained_angles = 0
        for (atoms, angle_match) in angle_matches.items():
            # Ensure atoms are actually bonded correct pattern in Topology
            # for (i, j) in [(0, 1), (1, 2)]:
            #     topology.assert_bonded(atoms[i], atoms[j])
            self._assert_correct_connectivity(angle_match)

            fully_constrained = (
                topology.is_constrained(atoms[0], atoms[1])
                and topology.is_constrained(atoms[1], atoms[2])
                and topology.is_constrained(atoms[0], atoms[2])
            )
            if fully_constrained:
                # All three interatomic distances are fixed, so an angle term
                # would be redundant.
                skipped_constrained_angles += 1
                continue

            angle = angle_match.parameter_type
            force.addAngle(*atoms, angle.angle, angle.k)

        logger.info(
            "{} angles added ({} skipped due to constraints)".format(
                len(angle_matches) - skipped_constrained_angles,
                skipped_constrained_angles,
            )
        )

        # Check that no topological angles are missing force parameters
        self._check_all_valence_terms_assigned(
            assigned_terms=angle_matches,
            valence_terms=list(topology.angles),
            exception_cls=UnassignedAngleParameterException,
        )
# =============================================================================================
# TODO: There's a lot of duplicated code in ProperTorsionHandler and ImproperTorsionHandler
class ProperTorsionHandler(ParameterHandler):
"""Handle SMIRNOFF ``<ProperTorsionForce>`` tags
.. warning :: This API is experimental and subject to change.
"""
    class ProperTorsionType(ParameterType):
        """A SMIRNOFF torsion type for proper torsions.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = "ProperTorsion"
        _ELEMENT_NAME = "Proper"

        # Indexed attributes: one value per periodic term (periodicity1, phase1, k1, ...).
        periodicity = IndexedParameterAttribute(converter=int)
        phase = IndexedParameterAttribute(unit=unit.degree)
        k = IndexedParameterAttribute(default=None, unit=unit.kilocalorie_per_mole)
        idivf = IndexedParameterAttribute(default=None, converter=float)

        # fractional bond order params
        k_bondorder = IndexedMappedParameterAttribute(
            default=None, unit=unit.kilocalorie_per_mole
        )
_TAGNAME = "ProperTorsions" # SMIRNOFF tag name to process
_KWARGS = ["partial_bond_orders_from_molecules"]
_INFOTYPE = ProperTorsionType # info type to store
_OPENMMTYPE = openmm.PeriodicTorsionForce # OpenMM force class to create
_MAX_SUPPORTED_SECTION_VERSION = 0.4
potential = ParameterAttribute(
default="k*(1+cos(periodicity*theta-phase))",
converter=_allow_only(["k*(1+cos(periodicity*theta-phase))"]),
)
default_idivf = ParameterAttribute(default="auto")
fractional_bondorder_method = ParameterAttribute(default="AM1-Wiberg")
fractional_bondorder_interpolation = ParameterAttribute(
default="linear", converter=_allow_only(["linear"])
)
def check_handler_compatibility(self, other_handler):
"""
Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
called if a second handler is attempted to be initialized for the same tag.
Parameters
----------
other_handler : a ParameterHandler object
The handler to compare to.
Raises
------
IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
"""
float_attrs_to_compare = []
string_attrs_to_compare = [
"potential",
"fractional_bondorder_method",
"fractional_bondorder_interpolation",
]
if self.default_idivf == "auto":
string_attrs_to_compare.append("default_idivf")
else:
float_attrs_to_compare.append("default_idivf")
self._check_attributes_are_equal(
other_handler,
identical_attrs=string_attrs_to_compare,
tolerance_attrs=float_attrs_to_compare,
)
    def create_force(self, system, topology, **kwargs):
        """Add matched proper torsions for ``topology`` to a PeriodicTorsionForce in ``system``.

        Parameters
        ----------
        system : simtk.openmm.System
            The System to add the PeriodicTorsionForce (or torsion terms) to.
        topology : openff.toolkit.topology.Topology
            The Topology whose proper torsions are parameterized.
        **kwargs
            May contain ``partial_bond_orders_from_molecules`` and
            ``toolkit_registry``.
        """
        # Reuse an existing PeriodicTorsionForce on the System, if present.
        # force = super(ProperTorsionHandler, self).create_force(system, topology, **kwargs)
        existing = [system.getForce(i) for i in range(system.getNumForces())]
        existing = [f for f in existing if type(f) == self._OPENMMTYPE]
        if len(existing) == 0:
            force = self._OPENMMTYPE()
            system.addForce(force)
        else:
            force = existing[0]
        # Do not trust previously-calculated partial bond orders, since we don't know
        # what method was used to assign them
        # TODO: Jeff tried implementing a way to mark how bond orders were assigned on the
        # topology, but realized that there's already a hierarchy of assignment
        # methods. That is, if a molecule was assigned using PBOs_from_mols, then
        # a different fractional bondorder method SHOULD NOT attempt
        # recalculation, whereas if the previous method was simply DIFFERENT,
        # then the old results should be erased/cached and overwritten with the
        # new ones. It will be easier to handle this at the level of caching
        # the results of molecule.assign_fractional_bond_orders
        for top_bond in topology.topology_bonds:
            top_bond.bond.fractional_bond_order = None
        # check whether any of the reference molecules in the topology
        # are in the partial_bond_orders_from_molecules list
        if "partial_bond_orders_from_molecules" in kwargs:
            # check whether molecules in the partial_bond_orders_from_molecules
            # list have any duplicates
            self.check_partial_bond_orders_from_molecules_duplicates(
                kwargs["partial_bond_orders_from_molecules"]
            )
            self.assign_partial_bond_orders_from_molecules(
                topology, kwargs["partial_bond_orders_from_molecules"]
            )
        # find all proper torsions for which we have parameters
        # operates on reference molecules in topology
        # but gives back matches for atoms for instance molecules
        torsion_matches = self.find_matches(topology)
        for (atom_indices, torsion_match) in torsion_matches.items():
            # Ensure atoms are actually bonded correct pattern in Topology
            # Currently does nothing
            self._assert_correct_connectivity(torsion_match)
            if torsion_match.parameter_type.k_bondorder is None:
                # TODO: add a check here that we have same number of terms for
                # `kX_bondorder*`, `periodicityX`, `phaseX`
                # only count a given `kX_bondorder*` once
                # assign torsion with no interpolation
                self._assign_torsion(atom_indices, torsion_match, force)
            else:
                # TODO: add a check here that we have same number of terms for
                # `kX_bondorder*`, `periodicityX`, `phaseX`
                # only count a given `kX_bondorder*` once
                # assign torsion with interpolation
                self._assign_fractional_bond_orders(
                    atom_indices, torsion_match, force, **kwargs
                )
        logger.info("{} torsions added".format(len(torsion_matches)))
        # Check that no topological torsions are missing force parameters
        # I can see the appeal of these kind of methods as an 'absolute' check
        # that things have gone well, but I think just making sure that the
        # reference molecule has been fully parametrised should have the same
        # effect! It would be good to eventually refactor things so that everything
        # is focused on the single unique molecules, and then simply just cloned
        # onto the system. It seems like John's proposed System object would do
        # exactly this.
        self._check_all_valence_terms_assigned(
            assigned_terms=torsion_matches,
            valence_terms=list(topology.propers),
            exception_cls=UnassignedProperTorsionParameterException,
        )
def _assign_torsion(self, atom_indices, torsion_match, force):
torsion_params = torsion_match.parameter_type
for (periodicity, phase, k, idivf) in zip(
torsion_params.periodicity,
torsion_params.phase,
torsion_params.k,
torsion_params.idivf,
):
if idivf == "auto":
# TODO: Implement correct "auto" behavior
raise NotImplementedError(
"The OpenForceField toolkit hasn't implemented "
"support for the torsion `idivf` value of 'auto'"
)
force.addTorsion(
atom_indices[0],
atom_indices[1],
atom_indices[2],
atom_indices[3],
periodicity,
phase,
k / idivf,
)
    def _assign_fractional_bond_orders(
        self, atom_indices, torsion_match, force, **kwargs
    ):
        """Add bond-order-interpolated torsion terms for one matched proper torsion.

        For each (periodicity, phase, k_bondorder, idivf) term on the matched
        parameter, the barrier height ``k`` is interpolated/extrapolated from
        the ``k_bondorder`` map at the fractional bond order of the torsion's
        central bond, then the torsion is added to ``force``.

        Side effect: if the central bond's fractional bond order has not been
        computed yet, it is calculated via the toolkit registry and cached on
        the *reference* molecule, so this only happens once per reference
        molecule that actually needs interpolation.
        """
        from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY
        torsion_params = torsion_match.parameter_type
        match = torsion_match.environment_match
        for (periodicity, phase, k_bondorder, idivf) in zip(
            torsion_params.periodicity,
            torsion_params.phase,
            torsion_params.k_bondorder,
            torsion_params.idivf,
        ):
            # Interpolation needs at least two (bond order -> k) points.
            if len(k_bondorder) < 2:
                raise ValueError(
                    "At least 2 bond order values required for `k_bondorder`; "
                    "got {}".format(len(k_bondorder))
                )
            if idivf == "auto":
                # TODO: Implement correct "auto" behavior
                raise NotImplementedError(
                    "The OpenForceField toolkit hasn't implemented "
                    "support for the torsion `idivf` value of 'auto'"
                )
            # get central bond for reference molecule
            # (atoms 1 and 2 of the 4-atom torsion are the central pair)
            central_bond = match.reference_molecule.get_bond_between(
                match.reference_atom_indices[1], match.reference_atom_indices[2]
            )
            # if fractional bond order not calculated yet, we calculate it
            # should only happen once per reference molecule for which we care
            # about fractional bond interpolation
            # and not at all for reference molecules we don't
            if central_bond.fractional_bond_order is None:
                toolkit_registry = kwargs.get(
                    "toolkit_registry", GLOBAL_TOOLKIT_REGISTRY
                )
                match.reference_molecule.assign_fractional_bond_orders(
                    toolkit_registry=toolkit_registry,
                    bond_order_model=self.fractional_bondorder_method.lower(),
                )
            # scale k based on the bondorder of the central bond
            if self.fractional_bondorder_interpolation == "linear":
                # we only interpolate on k
                k = _linear_inter_or_extrapolate(
                    k_bondorder, central_bond.fractional_bond_order
                )
            else:
                # TODO: This code is effectively unreachable due to the the _allow_only converter used in this
                # ParameterAttribute's definition, which only allows "linear". Remove?
                raise FractionalBondOrderInterpolationMethodUnsupportedError(
                    "Fractional bondorder interpolation method {} is not implemented.".format(
                        self.fractional_bondorder_interpolation
                    )
                )
            # add a torsion with given parameters for topology atoms
            force.addTorsion(
                atom_indices[0],
                atom_indices[1],
                atom_indices[2],
                atom_indices[3],
                periodicity,
                phase,
                k / idivf,
            )
# TODO: There's a lot of duplicated code in ProperTorsionHandler and ImproperTorsionHandler
class ImproperTorsionHandler(ParameterHandler):
    """Handle SMIRNOFF ``<ImproperTorsionForce>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class ImproperTorsionType(ParameterType):
        """A SMIRNOFF torsion type for improper torsions.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = "ImproperTorsion"
        _ELEMENT_NAME = "Improper"

        # One entry per torsion term: periodicityN, phaseN, kN, idivfN
        periodicity = IndexedParameterAttribute(converter=int)
        phase = IndexedParameterAttribute(unit=unit.degree)
        k = IndexedParameterAttribute(unit=unit.kilocalorie_per_mole)
        idivf = IndexedParameterAttribute(default=None, converter=float)

    _TAGNAME = "ImproperTorsions"  # SMIRNOFF tag name to process
    _INFOTYPE = ImproperTorsionType  # info type to store
    _OPENMMTYPE = openmm.PeriodicTorsionForce  # OpenMM force class to create

    potential = ParameterAttribute(
        default="k*(1+cos(periodicity*theta-phase))",
        converter=_allow_only(["k*(1+cos(periodicity*theta-phase))"]),
    )
    default_idivf = ParameterAttribute(default="auto")

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        float_attrs_to_compare = []
        string_attrs_to_compare = ["potential"]
        # "auto" must match exactly as a string; a numeric default_idivf is
        # compared with a tolerance instead.
        if self.default_idivf == "auto":
            string_attrs_to_compare.append("default_idivf")
        else:
            float_attrs_to_compare.append("default_idivf")
        self._check_attributes_are_equal(
            other_handler,
            identical_attrs=string_attrs_to_compare,
            tolerance_attrs=float_attrs_to_compare,
        )

    def find_matches(self, entity):
        """Find the improper torsions in the topology/molecule matched by a parameter type.

        Parameters
        ----------
        entity : openff.toolkit.topology.Topology
            Topology to search.

        Returns
        ---------
        matches : ImproperDict[Tuple[int], ParameterHandler._Match]
            ``matches[atom_indices]`` is the ``ParameterType`` object
            matching the 4-tuple of atom indices in ``entity``.
        """
        return self._find_matches(entity, transformed_dict_cls=ImproperDict)

    def create_force(self, system, topology, **kwargs):
        """Add all matched improper torsions to the system's PeriodicTorsionForce.

        Each improper is applied three times, once per path around the
        trefoil with the same handedness, with k divided by idivf (idivf
        defaults to 3 so the total barrier height is preserved).
        """
        # Reuse an existing PeriodicTorsionForce if one is already on the system
        # (e.g. created by ProperTorsionHandler), otherwise create one.
        existing = [system.getForce(i) for i in range(system.getNumForces())]
        existing = [f for f in existing if type(f) == openmm.PeriodicTorsionForce]
        if len(existing) == 0:
            force = openmm.PeriodicTorsionForce()
            system.addForce(force)
        else:
            force = existing[0]
        # Add all improper torsions to the system
        improper_matches = self.find_matches(topology)
        for (atom_indices, improper_match) in improper_matches.items():
            # Ensure atoms are actually bonded correct pattern in Topology.
            # For impropers, central atom is atom 1 (second in the tuple).
            self._assert_correct_connectivity(improper_match, [(0, 1), (1, 2), (1, 3)])
            improper = improper_match.parameter_type
            # TODO: idivf should be set according to the ParameterHandler's default_idivf attrib.
            # Bug fix: use a local fallback list instead of mutating the shared
            # ImproperTorsionType object in place -- the previous in-place
            # assignment persisted on the parameter across create_force calls.
            idivf_terms = improper.idivf
            if idivf_terms is None:
                idivf_terms = [3 for _ in improper.k]
            # Impropers are applied in three paths around the trefoil having the same handedness
            for (
                improper_periodicity,
                improper_phase,
                improper_k,
                improper_idivf,
            ) in zip(improper.periodicity, improper.phase, improper.k, idivf_terms):
                # TODO: Implement correct "auto" behavior
                if improper_idivf == "auto":
                    improper_idivf = 3
                    logger.warning(
                        "The OpenForceField toolkit hasn't implemented "
                        "support for the torsion `idivf` value of 'auto'."
                        "Currently assuming a value of '3' for impropers."
                    )
                # Permute non-central atoms
                others = [atom_indices[0], atom_indices[2], atom_indices[3]]
                # ((0, 1, 2), (1, 2, 0), and (2, 0, 1)) are the three paths around the trefoil
                for p in [
                    (others[i], others[j], others[k])
                    for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]
                ]:
                    # The torsion force gets added three times, since the k is divided by three
                    force.addTorsion(
                        atom_indices[1],
                        p[0],
                        p[1],
                        p[2],
                        improper_periodicity,
                        improper_phase,
                        improper_k / improper_idivf,
                    )
        logger.info(
            "{} impropers added, each applied in a six-fold trefoil".format(
                len(improper_matches)
            )
        )
class _NonbondedHandler(ParameterHandler):
    """Base class for ParameterHandlers that deal with OpenMM NonbondedForce objects."""

    _OPENMMTYPE = openmm.NonbondedForce

    def create_force(self, system, topology, **kwargs):
        """Return the system's NonbondedForce, creating and populating it if absent.

        Also lazily initializes the per-reference-molecule charge-method
        bookkeeping dict on the topology.
        """
        # Track which charge method handled each reference molecule. TODO: this
        # should live on the _system_, not the _topology_, but while we still
        # use OpenMM's System class it is stored on the OFF Topology.
        if not hasattr(topology, "_ref_mol_to_charge_method"):
            topology._ref_mol_to_charge_method = dict.fromkeys(
                topology.reference_molecules
            )
        # Look for an existing force of the right type on the system.
        all_forces = [system.getForce(i) for i in range(system.getNumForces())]
        matching = [f for f in all_forces if type(f) == self._OPENMMTYPE]
        if matching:
            return matching[0]
        # None found: make one, register it, and give every topology particle
        # placeholder parameters (charge 0, sigma 1, epsilon 0).
        force = self._OPENMMTYPE()
        system.addForce(force)
        for _ in topology.topology_particles:
            force.addParticle(0.0, 1.0, 0.0)
        return force

    def mark_charges_assigned(self, ref_mol, topology):
        """
        Record that charges have been assigned for a reference molecule.

        Parameters
        ----------
        ref_mol : openff.toolkit.topology.Molecule
            The molecule to mark as having charges assigned
        topology : openff.toolkit.topology.Topology
            The topology to record this information on.
        """
        # TODO: Change this to interface with system object instead of topology once we move away from OMM's System
        topology._ref_mol_to_charge_method[ref_mol] = self.__class__

    @staticmethod
    def check_charges_assigned(ref_mol, topology):
        """
        Check whether charges have been assigned for a reference molecule.

        Parameters
        ----------
        ref_mol : openff.toolkit.topology.Molecule
            The molecule to check for having charges assigned
        topology : openff.toolkit.topology.Topology
            The topology to query for this information

        Returns
        -------
        charges_assigned : bool
            Whether charges have already been assigned to this molecule
        """
        # TODO: Change this to interface with system object instead of topology once we move away from OMM's System
        return topology._ref_mol_to_charge_method[ref_mol] is not None
class vdWHandler(_NonbondedHandler):
    """Handle SMIRNOFF ``<vdW>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class vdWType(ParameterType):
        """A SMIRNOFF vdWForce type.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = "Atom"  # ChemicalEnvironment valence type expected for SMARTS
        _ELEMENT_NAME = "Atom"

        epsilon = ParameterAttribute(unit=unit.kilocalorie_per_mole)
        # Exactly one of sigma/rmin_half may be specified by the user; the other
        # is derived and kept in sync (see __init__ and __setattr__ below).
        sigma = ParameterAttribute(default=None, unit=unit.angstrom)
        rmin_half = ParameterAttribute(default=None, unit=unit.angstrom)

        def __init__(self, **kwargs):
            # Require exactly one of sigma/rmin_half before delegating to the base class.
            sigma = kwargs.get("sigma", None)
            rmin_half = kwargs.get("rmin_half", None)
            if (sigma is None) and (rmin_half is None):
                raise SMIRNOFFSpecError("Either sigma or rmin_half must be specified.")
            if (sigma is not None) and (rmin_half is not None):
                raise SMIRNOFFSpecError(
                    "BOTH sigma and rmin_half cannot be specified simultaneously."
                )
            super().__init__(**kwargs)
            # Remember which attribute was *derived* so to_dict can serialize
            # only the user-specified one (via duplicate_attributes).
            # NOTE(review): these tests use truthiness, so a zero-valued
            # quantity would be treated as "not given" here -- confirm.
            if sigma:
                self._extra_nb_var = "rmin_half"
            if rmin_half:
                self._extra_nb_var = "sigma"

        def __setattr__(self, name, value):
            # Delegate the actual assignment, then keep sigma and rmin_half
            # mutually consistent: rmin_half = 2**(1/6) * sigma / 2.
            # The paired value is written via super().__setattr__ to avoid
            # infinite recursion through this method.
            super().__setattr__(key=name, value=value)
            if name == "rmin_half":
                if type(value) == str:
                    value = object_to_quantity(value)
                super().__setattr__("sigma", 2.0 * value / 2 ** (1 / 6))
                self._extra_nb_var = "sigma"
            if name == "sigma":
                if type(value) == str:
                    value = object_to_quantity(value)
                super().__setattr__("rmin_half", value * 2 ** (1 / 6) / 2.0)
                self._extra_nb_var = "rmin_half"

        def to_dict(
            self,
            discard_cosmetic_attributes=False,
            duplicate_attributes=None,
        ):
            # Drop the derived attribute (tracked in _extra_nb_var) so only the
            # originally-specified one of sigma/rmin_half round-trips.
            # NOTE(review): the duplicate_attributes argument is accepted but
            # not forwarded to super() -- confirm this is intentional.
            return super().to_dict(
                discard_cosmetic_attributes=discard_cosmetic_attributes,
                duplicate_attributes=[self._extra_nb_var],
            )

    _TAGNAME = "vdW"  # SMIRNOFF tag name to process
    _INFOTYPE = vdWType  # info type to store
    # _KWARGS = ['ewaldErrorTolerance',
    #           'useDispersionCorrection',
    #           'usePbc'] # Kwargs to catch when create_force is called

    potential = ParameterAttribute(
        default="Lennard-Jones-12-6", converter=_allow_only(["Lennard-Jones-12-6"])
    )
    combining_rules = ParameterAttribute(
        default="Lorentz-Berthelot", converter=_allow_only(["Lorentz-Berthelot"])
    )

    # 1-2/1-3/1-5 scalings are pinned by the converters below; only scale14
    # is genuinely adjustable in the current implementation.
    scale12 = ParameterAttribute(default=0.0, converter=float)
    scale13 = ParameterAttribute(default=0.0, converter=float)
    scale14 = ParameterAttribute(default=0.5, converter=float)
    scale15 = ParameterAttribute(default=1.0, converter=float)

    cutoff = ParameterAttribute(default=9.0 * unit.angstroms, unit=unit.angstrom)
    switch_width = ParameterAttribute(default=1.0 * unit.angstroms, unit=unit.angstrom)
    method = ParameterAttribute(
        default="cutoff", converter=_allow_only(["cutoff", "PME"])
    )

    # TODO: Use _allow_only when ParameterAttribute will support multiple converters (it'll be easy when we switch to use the attrs library)
    @scale12.converter
    def scale12(self, attrs, new_scale12):
        # Only 0.0 is supported; anything else is a spec violation.
        if new_scale12 != 0.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale12 values other than 0.0. "
                "Specified 1-2 scaling was {}".format(self.scale12)
            )
        return new_scale12

    @scale13.converter
    def scale13(self, attrs, new_scale13):
        # Only 0.0 is supported; anything else is a spec violation.
        if new_scale13 != 0.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale13 values other than 0.0. "
                "Specified 1-3 scaling was {}".format(self.scale13)
            )
        return new_scale13

    @scale15.converter
    def scale15(self, attrs, new_scale15):
        # Only 1.0 is supported; anything else is a spec violation.
        if new_scale15 != 1.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale15 values other than 1.0. "
                "Specified 1-5 scaling was {}".format(self.scale15)
            )
        return new_scale15

    # Tolerance when comparing float attributes for handler compatibility.
    _SCALETOL = 1e-5

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        float_attrs_to_compare = ["scale12", "scale13", "scale14", "scale15"]
        string_attrs_to_compare = ["potential", "combining_rules", "method"]
        unit_attrs_to_compare = ["cutoff"]
        self._check_attributes_are_equal(
            other_handler,
            identical_attrs=string_attrs_to_compare,
            tolerance_attrs=float_attrs_to_compare + unit_attrs_to_compare,
            tolerance=self._SCALETOL,
        )

    def create_force(self, system, topology, **kwargs):
        """Set the system NonbondedForce's method/cutoff and per-particle LJ parameters."""
        force = super().create_force(system, topology, **kwargs)
        # If we're using PME, then the only possible openMM Nonbonded type is LJPME
        if self.method == "PME":
            # If we're given a nonperiodic box, we always set NoCutoff. Later we'll add support for CutoffNonPeriodic
            if topology.box_vectors is None:
                force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
                # if (topology.box_vectors is None):
                #     raise SMIRNOFFSpecError("If vdW method is PME, a periodic Topology "
                #                             "must be provided")
            else:
                force.setNonbondedMethod(openmm.NonbondedForce.LJPME)
                # NOTE(review): LJPME cutoff/tolerance are hard-coded here
                # rather than taken from self.cutoff -- confirm intended.
                force.setCutoffDistance(9.0 * unit.angstrom)
                force.setEwaldErrorTolerance(1.0e-4)
        # If method is cutoff, then we currently support openMM's PME for periodic system and NoCutoff for nonperiodic
        elif self.method == "cutoff":
            # If we're given a nonperiodic box, we always set NoCutoff. Later we'll add support for CutoffNonPeriodic
            if topology.box_vectors is None:
                force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)
            else:
                force.setNonbondedMethod(openmm.NonbondedForce.PME)
                force.setUseDispersionCorrection(True)
                force.setCutoffDistance(self.cutoff)
        # Iterate over all defined Lennard-Jones types, allowing later matches to override earlier ones.
        atom_matches = self.find_matches(topology)
        # Set the particle Lennard-Jones terms.
        for atom_key, atom_match in atom_matches.items():
            atom_idx = atom_key[0]
            ljtype = atom_match.parameter_type
            # Convert rmin_half to sigma if sigma was not specified directly.
            if ljtype.sigma is None:
                sigma = 2.0 * ljtype.rmin_half / (2.0 ** (1.0 / 6.0))
            else:
                sigma = ljtype.sigma
            # Charge is left at 0.0 here; an electrostatics handler fills it in later.
            force.setParticleParameters(atom_idx, 0.0, sigma, ljtype.epsilon)
        # Check that no atoms (n.b. not particles) are missing force parameters.
        self._check_all_valence_terms_assigned(
            assigned_terms=atom_matches, valence_terms=list(topology.topology_atoms)
        )
class ElectrostaticsHandler(_NonbondedHandler):
    """Handles SMIRNOFF ``<Electrostatics>`` tags.

    .. warning :: This API is experimental and subject to change.
    """

    _TAGNAME = "Electrostatics"
    _DEPENDENCIES = [vdWHandler]
    _KWARGS = ["charge_from_molecules", "allow_nonintegral_charges"]

    # 1-2/1-3/1-5 scalings are pinned by the converters below; only scale14
    # is genuinely adjustable in the current implementation.
    scale12 = ParameterAttribute(default=0.0, converter=float)
    scale13 = ParameterAttribute(default=0.0, converter=float)
    scale14 = ParameterAttribute(default=0.833333, converter=float)
    scale15 = ParameterAttribute(default=1.0, converter=float)
    cutoff = ParameterAttribute(default=9.0 * unit.angstrom, unit=unit.angstrom)
    switch_width = ParameterAttribute(default=0.0 * unit.angstrom, unit=unit.angstrom)
    method = ParameterAttribute(
        default="PME", converter=_allow_only(["Coulomb", "PME", "reaction-field"])
    )

    # TODO: Use _allow_only when ParameterAttribute will support multiple converters (it'll be easy when we switch to use the attrs library)
    @scale12.converter
    def scale12(self, attrs, new_scale12):
        # Only 0.0 is supported; anything else is a spec violation.
        if new_scale12 != 0.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale12 values other than 0.0. "
                "Specified 1-2 scaling was {}".format(self.scale12)
            )
        return new_scale12

    @scale13.converter
    def scale13(self, attrs, new_scale13):
        # Only 0.0 is supported; anything else is a spec violation.
        if new_scale13 != 0.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale13 values other than 0.0. "
                "Specified 1-3 scaling was {}".format(self.scale13)
            )
        return new_scale13

    @scale15.converter
    def scale15(self, attrs, new_scale15):
        # Only 1.0 is supported; anything else is a spec violation.
        if new_scale15 != 1.0:
            raise SMIRNOFFSpecError(
                "Current OFF toolkit is unable to handle scale15 values other than 1.0. "
                "Specified 1-5 scaling was {}".format(self.scale15)
            )
        return new_scale15

    @switch_width.converter
    def switch_width(self, attr, new_switch_width):
        # Bug fix: validate the *incoming* value and return it. The previous
        # version compared the already-stored self.switch_width and fell off
        # the end of the function, implicitly returning None and clobbering
        # the attribute value on every assignment.
        if new_switch_width != 0.0 * unit.angstrom:
            raise IncompatibleParameterError(
                "The current implementation of the Open Force Field Toolkit can not "
                "support an electrostatic switching width. Currently only `0.0 angstroms` "
                f"is supported (SMIRNOFF data specified {new_switch_width})"
            )
        return new_switch_width

    # Tolerance when comparing float attributes for handler compatibility.
    _SCALETOL = 1e-5

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        float_attrs_to_compare = ["scale12", "scale13", "scale14", "scale15"]
        string_attrs_to_compare = ["method"]
        unit_attrs_to_compare = ["cutoff", "switch_width"]
        self._check_attributes_are_equal(
            other_handler,
            identical_attrs=string_attrs_to_compare,
            tolerance_attrs=float_attrs_to_compare + unit_attrs_to_compare,
            tolerance=self._SCALETOL,
        )

    def assign_charge_from_molecules(self, molecule, charge_mols):
        """
        Given an input molecule, checks against a list of molecules for an isomorphic match. If found, assigns
        partial charges from the match to the input molecule.

        Parameters
        ----------
        molecule : an openff.toolkit.topology.FrozenMolecule
            The molecule to have partial charges assigned if a match is found.
        charge_mols : list of [openff.toolkit.topology.FrozenMolecule]
            A list of molecules with charges already assigned.

        Returns
        -------
        match_found : bool
            Whether a match was found. If True, the input molecule will have been modified in-place.
        """
        import simtk.unit

        # Check each charge_mol for whether it's isomorphic to the input molecule
        for charge_mol in charge_mols:
            isomorphic, topology_atom_map = Molecule.are_isomorphic(
                molecule,
                charge_mol,
                return_atom_map=True,
                aromatic_matching=True,
                formal_charge_matching=True,
                bond_order_matching=True,
                atom_stereochemistry_matching=True,
                bond_stereochemistry_matching=True,
            )
            # if they are isomorphic then use the mapping
            if isomorphic:
                # Take the first valid atom indexing map and set the partial charges.
                # Make a copy of the charge molecule's charges array (this way it's the right shape)
                temp_mol_charges = copy.deepcopy(
                    simtk.unit.Quantity(charge_mol.partial_charges)
                )
                for charge_idx, ref_idx in topology_atom_map.items():
                    temp_mol_charges[ref_idx] = charge_mol.partial_charges[charge_idx]
                molecule.partial_charges = temp_mol_charges
                return True
        # If no match was found, return False
        return False

    def create_force(self, system, topology, **kwargs):
        """Apply pre-supplied charges (``charge_from_molecules``) and configure
        the NonbondedForce's electrostatics method/cutoff."""
        from openff.toolkit.topology import TopologyAtom, TopologyVirtualSite

        force = super().create_force(system, topology, **kwargs)
        # See if each molecule should have charges assigned by the charge_from_molecules kwarg
        for ref_mol in topology.reference_molecules:
            # If charges were already assigned, skip this molecule
            if self.check_charges_assigned(ref_mol, topology):
                continue
            # First, check whether any of the reference molecules in the topology are in the charge_from_mol list
            charges_from_charge_mol = False
            if "charge_from_molecules" in kwargs:
                charges_from_charge_mol = self.assign_charge_from_molecules(
                    ref_mol, kwargs["charge_from_molecules"]
                )
            # If this reference molecule wasn't in the charge_from_molecules list, end this iteration
            if not (charges_from_charge_mol):
                continue
            # Otherwise, the molecule is in the charge_from_molecules list, and we should assign charges to all
            # instances of it in this topology.
            for topology_molecule in topology._reference_molecule_to_topology_molecules[
                ref_mol
            ]:
                for topology_particle in topology_molecule.particles:
                    if type(topology_particle) is TopologyAtom:
                        ref_mol_particle_index = (
                            topology_particle.atom.molecule_particle_index
                        )
                    elif type(topology_particle) is TopologyVirtualSite:
                        ref_mol_particle_index = (
                            topology_particle.virtual_site.molecule_particle_index
                        )
                    else:
                        raise ValueError(
                            f"Particles of type {type(topology_particle)} are not supported"
                        )
                    topology_particle_index = topology_particle.topology_particle_index
                    particle_charge = ref_mol._partial_charges[ref_mol_particle_index]
                    # Retrieve nonbonded parameters for reference atom (charge not set yet)
                    _, sigma, epsilon = force.getParticleParameters(
                        topology_particle_index
                    )
                    # Set the nonbonded force with the partial charge
                    force.setParticleParameters(
                        topology_particle_index, particle_charge, sigma, epsilon
                    )
            # Finally, mark that charges were assigned for this reference molecule
            self.mark_charges_assigned(ref_mol, topology)

        # Set the nonbonded method
        current_nb_method = force.getNonbondedMethod()
        # First, check whether the vdWHandler set the nonbonded method to LJPME, because that means
        # that electrostatics also has to be PME
        if (current_nb_method == openmm.NonbondedForce.LJPME) and (
            self.method != "PME"
        ):
            raise IncompatibleParameterError(
                "In current Open Force Field Toolkit implementation, if vdW "
                "treatment is set to LJPME, electrostatics must also be PME "
                "(electrostatics treatment currently set to {}".format(self.method)
            )

        # Then, set nonbonded methods based on method keyword
        if self.method == "PME":
            # Check whether the topology is nonperiodic, in which case we always switch to NoCutoff
            # (vdWHandler will have already set this to NoCutoff)
            # TODO: This is an assumption right now, and a bad one. See issue #219
            if topology.box_vectors is None:
                assert current_nb_method == openmm.NonbondedForce.NoCutoff
                force.setCutoffDistance(self.cutoff)
                # raise IncompatibleParameterError("Electrostatics handler received PME method keyword, but a nonperiodic"
                #                                  " topology. Use of PME electrostatics requires a periodic topology.")
            else:
                if current_nb_method == openmm.NonbondedForce.LJPME:
                    pass
                    # There's no need to check for matching cutoff/tolerance here since both are hard-coded defaults
                else:
                    force.setNonbondedMethod(openmm.NonbondedForce.PME)
                    force.setCutoffDistance(9.0 * unit.angstrom)
                    force.setEwaldErrorTolerance(1.0e-4)
        # If vdWHandler set the nonbonded method to NoCutoff, then we don't need to change anything
        elif self.method == "Coulomb":
            if topology.box_vectors is None:
                # (vdWHandler will have already set this to NoCutoff)
                assert current_nb_method == openmm.NonbondedForce.NoCutoff
            else:
                raise IncompatibleParameterError(
                    "Electrostatics method set to Coulomb, and topology is periodic. "
                    "In the future, this will lead to use of OpenMM's CutoffPeriodic "
                    "Nonbonded force method, however this is not supported in the "
                    "current Open Force Field Toolkit."
                )
        # If the vdWHandler set the nonbonded method to PME, then ensure that it has the same cutoff
        elif self.method == "reaction-field":
            if topology.box_vectors is None:
                raise SMIRNOFFSpecError(
                    "Electrostatics method reaction-field can only be applied to a periodic system."
                )
            else:
                raise SMIRNOFFSpecUnimplementedError(
                    "Electrostatics method reaction-field is supported in the SMIRNOFF specification "
                    "but not yet implemented in the OpenFF Toolkit."
                )

    def postprocess_system(self, system, topology, **kwargs):
        """Verify that every molecule received charges and that per-molecule
        partial charges sum to the formal charge (unless explicitly disabled).

        Raises
        ------
        UnassignedMoleculeChargeException
            If any reference molecule was never charged by any handler.
        NonintegralMoleculeChargeException
            If a molecule's partial charges don't sum to its formal charge and
            ``allow_nonintegral_charges`` was not passed as True.
        """
        # The force already exists at this point, so this just retrieves it.
        force = super().create_force(system, topology, **kwargs)

        # Check to ensure all molecules have had charges assigned
        uncharged_mols = []
        for ref_mol in topology.reference_molecules:
            if not self.check_charges_assigned(ref_mol, topology):
                uncharged_mols.append(ref_mol)
        if len(uncharged_mols) != 0:
            msg = "The following molecules did not have charges assigned by any ParameterHandler in the ForceField:\n"
            for ref_mol in uncharged_mols:
                msg += f"{ref_mol.to_smiles()}\n"
            raise UnassignedMoleculeChargeException(msg)

        # Unless check is disabled, ensure that the sum of partial charges on a molecule
        # add up to approximately its formal charge
        allow_nonintegral_charges = kwargs.get("allow_nonintegral_charges", False)
        for top_mol in topology.topology_molecules:
            # Skip check if user manually disables it.
            if allow_nonintegral_charges:
                continue
            formal_charge_sum = top_mol.reference_molecule.total_charge
            partial_charge_sum = 0.0 * unit.elementary_charge
            for top_particle in top_mol.particles:
                q, _, _ = force.getParticleParameters(
                    top_particle.topology_particle_index
                )
                partial_charge_sum += q
            if (
                abs(formal_charge_sum - partial_charge_sum)
                > 0.01 * unit.elementary_charge
            ):
                # Message fix: close the "(SMILES ...)" parenthesis.
                msg = (
                    f"Partial charge sum ({partial_charge_sum}) "
                    f"for molecule '{top_mol.reference_molecule.name}' (SMILES "
                    f"{top_mol.reference_molecule.to_smiles()}) does not equal formal charge sum "
                    f"({formal_charge_sum}). To override this error, provide the "
                    f"'allow_nonintegral_charges=True' keyword to ForceField.create_openmm_system"
                )
                raise NonintegralMoleculeChargeException(msg)
class LibraryChargeHandler(_NonbondedHandler):
    """Handle SMIRNOFF ``<LibraryCharges>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class LibraryChargeType(ParameterType):
        """A SMIRNOFF Library Charge type.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = None  # This disables the connectivity check when parsing LibraryChargeType objects
        _ELEMENT_NAME = "LibraryCharge"

        name = ParameterAttribute(default=None)
        # One charge per tagged atom in the SMIRKS pattern (charge1, charge2, ...)
        charge = IndexedParameterAttribute(unit=unit.elementary_charge)

        def __init__(self, **kwargs):
            super().__init__(**kwargs)
            # Validate that the number of charges matches the number of tagged
            # atoms in the SMIRKS pattern.
            unique_tags, connectivity = GLOBAL_TOOLKIT_REGISTRY.call(
                "get_tagged_smarts_connectivity", self.smirks
            )
            if len(self.charge) != len(unique_tags):
                raise SMIRNOFFSpecError(
                    f"LibraryCharge {self} was initialized with unequal number of "
                    f"tagged atoms and charges"
                )

    _TAGNAME = "LibraryCharges"  # SMIRNOFF tag name to process
    _INFOTYPE = LibraryChargeType  # info type to store
    _DEPENDENCIES = [vdWHandler, ElectrostaticsHandler]

    def find_matches(self, entity):
        """Find the elements of the topology/molecule matched by a parameter type.

        Parameters
        ----------
        entity : openff.toolkit.topology.Topology
            Topology to search.

        Returns
        ---------
        matches : ValenceDict[Tuple[int], ParameterHandler._Match]
            ``matches[particle_indices]`` is the ``ParameterType`` object
            matching the tuple of particle indices in ``entity``.
        """
        # TODO: Right now, this method is only ever called with an entity that is a Topology.
        #  Should we reduce its scope and have a check here to make sure entity is a Topology?
        return self._find_matches(entity, transformed_dict_cls=dict)

    def create_force(self, system, topology, **kwargs):
        """Assign library charges to every molecule that is *fully* covered by
        library charge parameters, then mark those molecules as charged."""
        force = super().create_force(system, topology, **kwargs)

        # Iterate over all defined library charge parameters, allowing later matches to override earlier ones.
        atom_matches = self.find_matches(topology)

        # Create a set of all the topology atom indices for which library charges can be applied
        assignable_atoms = set()
        atom_assignments = dict()
        # TODO: This assumes that later matches should always override earlier ones. This may require more
        #       thought, since matches can be partially overlapping
        for topology_indices, library_charge in atom_matches.items():
            for charge_idx, top_idx in enumerate(topology_indices):
                # Overlap is allowed; the later match simply overwrites the entry.
                if top_idx in assignable_atoms:
                    logger.debug(
                        f"Multiple library charge assignments found for atom {top_idx}"
                    )
                assignable_atoms.add(top_idx)
                atom_assignments[top_idx] = library_charge.parameter_type.charge[
                    charge_idx
                ]
        # TODO: Should header include a residue separator delimiter? Maybe not, since it's not clear how having
        #       multiple LibraryChargeHandlers could return a single set of matches while respecting different
        #       separators.

        # Keep track of the reference molecules that this successfully assigns charges to, so we can
        # mark them and subsequent charge generation handlers won't override the values
        ref_mols_assigned = set()

        # Check to see whether the set contains any complete molecules, and remove the matches if not.
        for top_mol in topology.topology_molecules:
            # If charges were already assigned (e.g. by charge_from_molecules), skip this molecule
            if self.check_charges_assigned(top_mol.reference_molecule, topology):
                continue

            # Ensure all of the atoms in this mol are covered, otherwise skip it
            top_particle_idxs = [atom.topology_particle_index for atom in top_mol.atoms]
            if (
                len(set(top_particle_idxs).intersection(assignable_atoms))
                != top_mol.n_atoms
            ):
                logger.debug(
                    "Entire molecule is not covered. Skipping library charge assignment."
                )
                continue

            # If we pass both tests above, go ahead and assign charges
            # TODO: We could probably save a little time by looking up this TopologyMolecule's _reference molecule_
            #       and assigning charges to all other instances of it in this topology
            for top_particle_idx in top_particle_idxs:
                # Preserve the sigma/epsilon already set by the vdW handler
                _, sigma, epsilon = force.getParticleParameters(top_particle_idx)
                force.setParticleParameters(
                    top_particle_idx, atom_assignments[top_particle_idx], sigma, epsilon
                )

            ref_mols_assigned.add(top_mol.reference_molecule)

        # Finally, mark that charges were assigned for this reference molecule
        for assigned_mol in ref_mols_assigned:
            self.mark_charges_assigned(assigned_mol, topology)
class ToolkitAM1BCCHandler(_NonbondedHandler):
"""Handle SMIRNOFF ``<ToolkitAM1BCC>`` tags
.. warning :: This API is experimental and subject to change.
"""
_TAGNAME = "ToolkitAM1BCC" # SMIRNOFF tag name to process
_DEPENDENCIES = [vdWHandler, ElectrostaticsHandler, LibraryChargeHandler]
_KWARGS = ["toolkit_registry"] # Kwargs to catch when create_force is called
    def check_handler_compatibility(
        self, other_handler, assume_missing_is_default=True
    ):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        # ToolkitAM1BCC has no tunable physics attributes, so any two instances
        # are trivially compatible; nothing to check.
        pass
    def create_force(self, system, topology, **kwargs):
        """Compute AM1-BCC partial charges for each not-yet-charged reference
        molecule and write them onto the system's NonbondedForce.

        Molecules already marked as charged by an earlier handler are skipped.
        Charge-generation failures are downgraded to warnings and the molecule
        is left uncharged (a later postprocess step is expected to flag it).
        """
        import warnings

        from openff.toolkit.topology import TopologyAtom, TopologyVirtualSite
        from openff.toolkit.utils.toolkits import GLOBAL_TOOLKIT_REGISTRY

        force = super().create_force(system, topology, **kwargs)
        for ref_mol in topology.reference_molecules:
            # If charges were already assigned, skip this molecule
            if self.check_charges_assigned(ref_mol, topology):
                continue

            # If the molecule wasn't already assigned charge values, calculate them here
            toolkit_registry = kwargs.get("toolkit_registry", GLOBAL_TOOLKIT_REGISTRY)
            try:
                # We don't need to generate conformers here, since that will be done by default in
                # compute_partial_charges with am1bcc if the use_conformers kwarg isn't defined
                ref_mol.assign_partial_charges(
                    partial_charge_method="am1bcc", toolkit_registry=toolkit_registry
                )
            except Exception as e:
                # Best-effort: leave the molecule uncharged and continue.
                warnings.warn(str(e), Warning)
                continue

            # Assign charges to relevant atoms
            for topology_molecule in topology._reference_molecule_to_topology_molecules[
                ref_mol
            ]:
                # NOTE(review): this iterates .atoms (not .particles), so the
                # TopologyVirtualSite branch below appears unreachable -- confirm.
                for topology_particle in topology_molecule.atoms:
                    if type(topology_particle) is TopologyAtom:
                        ref_mol_particle_index = (
                            topology_particle.atom.molecule_particle_index
                        )
                    elif type(topology_particle) is TopologyVirtualSite:
                        ref_mol_particle_index = (
                            topology_particle.virtual_site.molecule_particle_index
                        )
                    else:
                        raise ValueError(
                            f"Particles of type {type(topology_particle)} are not supported"
                        )
                    topology_particle_index = topology_particle.topology_particle_index
                    particle_charge = ref_mol._partial_charges[ref_mol_particle_index]
                    # Retrieve nonbonded parameters for reference atom (charge not set yet)
                    _, sigma, epsilon = force.getParticleParameters(
                        topology_particle_index
                    )
                    # Set the nonbonded force with the partial charge
                    force.setParticleParameters(
                        topology_particle_index, particle_charge, sigma, epsilon
                    )
            # Finally, mark that charges were assigned for this reference molecule
            self.mark_charges_assigned(ref_mol, topology)
# TODO: Move chargeModel and library residue charges to SMIRNOFF spec
def postprocess_system(self, system, topology, **kwargs):
bond_matches = self.find_matches(topology)
# Apply bond charge increments to all appropriate force groups
# QUESTION: Should we instead apply this to the Topology in a preprocessing step, prior to spreading out charge onto virtual sites?
for force in system.getForces():
if force.__class__.__name__ in [
"NonbondedForce"
]: # TODO: We need to apply this to all Force types that involve charges, such as (Custom)GBSA forces and CustomNonbondedForce
for (atoms, bond_match) in bond_matches.items():
# Get corresponding particle indices in Topology
bond = bond_match.parameter_type
particle_indices = tuple([atom.particle_index for atom in atoms])
# Retrieve parameters
[charge0, sigma0, epsilon0] = force.getParticleParameters(
particle_indices[0]
)
[charge1, sigma1, epsilon1] = force.getParticleParameters(
particle_indices[1]
)
# Apply bond charge increment
charge0 -= bond.increment
charge1 += bond.increment
# Update charges
force.setParticleParameters(
particle_indices[0], charge0, sigma0, epsilon0
)
force.setParticleParameters(
particle_indices[1], charge1, sigma1, epsilon1
)
# TODO: Calculate exceptions
class ChargeIncrementModelHandler(_NonbondedHandler):
    """Handle SMIRNOFF ``<ChargeIncrementModel>`` tags

    Assigns base partial charges with a configurable partial charge method,
    then modifies them with SMARTS-matched charge increments.

    .. warning :: This API is experimental and subject to change.
    """

    class ChargeIncrementType(ParameterType):
        """A SMIRNOFF bond charge correction type.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = None  # This disables the connectivity check when parsing LibraryChargeType objects
        _ELEMENT_NAME = "ChargeIncrement"

        charge_increment = IndexedParameterAttribute(unit=unit.elementary_charge)

        def __init__(self, **kwargs):
            """Initialize, validating that the number of charge increments is
            either equal to, or one less than, the number of tagged atoms in
            the SMIRKS pattern (the final increment may be inferred to keep
            the overall increment net-neutral; see ``create_force``).
            """
            super().__init__(**kwargs)
            unique_tags, connectivity = GLOBAL_TOOLKIT_REGISTRY.call(
                "get_tagged_smarts_connectivity", self.smirks
            )
            n_tags = len(unique_tags)
            n_increments = len(self.charge_increment)
            diff = n_tags - n_increments
            if diff < 0 or diff > 1:
                # TODO: Consider dealing with diff > 2 by smearing charges across
                # all un-specified increments
                raise SMIRNOFFSpecError(
                    f"ChargeIncrement {self} was initialized with an invalid combination "
                    f"of tagged atoms and charge increments"
                )

    _TAGNAME = "ChargeIncrementModel"  # SMIRNOFF tag name to process
    _INFOTYPE = ChargeIncrementType  # info type to store
    # Handlers that must be processed before this one in the charge hierarchy
    _DEPENDENCIES = [
        vdWHandler,
        ElectrostaticsHandler,
        LibraryChargeHandler,
        ToolkitAM1BCCHandler,
    ]
    _MAX_SUPPORTED_SECTION_VERSION = 0.4

    number_of_conformers = ParameterAttribute(default=1, converter=int)
    partial_charge_method = ParameterAttribute(default="AM1-Mulliken", converter=str)

    def check_handler_compatibility(
        self, other_handler, assume_missing_is_default=True
    ):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        int_attrs_to_compare = ["number_of_conformers"]
        string_attrs_to_compare = ["partial_charge_method"]
        self._check_attributes_are_equal(
            other_handler,
            identical_attrs=string_attrs_to_compare + int_attrs_to_compare,
        )

    def find_matches(self, entity):
        """Find the elements of the topology/molecule matched by a parameter type.

        Parameters
        ----------
        entity : openff.toolkit.topology.Topology
            Topology to search.

        Returns
        ---------
        matches : ValenceDict[Tuple[int], ParameterHandler._Match]
            ``matches[particle_indices]`` is the ``ParameterType`` object
            matching the tuple of particle indices in ``entity``.
        """
        # Using SortedDict here leads to the desired deduplication behavior, BUT it mangles the order
        # of the atom indices in the keys. Thankfully, the Match objects that are values in `matches` contain
        # `match.environment_match.topology_atom_indices`, which has the tuple in the correct order
        matches = self._find_matches(entity, transformed_dict_cls=SortedDict)
        return matches

    def create_force(self, system, topology, **kwargs):
        """Assign partial charges (base charges plus SMARTS-matched charge
        increments) to the system's NonbondedForce.

        Molecules whose base charge assignment fails are skipped with a
        warning, leaving them for another charge handler.
        """
        import warnings

        from openff.toolkit.topology import TopologyAtom, TopologyVirtualSite

        # We only want one instance of this force type
        existing = [system.getForce(i) for i in range(system.getNumForces())]
        existing = [f for f in existing if type(f) == self._OPENMMTYPE]
        if len(existing) == 0:
            force = self._OPENMMTYPE()
            system.addForce(force)
        else:
            force = existing[0]
        for ref_mol in topology.reference_molecules:
            # If charges were already assigned, skip this molecule
            if self.check_charges_assigned(ref_mol, topology):
                continue
            toolkit_registry = kwargs.get("toolkit_registry", GLOBAL_TOOLKIT_REGISTRY)
            try:
                # If the molecule wasn't assigned parameters from a manually-input charge_mol, calculate them here
                ref_mol.generate_conformers(n_conformers=self.number_of_conformers)
                ref_mol.assign_partial_charges(
                    partial_charge_method=self.partial_charge_method,
                    toolkit_registry=toolkit_registry,
                )
            except Exception as e:
                # Best-effort: surface the failure but do not abort system creation
                warnings.warn(str(e), Warning)
                continue
            charges_to_assign = {}
            # Assign initial, un-incremented charges to relevant atoms
            for topology_molecule in topology._reference_molecule_to_topology_molecules[
                ref_mol
            ]:
                for topology_particle in topology_molecule.particles:
                    topology_particle_index = topology_particle.topology_particle_index
                    # Fixed: this dispatch previously used two independent `if`
                    # statements with no fallback, so an unsupported particle type
                    # would raise NameError on ref_mol_particle_index. Now an
                    # elif/else chain, consistent with ToolkitAM1BCCHandler.
                    if type(topology_particle) is TopologyAtom:
                        ref_mol_particle_index = (
                            topology_particle.atom.molecule_particle_index
                        )
                    elif type(topology_particle) is TopologyVirtualSite:
                        ref_mol_particle_index = (
                            topology_particle.virtual_site.molecule_particle_index
                        )
                    else:
                        raise ValueError(
                            f"Particles of type {type(topology_particle)} are not supported"
                        )
                    particle_charge = ref_mol._partial_charges[ref_mol_particle_index]
                    charges_to_assign[topology_particle_index] = particle_charge
            # Find SMARTS-based matches for charge increments
            charge_increment_matches = self.find_matches(topology)
            # We ignore the atom index order in the keys here, since they have been
            # sorted in order to deduplicate matches and let us identify when one parameter overwrites another
            # in the SMIRNOFF parameter hierarchy. Since they are sorted, the position of the atom index
            # in the key tuple DOES NOT correspond to the appropriate charge_incrementX value.
            # Instead, the correct ordering of the match indices is found in
            # charge_increment_match.environment_match.topology_atom_indices
            for (_, charge_increment_match) in charge_increment_matches.items():
                # Adjust the values in the charges_to_assign dict by adding any
                # charge increments onto the existing values
                atom_indices = (
                    charge_increment_match.environment_match.topology_atom_indices
                )
                charge_increments = copy.deepcopy(
                    charge_increment_match.parameter_type.charge_increment
                )
                # If we've been provided with one less charge increment value than tagged atoms, assume the last
                # tagged atom offsets the charge of the others to make the chargeincrement net-neutral
                if len(atom_indices) - len(charge_increments) == 1:
                    charge_increment_sum = 0.0 * unit.elementary_charge
                    for ci in charge_increments:
                        charge_increment_sum += ci
                    charge_increments.append(-charge_increment_sum)
                elif len(atom_indices) - len(charge_increments) == 0:
                    pass
                else:
                    raise SMIRNOFFSpecError(
                        f"Trying to apply chargeincrements {charge_increment_match.parameter_type} "
                        f"to tagged atoms {atom_indices}, but the number of chargeincrements "
                        f"must be either the same as- or one less than the number of tagged atoms."
                    )
                for top_particle_idx, charge_increment in zip(
                    atom_indices, charge_increments
                ):
                    # Only adjust particles whose base charge this handler set
                    if top_particle_idx in charges_to_assign:
                        charges_to_assign[top_particle_idx] += charge_increment
            # Set the incremented charges on the System particles
            for topology_particle_index, charge_to_assign in charges_to_assign.items():
                _, sigma, epsilon = force.getParticleParameters(topology_particle_index)
                force.setParticleParameters(
                    topology_particle_index, charge_to_assign, sigma, epsilon
                )
            # Finally, mark that charges were assigned for this reference molecule
            self.mark_charges_assigned(ref_mol, topology)
class GBSAHandler(ParameterHandler):
    """Handle SMIRNOFF ``<GBSA>`` tags

    .. warning :: This API is experimental and subject to change.
    """

    class GBSAType(ParameterType):
        """A SMIRNOFF GBSA type.

        .. warning :: This API is experimental and subject to change.
        """

        _VALENCE_TYPE = "Atom"
        _ELEMENT_NAME = "Atom"

        radius = ParameterAttribute(unit=unit.angstrom)
        scale = ParameterAttribute(converter=float)

    _TAGNAME = "GBSA"
    _INFOTYPE = GBSAType
    _OPENMMTYPE = openmm.GBSAOBCForce
    # It's important that this runs AFTER partial charges are assigned to all particles, since this will need to
    # collect and assign them to the GBSA particles
    _DEPENDENCIES = [
        vdWHandler,
        ElectrostaticsHandler,
        ToolkitAM1BCCHandler,
        ChargeIncrementModelHandler,
        LibraryChargeHandler,
    ]

    gb_model = ParameterAttribute(
        default="OBC1", converter=_allow_only(["HCT", "OBC1", "OBC2"])
    )
    solvent_dielectric = ParameterAttribute(default=78.5, converter=float)
    solute_dielectric = ParameterAttribute(default=1, converter=float)
    sa_model = ParameterAttribute(default="ACE", converter=_allow_only(["ACE", None]))
    surface_area_penalty = ParameterAttribute(
        default=5.4 * unit.calorie / unit.mole / unit.angstrom ** 2,
        unit=unit.calorie / unit.mole / unit.angstrom ** 2,
    )
    solvent_radius = ParameterAttribute(default=1.4 * unit.angstrom, unit=unit.angstrom)

    def _validate_parameters(self):
        """
        Checks internal attributes, raising an exception if they are configured in an invalid way.

        Raises
        ------
        IncompatibleParameterError
            If ``surface_area_penalty`` or ``solvent_radius`` deviate from the
            values hard-coded in the OpenMM implementation of the selected
            GB model (only enforced when a surface area model is in use).

        Notes
        -----
        Previously this method contained three near-identical copy-pasted
        branches, and the OBC2 branch's error message incorrectly named
        "OBC1"; the model name is now interpolated, fixing that message.
        """
        # Justification for the hard-coded constraints:
        # https://github.com/openforcefield/openff-toolkit/pull/363
        if self.sa_model is None:
            # All constraints below only apply when a surface area model is used.
            return
        # HCT and OBC1 go through GBSAHCTForce / GBSAOBC1Force
        # (CustomAmberGBForceBase subclasses), which hard-code the surface
        # area penalty at 5.4 cal/mol/A^2.
        if self.gb_model in ("HCT", "OBC1"):
            if (
                self.surface_area_penalty
                != 5.4 * unit.calorie / unit.mole / unit.angstrom ** 2
            ):
                raise IncompatibleParameterError(
                    f"The current implementation of {self.gb_model} GBSA does not "
                    f"support surface_area_penalty values other than 5.4 "
                    f"cal/mol A^2 (data source specified value of "
                    f"{self.surface_area_penalty})"
                )
        # All three models (HCT, OBC1, OBC2) hard-code the solvent radius at 1.4 A.
        if self.solvent_radius != 1.4 * unit.angstrom:
            raise IncompatibleParameterError(
                f"The current implementation of {self.gb_model} GBSA does not "
                f"support solvent_radius values other than 1.4 "
                f"A (data source specified value of "
                f"{self.solvent_radius})"
            )

    # Tolerance when comparing float attributes for handler compatibility.
    _SCALETOL = 1e-5

    def check_handler_compatibility(self, other_handler):
        """
        Checks whether this ParameterHandler encodes compatible physics as another ParameterHandler. This is
        called if a second handler is attempted to be initialized for the same tag.

        Parameters
        ----------
        other_handler : a ParameterHandler object
            The handler to compare to.

        Raises
        ------
        IncompatibleParameterError if handler_kwargs are incompatible with existing parameters.
        """
        float_attrs_to_compare = ["solvent_dielectric", "solute_dielectric"]
        string_attrs_to_compare = ["gb_model", "sa_model"]
        unit_attrs_to_compare = ["surface_area_penalty", "solvent_radius"]
        self._check_attributes_are_equal(
            other_handler,
            identical_attrs=string_attrs_to_compare,
            tolerance_attrs=float_attrs_to_compare + unit_attrs_to_compare,
            tolerance=self._SCALETOL,
        )

    def create_force(self, system, topology, **kwargs):
        """Create and add a GBSA force to the system.

        Pulls per-particle charges from the system's existing NonbondedForce
        and radii/scales from matched GBSAType parameters.
        """
        import simtk

        self._validate_parameters()
        # Grab the existing nonbonded force (which will have particle charges)
        existing = [system.getForce(i) for i in range(system.getNumForces())]
        existing = [f for f in existing if type(f) == openmm.NonbondedForce]
        assert len(existing) == 1
        nonbonded_force = existing[0]
        # No previous GBSAForce should exist, so we're safe just making one here.
        force_map = {
            "HCT": simtk.openmm.app.internal.customgbforces.GBSAHCTForce,
            "OBC1": simtk.openmm.app.internal.customgbforces.GBSAOBC1Force,
            "OBC2": simtk.openmm.GBSAOBCForce,
            # It's tempting to do use the class below, but the customgbforce
            # version of OBC2 doesn't provide setSolventRadius()
            #'OBC2': simtk.openmm.app.internal.customgbforces.GBSAOBC2Force,
        }
        openmm_force_type = force_map[self.gb_model]
        if nonbonded_force.getNonbondedMethod() == openmm.NonbondedForce.NoCutoff:
            amber_cutoff = None
        else:
            amber_cutoff = nonbonded_force.getCutoffDistance().value_in_unit(
                unit.nanometer
            )
        if self.gb_model == "OBC2":
            gbsa_force = openmm_force_type()
        else:
            # We set these values in the constructor if we use the internal AMBER GBSA type wrapper
            gbsa_force = openmm_force_type(
                solventDielectric=self.solvent_dielectric,
                soluteDielectric=self.solute_dielectric,
                SA=self.sa_model,
                cutoff=amber_cutoff,
                kappa=0,
            )
        # WARNING: If using a CustomAmberGBForce, the functional form is affected by whether
        # the cutoff kwarg is None *during initialization*. So, if you initialize it with a
        # non-None value, and then try to change it to None, you're likely to get unphysical results.
        # Set the GBSAForce to have the same cutoff as NonbondedForce
        # gbsa_force.setCutoffDistance(nonbonded_force.getCutoffDistance())
        if amber_cutoff is not None:
            gbsa_force.setCutoffDistance(amber_cutoff)
        if nonbonded_force.usesPeriodicBoundaryConditions():
            # WARNING: The lines below aren't equivalent. The NonbondedForce and
            # CustomGBForce NonbondedMethod enums have different meanings.
            # More details:
            # http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.NonbondedForce.html
            # http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.GBSAOBCForce.html
            # http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomGBForce.html
            # gbsa_force.setNonbondedMethod(simtk.openmm.NonbondedForce.CutoffPeriodic)
            gbsa_force.setNonbondedMethod(simtk.openmm.CustomGBForce.CutoffPeriodic)
        else:
            # gbsa_force.setNonbondedMethod(simtk.openmm.NonbondedForce.NoCutoff)
            gbsa_force.setNonbondedMethod(simtk.openmm.CustomGBForce.NoCutoff)
        # Add all GBSA terms to the system. Note that this will have been done above
        if self.gb_model == "OBC2":
            # The bare GBSAOBCForce doesn't take these in its constructor
            gbsa_force.setSolventDielectric(self.solvent_dielectric)
            gbsa_force.setSoluteDielectric(self.solute_dielectric)
            if self.sa_model is None:
                gbsa_force.setSurfaceAreaEnergy(0)
            else:
                gbsa_force.setSurfaceAreaEnergy(self.surface_area_penalty)
        # Iterate over all defined GBSA types, allowing later matches to override earlier ones.
        atom_matches = self.find_matches(topology)
        # Create all particles.
        # !!! WARNING: CustomAmberGBForceBase expects different per-particle parameters
        # depending on whether you use addParticle or setParticleParameters. In
        # setParticleParameters, we have to apply the offset and scale BEFORE setting
        # parameters, whereas in addParticle, the offset is applied automatically, and the particle
        # parameters are not set until an auxillary finalize() method is called. !!!
        # To keep it simple, we DO NOT pre-populate the particles in the GBSA force here.
        # We call addParticle further below instead.
        # These lines are commented out intentionally as an example of what NOT to do.
        # for topology_particle in topology.topology_particles:
        # gbsa_force.addParticle([0.0, 1.0, 0.0])
        params_to_add = [[] for _ in topology.topology_particles]
        for atom_key, atom_match in atom_matches.items():
            atom_idx = atom_key[0]
            gbsatype = atom_match.parameter_type
            charge, _, _2 = nonbonded_force.getParticleParameters(atom_idx)
            params_to_add[atom_idx] = [charge, gbsatype.radius, gbsatype.scale]
        # OBC2 (bare GBSAOBCForce) takes positional per-particle args; the
        # CustomAmberGBForceBase wrappers take a single parameter list.
        if self.gb_model == "OBC2":
            for particle_param in params_to_add:
                gbsa_force.addParticle(*particle_param)
        else:
            for particle_param in params_to_add:
                gbsa_force.addParticle(particle_param)
            # We have to call finalize() for models that inherit from CustomAmberGBForceBase,
            # otherwise the added particles aren't actually passed to the underlying CustomGBForce
            gbsa_force.finalize()
        # Check that no atoms (n.b. not particles) are missing force parameters.
        self._check_all_valence_terms_assigned(
            assigned_terms=atom_matches, valence_terms=list(topology.topology_atoms)
        )
        system.addForce(gbsa_force)
class VirtualSiteHandler(_NonbondedHandler):
"""Handle SMIRNOFF ``<VirtualSites>`` tags
TODO: Add example usage/documentation
.. warning :: This API is experimental and subject to change.
"""
# Virtual Site exclusions policies
############################################################################
# none: do not add any exclusions
# minimal: only add exclusions between vsite particles and their "single"
# parent atom. This is the atom that the vsite's origin is defined as
# parents: only add exclusions between vsite particles and all of the
# associated parent atoms
# local: add exclusions between vsites that share exactly the same atoms.
# neighbors: add exclusions between vsites and atoms that share the same
# "clique" of virtual sites. For example, if 1-2-3 and 3-4-5 each have a
# vsite, then the vsite on 1-2-3 will be excluded from atoms 4 and 5
# since they share atom 3.
# connected: add exclusions between the vsite and all atoms connected to
# the parents, e.g the entire molecule making it an interaction only
# between two nonbonded fragments.
# all: exclude all interactions, effectively turning vsites off.
# Note: TODO: only up to parents is implemented!
    class _ExclusionPolicy(Enum):
        """Enumerates the supported virtual-site exclusion policies; see the
        policy descriptions in the comment block above. Note that only up to
        PARENTS is implemented (per the TODO above)."""

        NONE = 1
        MINIMAL = 2
        PARENTS = 3
        LOCAL = 4
        NEIGHBORS = 5
        CONNECTED = 6
        ALL = 7

    # Maps the OFFXML "exclusion_policy" string to its enum member
    _parameter_to_policy = {
        "none": _ExclusionPolicy.NONE,
        "minimal": _ExclusionPolicy.MINIMAL,
        "parents": _ExclusionPolicy.PARENTS,
        "local": _ExclusionPolicy.LOCAL,
        "neighbors": _ExclusionPolicy.NEIGHBORS,
        "connected": _ExclusionPolicy.CONNECTED,
        "all": _ExclusionPolicy.ALL,
    }

    exclusion_policy = ParameterAttribute(default="parents")  # has custom converter
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._virtual_site_types = dict()
for vsite_cls in all_subclasses(self.__class__.VirtualSiteType):
# catch classes which are not actual implementations, which should return None
vtype = vsite_cls.vsite_type()
if vtype:
self.register_virtual_site_type(vtype, vsite_cls)
    def add_parameter(
        self, parameter_kwargs=None, parameter=None, after=None, before=None
    ):
        """Add a parameter to the force field, ensuring all parameters are valid.

        This method differs from other handlers in that it uses a plugin-style
        enable/disable type system

        Parameters
        ----------
        parameter_kwargs: dict, optional
            The kwargs to pass to the ParameterHandler.INFOTYPE (a ParameterType) constructor
        parameter: ParameterType, optional
            A ParameterType to add to the ParameterHandler
        after : str or int, optional
            The SMIRKS pattern (if str) or index (if int) of the parameter directly before where
            the new parameter will be added
        before : str, optional
            The SMIRKS pattern (if str) or index (if int) of the parameter directly after where
            the new parameter will be added

        Note that one of (parameter_kwargs, parameter) must be specified
        Note that when `before` and `after` are both None, the new parameter will be appended
        to the END of the parameter list.
        Note that when `before` and `after` are both specified, the new parameter
        will be added immediately after the parameter matching the `after` pattern or index.
        """
        # TODO: This function needs unit tests
        # Positional anchors must be SMIRKS strings or integer indices.
        for val in [before, after]:
            if val and not isinstance(val, (str, int)):
                raise TypeError
        # If a dict was passed, construct it; if a ParameterType was passed, do nothing
        if parameter_kwargs:
            vsite_type = parameter_kwargs["type"]
            # Only types that are registered AND enabled (non-None) may be built
            if (
                vsite_type in self._virtual_site_types
                and self._virtual_site_types[vsite_type] is not None
            ):
                new_parameter = self._virtual_site_types[vsite_type](**parameter_kwargs)
            else:
                raise ValueError(
                    f"Virtual site type {vsite_type} not enabled or implemented in this handler {self.__class__}"
                )
        elif parameter:
            new_parameter = parameter
            # As a convenience, if parameter type not present, register it
            if parameter.type not in self._virtual_site_types:
                self.register_virtual_site_type(parameter.type, type(parameter))
            # additionally, if the type was previously disabled (set to None),
            # reenable it with this new type
            elif self._virtual_site_types.get(parameter.type, None) is None:
                self.register_virtual_site_type(
                    parameter.type, type(parameter), replace=True
                )
        else:
            raise ValueError("One of (parameter, parameter_kwargs) must be specified")
        # Duplicates are keyed jointly on (smirks, type, name)
        if (
            (new_parameter.smirks in [p.smirks for p in self._parameters])
            and (new_parameter.type in [p.type for p in self._parameters])
            and (new_parameter.name in [p.name for p in self._parameters])
        ):
            msg = f"A parameter SMIRKS pattern {new_parameter.smirks} already exists for type {new_parameter.type} and name {new_parameter.name}."
            raise DuplicateParameterError(msg)
        # Resolve the requested insertion anchors into list indices
        if before is not None:
            if isinstance(before, str):
                before_index = self._parameters.index(before)
            elif isinstance(before, int):
                before_index = before
        if after is not None:
            if isinstance(after, str):
                after_index = self._parameters.index(after)
            elif isinstance(after, int):
                after_index = after
        if None not in (before, after):
            if after_index > before_index:
                raise ValueError("before arg must be before after arg")
        # `after` takes precedence when both anchors are given (see docstring)
        if after is not None:
            self._parameters.insert(after_index + 1, new_parameter)
        elif before is not None:
            self._parameters.insert(before_index, new_parameter)
        else:
            self._parameters.append(new_parameter)
    def _add_parameters(self, section_dict, allow_cosmetic_attributes=False):
        """
        Extend the ParameterList in this VirtualSiteHandler using a SMIRNOFF data source.

        Parameters
        ----------
        section_dict : dict
            The dict representation of a SMIRNOFF data source containing parameters to add to this VirtualSiteHandler
        allow_cosmetic_attributes : bool, optional. Default = False
            Whether to allow non-spec fields in section_dict. If True, non-spec kwargs will be stored as an
            attribute of the parameter. If False, non-spec kwargs will raise an exception.
        """
        # Most of this is exactly the same as the base _add_parameters. The only
        # difference is how INFOTYPE is implemented, see the comment below
        unitless_kwargs, attached_units = extract_serialized_units_from_dict(
            section_dict
        )
        smirnoff_data = attach_units(unitless_kwargs, attached_units)
        for key, val in smirnoff_data.items():
            if self._INFOTYPE is not None:
                element_name = self._INFOTYPE._ELEMENT_NAME
                # Skip sections that aren't the parameter list
                # NOTE(review): `break` (not `continue`) stops processing at the
                # FIRST non-matching key -- confirm the parameter element is
                # always the only (or first) key in this mapping.
                if key != element_name:
                    break
            # If there are multiple parameters, this will be a list. If there's just one, make it a list
            if not (isinstance(val, list)):
                val = [val]
            # If we're reading the parameter list, iterate through and attach units to
            # each parameter_dict, then use it to initialize a ParameterType
            for unitless_param_dict in val:
                param_dict = attach_units(unitless_param_dict, attached_units)
                # This differs from other handlers in that we use both a
                # dynamic version of INFOTYPE, and also allow a plugin-style
                # system where we allow visibility of virtual site types to
                # control which ones are allowed to be activated
                vsite_type = param_dict["type"]
                if (
                    vsite_type in self._virtual_site_types
                    and self._virtual_site_types[vsite_type] is not None
                ):
                    new_parameter = self._virtual_site_types[vsite_type](
                        **param_dict,
                        allow_cosmetic_attributes=allow_cosmetic_attributes,
                    )
                    self._parameters.append(new_parameter)
                else:
                    raise ValueError(
                        f"Virtual site type {vsite_type} not enabled or implemented in this handler {self.__class__}"
                    )
def register_virtual_site_type(self, vsite_name, vsite_cls, replace=False):
"""
Register an implemented virtual site type. Doing this must be done to
pass the validation and option checking. To disable a type, register the
name with None and replace=True
Parameters
----------
vsite_name : str
The name of the type. This name must be what is found in the "type"
attribute in the OFFXML format
vsite_cls : Any
The class to register the name with that implements the type.
Returns
-------
None
"""
if vsite_name in self._virtual_site_types and not replace:
raise DuplicateVirtualSiteTypeException(
"VirtualSite type {} already registered for handler {} and replace=False".format(
vsite_name, self.__class__
)
)
self._virtual_site_types[vsite_name] = vsite_cls
self._parameters = ParameterList(
[param for param in self._parameters if param.type != vsite_name]
)
    @property
    def virtual_site_types(self):
        """
        Return the dictionary of registered virtual site types

        Note that this returns the live internal mapping, not a copy.

        Returns
        -------
        virtual_site_types : dict
            A list of virtual site types already registered, paired with their
            class that implements them (``None`` marks a disabled type; see
            ``register_virtual_site_type``)
        """
        return self._virtual_site_types
@exclusion_policy.converter
def exclusion_policy(self, attr, policy):
"""
Convert and validate the exclusion policy specified in the VirtualSiteHandler
Parameters
----------
attr : openff.toolkit.typing.engines.smirnoff.parameters.ParameterAttribute
The underlying ParameterAttribute
policy : Any
The policy name to validate
Returns
-------
policy : str
The policy name if it is valid
Raises
------
SMIRNOFFSpecError if the value of policy did not match the SMIRNOFF Specification
ValueError if policy cannot be converted to a string
.. warning :: This API is experimental and subject to change.
"""
try:
policy = str(policy)
except ValueError:
raise
_exclusion_policies_implemented = ["none", "minimal", "parents"]
if policy in _exclusion_policies_implemented:
return policy
else:
raise SMIRNOFFSpecError(
'VirtualSiteHander exclusion policy not understood. Set "exclusion_policy" to one of {}'.format(
_exclusion_policies_implemented
)
)
    class _VirtualSiteTypeSelector:
        """A SMIRNOFF virtual site base selector

        This is a placeholder class that will dynamically choose the correct
        virtual site to create based on the type specified in attributes.
        Normally, the OFFXML element explicitly defines the type, but here it
        depends on the type attribute as well, which needs the introspection
        implemented here.
        """

        _VALENCE_TYPE = None
        # This is needed so VirtualSite elements are parsed correctly
        # using this generic selector as a type
        _ELEMENT_NAME = "VirtualSite"
        _enable_types = {}

        def __new__(cls, **attrs):
            """Dispatch construction to the concrete VirtualSiteType subclass
            named by ``attrs["type"]``; the returned object is an instance of
            that subclass, never of this selector."""
            VSH = VirtualSiteHandler
            vsite_type = attrs["type"]
            if vsite_type == "BondCharge":
                cls = VSH.VirtualSiteBondChargeType
            elif vsite_type == "MonovalentLonePair":
                cls = VSH.VirtualSiteMonovalentLonePairType
            elif vsite_type == "DivalentLonePair":
                cls = VSH.VirtualSiteDivalentLonePairType
            elif vsite_type == "TrivalentLonePair":
                cls = VSH.VirtualSiteTrivalentLonePairType
            else:
                raise SMIRNOFFSpecError(
                    'VirtualSite type not understood. Choose one of "BondCharge", "MonovalentLonePair", "DivalentLonePair", "TrivalentLonePair"'
                )
            return cls(**attrs)
class VirtualSiteType(vdWHandler.vdWType, abc.ABC):
"""A SMIRNOFF virtual site base type
.. warning :: This API is experimental and subject to change.
"""
        # The attributes that we expect in the OFFXML
        name = ParameterAttribute(default="EP", converter=str)
        distance = ParameterAttribute(unit=unit.angstrom)
        charge_increment = IndexedParameterAttribute(unit=unit.elementary_charge)
        # Type has a delayed converter/validator to support a plugin-style enable/disable system
        type = ParameterAttribute(converter=str)
        match = ParameterAttribute(default="all_permutations")  # has custom converter
        # vdW parameters default to zero (see also __init__, which fills these
        # in when neither sigma nor rmin_half is supplied)
        epsilon = ParameterAttribute(
            default=0.0 * unit.kilocalorie_per_mole, unit=unit.kilocalorie_per_mole
        )
        sigma = ParameterAttribute(default=0.0 * unit.angstrom, unit=unit.angstrom)
        rmin_half = ParameterAttribute(default=None, unit=unit.angstrom)
        # Here we define the default sorting behavior if we need to sort the
        # atom key into a canonical ordering
        transformed_dict_cls = ValenceDict
        # Value of None indicates "not a valid type" or "not an actual implemented type".
        # To enable/register a new virtual site type, make a subclass and set its
        # _vsite_type to what would need to be provided in the OFFXML "type" attr,
        # e.g. type="BondCharge" would mean _vsite_type="BondCharge"
        _vsite_type = None

        @classmethod
        def vsite_type(cls):
            """
            The type of this virtual site as represented in the SMIRNOFF specification

            Returns ``None`` for abstract/non-implementation subclasses.

            .. warning :: This API is experimental and subject to change.
            """
            return cls._vsite_type
        def __init__(self, **kwargs):
            """
            Create a virtual site parameter type
            """
            # Need to create default vdW parameters if not specified, since they are optional
            sigma = kwargs.get("sigma", None)
            rmin_half = kwargs.get("rmin_half", None)
            if (sigma is None) and (rmin_half is None):
                kwargs["sigma"] = 0.0 * unit.angstrom
            if kwargs.get("epsilon", None) is None:
                kwargs["epsilon"] = 0.0 * unit.kilocalorie_per_mole
            super().__init__(**kwargs)
            # Record which of the two (alternative) vdW size attributes was NOT
            # supplied by the user.
            # NOTE(review): if neither sigma nor rmin_half was supplied (or a
            # falsy value such as 0.0 was passed), _extra_nb_var is never set
            # here -- confirm that downstream code guards against that.
            if sigma:
                self._extra_nb_var = "rmin_half"
            if rmin_half:
                self._extra_nb_var = "sigma"
# @type.converter
# def type(self, attr, vsite_type):
# """
# Convert and validate the virtual site type specified in the VirtualSite element
# Parameters
# ----------
# attr : openff.toolkit.typing.engines.smirnoff.parameters.ParameterAttribute
# The underlying ParameterAttribute
# vsite_type : Any
# The virtual site type to validate
# Returns
# -------
# vsite_type : str
# The virtual site type if it is valid
# Raises
# ------
# SMIRNOFFSpecError if the value of policy did not match the SMIRNOFF Specification
# ValueError if policy cannot be converted to a string
# .. warning :: This API is experimental and subject to change.
# """
# try:
# vsite_type = str(vsite_type)
# except ValueError:
# raise
# if vsite_type in VirtualSiteHandler.virtual_site_types():
# return vsite_type
# else:
# valid_types = ", ".join(
# [str('"' + vtype + '"') for vtype in self._virtual_site_types]
# )
# raise SMIRNOFFSpecError(
# "VirtualSite not given a type. Set type to one of:\n" + valid_types
# )
@match.converter
def match(self, attr, match):
    """
    Validate the ``match`` attribute of a VirtualSite element.

    Parameters
    ----------
    attr : openff.toolkit.typing.engines.smirnoff.parameters.ParameterAttribute
        The underlying ParameterAttribute (unused here).
    match : Any
        The raw value from the OFFXML; coerced to ``str``.

    Returns
    -------
    match : str
        Either "once" or "all_permutations".

    Raises
    ------
    SMIRNOFFSpecError
        If the value is not one of the two allowed strings.

    .. warning :: This API is experimental and subject to change.
    """
    match = str(match)
    if match in ("once", "all_permutations"):
        return match
    raise SMIRNOFFSpecError(
        'VirtualSite type must specify "match" as either "once" or "all_permutations"'
    )
def __eq__(self, obj):
    """Parameters are equal when they are the same concrete type and share a name."""
    same_type = type(self) == type(obj)
    return same_type and self.name == obj.name
def _add_virtual_site(self, fn, atoms, orientations, *args, **kwargs):
    """
    Dispatch virtual-site creation to the underlying Molecule API.

    Parameters
    ----------
    fn : callable
        The underlying OpenFF function that should be called to create
        the virtual site in the toolkit. Currently, these are:
            * `Molecule._add_bond_charge_virtual_site`
            * `Molecule._add_monovalent_lone_pair_virtual_site`
            * `Molecule._add_divalent_lone_pair_virtual_site`
            * `Molecule._add_trivalent_lone_pair_virtual_site`
    atoms : list of openff.toolkit.topology.molecule.Atom
        The atoms defining the virtual site, in canonical order.
    orientations : list of int tuples
        The permutations corresponding to each virtual particle in the
        virtual site.
    *args, **kwargs
        Extra positional parameters (e.g. angles) and keyword options
        forwarded to ``fn``.

    Returns
    -------
    The index of the created virtual site
    """
    # Positional signature of every fn is (atoms, distance, *extras).
    args = [atoms, self.distance] + list(args)
    # This needs to be dealt with better
    # Since we cannot save state with this object during find_matches,
    # we have no idea here which permutations actually matched.
    # Since we are past the point where we can examine chemical
    # environment matches to determine the possible orientations, we
    # must default and take the explicit interpretation:
    # "all_permutations" will try to make a virtual particle for every
    # permutation, and "once" is the canonical sorting of the atom
    # indices.
    # This means that, using the "match" option in the spec, it is not
    # possible to choose specific permutations. For the current cases,
    # this should be fine and works well.
    # The API above takes all given orientations, but the OFFXML
    # has the match setting, which ultimately decides which orientations
    # to include.
    if self.match == "once":
        key = self.transformed_dict_cls.key_transform(orientations[0])
        orientations = [key]
    # else all matches wanted, so keep whatever was matched.
    base_args = {
        "name": self.name,
        "charge_increments": self.charge_increment,
        "epsilon": self.epsilon,
        "sigma": self.sigma,
        "rmin_half": self.rmin_half,
        "orientations": orientations,
        "replace": kwargs.pop("replace", False),
    }
    kwargs.update(base_args)
    # Drop whichever of sigma/rmin_half was not supplied in the OFFXML;
    # the Molecule API accepts only one of the pair.
    kwargs.pop(self._extra_nb_var)
    return fn(*args, **kwargs)
class VirtualSiteBondChargeType(VirtualSiteType):
    """A SMIRNOFF virtual site bond charge type

    .. warning :: This API is experimental and subject to change.
    """

    _vsite_type = "BondCharge"

    def add_virtual_site(self, molecule, orientations, replace=False):
        """
        Add a virtual site to the molecule

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule
            The molecule to add the virtual site to
        orientations : List[Tuple[int]]
            A list of orientation tuples which define the permutations used
            to construct the geometry of the virtual site particles
        replace : bool, default=False
            Replace this virtual site if it already exists in the molecule

        Returns
        -------
        off_idx : int
            The index of the first particle added due to this virtual site

        .. warning :: This API is experimental and subject to change.
        """
        # Canonical atom ordering defines the site's reference geometry.
        ref_key = self.transformed_dict_cls.key_transform(orientations[0])
        site_atoms = [molecule.atoms[i] for i in ref_key]
        return super()._add_virtual_site(
            molecule._add_bond_charge_virtual_site,
            site_atoms,
            orientations,
            replace=replace,
        )
class VirtualSiteMonovalentLonePairType(VirtualSiteType):
    """A SMIRNOFF monovalent lone pair virtual site type

    .. warning :: This API is experimental and subject to change.
    """

    outOfPlaneAngle = ParameterAttribute(unit=unit.degree)
    inPlaneAngle = ParameterAttribute(unit=unit.degree)

    _vsite_type = "MonovalentLonePair"

    def add_virtual_site(self, molecule, orientations, replace=False):
        """
        Add a virtual site to the molecule

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule
            The molecule to add the virtual site to
        orientations : List[Tuple[int]]
            A list of orientation tuples which define the permutations used
            to construct the geometry of the virtual site particles
        replace : bool, default=False
            Replace this virtual site if it already exists in the molecule

        Returns
        -------
        off_idx : int
            The index of the first particle added due to this virtual site

        .. warning :: This API is experimental and subject to change.
        """
        # Canonical atom ordering defines the site's reference geometry.
        ref_key = self.transformed_dict_cls.key_transform(orientations[0])
        site_atoms = [molecule.atoms[i] for i in ref_key]
        return super()._add_virtual_site(
            molecule._add_monovalent_lone_pair_virtual_site,
            site_atoms,
            orientations,
            self.outOfPlaneAngle,
            self.inPlaneAngle,
            replace=replace,
        )
class VirtualSiteDivalentLonePairType(VirtualSiteType):
    """A SMIRNOFF divalent lone pair virtual site type

    .. warning :: This API is experimental and subject to change.
    """

    outOfPlaneAngle = ParameterAttribute(unit=unit.degree)

    _vsite_type = "DivalentLonePair"

    def add_virtual_site(self, molecule, orientations, replace=False):
        """
        Add a virtual site to the molecule

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule
            The molecule to add the virtual site to
        orientations : List[Tuple[int]]
            A list of orientation tuples which define the permutations used
            to construct the geometry of the virtual site particles
        replace : bool, default=False
            Replace this virtual site if it already exists in the molecule

        Returns
        -------
        off_idx : int
            The index of the first particle added due to this virtual site

        .. warning :: This API is experimental and subject to change.
        """
        # Canonical atom ordering defines the site's reference geometry.
        ref_key = self.transformed_dict_cls.key_transform(orientations[0])
        site_atoms = [molecule.atoms[i] for i in ref_key]
        return super()._add_virtual_site(
            molecule._add_divalent_lone_pair_virtual_site,
            site_atoms,
            orientations,
            self.outOfPlaneAngle,
            replace=replace,
        )
class VirtualSiteTrivalentLonePairType(VirtualSiteType):
    """A SMIRNOFF trivalent lone pair virtual site type

    .. warning :: This API is experimental and subject to change.
    """

    # Trivalent sites key on improper-style (center-first) atom ordering.
    transformed_dict_cls = ImproperDict

    _vsite_type = "TrivalentLonePair"

    def __init__(self, **kwargs):
        """
        Special init method for TrivalentLonePair sites that ensures that
        match="once", the only value this type supports.
        """
        super().__init__(**kwargs)
        if self.match != "once":
            raise SMIRNOFFSpecError(
                f"TrivalentLonePair virtual site defined with match attribute set to {self.match}. "
                f"Only supported value is 'once'."
            )

    def add_virtual_site(self, molecule, orientations, replace=False):
        """
        Add a virtual site to the molecule

        Parameters
        ----------
        molecule : openff.toolkit.topology.molecule.Molecule
            The molecule to add the virtual site to
        orientations : List[Tuple[int]]
            A list of orientation tuples which define the permutations used
            to construct the geometry of the virtual site particles
        replace : bool, default=False
            Replace this virtual site if it already exists in the molecule

        Returns
        -------
        off_idx : int
            The index of the first particle added due to this virtual site

        .. warning :: This API is experimental and subject to change.
        """
        fn = molecule._add_trivalent_lone_pair_virtual_site
        ref_key = self.transformed_dict_cls.key_transform(orientations[0])
        atoms = list([molecule.atoms[i] for i in ref_key])
        # Trivalents should never need multiple orientations as long
        # as there are no angle parameters
        args = (atoms, orientations)
        off_idx = super()._add_virtual_site(fn, *args, replace=replace)
        return off_idx
# Handler-level registration metadata (read by the ParameterHandler machinery).
# Handlers that must be processed before this one.
_DEPENDENCIES = [
    ElectrostaticsHandler,
    LibraryChargeHandler,
    ChargeIncrementModelHandler,
    ToolkitAM1BCCHandler,
]

_TAGNAME = "VirtualSites"  # SMIRNOFF tag name to process

# Trying to create an instance of this selector will cause
# some introspection to be done on the type attr passed in, and
# will dispatch the appropriate virtual site type.
_INFOTYPE = _VirtualSiteTypeSelector  # class to hold force type info
def _find_matches(
    self,
    entity,
    transformed_dict_cls=ValenceDict,
    use_named_slots=False,
    expand_permutations=False,
):
    """Implement find_matches() and allow using a different valence dictionary.

    Parameters
    ----------
    entity : openff.toolkit.topology.Topology
        Topology to search.
    transformed_dict_cls : Union[Dict, ValenceDict, ImproperDict]
        The type of dictionary to store the matches in. This
        will determine how groups of atom indices are stored
        and accessed (e.g for angles indices should be 0-1-2
        and not 2-1-0).
    use_named_slots : bool
        If True, multiple differently-named parameters may stack on the
        same atom key; same-named parameters override each other
        (last one wins).
    expand_permutations : bool
        If True, keys are kept as matched (one entry per permutation)
        instead of being canonicalized.

    Returns
    -------
    matches : `transformed_dict_cls` of ParameterHandlerMatch
        ``matches[particle_indices]`` is the ``ParameterType`` object
        matching the tuple of particle indices in ``entity``.
    """
    from collections import defaultdict

    logger.debug("Finding matches for {}".format(self.__class__.__name__))
    matches = transformed_dict_cls()
    for parameter_type in self._parameters:
        matches_for_this_type = defaultdict(list)
        ce_matches = entity.chemical_environment_matches(parameter_type.smirks)
        # Split the groups into unique sets i.e. 13,14 and 13,15
        # Needed for vsites, where a vsite could match C-H with for a CH2 group
        # Since these are distinct vsite definitions, we need to split them
        # up into separate groups (match_groups)
        match_groups_set = [m.topology_atom_indices for m in ce_matches]
        match_groups = []
        for key in set(match_groups_set):
            distinct_atom_pairs = [
                x
                for x in ce_matches
                if sorted(x.topology_atom_indices) == sorted(key)
            ]
            match_groups.append(distinct_atom_pairs)
        # NOTE: loop variable renamed from the original's rebinding of
        # `ce_matches`, which shadowed the full match list above.
        for match_group in match_groups:
            for environment_match in match_group:
                # Update the matches for this parameter type.
                handler_match = self._Match(parameter_type, environment_match)
                key = environment_match.topology_atom_indices
                # only a match if orientation matches
                if not hasattr(handler_match._parameter_type, "match"):
                    # Probably should never get here
                    raise SMIRNOFFSpecError(
                        "The match keyword not found in this parameter?!"
                    )
                else:
                    # The possible orders of this match
                    # We must check that the tuple of atoms are the same
                    # as they can be different in e.g. formaldehyde
                    orders = [m.topology_atom_indices for m in match_group]
                    orientation_flag = handler_match._parameter_type.match
                    tdc = handler_match._parameter_type.transformed_dict_cls
                    index_of_key = tdc.index_of(key, possible=orders)
                    if orientation_flag == "once":
                        orientation = [0]
                    elif orientation_flag == "all_permutations":
                        orientation = [
                            tdc.index_of(k, possible=orders) for k in orders
                        ]
                    else:
                        # Probably will never reach here since validation
                        # happens elsewhere
                        raise Exception(
                            "VirtualSite match keyword not understood. Choose from 'once' or 'all_permutations'. This error should be impossible to reach; please submit an issue at https://github.com/openforcefield/openff-toolkit"
                        )
                    orders = [
                        order for order in orders if sorted(key) == sorted(order)
                    ]
                    # Find these matches is from the older implementation that allows
                    # specifying specific orientations. Leaving in for now..
                    if len(orientation) > len(orders):
                        error_msg = (
                            "For parameter of type\n{:s}\norientations {} "
                            + "exceeds length of possible orders "
                            + "({:d}):\n{:s}"
                        ).format(
                            str(parameter_type),
                            orientation,
                            len(orders),
                            str(orders),
                        )
                        raise IndexError(error_msg)
                    if not expand_permutations:
                        key = tdc.key_transform(key)
                    hit = sum([index_of_key == ornt for ornt in orientation])
                    # BUGFIX: the message previously formatted
                    # `parameter_type.__repr__` (the bound method itself,
                    # which additionally raises TypeError under "{:s}");
                    # use the actual repr string instead.
                    assert (
                        hit < 2
                    ), "VirtualSite orientation for {:s} indices invalid: Has duplicates".format(
                        repr(parameter_type)
                    )
                    if hit == 1:
                        matches_for_this_type[key].append(handler_match)
        # Resolve match overriding by the use of the name attribute
        # If two parameters match but have the same name, use most recent,
        # but if the names are different, keep and apply both parameters
        if use_named_slots:
            for k in matches_for_this_type:
                if k not in matches:
                    matches[k] = {}
            for k, v in matches_for_this_type.items():
                marginal_matches = []
                for new_match in v:
                    unique = True
                    new_item = new_match._parameter_type
                    for idx, (name, existing_match) in enumerate(
                        matches[k].items()
                    ):
                        existing_item = existing_match._parameter_type
                        same_parameter = False
                        same_type = type(existing_item) == type(new_item)
                        if same_type:
                            same_parameter = existing_item == new_item
                        # same, so replace it to have a FIFO priority
                        # and the last parameter matching wins
                        if same_parameter:
                            matches[k][new_item.name] = new_match
                            unique = False
                    if unique:
                        marginal_matches.append(new_match)
                matches[k].update(
                    {p._parameter_type.name: p for p in marginal_matches}
                )
        else:
            matches.update(matches_for_this_type)
        logger.debug(
            "{:64} : {:8} matches".format(
                parameter_type.smirks, len(matches_for_this_type)
            )
        )
    logger.debug("{} matches identified".format(len(matches)))
    if use_named_slots:
        # Flatten the name->match dicts into plain lists for callers.
        for k, v in matches.items():
            matches[k] = list(v.values())
    return matches
def create_force(self, system, topology, **kwargs):
    """
    Create the OpenMM virtual-site particles and nonbonded parameters.

    Parameters
    ----------
    system : openmm.System
        The system being built; virtual-site particles are added to it.
    topology : openff.toolkit.topology.Topology
        The topology to search for virtual-site matches.

    Returns
    -------
    None
        NOTE(review): the force obtained from super() is populated but not
        returned — presumably intentional since callers use the system.
    """
    force = super().create_force(system, topology, **kwargs)
    # Separate the logic of adding vsites in the oFF state and the OpenMM
    # system. Operating on the topology is not ideal (a hack), so hopefully
    # this loop, which adds the oFF vsites to the topology, will live
    # somewhere else
    logger.debug("Creating OpenFF virtual site representations...")
    topology = self._create_openff_virtual_sites(topology)
    # The toolkit now has a representation of the vsites in the topology,
    # and here we create the OpenMM parameters/objects/exclusions
    logger.debug("Creating OpenMM VSite particles...")
    for ref_mol in topology.reference_molecules:
        logger.debug("Adding vsites for reference mol: {}".format(str(ref_mol)))
        self._create_openmm_virtual_sites(system, force, topology, ref_mol)
def check_handler_compatibility(self, other_handler):
    """
    Checks whether this ParameterHandler encodes compatible physics as
    another ParameterHandler. This is called if a second handler is
    attempted to be initialized for the same tag.

    Parameters
    ----------
    other_handler : a ParameterHandler object
        The handler to compare to.

    Raises
    ------
    IncompatibleParameterError if handler_kwargs are incompatible with
    existing parameters.
    """
    # The exclusion policy is the only handler-level string option that
    # must agree between two VirtualSites sections.
    self._check_attributes_are_equal(
        other_handler,
        identical_attrs=["exclusion_policy"],
    )
def find_matches(self, entity, expand_permutations=True):
    """Find the virtual sites in the topology/molecule matched by a
    parameter type.

    Parameters
    ----------
    entity : openff.toolkit.topology.Topology
        Topology to search.

    Returns
    ---------
    matches : Dict[Tuple[int], ParameterHandler._Match]
        ``matches[atom_indices]`` is the ``ParameterType`` object
        matching the n-tuple of atom indices in ``entity``.
    """
    # Virtual sites use a plain dict (no canonical key ordering) and named
    # slots so differently-named parameters can stack on the same key.
    return self._find_matches(
        entity,
        transformed_dict_cls=dict,
        use_named_slots=True,
        expand_permutations=expand_permutations,
    )
def _apply_charge_increment(self, force, atom_key, charge_increment):
    """Transfer charge from the parent atoms onto the virtual site.

    Each atom in ``atom_key`` gains its increment; the virtual site's
    charge is the negative sum of all increments, so total charge is
    conserved. Returns the virtual site's charge.
    """
    # Start from a zero of the same unit/type as the increments.
    vsite_charge = charge_increment[0] * 0.0
    for increment_index, atom_index in enumerate(atom_key):
        charge, sigma, epsilon = force.getParticleParameters(atom_index)
        vsite_charge -= charge_increment[increment_index]
        charge += charge_increment[increment_index]
        force.setParticleParameters(atom_index, charge, sigma, epsilon)
    return vsite_charge
def _same_virtual_site_type(self, vs_i, vs_j):
    """Two parameters describe the same virtual site iff they are the same
    concrete type and carry the same name."""
    return type(vs_i) == type(vs_j) and vs_i.name == vs_j.name
def _reduce_virtual_particles_to_sites(self, atom_matches):
    """
    Collapse per-orientation particle matches into whole virtual sites.

    Parameters
    ----------
    atom_matches : Dict[Tuple[int], List[ParameterHandler._Match]]
        Matches as produced by ``find_matches(..., expand_permutations=True)``.

    Returns
    -------
    combined_orientations : List[List]
        Each element is a two-item list ``[parameter_type, key_list]``,
        where ``key_list`` contains every matched atom-index tuple
        (orientation) belonging to that single virtual site.
    """
    combined_orientations = []
    # These are the indices representing the tuples (VSITE_TYPE, KEY_LIST).
    # Basically an ordered dictionary with ints as keys
    VSITE_TYPE = 0
    KEY_LIST = 1
    for key, atom_match_lst in atom_matches.items():
        for match in atom_match_lst:
            # For each match, loop through existing virtual sites found,
            # and determine if this match is a unique virtual site,
            # or a member of an existing virtual site (e.g. TIP5)
            vs_i = match.parameter_type
            found = False
            for i, vsite_struct in enumerate(combined_orientations):
                vs_j = vsite_struct[VSITE_TYPE]
                # The logic to determine if the particles should be
                # combined into a single virtual site. If the atoms
                # are the same, the vsite is the same, but the absolute
                # ordering of the match is different, then we say
                # this is part of the same virtual site.
                # Note that the expand_permutations=True above is what
                # returns the different orders for each match (normally,
                # this is not the case for e.g. bonds where 1-2 is the
                # same parameter as 2-1 and is always returned as 1-2.
                same_atoms = all(
                    [sorted(key) == sorted(k) for k in vsite_struct[KEY_LIST]]
                )
                diff_keys = key not in vsite_struct[KEY_LIST]
                same_vsite = self._same_virtual_site_type(vs_i, vs_j)
                if same_atoms and same_vsite and diff_keys:
                    combined_orientations[i][KEY_LIST].append(key)
                    found = True
                # Skip out early since there is no reason to keep
                # searching since we will never add the same
                # particle twice
                if found:
                    break
            # If the entire loop did not produce a hit, then this is
            # a brand new virtual site
            if not found:
                newsite = [None, None]
                newsite[VSITE_TYPE] = vs_i
                newsite[KEY_LIST] = [key]
                combined_orientations.append(newsite)
    return combined_orientations
def _create_openff_virtual_sites(self, topology):
    """Attach OpenFF virtual sites to every reference molecule of ``topology``
    and return the (mutated) topology."""
    for molecule in topology.reference_molecules:
        # NOTE: building a single-molecule Topology here should be avoided,
        # but is kept until a better solution is found (see #699):
        # `find_matches` works on FrozenMolecules but with a different
        # signature and return value. We also use the topology match
        # indices directly as `Atom` lookups in the molecule, which is
        # unsafe in principle (nothing guarantees consistent indexing),
        # but there is currently no `FrozenMolecule.atom(index)`
        # alternative; see `add_virtual_site` below for the code that
        # relies on this.
        top_mol = Topology.from_molecules([molecule])
        matches = self.find_matches(top_mol, expand_permutations=True)
        virtual_sites = self._reduce_virtual_particles_to_sites(matches)

        # Batch the key tuples into a single list so one virtual site can
        # represent multiple particles (orientations).
        for vsite_type, orientations in virtual_sites:
            vsite_type.add_virtual_site(molecule, orientations, replace=True)
    return topology
def _create_openmm_virtual_sites(self, system, force, topology, ref_mol):
    """
    Create the OpenMM particles/exclusions for every virtual site of ``ref_mol``.

    Here we must assume that

    1. All atoms in the topology are already present
    2. The order we iterate these virtual sites is the order they
       appear in the OpenMM system

    If 1 is not met, then 2 will fail, and it will be quite difficult to
    find the mapping since we currently do not keep track of any OpenMM
    state, and it is unlikely that we will ever do so. If 1 is met, then 2
    should fall into place naturally.

    This means that we will not check that our index matches the OpenMM
    index, as there is no reason, from a purely technical and/or API
    standpoint, to require them to be.
    """
    for vsite in ref_mol.virtual_sites:
        # Reference-molecule atom indices defining this virtual site.
        ref_key = [atom.molecule_atom_index for atom in vsite.atoms]
        logger.debug("VSite ref_key: {}".format(ref_key))
        ms = topology._reference_molecule_to_topology_molecules[ref_mol]
        for top_mol in ms:
            logger.debug("top_mol: {}".format(top_mol))
            ids = self._create_openmm_virtual_particle(
                system, force, top_mol, vsite, ref_key
            )
            # Go and exclude each of the vsite particles; this makes
            # sense because these particles cannot "feel" forces, only
            # exert them
            policy = self._parameter_to_policy[self.exclusion_policy]
            if policy.value != self._ExclusionPolicy.NONE.value:
                # Default here is to always exclude vsites which are
                # of the same virtual site. Their positions are rigid,
                # and so any energy that would be added to the system
                # due to their pairwise interaction would not make sense.
                for i, j in combinations(ids, 2):
                    logger.debug("Excluding vsite {} vsite {}".format(i, j))
                    force.addException(i, j, 0.0, 0.0, 0.0, replace=True)
def _create_openmm_virtual_particle(self, system, force, top_mol, vsite, ref_key):
    """
    Add one massless OpenMM particle per orientation of ``vsite``, apply
    its charge increments, and register the policy-dependent exclusions.

    Returns
    -------
    ids : list of int
        The OpenMM particle indices created for this virtual site.
    """
    policy = self._parameter_to_policy[self.exclusion_policy]
    ids = []
    for vp in vsite.particles:
        orientation = vp.orientation
        # Reorder the reference key through this particle's orientation.
        sort_key = [orientation.index(i) for i in ref_key]
        atom_key = [ref_key[i] for i in sort_key]
        logger.debug("sort_key: {}".format(sort_key))
        # Shift molecule-local indices to topology-wide atom indices.
        atom_key = [top_mol.atom_start_topology_index + i for i in atom_key]
        omm_vsite = vsite.get_openmm_virtual_site(atom_key)
        vsite_q = self._apply_charge_increment(
            force, atom_key, vsite.charge_increments
        )
        ljtype = vsite
        if ljtype.sigma is None:
            # Convert rmin_half to sigma: sigma = 2*rmin_half / 2^(1/6).
            sigma = 2.0 * ljtype.rmin_half / (2.0 ** (1.0 / 6.0))
        else:
            sigma = ljtype.sigma
        # create the vsite particle (massless: its position is derived)
        mass = 0.0
        vsite_idx = system.addParticle(mass)
        ids.append(vsite_idx)
        logger.debug(
            "vsite_id: {} orientation: {} atom_key: {}".format(
                vsite_idx, orientation, atom_key
            )
        )
        system.setVirtualSite(vsite_idx, omm_vsite)
        force.addParticle(vsite_q, sigma, ljtype.epsilon)
        logger.debug(f"Added virtual site particle with charge {vsite_q}")
        logger.debug(f" charge_increments: {vsite.charge_increments}")
        # add exclusion to the "parent" atom of the vsite
        if policy.value >= self._ExclusionPolicy.MINIMAL.value:
            keylen = len(atom_key)
            # NOTE(review): the keylen==3 branch and the else branch are
            # identical; kept as-is in case they are meant to diverge.
            if keylen == 2:
                owning_atom = atom_key[0]
            elif keylen == 3:
                owning_atom = atom_key[1]
            else:
                # The second atom of an improper is considered the
                # "owning" atom
                owning_atom = atom_key[1]
            logger.debug(f"Excluding vsite {vsite_idx} atom {owning_atom}")
            force.addException(owning_atom, vsite_idx, 0.0, 0.0, 0.0, replace=True)
        # add exclusions to all atoms in the vsite definition (the parents)
        if policy.value >= self._ExclusionPolicy.PARENTS.value:
            for i in atom_key:
                if i == owning_atom:
                    continue
                logger.debug(f"Excluding vsite {vsite_idx} atom {i}")
                force.addException(i, vsite_idx, 0.0, 0.0, 0.0, replace=True)
        if policy.value > self._ExclusionPolicy.PARENTS.value:
            raise NotImplementedError(
                "Only the 'parents', 'minimal', and 'none' exclusion_policies are implemented"
            )
    return ids
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
    # doctest.run_docstring_examples(_ParameterAttributeHandler, globals())
|
open-forcefield-group/openforcefield
|
openff/toolkit/typing/engines/smirnoff/parameters.py
|
Python
|
mit
| 238,850
|
[
"Amber",
"OpenFF Toolkit",
"OpenMM"
] |
92e3f2cae1349d976494e58f0d3df6c268e444f45d812f7ecd7f99d70b89e083
|
#!/usr/bin/env python
import logging
import os.path
import pysam
import sys
def get_chromosomes_info(bam_path):
    """Return per-chromosome info parsed from the BAM index via idxstats.

    Creates the ``.bai`` index with ``pysam.index`` if it is missing.

    Parameters
    ----------
    bam_path : str
        Path to a coordinate-sorted BAM file.

    Returns
    -------
    list of list of str
        One row per reference sequence: [name, length, mapped_reads]
        (the trailing unmapped-reads field and the final '*' line are
        dropped). Exits the program if idxstats fails.
    """
    # Check if there is an index file, create one if there isn't
    if not os.path.isfile(bam_path + ".bai"):
        pysam.index(bam_path)
        logging.info('No BAM index file was found, new index was generated : `{}`'.format(bam_path + ".bai"))
    # Take chromosome data from BAM index:
    # (ref.seq. name, ref.seq. length, number of mapped reads and number of unmapped reads)
    chromosomes_info = []
    logging.info('Collecting information about sample from .bai file: '
                 '[ref.seq. name, ref.seq. length, number of mapped and unmapped reads]')
    logging.info("\nGenome ID {} \nEstimated mappability {}".format('?', '?'))
    try:
        # NOTE(review): assumes pysam.idxstats() yields one line per
        # reference (older pysam API); newer pysam returns a single
        # string — confirm against the pinned pysam version.
        for line in pysam.idxstats(bam_path):
            chromosomes_info.append(line.split("\t")[:-1])
        # Last line is unmapped reads, we don't need them
        chromosomes_info.pop()
    except Exception:
        # Previously a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; narrow to Exception.
        logging.error("\nPROBLEM WITH BAM FILE OR pysam.idxstats() COMMAND\nYour BAM file {} probably is not sorted."
                      "\n\nTo sort it with samtools use comand: \n'samtools sort {} {}'"
                      .format(bam_path, bam_path, bam_path[:-3] + 'sorted'))
        sys.exit(1)
    return chromosomes_info
def count_unique_reads(bam_path, chromosomes_info):
    """Count unique reads (deduplicated by start position + strand).

    A read is a duplicate when it starts at the same position on the same
    strand as the previous read (the BAM is assumed coordinate-sorted).
    Logs per-chromosome counts, read-length info, library depth and
    strand symmetry.

    Parameters
    ----------
    bam_path : str
        Path to a sorted, indexed BAM file.
    chromosomes_info : list
        Rows of [name, length, mapped_reads] from get_chromosomes_info().

    Returns
    -------
    int
        Total number of unique reads across all chromosomes.
    """
    bamfile = pysam.AlignmentFile(bam_path, 'rb')
    total_reads_count = 0
    total_unique_reads_count = 0
    previous_read_strand = 0
    plus_reads_count = 0
    minus_reads_count = 0
    all_read_length = set()
    for chromosome in chromosomes_info:
        chr_unique_reads_count = 0
        chr_total_reads_count = 0
        beginning_of_the_previous_read = 0
        current_chromosome_name = chromosome[0]
        all_reads_in_chromosome = bamfile.fetch(current_chromosome_name)
        for read in all_reads_in_chromosome:
            # Use pysam's parsed fields instead of scraping digits out of
            # str(read): the old approach broke on purely-numeric read
            # names and depended on pysam's exact string layout.
            # SAM FLAG: 0 = forward (+) strand, 0x10 set = reverse (-).
            read_strand = read.flag
            beginning_of_the_read = read.reference_start
            if beginning_of_the_read != beginning_of_the_previous_read or read_strand != previous_read_strand:
                beginning_of_the_previous_read = beginning_of_the_read
                # BUGFIX: counters were swapped — FLAG == 0 is the
                # forward (+) strand, not the reverse one.
                if read_strand == 0:
                    plus_reads_count += 1
                else:
                    minus_reads_count += 1
                previous_read_strand = read_strand
                all_read_length.add(read.query_length)
                total_unique_reads_count += 1
                chr_unique_reads_count += 1
            chr_total_reads_count += 1
        logging.info("On {} there are {} unique reads among {}".format(current_chromosome_name,
                                                                       chr_unique_reads_count,
                                                                       chr_total_reads_count,))
        total_reads_count += chr_total_reads_count
    bamfile.close()
    if len(all_read_length) == 1:
        logging.info("\nAverage read length {} bp".format(all_read_length.pop()))
    else:
        # logging.warn is deprecated; logging.warning is the supported name.
        logging.warning('\nVariable read length {} bp'.format([_ for _ in all_read_length]))
    logging.info("Library depth: there are {} unique reads out of {}.\nIn other words {} % of reads are unique".
                 format(total_unique_reads_count, total_reads_count,
                        round(float(total_unique_reads_count)/float(total_reads_count)*100, 1)))
    logging.info("Strand symmetry: \n {} (+) \n {} (-)".format(plus_reads_count, minus_reads_count))
    return total_unique_reads_count
    # normalising_coefficient = total_unique_reads_count / 1000000
    # it can help to calculate experiments with "control"
    # read_coverage has to be multiplied on normalising_coefficient
def count_effective_length(effective_proportion, chromosomes_info):
    """Return the effective genome length.

    This is ``effective_proportion`` times the total genome length, where
    the total is the sum of the chromosome lengths stored as strings in
    column 1 of each ``chromosomes_info`` row.
    """
    total_genome_length = 0
    for row in chromosomes_info:
        total_genome_length += int(row[1])
    return effective_proportion * total_genome_length
def count_lambda(unique_reads_count, window_size, effective_length):
    """Return the expected read count (Poisson lambda) per window.

    lambda = window_size * unique_reads_count / effective_length
    """
    lambdaa = float(unique_reads_count) * float(window_size) / float(effective_length)
    logging.info("\nAverage density of reads per {} bp window is {}".format(window_size, round(lambdaa, 2)))
    return lambdaa
|
BroadPeaksBioinf/BroadPeaks
|
BroadPeaks1/pre_counting.py
|
Python
|
gpl-2.0
| 4,543
|
[
"pysam"
] |
49f4099a1ef8d6e49510ced5a7dce1acfde268a8eeec4639bd157f9bb52e7d25
|
##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Unit tests for modules.py.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import os
import re
import tempfile
import shutil
import sys
from distutils.version import StrictVersion
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
from unittest import TextTestRunner
import easybuild.tools.modules as mod
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig.easyconfig import EasyConfig
from easybuild.tools import config
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import mkdir, read_file, write_file
from easybuild.tools.modules import Lmod, curr_module_paths, get_software_root, get_software_version
from easybuild.tools.modules import get_software_libdir, invalidate_module_caches_for, modules_tool
from easybuild.tools.run import run_cmd
# number of modules included for testing purposes
TEST_MODULES_COUNT = 76
class ModulesTest(EnhancedTestCase):
"""Test cases for modules."""
def init_testmods(self, test_modules_paths=None):
    """Initialize set of test modules for test."""
    if test_modules_paths is None:
        # Default to the 'modules' directory shipped next to this test file.
        default_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'modules'))
        test_modules_paths = [default_path]
    self.reset_modulepath(test_modules_paths)
# for Lmod, this test has to run first, to avoid that it fails;
# no modules are found if another test ran before it, but using a (very) long module path works fine interactively
def test_long_module_path(self):
    """Test dealing with a (very) long module path."""
    # create a really long modules install path
    tmpdir = tempfile.mkdtemp()
    long_mod_path = tmpdir
    subdir = 'foo'
    # Lmod v5.1.5 doesn't support module paths longer than 256 characters, so stay just under that magic limit
    while (len(os.path.abspath(long_mod_path)) + len(subdir)) < 240:
        long_mod_path = os.path.join(long_mod_path, subdir)
    # copy one of the test modules there
    gcc_mod_dir = os.path.join(long_mod_path, 'GCC')
    os.makedirs(gcc_mod_dir)
    gcc_mod_path = os.path.join(os.path.dirname(__file__), 'modules', 'GCC', '4.6.3')
    shutil.copy2(gcc_mod_path, gcc_mod_dir)
    # try and use long modules path: only the copied GCC module should be found
    self.init_testmods(test_modules_paths=[long_mod_path])
    ms = self.modtool.available()
    self.assertEqual(ms, ['GCC/4.6.3'])
    # clean up the deep temporary tree
    shutil.rmtree(tmpdir)
def test_avail(self):
    """Test if getting a (restricted) list of available modules works."""
    self.init_testmods()
    # test modules include 3 GCC modules
    ms = self.modtool.available('GCC')
    self.assertEqual(ms, ['GCC/4.6.3', 'GCC/4.6.4', 'GCC/4.7.2'])
    # test modules include one GCC/4.6.3 module
    ms = self.modtool.available(mod_name='GCC/4.6.3')
    self.assertEqual(ms, ['GCC/4.6.3'])
    # all test modules are accounted for
    ms = self.modtool.available()
    if isinstance(self.modtool, Lmod) and StrictVersion(self.modtool.version) >= StrictVersion('5.7.5'):
        # with recent versions of Lmod, also the hidden modules are included in the output of 'avail'
        self.assertEqual(len(ms), TEST_MODULES_COUNT + 3)
        self.assertTrue('bzip2/.1.0.6' in ms)
        self.assertTrue('toy/.0.0-deps' in ms)
        self.assertTrue('OpenMPI/.1.6.4-GCC-4.6.4' in ms)
    else:
        self.assertEqual(len(ms), TEST_MODULES_COUNT)
def test_exists(self):
    """Test if testing for module existence works."""
    self.init_testmods()
    self.assertEqual(self.modtool.exist(['OpenMPI/1.6.4-GCC-4.6.4']), [True])
    self.assertEqual(self.modtool.exist(['OpenMPI/1.6.4-GCC-4.6.4'], skip_avail=True), [True])
    self.assertEqual(self.modtool.exist(['foo/1.2.3']), [False])
    self.assertEqual(self.modtool.exist(['foo/1.2.3'], skip_avail=True), [False])
    # exists works on hidden modules
    self.assertEqual(self.modtool.exist(['toy/.0.0-deps']), [True])
    self.assertEqual(self.modtool.exist(['toy/.0.0-deps'], skip_avail=True), [True])
    # also partial module names work
    self.assertEqual(self.modtool.exist(['OpenMPI']), [True])
    self.assertEqual(self.modtool.exist(['OpenMPI'], skip_avail=True), [True])
    # but this doesn't... (partial version is not a valid module name)
    self.assertEqual(self.modtool.exist(['OpenMPI/1.6.4']), [False])
    self.assertEqual(self.modtool.exist(['OpenMPI/1.6.4'], skip_avail=True), [False])
    # exists works on hidden modules in Lua syntax (only with Lmod)
    if isinstance(self.modtool, Lmod):
        test_modules_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'modules'))
        # make sure only the .lua module file is there, otherwise this test doesn't work as intended
        self.assertTrue(os.path.exists(os.path.join(test_modules_path, 'bzip2', '.1.0.6.lua')))
        self.assertFalse(os.path.exists(os.path.join(test_modules_path, 'bzip2', '.1.0.6')))
        self.assertEqual(self.modtool.exist(['bzip2/.1.0.6']), [True])
    # exists also works on lists of module names
    # list should be sufficiently long, since for short lists 'show' is always used
    mod_names = ['OpenMPI/1.6.4-GCC-4.6.4', 'foo/1.2.3', 'GCC',
                 'ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED',
                 'ScaLAPACK/1.8.0-gompi-1.1.0-no-OFED-ATLAS-3.8.4-LAPACK-3.4.0-BLACS-1.1',
                 'Compiler/GCC/4.7.2/OpenMPI/1.6.4', 'toy/.0.0-deps']
    self.assertEqual(self.modtool.exist(mod_names), [True, False, True, False, True, True, True])
    self.assertEqual(self.modtool.exist(mod_names, skip_avail=True), [True, False, True, False, True, True, True])
def test_load(self):
    """Test that loading a module makes it show up in loaded_modules."""
    self.init_testmods()
    ms = self.modtool.available()
    # exclude modules not on the top level of a hierarchy
    ms = [m for m in ms if not (m.startswith('Core') or m.startswith('Compiler/') or m.startswith('MPI/') or
                                m.startswith('CategorizedHMNS'))]
    for m in ms:
        self.modtool.load([m])
        self.assertTrue(m in self.modtool.loaded_modules())
        # purge after each load so iterations don't influence each other
        self.modtool.purge()
    # trying to load a module not on the top level of a hierarchy should fail
    mods = [
        # module use on non-existent dir (Tcl-based env mods), or missing dep (Lmod)
        'Compiler/GCC/4.7.2/OpenMPI/1.6.4',
        # missing dep
        'MPI/GCC/4.7.2/OpenMPI/1.6.4/ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2',
    ]
    for mod in mods:
        # any error message is acceptable here ('.*'), exact text differs per modules tool
        self.assertErrorRegex(EasyBuildError, '.*', self.modtool.load, [mod])
def test_prepend_module_path(self):
    """Test prepend_module_path method."""
    test_path = tempfile.mkdtemp(prefix=self.test_prefix)
    self.modtool.prepend_module_path(test_path)
    self.assertTrue(os.path.samefile(curr_module_paths()[0], test_path))
    # prepending same path again is fine, no changes to $MODULEPATH
    modulepath = curr_module_paths()
    self.modtool.prepend_module_path(test_path)
    self.assertEqual(modulepath, curr_module_paths())
    # prepending path that is 'deeper down' in $MODULEPATH works, brings it back to front
    test_mods_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')
    # sanity check: test modules dir must already be somewhere in $MODULEPATH
    self.assertTrue(any(os.path.samefile(test_mods_dir, p) for p in modulepath))
    self.modtool.prepend_module_path(test_mods_dir)
    self.assertTrue(os.path.samefile(curr_module_paths()[0], test_mods_dir))
    # prepending path that is a symlink to the current head of $MODULEPATH is a no-op
    modulepath = curr_module_paths()
    symlink_path = os.path.join(self.test_prefix, 'symlink_modules')
    os.symlink(modulepath[0], symlink_path)
    self.modtool.prepend_module_path(symlink_path)
    self.assertEqual(modulepath, curr_module_paths())
def test_ld_library_path(self):
    """Make sure $LD_LIBRARY_PATH is what it should be when loading multiple modules."""
    self.init_testmods()
    testpath = '/this/is/just/a/test'
    os.environ['LD_LIBRARY_PATH'] = testpath
    # load module and check that previous LD_LIBRARY_PATH is still there, at the end
    self.modtool.load(['GCC/4.6.3'])
    self.assertTrue(re.search("%s$" % testpath, os.environ['LD_LIBRARY_PATH']))
    self.modtool.purge()
    # check that previous LD_LIBRARY_PATH is still there (at the end) after purging
    self.assertTrue(re.search("%s$" % testpath, os.environ['LD_LIBRARY_PATH']))
    self.modtool.purge()
def test_purge(self):
    """Test if purging of modules works."""
    self.init_testmods()
    avail_mods = self.modtool.available()
    # load the first available module so there is something to purge
    self.modtool.load([avail_mods[0]])
    self.assertTrue(len(self.modtool.loaded_modules()) > 0)
    # purging twice in a row must work fine (2nd purge is a no-op)
    for _ in range(2):
        self.modtool.purge()
        self.assertTrue(len(self.modtool.loaded_modules()) == 0)
def test_get_software_root_version_libdir(self):
    """Test get_software_X functions (get_software_root/version/libdir)."""
    tmpdir = tempfile.mkdtemp()
    # (software name, expected $EBROOT*/$EBVERSION* suffix for that name)
    test_cases = [
        ('GCC', 'GCC'),
        ('grib_api', 'GRIB_API'),
        ('netCDF-C++', 'NETCDFMINCPLUSPLUS'),
        ('Score-P', 'SCOREMINP'),
    ]
    for (name, env_var_name) in test_cases:
        # mock stuff that get_software_X functions rely on
        root = os.path.join(tmpdir, name)
        os.makedirs(os.path.join(root, 'lib'))
        os.environ['EBROOT%s' % env_var_name] = root
        version = '0.0-%s' % root
        os.environ['EBVERSION%s' % env_var_name] = version
        self.assertEqual(get_software_root(name), root)
        self.assertEqual(get_software_version(name), version)
        self.assertEqual(get_software_libdir(name), 'lib')
        os.environ.pop('EBROOT%s' % env_var_name)
        os.environ.pop('EBVERSION%s' % env_var_name)
        # check expected result of get_software_libdir with multiple lib subdirs
        root = os.path.join(tmpdir, name)
        os.makedirs(os.path.join(root, 'lib64'))
        os.environ['EBROOT%s' % env_var_name] = root
        self.assertErrorRegex(EasyBuildError, "Multiple library subdirectories found.*", get_software_libdir, name)
        self.assertEqual(get_software_libdir(name, only_one=False), ['lib', 'lib64'])
        # only directories containing files in specified list should be retained
        # use write_file helper rather than a bare open().write(), which leaks the file handle
        write_file(os.path.join(root, 'lib64', 'foo'), 'foo')
        self.assertEqual(get_software_libdir(name, fs=['foo']), 'lib64')
        # clean up for next iteration of the loop
        os.environ.pop('EBROOT%s' % env_var_name)
    # if root/version for specified software package can not be found, these functions should return None
    self.assertEqual(get_software_root('foo'), None)
    self.assertEqual(get_software_version('foo'), None)
    self.assertEqual(get_software_libdir('foo'), None)
    # if no library subdir is found, get_software_libdir should return None
    os.environ['EBROOTFOO'] = tmpdir
    self.assertEqual(get_software_libdir('foo'), None)
    os.environ.pop('EBROOTFOO')
    shutil.rmtree(tmpdir)
def test_wrong_modulepath(self):
    """Test whether modules tool can deal with a broken $MODULEPATH."""
    test_modules_path = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules'))
    modules_test_installpath = os.path.join(self.test_installpath, 'modules', 'all')
    # first two entries are bogus paths that do not exist on disk
    os.environ['MODULEPATH'] = '/some/non-existing/path:/this/doesnt/exists/anywhere:%s' % test_modules_path
    init_config()
    # purposely *not* using self.modtool here;
    # need to check whether creating new ModulesTool instance doesn't break when $MODULEPATH contains faulty paths
    modtool = modules_tool()
    # the two non-existing paths must be filtered out, leaving only valid entries
    self.assertEqual(len(modtool.mod_paths), 2)
    self.assertTrue(os.path.samefile(modtool.mod_paths[0], modules_test_installpath))
    self.assertEqual(modtool.mod_paths[1], test_modules_path)
    self.assertTrue(len(modtool.available()) > 0)
def test_path_to_top_of_module_tree(self):
    """Test function to determine path to top of the module tree."""
    # with a flat (non-hierarchical) layout, the path to the top is always empty
    path = self.modtool.path_to_top_of_module_tree([], 'gompi/1.3.12', '', ['GCC/4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4'])
    self.assertEqual(path, [])
    path = self.modtool.path_to_top_of_module_tree([], 'toy/.0.0-deps', '', ['gompi/1.3.12'])
    self.assertEqual(path, [])
    path = self.modtool.path_to_top_of_module_tree([], 'toy/0.0', '', [])
    self.assertEqual(path, [])
def test_path_to_top_of_module_tree_hierarchical_mns(self):
    """Test function to determine path to top of the module tree for a hierarchical module naming scheme."""
    ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
    all_stops = [x[0] for x in EasyBlock.get_steps()]
    build_options = {
        'check_osdeps': False,
        'robot_path': [ecs_dir],
        'valid_stops': all_stops,
        'validate': False,
    }
    os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'HierarchicalMNS'
    init_config(build_options=build_options)
    self.setup_hierarchical_modules()
    mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
    init_modpaths = [os.path.join(mod_prefix, 'Core')]
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3', 'OpenBLAS/0.2.6-LAPACK-3.4.2',
            'ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2']
    core = os.path.join(mod_prefix, 'Core')
    # modules that live in Core (top of the hierarchy) have an empty path to the top
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'goolf/1.4.10', core, deps)
    self.assertEqual(path, [])
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'GCC/4.7.2', core, [])
    self.assertEqual(path, [])
    # compiler-level module: one hop (via the GCC module) to get back to Core
    full_mod_subdir = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2')
    deps = ['GCC/4.7.2', 'hwloc/1.6.2']
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'OpenMPI/1.6.4', full_mod_subdir, deps)
    self.assertEqual(path, ['GCC/4.7.2'])
    # MPI-level module: two hops (OpenMPI, then GCC) to get back to Core
    full_mod_subdir = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4']
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'FFTW/3.3.3', full_mod_subdir, deps)
    self.assertEqual(path, ['OpenMPI/1.6.4', 'GCC/4.7.2'])
def test_path_to_top_of_module_tree_lua(self):
    """Test path_to_top_of_module_tree function on modules in Lua syntax."""
    # Lua module files are only supported by Lmod
    if isinstance(self.modtool, Lmod):
        orig_modulepath = os.environ.get('MODULEPATH')
        # start from a clean $MODULEPATH by unusing the test modules dir
        self.modtool.unuse(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules'))
        curr_modulepath = os.environ.get('MODULEPATH')
        error_msg = "Incorrect $MODULEPATH value after unuse: %s (orig: %s)" % (curr_modulepath, orig_modulepath)
        self.assertEqual(curr_modulepath, None, error_msg)
        top_moddir = os.path.join(self.test_prefix, 'test_modules')
        core_dir = os.path.join(top_moddir, 'Core')
        mkdir(core_dir, parents=True)
        self.modtool.use(core_dir)
        self.assertTrue(os.path.samefile(os.environ.get('MODULEPATH'), core_dir))
        # install toy modules in Lua syntax that are sufficient to test path_to_top_of_module_tree with;
        # intel extends $MODULEPATH with the Compiler subdir, impi with the MPI subdir
        intel_mod_dir = os.path.join(top_moddir, 'Compiler', 'intel', '2016')
        intel_mod = 'prepend_path("MODULEPATH", "%s")\n' % intel_mod_dir
        write_file(os.path.join(core_dir, 'intel', '2016.lua'), intel_mod)
        impi_mod_dir = os.path.join(top_moddir, 'MPI', 'intel', '2016', 'impi', '2016')
        impi_mod = 'prepend_path("MODULEPATH", "%s")\n' % impi_mod_dir
        write_file(os.path.join(intel_mod_dir, 'impi', '2016.lua'), impi_mod)
        # imkl sits at the bottom of the hierarchy, doesn't extend $MODULEPATH itself
        imkl_mod = 'io.stderr:write("Hi from the imkl module")\n'
        write_file(os.path.join(impi_mod_dir, 'imkl', '2016.lua'), imkl_mod)
        self.assertEqual(self.modtool.available(), ['intel/2016'])
        imkl_deps = ['intel/2016', 'impi/2016']
        # modules that compose toolchain are expected to be loaded
        self.modtool.load(imkl_deps)
        res = self.modtool.path_to_top_of_module_tree(core_dir, 'imkl/2016', impi_mod_dir, imkl_deps)
        self.assertEqual(res, ['impi/2016', 'intel/2016'])
    else:
        # Python 2 print statement; this test module predates Python 3 support
        print "Skipping test_path_to_top_of_module_tree_lua, required Lmod as modules tool"
def test_modpath_extensions_for(self):
    """Test modpath_extensions_for method."""
    self.setup_hierarchical_modules()
    mod_dir = os.path.join(self.test_installpath, 'modules', 'all')
    # each module maps to the list of $MODULEPATH extensions it makes
    expected = {
        'GCC/4.7.2': [os.path.join(mod_dir, 'Compiler', 'GCC', '4.7.2')],
        'OpenMPI/1.6.4': [os.path.join(mod_dir, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')],
        'FFTW/3.3.3': [],
    }
    res = self.modtool.modpath_extensions_for(['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3'])
    self.assertEqual(res, expected)
    expected = {
        'icc/2013.5.192-GCC-4.8.3': [os.path.join(mod_dir, 'Compiler', 'intel', '2013.5.192-GCC-4.8.3')],
        'ifort/2013.5.192-GCC-4.8.3': [os.path.join(mod_dir, 'Compiler', 'intel', '2013.5.192-GCC-4.8.3')],
    }
    res = self.modtool.modpath_extensions_for(['icc/2013.5.192-GCC-4.8.3', 'ifort/2013.5.192-GCC-4.8.3'])
    self.assertEqual(res, expected)
    # error for non-existing modules
    error_pattern = "Can't get value from a non-existing module"
    self.assertErrorRegex(EasyBuildError, error_pattern, self.modtool.modpath_extensions_for, ['nosuchmodule/1.2'])
    # test result in case conditional loads are used
    test_mod = 'test-modpaths/1.2.3.4'
    test_modfile = os.path.join(mod_dir, test_mod)
    # Tcl module file exercising the different ways $MODULEPATH can be extended
    test_modtxt = '\n'.join([
        '#%Module',
        " module use %s/Compiler/intel/2013.5.192-GCC-4.8.3" % mod_dir,  # indented without guard
        # quoted path
        'module use "%s/Compiler/GCC/4.7.2"' % mod_dir,
        # using prepend-path & quoted
        ' prepend-path MODULEPATH "%s/MPI/GCC/4.7.2/OpenMPI/1.6.4"' % mod_dir,
        # conditional 'use' on subdirectory in $HOME, e.g. when --subdir-user-modules is used
        "if { [ file isdirectory %s/modules/Compiler/GCC/4.7.2 ] } {" % os.environ['HOME'],
        " module use %s/modules/Compiler/GCC/4.7.2" % os.environ['HOME'],
        "}",
    ])
    write_file(test_modfile, test_modtxt)
    # all four extensions should be picked up, including the conditional one
    expected = {
        test_mod: [
            os.path.join(mod_dir, 'Compiler', 'intel', '2013.5.192-GCC-4.8.3'),
            os.path.join(mod_dir, 'Compiler', 'GCC', '4.7.2'),
            os.path.join(mod_dir, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'),
            os.path.join(os.environ['HOME'], 'modules', 'Compiler', 'GCC', '4.7.2'),
        ]
    }
    self.assertEqual(self.modtool.modpath_extensions_for([test_mod]), expected)
    # also test with module file in Lua syntax if Lmod is used as modules tool
    if isinstance(self.modtool, Lmod):
        test_mod = 'test-modpaths-lua/1.2.3.4'
        test_modfile = os.path.join(mod_dir, test_mod + '.lua')
        test_modtxt = '\n'.join([
            # indented without guard
            ' prepend_path("MODULEPATH", "%s/Compiler/intel/2013.5.192-GCC-4.8.3")' % mod_dir,
            'prepend_path("MODULEPATH","%s/Compiler/GCC/4.7.2")' % mod_dir,
            'prepend_path("MODULEPATH", "%s/MPI/GCC/4.7.2/OpenMPI/1.6.4")' % mod_dir,
            # conditional 'use' on subdirectory in $HOME, e.g. when --subdir-user-modules is used
            'if isDir("%s/modules/Compiler/GCC/4.7.2") then' % os.environ['HOME'],
            ' prepend_path("MODULEPATH", "%s/modules/Compiler/GCC/4.7.2")' % os.environ['HOME'],
            'end',
        ])
        write_file(test_modfile, test_modtxt)
        # same extensions expected as for the equivalent Tcl module above
        expected = {test_mod: expected['test-modpaths/1.2.3.4']}
        self.assertEqual(self.modtool.modpath_extensions_for([test_mod]), expected)
def test_path_to_top_of_module_tree_categorized_hmns(self):
    """
    Test function to determine path to top of the module tree for a categorized hierarchical module naming
    scheme.
    """
    ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
    all_stops = [x[0] for x in EasyBlock.get_steps()]
    build_options = {
        'check_osdeps': False,
        'robot_path': [ecs_dir],
        'valid_stops': all_stops,
        'validate': False,
    }
    os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'CategorizedHMNS'
    init_config(build_options=build_options)
    self.setup_categorized_hmns_modules()
    mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
    # in the categorized scheme, Core is split per module class (compiler, toolchain, ...)
    init_modpaths = [os.path.join(mod_prefix, 'Core', 'compiler'), os.path.join(mod_prefix, 'Core', 'toolchain')]
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3', 'OpenBLAS/0.2.6-LAPACK-3.4.2',
            'ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2']
    core = os.path.join(mod_prefix, 'Core')
    # modules in a Core subcategory are already at the top: empty path
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'goolf/1.4.10', os.path.join(core, 'toolchain'), deps)
    self.assertEqual(path, [])
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'GCC/4.7.2', os.path.join(core, 'compiler'), [])
    self.assertEqual(path, [])
    # compiler-level module: one hop back to Core via the GCC module
    full_mod_subdir = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2', 'mpi')
    deps = ['GCC/4.7.2', 'hwloc/1.6.2']
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'OpenMPI/1.6.4', full_mod_subdir, deps)
    self.assertEqual(path, ['GCC/4.7.2'])
    # MPI-level module: two hops (OpenMPI, then GCC) back to Core
    full_mod_subdir = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4', 'numlib')
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4']
    path = self.modtool.path_to_top_of_module_tree(init_modpaths, 'FFTW/3.3.3', full_mod_subdir, deps)
    self.assertEqual(path, ['OpenMPI/1.6.4', 'GCC/4.7.2'])
def test_modules_tool_stateless(self):
    """Check whether ModulesTool instance is stateless between runs."""
    test_modules_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')
    # copy test Core/Compiler modules, we need to rewrite the 'module use' statement in the one we're going to load
    shutil.copytree(os.path.join(test_modules_path, 'Core'), os.path.join(self.test_prefix, 'Core'))
    shutil.copytree(os.path.join(test_modules_path, 'Compiler'), os.path.join(self.test_prefix, 'Compiler'))
    modtxt = read_file(os.path.join(self.test_prefix, 'Core', 'GCC', '4.7.2'))
    modpath_extension = os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2')
    # note: no 4th argument to re.sub here; passing re.M positionally would be interpreted
    # as the 'count' argument (and re.M is a no-op for a pattern without ^/$ anchors anyway)
    modtxt = re.sub('module use .*', 'module use %s' % modpath_extension, modtxt)
    write_file(os.path.join(self.test_prefix, 'Core', 'GCC', '4.7.2'), modtxt)
    modtxt = read_file(os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'))
    modpath_extension = os.path.join(self.test_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
    mkdir(modpath_extension, parents=True)
    modtxt = re.sub('module use .*', 'module use %s' % modpath_extension, modtxt)
    write_file(os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'), modtxt)
    # force reset of any singletons by reinitiating config
    init_config()
    # make sure $LMOD_DEFAULT_MODULEPATH is not set, since Lmod picks it up and tweaks $MODULEPATH to match it
    if 'LMOD_DEFAULT_MODULEPATH' in os.environ:
        del os.environ['LMOD_DEFAULT_MODULEPATH']
    self.reset_modulepath([os.path.join(self.test_prefix, 'Core')])
    if isinstance(self.modtool, Lmod):
        # GCC/4.6.3 is nowhere to be found (in $MODULEPATH)
        load_err_msg = r"The[\s\n]*following[\s\n]*module\(s\)[\s\n]*are[\s\n]*unknown"
    else:
        load_err_msg = "Unable to locate a modulefile"
    # GCC/4.6.3 is *not* an available Core module
    self.assertErrorRegex(EasyBuildError, load_err_msg, self.modtool.load, ['GCC/4.6.3'])
    # GCC/4.7.2 is one of the available Core modules
    self.modtool.load(['GCC/4.7.2'])
    # OpenMPI/1.6.4 becomes available after loading GCC/4.7.2 module
    self.modtool.load(['OpenMPI/1.6.4'])
    self.modtool.purge()
    if 'LMOD_DEFAULT_MODULEPATH' in os.environ:
        del os.environ['LMOD_DEFAULT_MODULEPATH']
    # reset $MODULEPATH, obtain new ModulesTool instance,
    # which should not remember anything w.r.t. previous $MODULEPATH value
    os.environ['MODULEPATH'] = test_modules_path
    self.modtool = modules_tool()
    # GCC/4.6.3 is available
    self.modtool.load(['GCC/4.6.3'])
    self.modtool.purge()
    # GCC/4.7.2 is available (note: also as non-Core module outside of hierarchy)
    self.modtool.load(['GCC/4.7.2'])
    # OpenMPI/1.6.4 is *not* available with current $MODULEPATH (loaded GCC/4.7.2 was not a hierarchical module)
    if isinstance(self.modtool, Lmod):
        # OpenMPI/1.6.4 exists, but is not available for load;
        # exact error message depends on Lmod version
        # (character class is [\s\n]; the old '[\s\sn]' typo also matched a literal 'n')
        load_err_msg = '|'.join([
            r'These[\s\n]*module\(s\)[\s\n]*exist[\s\n]*but[\s\n]*cannot[\s\n]*be',
            r'The[\s\n]*following[\s\n]*module\(s\)[\s\n]*are[\s\n]*unknown',
        ])
    else:
        load_err_msg = "Unable to locate a modulefile"
    self.assertErrorRegex(EasyBuildError, load_err_msg, self.modtool.load, ['OpenMPI/1.6.4'])
def test_mk_module_cache_key(self):
    """Test mk_module_cache_key method."""
    os.environ['MODULEPATH'] = '%s:/tmp/test' % self.test_prefix
    res = self.modtool.mk_module_cache_key('thisisapartialkey')
    self.assertTrue(isinstance(res, tuple))
    # cache key is ($MODULEPATH, modules tool command, partial key)
    self.assertEqual(res, ('MODULEPATH=%s:/tmp/test' % self.test_prefix, self.modtool.COMMAND, 'thisisapartialkey'))
    del os.environ['MODULEPATH']
    # with $MODULEPATH unset, the key still includes an (empty) MODULEPATH component
    res = self.modtool.mk_module_cache_key('thisisapartialkey')
    self.assertEqual(res, ('MODULEPATH=', self.modtool.COMMAND, 'thisisapartialkey'))
def test_module_caches(self):
    """Test module caches and invalidate_module_caches_for function."""
    self.assertEqual(mod.MODULE_AVAIL_CACHE, {})
    # purposely extending $MODULEPATH with non-existing path, should be handled fine
    nonpath = os.path.join(self.test_prefix, 'nosuchfileordirectory')
    self.modtool.use(nonpath)
    modulepaths = [p for p in os.environ.get('MODULEPATH', '').split(os.pathsep) if p]
    self.assertTrue(any([os.path.samefile(nonpath, mp) for mp in modulepaths]))
    shutil.rmtree(nonpath)
    # create symlink to entry in $MODULEPATH we're going to use, and add it to $MODULEPATH
    # invalidate_module_caches_for should be able to deal with this
    test_mods_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')
    mods_symlink = os.path.join(self.test_prefix, 'modules_symlink')
    os.symlink(test_mods_path, mods_symlink)
    self.modtool.use(mods_symlink)
    # no caching for 'avail' commands with an argument
    self.assertTrue(self.modtool.available('GCC'))
    self.assertEqual(mod.MODULE_AVAIL_CACHE, {})
    # run 'avail' without argument, result should get cached
    res = self.modtool.available()
    # just a single cache entry
    self.assertEqual(len(mod.MODULE_AVAIL_CACHE), 1)
    # fetch cache entry; dict.keys() returns a list here (Python 2), so [0] works
    avail_cache_key = mod.MODULE_AVAIL_CACHE.keys()[0]
    cached_res = mod.MODULE_AVAIL_CACHE[avail_cache_key]
    self.assertTrue(cached_res == res)
    # running avail again results in getting cached result, exactly the same result as before
    # depending on the modules tool being used, it may not be the same list instance, because of post-processing
    self.assertTrue(self.modtool.available() == res)
    # run 'show', should be all cached
    show_res_gcc = self.modtool.show('GCC/4.7.2')
    show_res_fftw = self.modtool.show('FFTW')
    self.assertEqual(len(mod.MODULE_SHOW_CACHE), 2)
    self.assertTrue(show_res_gcc in mod.MODULE_SHOW_CACHE.values())
    self.assertTrue(show_res_fftw in mod.MODULE_SHOW_CACHE.values())
    # 'is' checks: cache hits must return the identical object
    self.assertTrue(self.modtool.show('GCC/4.7.2') is show_res_gcc)
    self.assertTrue(self.modtool.show('FFTW') is show_res_fftw)
    # invalidate caches with correct path
    modulepaths = [p for p in os.environ.get('MODULEPATH', '').split(os.pathsep) if p]
    self.assertTrue(any([os.path.exists(mp) and os.path.samefile(test_mods_path, mp) for mp in modulepaths]))
    paths_in_key = [p for p in avail_cache_key[0].split('=')[1].split(os.pathsep) if p]
    self.assertTrue(any([os.path.exists(p) and os.path.samefile(test_mods_path, p) for p in paths_in_key]))
    # verify cache invalidation, caches should be empty again
    invalidate_module_caches_for(test_mods_path)
    self.assertEqual(mod.MODULE_AVAIL_CACHE, {})
    self.assertEqual(mod.MODULE_SHOW_CACHE, {})
def test_module_use_bash(self):
    """Test whether effect of 'module use' is preserved when a new bash session is started."""
    # this test is here as check for a nasty bug in how the modules tool is deployed
    # cfr. https://github.com/hpcugent/easybuild-framework/issues/1756,
    # https://bugzilla.redhat.com/show_bug.cgi?id=1326075
    modules_dir = os.path.abspath(os.path.join(self.test_prefix, 'modules'))
    self.assertFalse(modules_dir in os.environ['MODULEPATH'])
    mkdir(modules_dir, parents=True)
    self.modtool.use(modules_dir)
    modulepath = os.environ['MODULEPATH']
    self.assertTrue(modules_dir in modulepath)
    # a fresh bash subshell must see the same $MODULEPATH, including the newly used dir
    out, _ = run_cmd("bash -c 'echo MODULEPATH: $MODULEPATH'", simple=False)
    self.assertEqual(out.strip(), "MODULEPATH: %s" % modulepath)
    self.assertTrue(modules_dir in out)
def test_load_in_hierarchy(self):
    """Test whether loading a module in a module hierarchy results in loading the correct module."""
    self.setup_hierarchical_modules()
    mod_dir = os.path.join(self.test_installpath, 'modules', 'all')
    core_mod_dir = os.path.join(mod_dir, 'Core')
    # create an extra (dummy) hwloc module in Core
    hwloc_mod = os.path.join(core_mod_dir, 'hwloc', '1.6.2')
    write_file(hwloc_mod, "#%Module\nsetenv EBROOTHWLOC /path/to/dummy/hwloc")
    # set up $MODULEPATH to point to top of hierarchy
    self.modtool.use(core_mod_dir)
    self.assertEqual(os.environ.get('EBROOTHWLOC'), None)
    # check whether dummy hwloc is loaded
    self.modtool.load(['hwloc/1.6.2'])
    self.assertEqual(os.environ['EBROOTHWLOC'], '/path/to/dummy/hwloc')
    # make sure that compiler-dependent hwloc test module exists
    gcc_mod_dir = os.path.join(mod_dir, 'Compiler', 'GCC', '4.7.2')
    self.assertTrue(os.path.exists(os.path.join(gcc_mod_dir, 'hwloc', '1.6.2')))
    # test loading of compiler-dependent hwloc test module
    self.modtool.purge()
    self.modtool.use(gcc_mod_dir)
    self.modtool.load(['hwloc/1.6.2'])
    self.assertEqual(os.environ['EBROOTHWLOC'], '/tmp/software/Compiler/GCC/4.7.2/hwloc/1.6.2')
    # ensure that correct module is loaded when hierarchy is defined by loading the GCC module
    # (side-effect is that ModulesTool instance doesn't track the change being made to $MODULEPATH)
    # verifies bug fixed in https://github.com/hpcugent/easybuild-framework/pull/1795
    self.modtool.purge()
    self.modtool.unuse(gcc_mod_dir)
    self.modtool.load(['GCC/4.7.2'])
    self.assertEqual(os.environ['EBROOTGCC'], '/tmp/software/Core/GCC/4.7.2')
    # GCC module extended $MODULEPATH behind the tool's back; hwloc must still resolve to the Compiler-level module
    self.modtool.load(['hwloc/1.6.2'])
    self.assertEqual(os.environ['EBROOTHWLOC'], '/tmp/software/Compiler/GCC/4.7.2/hwloc/1.6.2')
def suite():
    """Return all the testcases in this module (filtered by any command-line arguments)."""
    return TestLoaderFiltered().loadTestsFromTestCase(ModulesTest, sys.argv[1:])
# run the test suite when invoked as a script
if __name__ == '__main__':
    TextTestRunner(verbosity=1).run(suite())
|
wpoely86/easybuild-framework
|
test/framework/modules.py
|
Python
|
gpl-2.0
| 34,083
|
[
"NetCDF"
] |
965adf62c82293c3b2b5ce98c2fcefb00c65d3788270d1095b488f01026fb0a1
|
"""
Convenience routines for performing common operations.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import os, sys, logging
from zeroinstall import support
from zeroinstall.support import tasks
def get_selections_gui(iface_uri, gui_args, test_callback = None):
    """Run the GUI to choose and download a set of implementations.
    The user may ask the GUI to submit a bug report about the program. In that case,
    the GUI may ask us to test it. test_callback is called in that case with the implementations
    to be tested; the callback will typically call L{zeroinstall.injector.run.test_selections} and return the result of that.
    @param iface_uri: the required program, or None to show just the preferences dialog
    @type iface_uri: str
    @param gui_args: any additional arguments for the GUI itself
    @type gui_args: [str]
    @param test_callback: function to use to try running the program
    @type test_callback: L{zeroinstall.injector.selections.Selections} -> str
    @return: the selected implementations, or None if the user cancelled
    @rtype: L{zeroinstall.injector.selections.Selections}
    @since: 0.28
    """
    from zeroinstall.injector import selections, qdom
    from StringIO import StringIO
    from os.path import join, dirname
    gui_exe = join(dirname(__file__), '0launch-gui', '0launch-gui')
    import socket
    # socketpair gives us a bidirectional channel to talk to the GUI child process
    cli, gui = socket.socketpair()
    try:
        child = os.fork()
        if child == 0:
            # We are the child (GUI)
            try:
                try:
                    cli.close()
                    # We used to use pipes to support Python2.3...
                    # the GUI talks to us over stdin/stdout, wired to the socket
                    os.dup2(gui.fileno(), 1)
                    os.dup2(gui.fileno(), 0)
                    if iface_uri is not None:
                        gui_args = gui_args + ['--', iface_uri]
                    os.execvp(sys.executable, [sys.executable, gui_exe] + gui_args)
                except:
                    import traceback
                    traceback.print_exc(file = sys.stderr)
            finally:
                # only reached if execvp failed; never return from the fork
                sys.stderr.flush()
                os._exit(1)
        # We are the parent (CLI)
        gui.close()
        gui = None
        while True:
            logging.info("Waiting for selections from GUI...")
            # protocol: 'Length:' followed by the payload size as hex digits, then the XML
            reply = support.read_bytes(cli.fileno(), len('Length:') + 9, null_ok = True)
            if reply:
                if not reply.startswith('Length:'):
                    raise Exception("Expected Length:, but got %s" % repr(reply))
                xml = support.read_bytes(cli.fileno(), int(reply.split(':', 1)[1], 16))
                dom = qdom.parse(StringIO(xml))
                sels = selections.Selections(dom)
                if dom.getAttribute('run-test'):
                    logging.info("Testing program, as requested by GUI...")
                    if test_callback is None:
                        output = "Can't test: no test_callback was passed to get_selections_gui()\n"
                    else:
                        output = test_callback(sels)
                    logging.info("Sending results to GUI...")
                    # reply uses the same 'Length:<hex>' framing in the other direction
                    output = ('Length:%8x\n' % len(output)) + output
                    logging.debug("Sending: %s", repr(output))
                    # send() may write only part of the buffer; loop until drained
                    while output:
                        sent = cli.send(output)
                        output = output[sent:]
                    # keep waiting: the GUI may run more tests before sending final selections
                    continue
            else:
                sels = None
            pid, status = os.waitpid(child, 0)
            assert pid == child
            # waitpid status: exit code in the high byte, so 1 << 8 means the child exited with code 1
            if status == 1 << 8:
                logging.info("User cancelled the GUI; aborting")
                return None # Aborted
            if status != 0:
                raise Exception("Error from GUI: code = %d" % status)
            break
    finally:
        for sock in [cli, gui]:
            if sock is not None: sock.close()
    return sels
def ensure_cached(uri, command = 'run', config = None):
    """Ensure that an implementation of uri is cached.
    If not, it downloads one. It uses the GUI if a display is
    available, or the console otherwise.
    @param uri: the required interface
    @type uri: str
    @param command: the command to select within the implementation
    @type command: str
    @param config: configuration to use, or None to load the default
    @return: the selected implementations, or None if the user cancelled
    @rtype: L{zeroinstall.injector.selections.Selections}
    """
    from zeroinstall.injector.driver import Driver
    from zeroinstall.injector.requirements import Requirements
    if config is None:
        from zeroinstall.injector.config import load_config
        config = load_config()
    requirements = Requirements(uri)
    requirements.command = command
    driver = Driver(config, requirements)
    if driver.need_download() or not driver.solver.ready:
        if os.environ.get('DISPLAY', None):
            # a display is available: hand over to the GUI entirely
            return get_selections_gui(uri, ['--command', command])
        # console fallback: solve and fetch, blocking until done
        blocker = driver.solve_and_download_impls()
        tasks.wait_for_blocker(blocker)
    return driver.solver.selections
|
dabrahams/zeroinstall
|
zeroinstall/helpers.py
|
Python
|
lgpl-2.1
| 4,165
|
[
"VisIt"
] |
3868e47d1346f2e4dadc62c901323a7b0627fa78fc1f2b810a36cda1c676ab74
|
# Copyright 2012 Patrick Varilly
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import uwham
import numpy as np
import math
import scipy.stats
# Test sampling of a Gaussian...
mu_star = 4.0
sigma_star = 1.0
# ...with lots of Gaussian umbrellas
np.random.seed(1)
K = 30
sigma_k = np.random.normal(1.0, 0.25, (K,))
sigma_k[sigma_k < 0.25] = 0.25 # Ensure sigma_k isn't too small
mu_k = np.random.uniform(0.0, 10.0, (K,))
N_k = np.asarray(np.ceil(np.random.uniform(500.0, 1500.0, (K,))),
dtype=np.int)
# Generate samples
u_kln = np.zeros([K, K, N_k.max()], np.float64)
samples_ln = np.zeros([K, N_k.max()], np.float64)
for l in xrange(K):
sample_sigma = (sigma_star ** -2 + sigma_k[l] ** -2) ** (-0.5)
sample_mu = sample_sigma ** 2 * (mu_star / sigma_star ** 2 +
mu_k[l] / sigma_k[l] ** 2)
samples_ln[l, :N_k[l]] = np.random.normal(
loc=sample_mu, scale=sample_sigma, size=[N_k[l]])
for k in xrange(K):
u_kln[k, l, :N_k[l]] = (samples_ln[l, :N_k[l]] - mu_k[k])**2 / (2*sigma_k[k]**2)
# Run UWHAM
results = uwham.UWHAM(u_kln, N_k)
# Collect statistics
dN = 0.5
x, y, y_expect = [], [], []
expect_dist = scipy.stats.norm(loc=mu_star, scale=sigma_star)
for Nmin in np.arange(0.0, 10.0, dN):
Nmax = Nmin + dN
P = 0.0
for l in xrange(K):
for n in xrange(N_k[l]):
if Nmin <= samples_ln[l, n] < Nmax:
P += results.weight_ln[l, n]
x.append(0.5*(Nmin + Nmax))
y.append(P)
y_expect.append(expect_dist.cdf(Nmax) - expect_dist.cdf(Nmin))
x = np.asarray(x)
y = np.asarray(y)
y_expect = np.asarray(y_expect)
# Make a plot to compare
import matplotlib.pyplot as plt
plt.plot(x, np.log(y_expect), label='Exact')
plt.plot(x, np.log(y), 'ro', label='UWHAM')
plt.ylabel("P(x)")
plt.xlabel("x")
plt.legend()
plt.show()
|
patvarilly/uwham
|
test_uwham.py
|
Python
|
gpl-3.0
| 2,528
|
[
"Gaussian"
] |
2093689c67829ae78af0db405256293f33af8798f20b88ae3e164e3eea5079fd
|
# proxy module: re-exports everything from the real mayavi2 script module,
# so old 'enthought.mayavi.scripts.mayavi2' imports keep working
from __future__ import absolute_import
from mayavi.scripts.mayavi2 import *
|
enthought/etsproxy
|
enthought/mayavi/scripts/mayavi2.py
|
Python
|
bsd-3-clause
| 91
|
[
"Mayavi"
] |
35548ef7bcc730852fdad56f64bfa28b43ba1fdad8f5a1601c66c90f1386225e
|
from PIL import Image
from pyimage import PyImage
import numpy as np
class GaussPyramid(object):
'''This class represent a gaussian pyramid of a given image. It contains
an array of PyImage instances, where the higher the index, the deeper you
are in the pyramid; and also an array of loss information, so that we can
prevent pixel lines and columns from being lost when reducing an image.'''
def __init__(self, reduce_default=4):
    '''Set up an empty pyramid.

    reduce_default is the number of reduce steps performed by
    reduceMax(); anything that is not an int falls back to 4.'''
    self.pyramid = []
    self.info_loss = []
    self.reduce_default = reduce_default if isinstance(reduce_default, int) else 4
# ------------------------------------------------------------------------
# Input and Output functions
# ------------------------------------------------------------------------
def loadFile(self, filepath):
    '''Initialize the pyramid from an image file on disk.

    The loaded image becomes the pyramid's base level, and no pixel
    loss is recorded for it.'''
    # Discard any previously built pyramid
    if self.pyramid:
        self.pyramid = []
        self.info_loss = []
    # Load the base image and register it with zero loss
    base = PyImage()
    base.loadFile(filepath)
    self.pyramid.append(base)
    self.info_loss.append((False, False))
def loadImage(self, image):
    '''Initialize the pyramid from an already-loaded image object.

    The given image becomes the pyramid's base level, and no pixel
    loss is recorded for it.'''
    # Discard any previously built pyramid
    if self.pyramid:
        self.pyramid = []
        self.info_loss = []
    # Wrap the image and register it with zero loss
    base = PyImage()
    base.loadImage(image)
    self.pyramid.append(base)
    self.info_loss.append((False, False))
def savePyramid(self, filepath):
'''This function should save the images in the pyramid into different
files, following the filename given in the function call. Images are
saved in format "<name>-<level>.<extension>", where <name> and
<extension> are given with filepath, and <level> specifies which level
is being saved.'''
# Level counter, starts at lowest level [0]
count = 0
# Separate name from extension
path = filepath.split('.')
extension = '.' + path[-1]
path = "".join(path[:-1]) + '-'
# Save each level separately
for image in self.pyramid:
image.saveFile(path + str(count) + extension)
count += 1
# ------------------------------------------------------------------------
# Pyramid operations
# ------------------------------------------------------------------------
def reduceMax(self):
'''This function should reduce the image a fixed number of times. This
adds a specific amount of levels to the gaussian pyramid.'''
for i in range(self.reduce_default):
self.reduce()
def reduce(self):
'''This function should reduce the image by a single level. This adds a
new level to the pyramid, so the reducing is based on the current
highest level. Before halving the image's dimensions, we blur it to
prevent sharpening pixel intensity differences.'''
# Check if pyramid has been generated
if not self.pyramid:
print "\nERROR: Please load an image first\n"
return
# Copy highest level's pixel matrix
img = self.pyramid[-1].copy()
# Blur filter we should apply to the imag
arr = np.array([1, 4, 6, 4, 1]) / 16.0
# To create a proper 5x5 filter, we create the weights matrix, taking
# into account the number of channels in the image
if img.pixels.ndim < 3:
weights = np.empty((5, 5))
else:
weights = np.empty((5, 5, len(img.pixels[0][0])))
# Then, we convolve the 1D filter with itself, creating a proper 5x5
# filter we can use with our generic filter function
for i in range(5):
for j in range(5):
if img.pixels.ndim < 3:
weights[i][j] = arr[i] * arr[j]
else:
weights[i][j] = (arr[i] * arr[j],) * len(img.pixels[0][0])
img.filter(2, weights, np.sum)
# We now check if any pixels will be lost when halving the image - this
# happens when we have an odd dimension
loss = []
if len(img.pixels) % 2:
loss.append(True)
else:
loss.append(False)
if len(img.pixels[0]) % 2:
loss.append(True)
else:
loss.append(False)
# Half the image, taking lines and columns alternatedly
img.pixels = img.pixels[:-1:2, :-1:2]
img.updateImage()
# Append new stuff to new level
self.pyramid.append(img)
self.info_loss.append(loss)
def expand(self, level):
'''This function should expand the image stored at the specified level.
To do so, we simply call the expand function for that image, specifying
whether there was a loss of pixels or not in its reduction to that
level.'''
# Check if pyramid exists
if not self.pyramid:
print "\nERROR: Please load an image first\n"
return
# Check if index is not negative
if level < 0:
print "\nERROR: Please use non-negative index values\n"
return
# Check if index is valid
try:
img = self.pyramid[level].copy()
loss = self.info_loss[level]
except IndexError:
print "\nERROR: Please specify a valid index\n"
return
# Expand image
img.expand(loss)
return img
|
Thurler/imgprocess
|
gausspyramid.py
|
Python
|
gpl-3.0
| 5,999
|
[
"Gaussian"
] |
f6e79bb25845de80b59d43b6ba710470c3dbc8f0dbddf33306579970a55172e2
|
AGENTS = [
"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/2.02E (Win95; U)",
"Mozilla/3.01Gold (Win95; I)",
"Mozilla/4.8 [en] (Windows NT 5.1; U)",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Opera/7.50 (Windows XP; U)",
"Opera/7.50 (Windows ME; U) [en]",
"Opera/7.51 (Windows NT 5.1; U) [en]",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser; Avant Browser; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Media Center PC 4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.8 (KHTML, like Gecko) Beamrise/17.2.0.9 Chrome/17.0.939.0 Safari/535.8",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.0; rv:14.0) Gecko/20100101 Firefox/14.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.2; Win64; x64; rv:16.0) Gecko/16.0 Firefox/16.0",
"iTunes/9.0.2 (Windows; N)",
"Mozilla/5.0 (compatible; Konqueror/4.5; Windows) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
"Opera/9.25 (Windows NT 6.0; U; en)",
"Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.7.39 Version/11.00",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01",
"Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10",
"Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0",
"Mozilla/4.0 (compatible; MSIE 5.15; Mac_PowerPC)",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15",
"Opera/9.0 (Macintosh; PPC Mac OS X; U; en)",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/85.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/125.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr-fr) AppleWebKit/312.5 (KHTML, like Gecko) Safari/312.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Camino/2.2.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre Camino/2.2a1pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.4 (KHTML like Gecko) Chrome/22.0.1229.79 Safari/537.4",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:16.0) Gecko/20120813 Firefox/16.0",
"iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)",
"iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0.112941",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0",
"Opera/9.20 (Macintosh; Intel Mac OS X; U; en)",
"Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7; en-us) AppleWebKit/534.20.8 (KHTML, like Gecko) Version/5.1 Safari/534.20.8",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.5; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)",
"ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)",
"ELinks/0.12~pre5-4",
"Links/0.9.1 (Linux 2.4.24; i386;)",
"Links (2.1pre15; Linux 2.4.26 i686; 158x61)",
"Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)",
"Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12",
"w3m/0.5.1",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 Slackware/13.37 (X11; U; Linux x86_64; en-US) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Konqueror/3.0-rc4; (Konqueror/3.0-rc4; i686 Linux;;datecode)",
"Mozilla/5.0 (compatible; Konqueror/3.3; Linux 2.6.8-gentoo-r3; X11;",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux 2.6.30-7.dmz.1-liquorix-686; X11) KHTML/3.5.10 (like Gecko) (Debian package 4:3.5.10.dfsg.1-1 b1)",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux; en_US) KHTML/3.5.6 (like Gecko) (Kubuntu)",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.4 (KHTML like Gecko) Chrome/22.0.1229.56 Safari/537.4",
"Mozilla/4.0 (compatible; Dillo 3.0)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:14.0) Gecko/20100101 Firefox/14.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:16.0) Gecko/20100101 Firefox/16.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; Linux i686; rv:14.0) Gecko/20100101 Firefox/14.0.1 Iceweasel/14.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:15.0) Gecko/20120724 Debian Iceweasel/15.02",
"Mozilla/5.0 (compatible; Konqueror/4.2; Linux) KHTML/4.2.4 (like Gecko) Slackware/13.0",
"Mozilla/5.0 (compatible; Konqueror/4.3; Linux) KHTML/4.3.1 (like Gecko) Fedora/4.3.1-3.fc11",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux) KHTML/4.4.1 (like Gecko) Fedora/4.4.1-1.fc12",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux 2.6.32-22-generic; X11; en_US) KHTML/4.4.3 (like Gecko) Kubuntu",
"Midori/0.1.10 (X11; Linux i686; U; en-us) WebKit/(531).(2) ",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Opera/9.64 (X11; Linux i686; U; Linux Mint; nb) Presto/2.1.1",
"Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Uzbl (Webkit 1.3) (Linux i686 [i686])",
"ELinks (0.4.3; NetBSD 3.0.2PATCH sparc64; 141x19)",
"Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)",
"Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d",
"w3m/0.5.1",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (compatible; Konqueror/3.5; NetBSD 4.0_RC3; X11) KHTML/3.5.7 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/3.5; SunOS) KHTML/3.5.1 (like Gecko)",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)",
"Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)",
"Mozilla/5.0 (Unknown; U; UNIX BSD/SYSV system; C -) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.2",
"Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/536.5 (KHTML like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Mozilla/5.0 (X11; FreeBSD amd64) AppleWebKit/537.4 (KHTML like Gecko) Chrome/22.0.1229.79 Safari/537.4",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (compatible; Konqueror/4.1; DragonFly) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; OpenBSD) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; NetBSD 5.0.2; X11; amd64; en_US) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; FreeBSD) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"NetSurf/1.2 (NetBSD; amd64)",
"Opera/9.80 (X11; FreeBSD 8.1-RELEASE i386; Edition Next) Presto/2.12.388 Version/12.10",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; BOLT/2.800) AppleWebKit/534.6 (KHTML, like Gecko) Version/5.0 Safari/534.6.3",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/1.22 (compatible; MSIE 5.01; PalmOS 3.0) EudoraWeb 2.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0",
"Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0",
"Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00",
"Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30",
"POLARIS/6.01 (BREW 3.1.5; U; en-us; LG; LX265; POLARIS/6.01/WAP) MMP/2.0 profile/MIDP-2.1 Configuration/CLDC-1.1",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5",
"Mozilla/5.0 (iPad; CPU OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5355d Safari/8536.25",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7;en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; Galaxy Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; GT-P7100 Build/HRI83) AppleWebkit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/4.0 (compatible; Linux 2.6.22) NetFront/3.4 Kindle/2.0 (screen 600x800)",
"Mozilla/5.0 (Linux U; en-US) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) Version/4.0 Kindle/3.0 (screen 600x800; rotate)",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/531.22.7",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_2_1 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5H11a Safari/525.20",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_1_1 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Mobile/7C145",
"nook browser/1.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7;en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.3.4; en-us; BNTV250 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Safari/533.1",
"BlackBerry7100i/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/103",
"BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0",
"BlackBerry8320/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/100",
"BlackBerry8330/4.3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/105",
"BlackBerry9000/4.6.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102",
"BlackBerry9530/4.7.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102 UP.Link/6.3.1.20.0",
"BlackBerry9700/5.0.0.351 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/123",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1 (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"HTC-ST7377/1.59.502.3 (67150) Opera/9.50 (Windows NT 5.1; U; en) UP.Link/6.3.1.17.0",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"LG-LX550 AU-MIC-LX550/2.0 MMP/2.0 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"POLARIS/6.01(BREW 3.1.5;U;en-us;LG;LX265;POLARIS/6.01/WAP;)MMP/2.0 profile/MIDP-201 Configuration /CLDC-1.1",
"LG-GC900/V10a Obigo/WAP2.0 Profile/MIDP-2.1 Configuration/CLDC-1.1",
"Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; MDA Pro/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"MOT-L7v/08.B7.5DR MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"MOT-V9mm/00.62 UP.Browser/6.2.3.4.c.1.123 (GUI) MMP/2.0",
"MOTORIZR-Z8/46.00.00 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 356) Opera 8.65 [it] UP.Link/6.3.0.0.0",
"MOT-V177/0.1.75 UP.Browser/6.2.3.9.c.12 (GUI) MMP/2.0 UP.Link/6.3.1.13.0",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"portalmmm/2.0 N410i(c20;TB) ",
"Nokia3230/2.0 (5.0614.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia5700/3.27; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.70; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Nokia6230/2.0 (04.44) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6230i/2.0 (03.80) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;452) Opera 6.20 [en-US]",
"Nokia6630/1.0 (2.39.15) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9500/4.51 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC6-01/011.010; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.2 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC7-00/012.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es50",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE6-00/021.002; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.16 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es65",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE7-00/010.016; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es70",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0",
"NokiaN70-1/5.0609.2.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"NokiaN73-1/3.0649.0.0.1 Series60/3.0 Profile/MIDP2.0 Configuration/CLDC-1.1",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaN8-00/014.002; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.6.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (SymbianOS/9.1; U; de) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95/10.0.018; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.3.0.0.0",
"Mozilla/5.0 (MeeGo; NokiaN950-00/00) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaX7-00/021.004; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.21 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (webOS/1.3; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Desktop/1.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; PalmSource/hspr-H102; Blazer/4.0) 16;320x320",
"SEC-SGHE900/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4509/1378; nl; U; ssr)",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; Galaxy Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-de; Galaxy S II Build/GRJ22) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; GT-P7100 Build/HRI83) AppleWebkit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"SAMSUNG-S8000/S8000XXIF3 SHP/VPP/R5 Jasmine/1.0 Nextreaming SMM-MMS/1.2.0 profile/MIDP-2.1 configuration/CLDC-1.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; SPH-M900 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"SAMSUNG-SGH-A867/A867UCHJ3 SHP/VPP/R5 NetFront/35 SMM-MMS/1.2.0 profile/MIDP-2.0 configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SEC-SGHX210/1.0 UP.Link/6.3.1.13.0",
"Mozilla/5.0 (Linux; U; Android 1.5; fr-fr; GT-I5700 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"SEC-SGHX820/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK310iv/R4DA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"SonyEricssonK550i/R1JD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK610i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK750i/R1CA Browser/SEMC-Browser/4.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.16823/1428; U; en) Presto/2.2.0",
"SonyEricssonK800i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonK810i/R1KG Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Opera/8.01 (J2ME/MIDP; Opera Mini/1.0.1479/HiFi; SonyEricsson P900; no; U; ssr)",
"SonyEricssonS500i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 SonyEricssonP100/01; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
"SonyEricssonT68/R201A",
"SonyEricssonT100/R101",
"SonyEricssonT610/R201 Profile/MIDP-1.0 Configuration/CLDC-1.0",
"SonyEricssonT650i/R7AA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW580i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW660i/R6AD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW810i/R4EA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonW850i/R1ED Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW950i/R100 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 323) Opera 8.60 [en-US]",
"SonyEricssonW995/R1EA Profile/MIDP-2.1 Configuration/CLDC-1.1 UNTRUSTED/1.0",
"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Opera/9.5 (Microsoft Windows; PPC; Opera Mobi; U) SonyEricssonX1i/R2AA Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonZ800/R1Y Browser/SEMC-Browser/4.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; winfx; .NET CLR 1.1.4322; .NET CLR 2.0.50727; Zune 2.0) ",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-de; Galaxy S II Build/GRJ22) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Opera/9.80 (Android 4.0.4; Linux; Opera Mobi/ADR-1205181138; U; pl) Presto/2.10.254 Version/12.00",
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_2_1 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5H11a Safari/525.20",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Mobile/8F190",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 5_1_1 like Mac OS X; da-dk) AppleWebKit/534.46.0 (KHTML, like Gecko) CriOS/19.0.1084.60 Mobile/9B206 Safari/7534.48.3",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (webOS/1.3; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Desktop/1.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; PalmSource/hspr-H102; Blazer/4.0) 16;320x320",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaN8-00/014.002; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.6.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaX7-00/021.004; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.21 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0",
"Mozilla/5.0 (SymbianOS 9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0) Asus;Galaxy6",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
"DoCoMo/2.0 SH901iC(c100;TB;W24H12)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/4.0 (compatible; MSIE 6.0; j2me) ReqwirelessWeb/3.5",
"Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1",
"BlackBerry7520/4.0.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/5.0.3.3 UP.Link/5.1.2.12 (Google WAP Proxy/1.0)",
"Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Mozilla/2.0 (compatible; Ask Jeeves/Teoma)",
"Baiduspider ( http://www.baidu.com/search/spider.htm)",
"Mozilla/5.0 (compatible; bingbot/2.0 http://www.bing.com/bingbot.htm)",
"Mozilla/5.0 (compatible; Exabot/3.0; http://www.exabot.com/go/robot) ",
"FAST-WebCrawler/3.8 (crawler at trd dot overture dot com; http://www.alltheweb.com/help/webmaster/crawler)",
"AdsBot-Google ( http://www.google.com/adsbot.html)",
"Mozilla/5.0 (compatible; Googlebot/2.1; http://www.google.com/bot.html)",
"Googlebot/2.1 ( http://www.googlebot.com/bot.html)",
"Googlebot-Image/1.0",
"Mediapartners-Google",
"DoCoMo/2.0 N905i(c100;TB;W24H16) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"SAMSUNG-SGH-E250/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/6.2.3.3.c.1.101 (GUI) MMP/2.0 (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"Googlebot-News",
"Googlebot-Video/1.0",
"Mozilla/4.0 (compatible; GoogleToolbar 4.0.1019.5266-big; Windows XP 5.1; MSIE 6.0.2900.2180)",
"Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
"msnbot/1.0 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.1 ( http://search.msn.com/msnbot.htm)",
"msnbot/0.11 ( http://search.msn.com/msnbot.htm)",
"msnbot-media/1.1 ( http://search.msn.com/msnbot.htm)",
"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
"Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)",
"EmailWolf 1.00",
"Gaisbot/3.0 (robot@gais.cs.ccu.edu.tw; http://gais.cs.ccu.edu.tw/robot.php)",
"grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)",
"Gulper Web Bot 0.2.4 (www.ecsl.cs.sunysb.edu/~maxim/cgi-bin/Link/GulperBot)",
"Mozilla/3.0 (compatible; NetPositive/2.1.1; BeOS)",
"Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.9a1) Gecko/20060702 SeaMonkey/1.5a",
"Download Demon/3.5.0.11",
"Offline Explorer/2.5",
"SuperBot/4.4.0.60 (Windows XP)",
"WebCopier v4.6",
"Web Downloader/6.9",
"WebZIP/3.5 (http://www.spidersoft.com)",
"Wget/1.9 cvs-stable (Red Hat modified)",
"Wget/1.9.1",
"Bloglines/3.1 (http://www.bloglines.com)",
"everyfeed-spider/2.0 (http://www.everyfeed.com)",
"FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)",
"Gregarius/0.5.2 ( http://devlog.gregarius.net/docs/ua)",
"Mozilla/5.0 (PLAYSTATION 3; 2.00)",
"Mozilla/5.0 (PLAYSTATION 3; 1.10)",
"Mozilla/4.0 (PSP (PlayStation Portable); 2.00)",
"Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)",
"wii libnup/1.0",
"Java/1.6.0_13",
"libwww-perl/5.820",
"Peach/1.01 (Ubuntu 8.04 LTS; U; en)",
"Python-urllib/2.5",
"HTMLParser/1.6",
"Jigsaw/2.2.5 W3C_CSS_Validator_JFouffa/2.0",
"W3C_Validator/1.654",
"W3C_Validator/1.305.2.12 libwww-perl/5.64",
"P3P Validator",
"CSSCheck/1.2.2",
"WDG_Validator/1.6.2",
"facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)",
"grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)",
"iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)",
"Microsoft URL Control - 6.00.8862",
"SearchExpress",
]
|
carpedm20/hali
|
spider/spider/agents.py
|
Python
|
bsd-3-clause
| 48,876
|
[
"Galaxy"
] |
f3e40c15260a850e74f88b62f7f823907540b51973b9a36ec7b985d78f1137df
|
#!/usr/bin/env python
import argparse
import logging
logging.basicConfig(level=logging.INFO)
def filter_blast(blast_results, top_n=5):
    """Print only the first ``top_n`` hits per query from tabular BLAST output.

    Tabular (``-outfmt 6``) BLAST results group hits by query and list them
    best-first, so counting how many lines we have already seen for each
    query ID is enough to keep the top N hits.

    :param blast_results: iterable of tab-separated BLAST result lines
        (column 0 is the query sequence ID).
    :param top_n: maximum number of hits to emit for each query ID.
    """
    hit_counts = {}
    for line in blast_results:
        # Column 0 is the query ID.  The e-value (column 10) is not needed:
        # BLAST already orders hits by significance within each query.
        query_id = line.split("\t")[0]
        hit_counts[query_id] = hit_counts.get(query_id, 0) + 1
        if hit_counts[query_id] <= top_n:
            # Lines read from a file keep their trailing newline; strip it so
            # print() does not insert blank lines between records.
            print(line.rstrip("\n"))
if __name__ == "__main__":
    # Command-line entry point: read the tabular BLAST results file and the
    # per-query hit limit from the arguments, then delegate to filter_blast().
    arg_parser = argparse.ArgumentParser(description="Filter blast results")
    arg_parser.add_argument(
        "blast_results", type=argparse.FileType("r"), help="Tabular Blast Results"
    )
    arg_parser.add_argument("top_n", type=int, help="Top N hits")
    cli_args = arg_parser.parse_args()
    filter_blast(cli_args.blast_results, cli_args.top_n)
|
TAMU-CPT/galaxy-tools
|
tools/blast/cpt_blast_filter.py
|
Python
|
gpl-3.0
| 765
|
[
"BLAST"
] |
09ae0b9ffaa3c073ad41b76ca9245be79b8b3c553d22746dab71ce9732cc0d68
|
#!/usr/bin/env python
import numpy
import argparse
import os
import math
from Scientific.IO import NetCDF
def main():
parser = argparse.ArgumentParser(
prog="gaussian_bump",
description="""Create a Gaussian bump in a netcdf file"""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'-d',
'--domain',
help="Domain size. Defualt is 1000x1000m",
default=1000.0,
type=float
)
parser.add_argument(
'-b',
'--bumpheight',
help="Distance between seabed and top of bump. Default is 100m",
default=100,
type=float
)
parser.add_argument(
'-r',
'--resolution',
help="Resolution of output netcdf file. Default is 10m",
default=10.0,
type=float
)
parser.add_argument(
'--shift',
help="Shift the bump in the 'north-south' direction, wrapping along the top/bottom",
default = 0,
type=float
)
parser.add_argument(
'--spread',
help="Spread of Gaussian",
default = 100.0,
type=float
)
parser.add_argument(
'output_file',
metavar='output_file',
nargs=1,
help='The output netcdf file'
)
args = parser.parse_args()
verbose = args.verbose
output_file = args.output_file[0]
domain_size = args.domain
bump_height = args.bumpheight
resolution = args.resolution
shift = args.shift
spread = args.spread
nPoints = int(domain_size / resolution)
shift = int(shift/resolution)
if (verbose):
print nPoints, shift
# generate regular grid
X, Y = numpy.meshgrid(numpy.linspace(0.0, domain_size, nPoints), numpy.linspace(0.0, domain_size, nPoints))
Z = numpy.zeros((nPoints,nPoints))
#for each point calculate the Gaussian
centre = domain_size/2.0
for i in range(0,len(X)):
for j in range(0,len(X[0])):
r = ((X[i][j]-centre)**2/(2.0*spread**2) + (Y[i][j]-centre)**2/(2.0*spread**2))
Z[i][j] = bump_height * math.exp(-1.0*r)
if (not shift == 0.0):
Z = numpy.roll(Z, shift, 0)
f = NetCDF.NetCDFFile(output_file, 'w')
xDim = f.createDimension("X", nPoints)
yDim = f.createDimension("Y", nPoints)
x = f.createVariable("X","d",("X",))
y = f.createVariable("Y","d",("Y",))
zVar = f.createVariable("Z","d",("X","Y"))
x.assignValue(X[0,0:nPoints])
y.assignValue(Y[0:nPoints,0])
zVar.assignValue(Z)
f.close()
os.system('grdreformat '+output_file+' '+output_file)
os.system('rm -f 1_contour.* 50_contour.*')
os.system('gdal_contour -fl 1.0 NETCDF:"'+output_file+'":z 1_contour.shp')
os.system('gdal_contour -fl 50.0 NETCDF:"'+output_file+'":z 50_contour.shp')
if __name__ == "__main__":
main()
|
adamcandy/QGIS-Meshing
|
tests/support_files/gaussian_bump.py
|
Python
|
lgpl-2.1
| 3,191
|
[
"Gaussian",
"NetCDF"
] |
766a13549f0f0eb3684dc4a9eb2d705e39728298265988b413b943ab32c93221
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
"""
This module contains classes centered around account, banks and transactions
between accounts.
The main class is :class:`Account`, which holds a set of :class:`AccountTransaction` objects.
For accounts that are banks there's a :class:`BankAccount` class for
the bank specific state and for bill generation there's also
:class:`BillOption`.
Finally there's a :class:`AccountTransactionView` that is used by
the financial application to efficiently display a ledger.
"""
# pylint: enable=E1101
import datetime
from kiwi.currency import currency
from storm.expr import And, LeftJoin, Or
from storm.info import ClassAlias
from storm.references import Reference
from zope.interface import implementer
from stoqlib.database.expr import TransactionTimestamp, Date
from stoqlib.database.properties import (DateTimeCol, EnumCol, IdCol,
IntCol, PriceCol, UnicodeCol)
from stoqlib.database.viewable import Viewable
from stoqlib.domain.base import Domain
from stoqlib.domain.interfaces import IDescribable
from stoqlib.domain.station import BranchStation
from stoqlib.exceptions import PaymentError
from stoqlib.lib.parameters import sysparam
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
class BillOption(Domain):
    """List of values for bill (boleto) generation.

    Each row is a free-form (option, value) string pair attached to a
    |bankaccount|, e.g. an option named ``nosso_numero``.

    See also:
    `schema <http://doc.stoq.com.br/schema/tables/bill_option.html>`__
    """

    __storm_table__ = 'bill_option'

    #: option name, such as nosso_numero
    option = UnicodeCol()

    #: value of the option
    value = UnicodeCol()

    #: id of the |bankaccount| this option belongs to
    bank_account_id = IdCol()

    #: the |bankaccount| this option belongs to
    bank_account = Reference(bank_account_id, 'BankAccount.id')
class BankAccount(Domain):
    """Information specific to a bank, tied to exactly one |account|.

    See also:
    `schema <http://doc.stoq.com.br/schema/tables/bank_account.html>`__
    """

    __storm_table__ = 'bank_account'

    #: id of the |account| for this bank account
    account_id = IdCol()

    #: the |account| for this bank account
    account = Reference(account_id, 'Account.id')

    # FIXME: This is brazil specific, should probably be replaced by a
    # bank reference to a separate class with name in addition to
    # the bank number

    #: an identifier for the bank type of this account
    bank_number = IntCol(default=0)

    #: an identifier for the bank branch/agency which is responsible
    #: for this account
    bank_branch = UnicodeCol(default=None)

    #: an identifier for this bank account
    bank_account = UnicodeCol(default=None)

    @property
    def options(self):
        """Get the bill options for this bank account

        :returns: a list of :class:`BillOption`
        """
        # BillOption.bank_account is a Reference back to this table, so the
        # keyword lookup matches the option rows pointing at self.
        return self.store.find(BillOption,
                               bank_account=self)
@implementer(IDescribable)
class Account(Domain):
    """An account, a collection of |accounttransactions| that may be
    controlled by a bank.

    See also: `schema <http://doc.stoq.com.br/schema/tables/account.html>`__,
    `manual <http://doc.stoq.com.br/manual/account.html>`__
    """

    __storm_table__ = 'account'

    #: Bank
    TYPE_BANK = u'bank'

    #: Cash/Till
    TYPE_CASH = u'cash'

    #: Assets, like investment account
    TYPE_ASSET = u'asset'

    #: Credit
    TYPE_CREDIT = u'credit'

    #: Income/Salary
    TYPE_INCOME = u'income'

    #: Expenses
    TYPE_EXPENSE = u'expense'

    #: Equity, like unbalanced
    TYPE_EQUITY = u'equity'

    #: (incoming label, outgoing label) per account type; consumed by
    #: :meth:`get_type_label` to describe a transaction's direction.
    account_labels = {
        TYPE_BANK: (_(u"Deposit"), _(u"Withdrawal")),
        TYPE_CASH: (_(u"Receive"), _(u"Spend")),
        TYPE_ASSET: (_(u"Increase"), _(u"Decrease")),
        TYPE_CREDIT: (_(u"Payment"), _(u"Charge")),
        TYPE_INCOME: (_(u"Income"), _(u"Charge"),),
        TYPE_EXPENSE: (_(u"Rebate"), _(u"Expense")),
        TYPE_EQUITY: (_(u"Increase"), _(u"Decrease")),
    }

    #: (translated description, type constant) pairs, e.g. for populating
    #: an account-type selector in the UI.
    account_type_descriptions = [
        (_(u"Bank"), TYPE_BANK),
        (_(u"Cash"), TYPE_CASH),
        (_(u"Asset"), TYPE_ASSET),
        (_(u"Credit"), TYPE_CREDIT),
        (_(u"Income"), TYPE_INCOME),
        (_(u"Expense"), TYPE_EXPENSE),
        (_(u"Equity"), TYPE_EQUITY),
    ]

    #: name of the account
    description = UnicodeCol(default=None)

    #: code which identifies the account
    code = UnicodeCol(default=None)

    #: parent account id, can be None
    parent_id = IdCol(default=None)

    #: parent account
    parent = Reference(parent_id, 'Account.id')

    #: id of the |branchstation| tied to this account, can be None
    station_id = IdCol(default=None)

    #: the |branchstation| tied
    #: to this account, mainly for TYPE_CASH accounts
    station = Reference(station_id, 'BranchStation.id')

    #: kind of account, one of the TYPE_* defines in this class
    account_type = EnumCol(allow_none=False, default=TYPE_BANK)

    #: |bankaccount| for this account, used by TYPE_BANK accounts
    bank = Reference('id', 'BankAccount.account_id', on_remote=True)

    #
    # IDescribable implementation
    #

    def get_description(self):
        return self.description

    #
    # Class Methods
    #

    @classmethod
    def get_by_station(cls, store, station):
        """Fetch the account associated with a station

        :param store: a store
        :param station: a |branchstation|
        :returns: the account, or None if the station has no account
        """
        if station is None:
            raise TypeError("station cannot be None")
        if not isinstance(station, BranchStation):
            raise TypeError("station must be a BranchStation, not %r" %
                            (station, ))
        return store.find(cls, station=station).one()

    @classmethod
    def get_children_for(cls, store, parent):
        """Get a list of child accounts of *parent*

        :param store: a store
        :param |account| parent: parent account
        :returns: the child accounts
        :rtype: resultset
        """
        return store.find(cls, parent=parent)

    @classmethod
    def get_accounts(cls, store):
        """Get a list of all accounts

        :param store: a store
        :returns: all accounts
        :rtype: resultset
        """
        return store.find(cls)

    #
    # Properties
    #

    @property
    def long_description(self):
        """Get a long description, including all the parent accounts,
        such as Tills:cotovia"""
        parts = []
        account = self
        while account:
            if account in parts:
                # Defensive: a cyclic parent chain would otherwise make
                # this loop spin forever.
                break
            parts.append(account)
            account = account.parent
        return u':'.join([a.description for a in reversed(parts)])

    @property
    def transactions(self):
        """Returns the transactions of this account, on either side
        (as destination or as source).

        :returns: resultset of |accounttransaction|
        """
        return self.store.find(AccountTransaction,
                               Or(self.id == AccountTransaction.account_id,
                                  self.id == AccountTransaction.source_account_id))

    #
    # Public API
    #

    def get_total_for_interval(self, start, end):
        """Fetch the total value moved through this account in an interval.

        :param datetime start: beginning of the interval
        :param datetime end: end of the interval
        :returns: incoming minus outgoing value as a currency (zero when
            there are no transactions in the interval)
        """
        if not isinstance(start, datetime.datetime):
            raise TypeError("start must be a datetime.datetime, not %s" % (
                type(start), ))
        if not isinstance(end, datetime.datetime):
            raise TypeError("end must be a datetime.datetime, not %s" % (
                type(end), ))
        # Self-transfers (source == destination) cancel out and are skipped.
        query = And(Date(AccountTransaction.date) >= start,
                    Date(AccountTransaction.date) <= end,
                    AccountTransaction.source_account_id != AccountTransaction.account_id)
        transactions = self.store.find(AccountTransaction, query)
        incoming = transactions.find(AccountTransaction.account_id == self.id)
        outgoing = transactions.find(AccountTransaction.source_account_id == self.id)
        # sum() returns None on an empty resultset; treat that as zero.
        positive_values = incoming.sum(AccountTransaction.value) or 0
        negative_values = outgoing.sum(AccountTransaction.value) or 0
        return currency(positive_values - negative_values)

    def can_remove(self):
        """If the account can be removed.

        Not all accounts can be removed, some are internal to Stoq
        and cannot be removed"""
        # Can't remove accounts that are used in a parameter
        if (sysparam.compare_object('IMBALANCE_ACCOUNT', self) or
                sysparam.compare_object('TILLS_ACCOUNT', self) or
                sysparam.compare_object('BANKS_ACCOUNT', self)):
            return False
        # Can't remove station accounts
        if self.station:
            return False
        # When we remove this Account, all the related AccountTransaction will
        # be assigned to the IMBALANCE_ACCOUNT and BankAccount will be removed,
        # so we need to skip them here
        return super(Account, self).can_remove(
            skip=[('account_transaction', 'account_id'),
                  ('account_transaction', 'source_account_id'),
                  ('bank_account', 'account_id')])

    def remove(self, store):
        """Remove the current account. This updates all transactions which
        refers to this account and removes them.

        :param store: a store
        :raises TypeError: if :meth:`can_remove` is false for this account
        """
        if not self.can_remove():
            raise TypeError("Account %r cannot be removed" % (self, ))
        imbalance_account_id = sysparam.get_object_id('IMBALANCE_ACCOUNT')
        # Re-point both sides of every related transaction at the imbalance
        # account before deleting this one.
        for transaction in store.find(AccountTransaction,
                                      account=self):
            transaction.account_id = imbalance_account_id
            store.flush()
        for transaction in store.find(AccountTransaction,
                                      source_account=self):
            transaction.source_account_id = imbalance_account_id
            store.flush()
        bank = self.bank
        if bank:
            # Bill options hang off the bank account; drop them first.
            for option in bank.options:
                store.remove(option)
            store.remove(bank)
        self.store.remove(self)

    def has_child_accounts(self):
        """If this account has child accounts

        :returns: True if any other accounts has this account as a parent"""
        return not self.store.find(Account, parent=self).is_empty()

    def get_type_label(self, out):
        """Returns the label to show for the increases/decreases
        for transactions of this account.

        See :obj:`~..account_labels`

        :param out: if the transaction is going out
        """
        return self.account_labels[self.account_type][int(out)]

    def matches(self, account_id):
        """Check if this account or its parent account is the same
        as another account id.

        :param account_id: the account id to compare with
        :returns: if the accounts matches.
        """
        if self.id == account_id:
            return True
        if self.parent_id and self.parent_id == account_id:
            return True
        return False
class AccountTransaction(Domain):
    """Transaction between two accounts.

    A transaction is a transfer of money from the
    :obj:`~.source_account` to the
    :obj:`~.account`.

    It removes a negative amount of money from the source and increases
    the account by the same amount.
    There's only one value, but depending on the view it's either negative
    or positive, it can never be zero though.
    A transaction can optionally be tied to a |payment|

    See also:
    `schema <http://doc.stoq.com.br/schema/tables/account_transaction.html>`__
    `manual <http://doc.stoq.com.br/manual/transaction.html>`__
    """

    __storm_table__ = 'account_transaction'

    # operation_type values: direction of the transaction relative to
    # the destination account.
    TYPE_IN = u'in'
    TYPE_OUT = u'out'

    # FIXME: It's way to tricky to calculate the direction and it's
    # values for an AccountTransaction due to the fact that
    # we're only store one value. We should store two values,
    # one for how much the current account should be increased
    # with and another one which is how much the other account
    # should be increased with. For split transaction we might
    # want to store more values, so it might make sense to allow
    # N values per transaction.
    account_id = IdCol()

    #: destination |account|
    account = Reference(account_id, 'Account.id')

    source_account_id = IdCol()

    #: source |account|
    source_account = Reference(source_account_id, 'Account.id')

    #: short human readable summary of the transaction
    description = UnicodeCol()

    #: identifier of this transaction within a account
    code = UnicodeCol()

    #: value transfered
    value = PriceCol(default=0)

    #: date the transaction was done
    date = DateTimeCol()

    payment_id = IdCol(default=None)

    #: |payment| this transaction relates to, can also be ``None``
    payment = Reference(payment_id, 'Payment.id')

    #: operation_type represents the type of transaction (debit/credit)
    operation_type = EnumCol(allow_none=False, default=TYPE_IN)

    # NOTE(review): sqlmeta/lazyUpdate looks like a leftover from SQLObject;
    # storm does not use it — confirm whether it can be dropped.
    class sqlmeta:
        lazyUpdate = True

    @classmethod
    def get_inverted_operation_type(cls, operation_type):
        """Get the inverted operation_type (IN->OUT / OUT->IN)

        :param operation_type: the type of transaction
        :returns: the inverted transaction type
        """
        if operation_type == cls.TYPE_IN:
            return cls.TYPE_OUT
        return cls.TYPE_IN

    @classmethod
    def create_from_payment(cls, payment, account=None):
        """Create a new transaction based on a |payment|.

        It's normally used when creating a transaction which represents
        a payment, for instance when you receive a bill or a check from
        a |client| which will enter a |bankaccount|.

        :param payment: the |payment| to create the transaction for.
        :param account: if an outgoing payment, the |account| will be the
            source of transaction. Otherwise will be the destination account.
        :returns: the transaction
        :raises PaymentError: if the payment has not been paid yet
        """
        if not payment.is_paid():
            raise PaymentError(_("Payment needs to be paid"))
        store = payment.store
        value = payment.paid_value
        # The IMBALANCE_ACCOUNT is used as the counterpart of the
        # transaction when no explicit account is involved on that side.
        if payment.is_outpayment():
            operation_type = cls.TYPE_OUT
            source = account or payment.method.destination_account
            destination = sysparam.get_object(store, 'IMBALANCE_ACCOUNT')
        else:
            operation_type = cls.TYPE_IN
            source = sysparam.get_object(store, 'IMBALANCE_ACCOUNT')
            destination = account or payment.method.destination_account
        return cls(source_account=source,
                   account=destination,
                   value=value,
                   description=payment.description,
                   code=unicode(payment.identifier),
                   date=payment.paid_date,
                   store=store,
                   payment=payment,
                   operation_type=operation_type)

    def create_reverse(self):
        """Reverse this transaction, this happens when a payment
        is set as not paid.

        :returns: the newly created account transaction representing
            the reversal
        """
        # We're effectively canceling the old transaction here,
        # to avoid having more than one transaction referencing the same
        # payment we reset the payment to None.
        #
        # It would be nice to have all of them reference the same payment,
        # but it makes it harder to create the reversal.
        self.payment = None
        new_type = self.get_inverted_operation_type(self.operation_type)
        return AccountTransaction(
            source_account=self.account,
            account=self.source_account,
            value=self.value,
            description=_(u"Reverted: %s") % (self.description),
            code=self.code,
            date=TransactionTimestamp(),
            store=self.store,
            payment=None,
            operation_type=new_type)

    def invert_transaction_type(self):
        """Invert source/destination accounts and operation_type.

        When changing an incoming transaction to outgoing or vice-versa,
        the source and destination accounts must be inverted. Thus, the
        outgoing value always will belong to the source account.
        """
        temp_account = self.account
        operation_type = self.operation_type
        self.account = self.source_account
        self.source_account = temp_account
        self.operation_type = self.get_inverted_operation_type(operation_type)

    def get_other_account(self, account):
        """Get the other end of a transaction

        :param account: an |account|
        :returns: the other end
        """
        if self.source_account == account:
            return self.account
        elif self.account == account:
            return self.source_account
        else:
            # the given account is not part of this transaction
            raise AssertionError

    def set_other_account(self, other, account):
        """Set the other end of a transaction

        :param other: an |account| which we do not want to set
        :param account: the |account| to set
        """
        other = self.store.fetch(other)
        if self.source_account == other:
            self.account = account
        elif self.account == other:
            self.source_account = account
        else:
            # the given account is not part of this transaction
            raise AssertionError
class AccountTransactionView(Viewable):
    """AccountTransactionView provides a fast view
    of the transactions tied to a specific |account|.

    It's mainly used to show a ledger.
    """

    # Aliases so the account table can be joined twice: once as the
    # destination and once as the source of each transaction.
    Account_Dest = ClassAlias(Account, 'account_dest')
    Account_Source = ClassAlias(Account, 'account_source')

    #: the underlying AccountTransaction for each row
    transaction = AccountTransaction

    id = AccountTransaction.id
    code = AccountTransaction.code
    description = AccountTransaction.description
    value = AccountTransaction.value
    date = AccountTransaction.date
    operation_type = AccountTransaction.operation_type
    dest_account_id = Account_Dest.id
    dest_account_description = Account_Dest.description
    source_account_id = Account_Source.id
    source_account_description = Account_Source.description

    tables = [
        AccountTransaction,
        LeftJoin(Account_Dest,
                 AccountTransaction.account_id == Account_Dest.id),
        LeftJoin(Account_Source,
                 AccountTransaction.source_account_id == Account_Source.id),
    ]

    @classmethod
    def get_for_account(cls, account, store):
        """Get all transactions for this |account|, see Account.transaction"""
        return store.find(cls, Or(account.id == AccountTransaction.account_id,
                                  account.id == AccountTransaction.source_account_id))

    def get_account_description(self, account):
        """Get description of the other |account|, eg.
        the one which is transfered to/from.
        """
        if self.source_account_id == account.id:
            return self.dest_account_description
        elif self.dest_account_id == account.id:
            return self.source_account_description
        else:
            # the row does not involve the given account at all
            raise AssertionError

    def get_value(self, account):
        """Gets the transaction value according to an |account|.

        If this |account| is the source, the value returned will be
        negative, representing an outgoing transaction.
        """
        # A transaction that was not adjusted, will have the source equals
        # to destination account. So get the value based on operation type.
        if self.source_account_id == self.dest_account_id:
            return self.get_value_by_type()
        elif self.source_account_id == account.id:
            return -self.value
        else:
            return self.value

    def get_value_by_type(self):
        """Returns the transaction value, based on operation type."""
        if self.operation_type == AccountTransaction.TYPE_IN:
            return self.value
        else:
            return -self.value
|
tiagocardosos/stoq
|
stoqlib/domain/account.py
|
Python
|
gpl-2.0
| 20,945
|
[
"VisIt"
] |
8664683a47d98f4bc1d04e9438eed6a7928aedfa71a6888b7e04111b891fbe9e
|
"""Plugin docstring.
"""
__version__ = '0.1'
__author__ = 'Psi4 Developer'
# Load Python modules
from .pymodule import *
# Load C++ plugin
import os
plugdir = os.path.split(os.path.abspath(__file__))[0]
#sofile = plugdir + '/' + os.path.split(plugdir)[1] + '.so'
#psi4.plugin_load(sofile)
|
CDSherrill/psi4
|
tests/psithon2/psiaux1/myplugin1/__init__.py
|
Python
|
lgpl-3.0
| 294
|
[
"Psi4"
] |
97e24ada265c62682925ad0b6ad1f66bacaa89223e147a17d38474310fb708bc
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test logfiles with (non)linear response output in cclib"""
import os
import unittest
import numpy
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericPolarTest(unittest.TestCase):
"""Generic static polarizability unittest"""
def testshape(self):
"""Is the dimension of the polarizability tensor 3 x 3?"""
self.assertEqual(len(self.data.polarizabilities), 1)
self.assertEqual(self.data.polarizabilities[0].shape, (3, 3))
class ReferencePolarTest(GenericPolarTest):
"""Customized static polarizability unittest"""
# Reference values are from DALTON2015/Trp_polar_abalnr.out
isotropic = 74.12424
principal_components = [30.29431523, 91.5361917, 100.54220307]
isotropic_delta = 0.01
principal_components_delta = 0.01
def testisotropic(self):
"""Is the isotropic polarizability (average of the diagonal elements)
+/- 0.01 from a reference?
"""
isotropic = numpy.average(numpy.diag(self.data.polarizabilities[0]))
self.assertAlmostEqual(isotropic, self.isotropic, delta=self.isotropic_delta)
def testprincomponents(self):
"""Are each of the principal components (eigenvalues) of the
polarizability tensor +/- 0.01 from a reference?
"""
principal_components = numpy.linalg.eigvalsh(self.data.polarizabilities[0])
for c in range(3):
self.assertAlmostEqual(principal_components[c],
self.principal_components[c],
delta=self.principal_components_delta)
if __name__=="__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['Polar'])
suite.testall()
|
Schamnad/cclib
|
test/data/testPolar.py
|
Python
|
bsd-3-clause
| 1,986
|
[
"cclib"
] |
e2e120e91898746eb4a683c7fd70025c00f444233a6d710e1e90a65e1b28d0ed
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import glob
import os
import tempfile
class Ncl(Package):
    """NCL is an interpreted language designed specifically for
    scientific data analysis and visualization. Supports NetCDF 3/4,
    GRIB 1/2, HDF 4/5, HDF-EOD 2/5, shapefile, ASCII, binary.
    Numerous analysis functions are built-in."""

    homepage = "https://www.ncl.ucar.edu"
    url = "https://github.com/NCAR/ncl/archive/6.4.0.tar.gz"

    version('6.4.0', 'd891452cda7bb25afad9b6c876c73986')

    patch('spack_ncl.patch', when="@6.4.0")
    # Make ncl compile with hdf5 1.10
    patch('hdf5.patch', when="@6.4.0")
    # ymake-filter's buffer may overflow
    patch('ymake-filter.patch', when="@6.4.0")

    # This installation script is implemented according to this manual:
    # http://www.ncl.ucar.edu/Download/build_from_src.shtml

    variant('hdf4', default=False, description='Enable HDF4 support.')
    variant('gdal', default=False, description='Enable GDAL support.')
    variant('triangle', default=True, description='Enable Triangle support.')
    variant('udunits2', default=True, description='Enable UDUNITS-2 support.')
    variant('openmp', default=True, description='Enable OpenMP support.')

    # Non-optional dependencies according to the manual:
    depends_on('jpeg')
    depends_on('netcdf')
    depends_on('cairo+X')

    # Extra dependencies that may be missing from build system:
    depends_on('bison', type='build')
    depends_on('flex+lex')
    depends_on('libiconv')
    depends_on('tcsh')

    # Also, the manual says that ncl requires zlib, but that comes as a
    # mandatory dependency of libpng, which is a mandatory dependency of
    # cairo.

    # The following dependencies are required, otherwise several components
    # fail to compile:
    depends_on('curl')
    depends_on('libiconv')
    depends_on('libx11')
    depends_on('libxaw')
    depends_on('libxmu')

    # In Spack, we do not have an option to compile netcdf without netcdf-4
    # support, so we will tell the ncl configuration script that we want
    # support for netcdf-4, but the script assumes that hdf5 is compiled with
    # szip support. We introduce this restriction with the following dependency
    # statement.
    depends_on('hdf5+szip')
    depends_on('szip')

    # ESMF is only required at runtime (for ESMF_regridding.ncl)
    depends_on('esmf', type='run')

    # In Spack, we also do not have an option to compile netcdf without DAP
    # support, so we will tell the ncl configuration script that we have it.

    # Some of the optional dependencies according to the manual:
    depends_on('hdf', when='+hdf4')
    depends_on('gdal', when='+gdal')
    depends_on('udunits2', when='+udunits2')

    # We need src files of triangle to appear in ncl's src tree if we want
    # triangle's features.
    resource(
        name='triangle',
        url='http://www.netlib.org/voronoi/triangle.zip',
        md5='10aff8d7950f5e0e2fb6dd2e340be2c9',
        placement='triangle_src',
        when='+triangle')

    sanity_check_is_file = ['bin/ncl']

    def patch(self):
        # Make configure scripts use Spack's tcsh
        files = ['Configure'] + glob.glob('config/*')
        filter_file('^#!/bin/csh -f', '#!/usr/bin/env csh', *files)

    @run_before('install')
    def filter_sbang(self):
        # Filter sbang before install so Spack's sbang hook can fix it up
        files = glob.glob('ncarg2d/src/bin/scripts/*')
        files += glob.glob('ncarview/src/bin/scripts/*')
        files += glob.glob('ni/src/scripts/*')
        csh = join_path(self.spec['tcsh'].prefix.bin, 'csh')
        filter_file('^#!/bin/csh', '#!{0}'.format(csh), *files)

    def install(self, spec, prefix):
        """Configure and build NCL; requires both C and Fortran compilers."""
        if (self.compiler.fc is None) or (self.compiler.cc is None):
            raise InstallError('NCL package requires both '
                               'C and Fortran compilers.')
        self.prepare_site_config()
        self.prepare_install_config()
        self.prepare_src_tree()
        make('Everything', parallel=False)

    def setup_environment(self, spack_env, run_env):
        run_env.set('NCARG_ROOT', self.spec.prefix)

    def prepare_site_config(self):
        """Write the config/Spack site configuration with compiler flags."""
        fc_flags = []
        cc_flags = []
        c2f_flags = []

        if '+openmp' in self.spec:
            fc_flags.append(self.compiler.openmp_flag)
            cc_flags.append(self.compiler.openmp_flag)

        if self.compiler.name == 'gcc':
            fc_flags.append('-fno-range-check')
            c2f_flags.extend(['-lgfortran', '-lm'])
        elif self.compiler.name == 'intel':
            fc_flags.append('-fp-model precise')
            cc_flags.append('-fp-model precise')
            c2f_flags.extend(['-lifcore', '-lifport'])

        with open('./config/Spack', 'w') as f:
            f.writelines([
                '#define HdfDefines\n',
                '#define CppCommand \'/usr/bin/env cpp -traditional\'\n',
                '#define CCompiler cc\n',
                '#define FCompiler fc\n',
                ('#define CtoFLibraries ' + ' '.join(c2f_flags) + '\n'
                 if len(c2f_flags) > 0
                 else ''),
                ('#define CtoFLibrariesUser ' + ' '.join(c2f_flags) + '\n'
                 if len(c2f_flags) > 0
                 else ''),
                ('#define CcOptions ' + ' '.join(cc_flags) + '\n'
                 if len(cc_flags) > 0
                 else ''),
                ('#define FcOptions ' + ' '.join(fc_flags) + '\n'
                 if len(fc_flags) > 0
                 else ''),
                '#define BuildShared NO'
            ])

    def prepare_install_config(self):
        """Drive NCL's interactive Configure script with canned answers."""
        # Remove the results of the previous configuration attempts.
        self.delete_files('./Makefile', './config/Site.local')

        # Generate an array of answers that will be passed to the interactive
        # configuration script.
        config_answers = [
            # Enter Return to continue
            '\n',
            # Build NCL?
            'y\n',
            # Parent installation directory :
            '\'' + self.spec.prefix + '\'\n',
            # System temp space directory :
            '\'' + tempfile.gettempdir() + '\'\n',
            # Build NetCDF4 feature support (optional)?
            'y\n'
        ]

        if '+hdf4' in self.spec:
            config_answers.extend([
                # Build HDF4 support (optional) into NCL?
                'y\n',
                # Also build HDF4 support (optional) into raster library?
                'y\n',
                # Did you build HDF4 with szip support?
                'y\n' if self.spec.satisfies('^hdf+szip') else 'n\n'
            ])
        else:
            config_answers.extend([
                # Build HDF4 support (optional) into NCL?
                'n\n',
                # Also build HDF4 support (optional) into raster library?
                'n\n'
            ])

        config_answers.extend([
            # Build Triangle support (optional) into NCL
            'y\n' if '+triangle' in self.spec else 'n\n',
            # If you are using NetCDF V4.x, did you enable NetCDF-4 support?
            'y\n',
            # Did you build NetCDF with OPeNDAP support?
            'y\n',
            # Build GDAL support (optional) into NCL?
            'y\n' if '+gdal' in self.spec else 'n\n',
            # Build EEMD support (optional) into NCL?
            'n\n',
            # Build Udunits-2 support (optional) into NCL?
            # BUG FIX: this previously tested '+uduints2' (typo), which can
            # never match the declared 'udunits2' variant, so Udunits-2
            # support was silently never enabled.
            'y\n' if '+udunits2' in self.spec else 'n\n',
            # Build Vis5d+ support (optional) into NCL?
            'n\n',
            # Build HDF-EOS2 support (optional) into NCL?
            'n\n',
            # Build HDF5 support (optional) into NCL?
            'y\n',
            # Build HDF-EOS5 support (optional) into NCL?
            'n\n',
            # Build GRIB2 support (optional) into NCL?
            'n\n',
            # Enter local library search path(s) :
            # The paths will be passed by the Spack wrapper.
            ' \n',
            # Enter local include search path(s) :
            # All other paths will be passed by the Spack wrapper.
            '\'' + join_path(self.spec['freetype'].prefix.include,
                             'freetype2') + '\'\n',
            # Go back and make more changes or review?
            'n\n',
            # Save current configuration?
            'y\n'
        ])

        config_answers_filename = 'spack-config.in'
        config_script = Executable('./Configure')

        with open(config_answers_filename, 'w') as f:
            f.writelines(config_answers)

        with open(config_answers_filename, 'r') as f:
            config_script(input=f)

    def prepare_src_tree(self):
        """Copy the Triangle sources into NCL's source tree if enabled."""
        if '+triangle' in self.spec:
            triangle_src = join_path(self.stage.source_path, 'triangle_src')
            triangle_dst = join_path(self.stage.source_path, 'ni', 'src',
                                     'lib', 'hlu')
            copy(join_path(triangle_src, 'triangle.h'), triangle_dst)
            copy(join_path(triangle_src, 'triangle.c'), triangle_dst)

    @staticmethod
    def delete_files(*filenames):
        """Delete the given files, ignoring missing ones.

        :raises InstallError: if an existing file cannot be removed
        """
        for filename in filenames:
            if os.path.exists(filename):
                try:
                    os.remove(filename)
                except OSError as e:
                    raise InstallError('Failed to delete file %s: %s' % (
                        e.filename, e.strerror))
|
mfherbst/spack
|
var/spack/repos/builtin/packages/ncl/package.py
|
Python
|
lgpl-2.1
| 10,721
|
[
"NetCDF"
] |
3816543c17ee018c51560cfa0530eb602c7ba1a0149f707964aedd6804816896
|
"""
Module to call mcsqs, distributed with AT-AT
https://www.brown.edu/Departments/Engineering/Labs/avdw/atat/
"""
import os
import tempfile
import warnings
from pathlib import Path
from subprocess import Popen, TimeoutExpired
from collections import namedtuple
from typing import Dict, List, Optional, Union
from monty.dev import requires
from monty.os.path import which
from pymatgen.core.structure import Structure
Sqs = namedtuple("Sqs", "bestsqs objective_function allsqs clusters directory")
"""
Return type for run_mcsqs.
bestsqs: Structure
objective_function: Union[float, str]
allsqs: List
clusters: List
directory: str
"""
@requires(
    which("mcsqs") and which("str2cif"),
    "run_mcsqs requires first installing AT-AT, " "see https://www.brown.edu/Departments/Engineering/Labs/avdw/atat/",
)
def run_mcsqs(
    structure: Structure,
    clusters: Dict[int, float],
    scaling: Union[int, List[int]] = 1,
    search_time: float = 60,
    directory: Optional[str] = None,
    instances: Optional[int] = None,
    temperature: Union[int, float] = 1,
    wr: float = 1,
    wn: float = 1,
    wd: float = 0.5,
    tol: float = 1e-3,
) -> Sqs:
    """
    Helper function for calling mcsqs with different arguments

    Args:
        structure (Structure): Disordered pymatgen Structure object
        clusters (dict): Dictionary of cluster interactions with entries in the form
            number of atoms: cutoff in angstroms
        scaling (int or list): Scaling factor to determine supercell. Two options are possible:
            a. (preferred) Scales number of atoms, e.g., for a structure with 8 atoms,
               scaling=4 would lead to a 32 atom supercell
            b. A sequence of three scaling factors, e.g., [2, 1, 1], which
               specifies that the supercell should have dimensions 2a x b x c
            Defaults to 1.
        search_time (float): Time spent looking for the ideal SQS in minutes (default: 60)
        directory (str): Directory to run mcsqs calculation and store files (default: None
            runs calculations in a temp directory)
        instances (int): Specifies the number of parallel instances of mcsqs to run
            (default: number of cpu cores detected by Python)
        temperature (int or float): Monte Carlo temperature (default: 1), "T" in atat code
        wr (int or float): Weight assigned to range of perfect correlation match in objective
            function (default = 1)
        wn (int or float): Multiplicative decrease in weight per additional point in cluster (default: 1)
        wd (int or float): Exponent of decay in weight as function of cluster diameter (default: 0.5)
        tol (int or float): Tolerance for matching correlations (default: 1e-3)

    Returns:
        Tuple of Pymatgen structure SQS of the input structure, the mcsqs objective function,
        list of all SQS structures, and the directory where calculations are run
    """
    num_atoms = len(structure)

    if structure.is_ordered:
        raise ValueError("Pick a disordered structure")

    if instances is None:
        # os.cpu_count() can return None if detection fails
        instances = os.cpu_count()

    original_directory = os.getcwd()
    if not directory:
        directory = tempfile.mkdtemp()
    os.chdir(directory)

    # BUG FIX: previously only the final timeout path restored the caller's
    # working directory; every successful return left the process chdir'ed
    # into `directory`. The try/finally guarantees the cwd is always
    # restored (the return value is computed before the finally runs, so
    # relative-path parsing still happens inside `directory`).
    try:
        if isinstance(scaling, (int, float)):
            if scaling % 1:
                raise ValueError("Scaling should be an integer, not {}".format(scaling))
            mcsqs_find_sqs_cmd = ["mcsqs", "-n {}".format(scaling * num_atoms)]
        else:
            # Set supercell to identity (will make supercell with pymatgen)
            with open("sqscell.out", "w") as f:
                f.write("1\n1 0 0\n0 1 0\n0 0 1\n")
            structure = structure * scaling
            mcsqs_find_sqs_cmd = ["mcsqs", "-rc", "-n {}".format(num_atoms)]

        structure.to(filename="rndstr.in")

        # Generate clusters
        mcsqs_generate_clusters_cmd = ["mcsqs"]
        for num in clusters:
            mcsqs_generate_clusters_cmd.append("-" + str(num) + "=" + str(clusters[num]))

        # Run mcsqs to find clusters
        with Popen(mcsqs_generate_clusters_cmd) as p:
            p.communicate()

        # Generate SQS structures
        add_ons = [
            "-T {}".format(temperature),
            "-wr {}".format(wr),
            "-wn {}".format(wn),
            "-wd {}".format(wd),
            "-tol {}".format(tol),
        ]
        mcsqs_find_sqs_processes = []
        if instances and instances > 1:
            # if multiple instances, run a range of commands using "-ip"
            for i in range(instances):
                instance_cmd = ["-ip {}".format(i + 1)]
                cmd = mcsqs_find_sqs_cmd + add_ons + instance_cmd
                p = Popen(cmd)  # pylint: disable=R1732
                mcsqs_find_sqs_processes.append(p)
        else:
            # run normal mcsqs command
            cmd = mcsqs_find_sqs_cmd + add_ons
            p = Popen(cmd)  # pylint: disable=R1732
            mcsqs_find_sqs_processes.append(p)

        try:
            for idx, p in enumerate(mcsqs_find_sqs_processes):
                p.communicate(timeout=search_time * 60)

            if instances and instances > 1:
                p = Popen(["mcsqs", "-best"])  # pylint: disable=R1732
                p.communicate()

            if os.path.exists("bestsqs.out") and os.path.exists("bestcorr.out"):
                return _parse_sqs_path(".")

            raise RuntimeError("mcsqs exited before timeout reached")

        except TimeoutExpired:
            for p in mcsqs_find_sqs_processes:
                p.kill()
                p.communicate()

            # Find the best sqs structures
            if instances and instances > 1:
                if not os.path.exists("bestcorr1.out"):
                    raise RuntimeError(
                        "mcsqs did not generate output files, "
                        "is search_time sufficient or are number of instances too high?"
                    )
                p = Popen(["mcsqs", "-best"])  # pylint: disable=R1732
                p.communicate()

            if os.path.exists("bestsqs.out") and os.path.exists("bestcorr.out"):
                return _parse_sqs_path(".")

            raise TimeoutError("Cluster expansion took too long.")
    finally:
        os.chdir(original_directory)
def _parse_sqs_path(path) -> Sqs:
    """
    Private function to parse mcsqs output directory.

    Args:
        path: directory to perform parsing

    Returns:
        Tuple of Pymatgen structure SQS of the input structure, the mcsqs
        objective function, list of all SQS structures, and the directory
        where calculations are run
    """
    path = Path(path)

    # detected instances will be 0 if mcsqs was run in series, or number of
    # instances otherwise (bestsqs1.out, bestsqs2.out, ... exist per instance)
    detected_instances = len(list(path.glob("bestsqs*[0-9]*.out")))

    # Convert best SQS structure to cif file and pymatgen Structure
    with Popen("str2cif < bestsqs.out > bestsqs.cif", shell=True, cwd=path) as p:
        p.communicate()

    with warnings.catch_warnings():
        # silence warnings from reading disordered/partial-occupancy files
        warnings.simplefilter("ignore")
        bestsqs = Structure.from_file(path / "bestsqs.out")

    # Get best SQS objective function (last line of bestcorr.out,
    # formatted as "... = <value>" or "... = Perfect_match")
    with open(path / "bestcorr.out", "r") as f:
        lines = f.readlines()

    objective_function_str = lines[-1].split("=")[-1].strip()
    objective_function: Union[float, str]
    if objective_function_str != "Perfect_match":
        objective_function = float(objective_function_str)
    else:
        objective_function = "Perfect_match"

    # Get all SQS structures and objective functions, one per instance
    allsqs = []

    for i in range(detected_instances):
        sqs_out = "bestsqs{}.out".format(i + 1)
        sqs_cif = "bestsqs{}.cif".format(i + 1)
        corr_out = "bestcorr{}.out".format(i + 1)
        with Popen("str2cif < {} > {}".format(sqs_out, sqs_cif), shell=True, cwd=path) as p:
            p.communicate()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            sqs = Structure.from_file(path / sqs_out)
        with open(path / corr_out, "r") as f:
            lines = f.readlines()

        objective_function_str = lines[-1].split("=")[-1].strip()
        obj: Union[float, str]
        if objective_function_str != "Perfect_match":
            obj = float(objective_function_str)
        else:
            obj = "Perfect_match"
        allsqs.append({"structure": sqs, "objective_function": obj})

    clusters = _parse_clusters(path / "clusters.out")

    return Sqs(
        bestsqs=bestsqs,
        objective_function=objective_function,
        allsqs=allsqs,
        directory=str(path.resolve()),
        clusters=clusters,
    )
def _parse_clusters(filename):
"""
Private function to parse clusters.out file
Args:
path: directory to perform parsing
Returns:
List of dicts
"""
with open(filename, "r") as f:
lines = f.readlines()
clusters = []
cluster_block = []
for line in lines:
line = line.split("\n")[0]
if line == "":
clusters.append(cluster_block)
cluster_block = []
else:
cluster_block.append(line)
cluster_dicts = []
for cluster in clusters:
cluster_dict = {
"multiplicity": int(cluster[0]),
"longest_pair_length": float(cluster[1]),
"num_points_in_cluster": int(cluster[2]),
}
points = []
for point in range(cluster_dict["num_points_in_cluster"]):
line = cluster[3 + point].split(" ")
point_dict = {}
point_dict["coordinates"] = [float(line) for line in line[0:3]]
point_dict["num_possible_species"] = int(line[3]) + 2 # see ATAT manual for why +2
point_dict["cluster_function"] = float(line[4]) # see ATAT manual for what "function" is
points.append(point_dict)
cluster_dict["coordinates"] = points
cluster_dicts.append(cluster_dict)
return cluster_dicts
|
gmatteo/pymatgen
|
pymatgen/command_line/mcsqs_caller.py
|
Python
|
mit
| 9,898
|
[
"pymatgen"
] |
729340f6838d5b49f0615dde230d32b8825ff1427aa0ea3fa6e2fae2ad20fddd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.utils import timezone
from snisi_sms.reply import SMSReply
from snisi_core.models.Providers import Provider
from snisi_cataract import PROJECT_BRAND
from snisi_cataract.integrity import (CATMissionStartChecker,
CATMissionEndChecker,
CATSurgeryChecker,
CATSurgeryResultChecker,
create_mission_report,
create_surgery_report,
create_result_report,
close_mission_report)
logger = logging.getLogger(__name__)
def cataract_handler(message):
    """Dispatch an incoming ``cat ...`` SMS to the matching sub-handler.

    Returns False for messages that are not cataract-related or use an
    unknown sub-keyword.
    """
    text = message.content.lower()
    if not text.startswith('cat '):
        return False
    if text.startswith('cat start'):
        return cataract_start_mission(message)
    if text.startswith('cat visit'):
        return cataract_surgery(message)
    if text.startswith('cat fixe'):
        return cataract_surgery(message, fixed=True)
    if text.startswith('cat result'):
        return cataract_result(message)
    if text.startswith('cat end'):
        return cataract_end_mission(message)
    return False
# SMS routing table: first keyword of the message -> handler callable.
KEYWORDS = {
    'cat': cataract_handler,
}
def check_create_provider(username, password, reply):
    """Fetch and validate the Provider allowed to create Cataract reports.

    Sends an error reply and returns None when the username is unknown,
    the role is not allowed, or the password is wrong; otherwise returns
    the Provider.
    """
    try:
        provider = Provider.active.get(username=username)
    except Provider.DoesNotExist:
        reply.error("Ce nom d'utilisateur ({}) n'existe pas.".format(username))
        return None

    allowed_roles = ('tt_tso', 'tt_opt', 'tt_amo', 'tt_surgeon')
    if provider.role.slug not in allowed_roles:
        reply.error("Votre rôle ne vous permet pas de créer "
                    "des rapports Cataract")
        return None

    if not provider.check_password(password):
        reply.error("Votre mot de passe est incorrect.")
        return None

    return provider
def cataract_start_mission(message, **kwargs):
    """Handle a ``cat start`` SMS: open a new cataract mission report.

    :param message: incoming SMS message object
    """
    reply = SMSReply(message, PROJECT_BRAND)

    try:
        args_names = ['kw1', 'kw2', 'username', 'password',
                      'district', 'started_on', 'operator_type', 'strategy']
        args_values = message.content.strip().lower().split()
        arguments = dict(zip(args_names, args_values))
    except ValueError:
        # failure to split means we probably lack some data;
        # we can't process it.
        # NOTE(review): dict(zip(...)) does not raise ValueError; extra or
        # missing tokens are silently dropped — confirm this is intended.
        return reply.error("Le format du SMS est incorrect.")

    # check credentials
    provider = check_create_provider(arguments['username'],
                                     arguments['password'], reply)
    if provider is None:
        # an error reply was already sent by check_create_provider
        return True

    checker = CATMissionStartChecker()

    # feed data holder with sms provided data
    for key, value in arguments.items():
        checker.set(key, value)
    checker.set('submit_time', message.event_on)
    checker.set('submitter', provider)

    # test the data (existing report, values)
    checker.check()
    if not checker.is_valid():
        return reply.error(checker.feedbacks.pop().render(short=True))

    report, text_message = create_mission_report(
        provider=provider,
        expected_reporting=checker.get('expected_reporting'),
        completed_on=None,
        integrity_checker=checker,
        data_source=message.content)
    if report:
        return reply.success(text_message)
    else:
        return reply.error(text_message)
def cataract_surgery(message, fixed=False, **kwargs):
    """Handle a ``cat visit`` / ``cat fixe`` SMS: record a cataract surgery.

    :param message: incoming SMS message object
    :param fixed: True when the message used the ``cat fixe`` keyword
        instead of ``cat visit``
    """
    reply = SMSReply(message, PROJECT_BRAND)

    try:
        args_names = ['kw1', 'kw2', 'username', 'password', 'location',
                      'surgery_date', 'gender', 'eye', 'age', 'number']
        args_values = message.content.strip().lower().split()
        arguments = dict(zip(args_names, args_values))
    except ValueError:
        # failure to split means we probably lack some data;
        # we can't process it.
        # NOTE(review): dict(zip(...)) does not raise ValueError; extra or
        # missing tokens are silently dropped — confirm this is intended.
        return reply.error("Le format du SMS est incorrect.")

    # check credentials
    provider = check_create_provider(arguments['username'],
                                     arguments['password'], reply)
    if provider is None:
        # an error reply was already sent by check_create_provider
        return True

    # Visit depends on an ExpectedReporting AND an open MissionR from which
    # the expected period is set.
    checker = CATSurgeryChecker()

    # feed data holder with sms provided data
    for key, value in arguments.items():
        checker.set(key, value)
    checker.set('submit_time', message.event_on)
    checker.set('submitter', provider)

    # test the data (existing report, values)
    checker.check(fixed=fixed)
    if not checker.is_valid():
        return reply.error(checker.feedbacks.pop().render(short=True))

    report, text_message = create_surgery_report(
        provider=provider,
        expected_reporting=None,
        completed_on=timezone.now(),
        integrity_checker=checker,
        data_source=message.content)
    if report:
        return reply.success(text_message)
    else:
        return reply.error(text_message)
def cataract_result(message, **kwargs):
    """Record a post-surgery visual-acuity result submitted by SMS.

    Expected SMS format (whitespace separated, in order):
    kw1 kw2 username password result_date surgery_ident visual_acuity

    :param message: incoming SMS object exposing ``content`` and ``event_on``
    :return: SMS reply object (success or error) for the submitter
    """
    reply = SMSReply(message, PROJECT_BRAND)

    args_names = ['kw1', 'kw2', 'username', 'password',
                  'result_date', 'surgery_ident', 'visual_acuity']
    args_values = message.content.strip().lower().split()
    # BUG FIX: split() never raises ValueError and zip() silently truncates,
    # so the previous try/except ValueError was dead code -- an SMS with too
    # few fields crashed later on a missing dict key. Reject it explicitly.
    if len(args_values) < len(args_names):
        return reply.error("Le format du SMS est incorrect.")
    arguments = dict(zip(args_names, args_values))

    # check credentials
    provider = check_create_provider(arguments['username'],
                                     arguments['password'], reply)
    if provider is None:
        # check_create_provider already sent the error reply
        return True

    # Visit depends on an ExpectedReporting AND an open MissionR from which
    # the expected period is set.
    checker = CATSurgeryResultChecker()
    # feed data holder with sms provided data
    for key, value in arguments.items():
        checker.set(key, value)
    checker.set('submit_time', message.event_on)
    checker.set('submitter', provider)

    # test the data (existing report, values)
    checker.check()
    if not checker.is_valid():
        return reply.error(checker.feedbacks.pop().render(short=True))

    report, text_message = create_result_report(
        provider=provider,
        expected_reporting=None,
        completed_on=timezone.now(),
        integrity_checker=checker,
        data_source=message.content)
    if report:
        return reply.success(text_message)
    else:
        return reply.error(text_message)
def cataract_end_mission(message, **kwargs):
    """Close an open cataract mission report via SMS.

    Expected SMS format (whitespace separated, in order):
    kw1 kw2 username password district ended_on

    :param message: incoming SMS object exposing ``content`` and ``event_on``
    :return: SMS reply object (success or error) for the submitter
    """
    reply = SMSReply(message, PROJECT_BRAND)

    args_names = ['kw1', 'kw2', 'username', 'password',
                  'district', 'ended_on']
    args_values = message.content.strip().lower().split()
    # BUG FIX: split() never raises ValueError and zip() silently truncates,
    # so the previous try/except ValueError was dead code -- an SMS with too
    # few fields crashed later on a missing dict key. Reject it explicitly.
    if len(args_values) < len(args_names):
        return reply.error("Le format du SMS est incorrect.")
    arguments = dict(zip(args_names, args_values))

    # check credentials
    provider = check_create_provider(arguments['username'],
                                     arguments['password'], reply)
    if provider is None:
        # check_create_provider already sent the error reply
        return True

    checker = CATMissionEndChecker()
    # feed data holder with sms provided data
    for key, value in arguments.items():
        checker.set(key, value)
    checker.set('submit_time', message.event_on)
    checker.set('submitter', provider)

    # test the data (existing report, values)
    checker.check()
    if not checker.is_valid():
        return reply.error(checker.feedbacks.pop().render(short=True))

    report, text_message = close_mission_report(
        provider=provider,
        expected_reporting=checker.get('expected_reporting'),
        completed_on=timezone.now(),
        integrity_checker=checker,
        data_source=message.content)
    if report:
        return reply.success(text_message)
    else:
        return reply.error(text_message)
|
yeleman/snisi
|
snisi_cataract/sms_handlers.py
|
Python
|
mit
| 8,413
|
[
"VisIt"
] |
856858e30147eb3a7f9ac6757c32f6088d07573d2b43229aba52163c5686f2f1
|
"""Tests performing linter-like checks"""
import ast
from os.path import dirname, join
from subprocess import check_output
# Root of the repository. Note that this is relative to this file,
# so if this file is moved, this may need to be changed:
source_root = dirname(dirname(__file__))
def test_logger_format_strings():
    """Scan for proper use of logger format strings.

    Per @zenhack's comment on issue #629:

    > All over the codebase you can find statements like:
    >
    >     logger.error('Foo: %r' % bar)
    >
    > The % operator being python's format-string splicing operator. The
    > problem with this is that the logging functions do the formation
    > string splicing themselves, i.e. what you want in this case is:
    >
    >     logger.error('Foo: %r', bar)
    >
    > This opens up the possibility of format-string injection
    > vulnerabilities. Frankly, this is too easy to do, especially
    > since in other contexts % is the correct thing. We ought to
    > (a) make sure all instances of this mistake are fixed, and (b)
    > come up with a way to catch this mistake automatically going
    > forward; perhaps some kind of linter.

    This is that linter; it scans the source tree looking for places
    where the logging functions are called with any first argument
    that isn't a string literal.
    """
    # check_output() returns bytes on Python 3; decode before splitting.
    # splitlines() also avoids the spurious empty final entry that
    # split('\n') produces when the output ends with a newline.
    output = check_output([join(source_root, 'ci', 'list_tracked_pyfiles.sh')])
    files = output.decode('utf-8').strip().splitlines()
    for filename in files:
        with open(join(source_root, filename)) as f:
            tree = ast.parse(f.read(), filename=filename)
        LogCallVisitor(filename).visit(tree)
class LogCallVisitor(ast.NodeVisitor):
    """Ast node visitor used by test_logger_format_strings."""

    def __init__(self, filename):
        # Kept only so assertion messages can identify the offending file.
        self.filename = filename

    def visit_Call(self, node):
        """
        This function is called on all "Call" nodes in the ast, i.e.
        anything where an expression is being called:

            foo(bar)
            foo.baz(bar)
            foo[quux](bar, baz)
        """
        # BUG FIX: NodeVisitor stops descending into children once a
        # visit_* method exists, so without this call a logging call
        # nested inside another call's arguments (e.g.
        # `f(logger.error('x' % y))`) was never checked.
        self.generic_visit(node)

        # First, filter this out to the set of calls we care about:
        #
        # 1. Make sure this a call to an attribute (method), e.g.
        #    foo.bar(baz):
        if not isinstance(node.func, ast.Attribute):
            return
        # 2. Make sure the name of the method is one of the recognized
        #    logging method names. In theory this could give us
        #    false positives if someone names another function after
        #    one of these, or false negatives if we store one of these
        #    in a variable (don't do that). We could be smarter about
        #    figuring out what function is being called, but this is
        #    probably good enough:
        logfunc_names = {
            'critical',
            'error',
            'warn',
            'warning',
            'info',
            'debug',
        }
        if node.func.attr not in logfunc_names:
            return

        # We've decided this is a logging call; sanity check it:
        assert len(node.args) != 0, (
            "Logging function called with zero arguments at %r "
            "line %d column %d." % (self.filename,
                                    node.lineno,
                                    node.col_offset)
        )
        assert isinstance(node.args[0], ast.Str), (
            "Logging function called with non-string literal format "
            "string at %r line %d column %d." % (self.filename,
                                                 node.lineno,
                                                 node.col_offset)
        )
|
CCI-MOC/haas
|
tests/custom_lint.py
|
Python
|
apache-2.0
| 3,640
|
[
"VisIt"
] |
55a74811a3ba6a88cf2465c60868289957eb5bf8aa771e755972b175fe556f57
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
from pycparser import CParser
from pycparser import c_ast
def extractTypeAndName(n, defaultName=None):
    """Return ``(type name, pointer depth, identifier)`` for a declaration.

    Varargs are reported as an ``int`` named ``vararg``; a bare, unnamed
    ``void`` parameter yields ``None`` (i.e. "no argument").
    """
    if isinstance(n, c_ast.EllipsisParam):
        return ('int', 0, 'vararg')

    depth = 0
    node = n.type
    # Peel off pointer/array wrappers, counting one dereference per layer.
    while isinstance(node, (c_ast.PtrDecl, c_ast.ArrayDecl)):
        depth += 1
        node = dict(node.children())['type']

    if isinstance(node, c_ast.FuncDecl):
        # Function pointer: describe the pointed-to function instead.
        return extractTypeAndName(node)

    if isinstance(node.type, (c_ast.Struct, c_ast.Union, c_ast.Enum)):
        typename = node.type.name
    else:
        typename = node.type.names[0]

    if typename == 'void' and depth == 0 and not node.declname:
        return None

    name = node.declname or defaultName or ''
    return typename.lstrip('_'), depth, name.lstrip('_')
# Lightweight records describing a parsed C function signature.
Function = collections.namedtuple('Function', ('type', 'derefcnt', 'name', 'args'))
Argument = collections.namedtuple('Argument', ('type', 'derefcnt', 'name'))


def Stringify(X):
    """Render a Function/Argument as C-like text, e.g. ``int ** ptr``."""
    return '{0} {1} {2}'.format(X.type, '*' * X.derefcnt, X.name)
def ExtractFuncDecl(node, verbose=False):
    """Build a ``Function`` record from a pycparser ``FuncDecl`` node.

    Returns ``None`` (after printing a notice) when the declaration has
    no name. Unnamed arguments receive synthetic names ``arg0``, ``arg1``...
    """
    # The function name needs to be dereferenced.
    ftype, fderef, fname = extractTypeAndName(node)
    if not fname:
        print("Skipping function without a name!")
        print(node.show())
        return

    arguments = []
    for index, (_, argnode) in enumerate(node.args.children()):
        parsed = extractTypeAndName(argnode, 'arg%i' % index)
        if parsed is not None:
            arguments.append(Argument(*parsed))

    func = Function(ftype, fderef, fname, arguments)
    if verbose:
        rendered_args = ','.join(Stringify(a) for a in func.args)
        print(Stringify(func) + '(' + rendered_args + ');')
    return func
def ExtractAllFuncDecls(ast, verbose=False):
    """Collect every function declaration in *ast*, keyed by name.

    :param ast: a pycparser translation-unit node.
    :param verbose: forwarded to ExtractFuncDecl (prints each signature).
    :return: dict mapping function name -> Function record.
    """
    Functions = {}
    class FuncDefVisitor(c_ast.NodeVisitor):
        def visit_FuncDecl(self, node, *a):
            f = ExtractFuncDecl(node, verbose)
            # BUG FIX: ExtractFuncDecl returns None for nameless
            # declarations; dereferencing f.name then raised
            # AttributeError. Skip those entries instead.
            if f is not None:
                Functions[f.name] = f
    FuncDefVisitor().visit(ast)
    return Functions
def ExtractFuncDeclFromSource(source):
    """Parse *source* as a C declaration and return its first Function.

    Returns ``None`` when parsing fails (the traceback is printed and the
    error otherwise swallowed) or when no function declaration is found.
    """
    try:
        tree = CParser().parse(source + ';')
        declarations = ExtractAllFuncDecls(tree)
        for func in declarations.values():
            return func
    except Exception:
        import traceback
        traceback.print_exc()
        # deliberately swallowed: callers treat None as "could not parse"
|
anthraxx/pwndbg
|
pwndbg/funcparser.py
|
Python
|
mit
| 2,418
|
[
"VisIt"
] |
24e58d0510fa87e603f3bf7aff094d42863cb470f9b0d28e72cf2032ab573cf3
|
#!/usr/bin/env python
""" Create a DIRAC transfer/replicateAndRegister request to be executed
by the DMS Transfer Agent
"""
__RCSID__ = "$Id$"
# NOTE: Python 2 script (print statements); relies on DIRAC being configured.
import os
from hashlib import md5
import time
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.List import breakListIntoChunks

# Usage banner for --help: positional args are the destination SE followed
# by one or more LFNs (or local files containing one LFN per line).
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[0],
                                     __doc__.split( '\n' )[1],
                                     'Usage:',
                                     ' %s [option|cfgfile] ... DestSE LFN ...' % Script.scriptName,
                                     'Arguments:',
                                     ' DestSE: Destination StorageElement',
                                     ' LFN: LFN or file containing a List of LFNs' ] ) )
Script.parseCommandLine( ignoreErrors = False )
monitor = False
args = Script.getPositionalArgs()
if len( args ) < 2:
  Script.showHelp()
targetSE = args.pop( 0 )
lfns = []
for inputFileName in args:
  if os.path.exists( inputFileName ):
    # Argument is a local file: read one LFN per line.
    inputFile = open( inputFileName, 'r' )
    string = inputFile.read()
    inputFile.close()
    lfns.extend( [ lfn.strip() for lfn in string.splitlines() ] )
  else:
    # Argument is a bare LFN.
    lfns.append( inputFileName )
# Imported after parseCommandLine, per DIRAC convention.
from DIRAC.Resources.Storage.StorageElement import StorageElement
import DIRAC
# Check is provided SE is OK
se = StorageElement( targetSE )
if not se.valid:
  print se.errorReason
  print
  Script.showHelp()
from DIRAC.RequestManagementSystem.Client.ReqClient import ReqClient
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.RequestManagementSystem.private.RequestValidator import RequestValidator
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
reqClient = ReqClient()
fc = FileCatalog()
# Submit one ReplicateAndRegister request per chunk of at most 100 LFNs.
for lfnList in breakListIntoChunks( lfns, 100 ):
  oRequest = Request()
  # NOTE(review): both name halves hash time.time() and may be identical
  # within one iteration -- presumably intended as a unique id; confirm.
  oRequest.RequestName = "%s_%s" % ( md5( repr( time.time() ) ).hexdigest()[:16], md5( repr( time.time() ) ).hexdigest()[:16] )
  replicateAndRegister = Operation()
  replicateAndRegister.Type = 'ReplicateAndRegister'
  replicateAndRegister.TargetSE = targetSE
  # Fetch size/checksum/GUID for every LFN in the chunk at once.
  res = fc.getFileMetadata( lfnList )
  if not res['OK']:
    print "Can't get file metadata: %s" % res['Message']
    DIRAC.exit( 1 )
  if res['Value']['Failed']:
    print "Could not get the file metadata of the following, so skipping them:"
    for fFile in res['Value']['Failed']:
      print fFile
  lfnMetadata = res['Value']['Successful']
  for lfn in lfnMetadata:
    try:
      rarFile = File()
      rarFile.LFN = lfn
      rarFile.Size = lfnMetadata[lfn]['Size']
      rarFile.Checksum = lfnMetadata[lfn]['Checksum']
      rarFile.GUID = lfnMetadata[lfn]['GUID']
      rarFile.ChecksumType = 'ADLER32'
      replicateAndRegister.addFile( rarFile )
    except ValueError as err:
      # Bad metadata for this file: report and keep going with the rest.
      print "Error", str(err), lfn
      continue
  oRequest.addOperation( replicateAndRegister )
  # Validate before submission so malformed requests fail fast.
  isValid = RequestValidator().validate( oRequest )
  if not isValid['OK']:
    print "Request is not valid: ", isValid['Message']
    DIRAC.exit( 1 )
  result = reqClient.putRequest( oRequest )
  if result['OK']:
    print "Request %d submitted successfully" % result['Value']
  else:
    print "Failed to submit Request: ", result['Message']
|
Sbalbp/DIRAC
|
DataManagementSystem/scripts/dirac-dms-create-replication-request.py
|
Python
|
gpl-3.0
| 3,404
|
[
"DIRAC"
] |
6ec5fbdf818d604d09acb30b6f9ed1fa4de2dbf200a38fd6b915a75e982058d0
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests of the No U-Turn Sampler."""
import collections
# Dependency imports
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.distributions.internal import statistical_testing as st
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import distribute_lib
from tensorflow_probability.python.internal import distribute_test_lib
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
# Short aliases used throughout the tests.
tfd = tfp.distributions
tfb = tfp.bijectors
tfde = tfp.experimental.distributions

# NOTE(review): presumably rewritten to True in the JAX build of TFP --
# confirm; it selects backend-appropriate distribution classes below.
JAX_MODE = False

if JAX_MODE:
  # Under JAX the distribution classes can be used directly.
  _CompositeMultivariateNormalPrecisionFactorLinearOperator = tfde.MultivariateNormalPrecisionFactorLinearOperator
  _CompositeJointDistributionSequential = tfd.JointDistributionSequential
else:
  # Under TF, wrap the classes with auto_composite_tensor so that instances
  # can be passed across tf.function boundaries as structured values.
  _CompositeMultivariateNormalPrecisionFactorLinearOperator = tfp.experimental.auto_composite_tensor(
      tfde.MultivariateNormalPrecisionFactorLinearOperator,
      omit_kwargs=('name',))
  _CompositeJointDistributionSequential = tfp.experimental.auto_composite_tensor(
      tfd.JointDistributionSequential, omit_kwargs=('name',))
@tf.function(autograph=False)
def run_nuts_chain(event_size, batch_size, num_steps, initial_state=None,
                   seed=None):
  """Run preconditioned NUTS against a standard MVN-diag target.

  Args:
    event_size: dimensionality of the MultivariateNormalDiag target.
    batch_size: number of independent chains.
    num_steps: number of results to draw (no burn-in).
    initial_state: optional starting state; defaults to zeros.
    seed: optional PRNG seed; defaults to the shared test seed.

  Returns:
    Tuple of (chain states, leapfrogs taken per step).
  """
  if seed is None:
    seed = test_util.test_seed()

  def target_log_prob_fn(event):
    with tf.name_scope('nuts_test_target_log_prob'):
      return tfd.MultivariateNormalDiag(
          tf.zeros(event_size),
          scale_identity_multiplier=1.).log_prob(event)

  if initial_state is None:
    initial_state = tf.zeros([batch_size, event_size])

  kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
      target_log_prob_fn,
      step_size=[0.3],
      unrolled_leapfrog_steps=2,
      max_tree_depth=4)

  chain_state, leapfrogs_taken = tfp.mcmc.sample_chain(
      num_results=num_steps,
      num_burnin_steps=0,
      # Intentionally pass a list argument to test that singleton lists are
      # handled reasonably (c.f. assert_univariate_target_conservation, which
      # uses an unwrapped singleton).
      current_state=[initial_state],
      kernel=kernel,
      trace_fn=lambda _, pkr: pkr.leapfrogs_taken,
      seed=seed)
  return chain_state, leapfrogs_taken
def assert_univariate_target_conservation(test, target_d, step_size):
  """Return assertion ops checking one NUTS step preserves `target_d`.

  Samples from `target_d`, takes a single NUTS step from each sample, and
  asserts (a) the resulting empirical CDF still matches `target_d` (i.e.
  the target is stationary) and (b) the chain actually moved.

  Args:
    test: the TestCase instance (used for eager shape assertions).
    target_d: univariate target distribution with `.sample` and `.cdf`.
    step_size: NUTS leapfrog step size.

  Returns:
    Tuple of three assertion ops for the caller to evaluate.
  """
  # Sample count limited partly by memory reliably available on Forge. The test
  # remains reasonable even if the nuts recursion limit is severely curtailed
  # (e.g., 3 or 4 levels), so use that to recover some memory footprint and bump
  # the sample count.
  num_samples = int(5e4)
  num_steps = 1
  strm = test_util.test_seed_stream()
  # We wrap the initial values in `tf.identity` to avoid broken gradients
  # resulting from a bijector cache hit, since bijectors of the same
  # type/parameterization now share a cache.
  # TODO(b/72831017): Fix broken gradients caused by bijector caching.
  initialization = tf.identity(target_d.sample([num_samples], seed=strm()))

  @tf.function(autograph=False)
  def run_chain():
    nuts = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_d.log_prob,
        step_size=step_size,
        max_tree_depth=3,
        unrolled_leapfrog_steps=2)
    result = tfp.mcmc.sample_chain(
        num_results=num_steps,
        num_burnin_steps=0,
        current_state=initialization,
        trace_fn=None,
        kernel=nuts,
        seed=strm())
    return result

  result = run_chain()
  test.assertAllEqual([num_steps, num_samples], result.shape)
  answer = result[0]
  # Stationarity: post-step empirical CDF must agree with the target's CDF.
  check_cdf_agrees = st.assert_true_cdf_equal_by_dkwm(
      answer, target_d.cdf, false_fail_rate=1e-6)
  # Make sure the sample size gives the DKWM test enough statistical power.
  check_enough_power = assert_util.assert_less(
      st.min_discrepancy_of_true_cdfs_detectable_by_dkwm(
          num_samples, false_fail_rate=1e-6, false_pass_rate=1e-6), 0.025)
  movement = tf.abs(answer - initialization)
  test.assertAllEqual([num_samples], movement.shape)
  # This movement distance (1 * step_size) was selected by reducing until 100
  # runs with independent seeds all passed.
  check_movement = assert_util.assert_greater_equal(
      tf.reduce_mean(movement), 1 * step_size)
  return (check_cdf_agrees, check_enough_power, check_movement)
def assert_mvn_target_conservation(event_size, batch_size, **kwargs):
  """Return assertion ops checking one NUTS step preserves an MVN target.

  Multivariate analogue of `assert_univariate_target_conservation`: draws
  from a standard MVN, takes one NUTS step (via `run_nuts_chain`), and
  checks stationarity via random projections plus a movement lower bound.

  Args:
    event_size: dimensionality of the MVN target.
    batch_size: number of independent chains/samples.
    **kwargs: forwarded to `run_nuts_chain` (e.g. `seed`).

  Returns:
    Tuple of four assertion ops for the caller to evaluate.
  """
  strm = test_util.test_seed_stream()
  initialization = tfd.MultivariateNormalFullCovariance(
      loc=tf.zeros(event_size),
      covariance_matrix=tf.eye(event_size)).sample(
          batch_size, seed=strm())
  samples, _ = run_nuts_chain(
      event_size, batch_size, num_steps=1,
      initial_state=initialization, **kwargs)
  answer = samples[0][-1]
  # Stationarity: compare pre- and post-step samples on random projections.
  check_cdf_agrees = (
      st.assert_multivariate_true_cdf_equal_on_projections_two_sample(
          answer, initialization, num_projections=100, false_fail_rate=1e-6))
  check_sample_shape = assert_util.assert_equal(
      tf.shape(answer)[0], batch_size)
  movement = tf.linalg.norm(answer - initialization, axis=-1)
  # This movement distance (0.3) was copied from the univariate case.
  check_movement = assert_util.assert_greater_equal(
      tf.reduce_mean(movement), 0.3)
  # Make sure the sample size gives the two-sample test enough power.
  check_enough_power = assert_util.assert_less(
      st.min_discrepancy_of_true_cdfs_detectable_by_dkwm_two_sample(
          batch_size, batch_size, false_fail_rate=1e-8, false_pass_rate=1e-6),
      0.055)
  return (
      check_cdf_agrees,
      check_sample_shape,
      check_movement,
      check_enough_power,
  )
@test_util.test_graph_and_eager_modes
class NutsTest(test_util.TestCase):
  """Functional tests for `tfp.experimental.mcmc.PreconditionedNoUTurnSampler`."""

  def testLogAcceptRatio(self):
    """Ensure that `log_accept_ratio` is close to 0 if step size is small."""
    seed = test_util.test_seed()

    @tf.function(autograph=False)
    def sample_from_banana():
      def banana_model():
        x0 = yield tfd.JointDistributionCoroutine.Root(tfd.Normal(0., 10.))
        _ = yield tfd.Normal(0.03 * (tf.square(x0) - 100.), 1.)
      banana = tfd.JointDistributionCoroutine(banana_model)
      kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
          banana.log_prob, step_size=0.35)
      trace_fn = lambda _, pkr: pkr.log_accept_ratio
      return tfp.mcmc.sample_chain(50,
                                   [0., 0.],
                                   kernel=kernel,
                                   trace_fn=trace_fn,
                                   seed=seed)[1]

    log_accept_ratio_trace = self.evaluate(sample_from_banana())
    self.assertAllGreater(log_accept_ratio_trace, -0.35)

  def testReproducibility(self):
    """Same seed must produce identical chains (eager and graph)."""
    seed = test_util.test_seed()
    s1 = self.evaluate(run_nuts_chain(2, 5, 10, seed=seed)[0])
    if tf.executing_eagerly():
      # In eager mode the global seed must be reset between runs.
      tf.random.set_seed(seed)
    s2 = self.evaluate(run_nuts_chain(2, 5, 10, seed=seed)[0])
    self.assertAllEqual(s1, s2)

  def testCorrectReadWriteInstruction(self):
    """Pin the memory-efficient tree-building instruction tables."""
    mocknuts = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=lambda x: x,
        max_tree_depth=4,
        step_size=1.)
    self.assertAllEqual(
        mocknuts._write_instruction,
        np.array([0, 4, 1, 4, 1, 4, 2, 4, 1, 4, 2, 4, 2, 4, 3, 4]))
    self.assertAllEqual(
        mocknuts._read_instruction,
        np.array([[0, 0],
                  [0, 1],
                  [0, 0],
                  [0, 2],
                  [0, 0],
                  [1, 2],
                  [0, 0],
                  [0, 3],
                  [0, 0],
                  [1, 2],
                  [0, 0],
                  [1, 3],
                  [0, 0],
                  [2, 3],
                  [0, 0],
                  [0, 4]]))

  def testUnivariateNormalTargetConservation(self):
    normal_dist = tfd.Normal(loc=1., scale=2.)
    self.evaluate(assert_univariate_target_conservation(
        self, normal_dist, step_size=0.2))

  def testSigmoidBetaTargetConservation(self):
    # Beta pushed through an inverse-sigmoid: unconstrained support.
    sigmoid_beta_dist = tfb.Invert(tfb.Sigmoid())(
        tfd.Beta(concentration0=1., concentration1=2.))
    self.evaluate(assert_univariate_target_conservation(
        self, sigmoid_beta_dist, step_size=0.2))

  def testBetaTargetConservation(self):
    # Not that we expect NUTS to do a good job without an unconstraining
    # bijector, but...
    beta_dist = tfd.Beta(concentration0=1., concentration1=2.)
    self.evaluate(assert_univariate_target_conservation(
        self, beta_dist, step_size=1e-3))

  @parameterized.parameters(
      (3, 50000,),
      # (5, 2,),
  )
  def testMultivariateNormalNd(self, event_size, batch_size):
    strm = test_util.test_seed_stream()
    self.evaluate(assert_mvn_target_conservation(event_size, batch_size,
                                                 seed=strm()))

  @parameterized.parameters(
      ([], 100),      # test scalar case
      ([1], 100),     # test size 1 case
      ([5], 100),
      ([2, 5], 100),  # test rank 2 case
  )
  def testLatentsOfMixedRank(self, batch_shape, num_steps):
    """Two equivalent targets, one flat and one multi-part, must agree."""
    strm = test_util.test_seed_stream()
    init0 = [tf.ones(batch_shape + [6])]
    init1 = [tf.ones(batch_shape + []),
             tf.ones(batch_shape + [1]),
             tf.ones(batch_shape + [2, 2])]

    @tf.function(autograph=False)
    def run_two_chains(init0, init1):
      def log_prob0(x):
        return tf.squeeze(tfd.Independent(
            tfd.Normal(tf.range(6, dtype=tf.float32),
                       tf.constant(1.)),
            reinterpreted_batch_ndims=1).log_prob(x))
      kernel0 = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
          log_prob0,
          step_size=0.3)
      [results0] = tfp.mcmc.sample_chain(
          num_results=num_steps,
          num_burnin_steps=10,
          current_state=init0,
          kernel=kernel0,
          trace_fn=None,
          seed=strm())

      def log_prob1(state0, state1, state2):
        # Same 6-dimensional normal target, split over three state parts.
        return tf.squeeze(
            tfd.Normal(tf.constant(0.), tf.constant(1.)).log_prob(state0)
            + tfd.Independent(
                tfd.Normal(tf.constant([1.]), tf.constant(1.)),
                reinterpreted_batch_ndims=1).log_prob(state1)
            + tfd.Independent(
                tfd.Normal(tf.constant([[2., 3.], [4., 5.]]), tf.constant(1.)),
                reinterpreted_batch_ndims=2).log_prob(state2)
            )
      kernel1 = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
          log_prob1,
          step_size=0.3)
      results1_ = tfp.mcmc.sample_chain(
          num_results=num_steps,
          num_burnin_steps=10,
          current_state=init1,
          kernel=kernel1,
          trace_fn=None,
          seed=strm())
      # Flatten the multi-part results so the two chains are comparable.
      results1 = tf.concat(
          [tf.reshape(x, [num_steps] + batch_shape + [-1]) for x in results1_],
          axis=-1)
      return results0, results1

    results0, results1 = run_two_chains(init0, init1)
    self.evaluate(
        st.assert_true_cdf_equal_by_dkwm_two_sample(results0, results1))

  @parameterized.parameters(
      (1000, 5, 3),
      # (500, 1000, 20),
  )
  def testMultivariateNormalNdConvergence(self, nsamples, nchains, nd):
    """Sample mean/covariance must converge to the MVN target's."""
    strm = test_util.test_seed_stream()
    theta0 = np.zeros((nchains, nd))
    mu = np.arange(nd)
    w = np.random.randn(nd, nd) * 0.1
    cov = w * w.T + np.diagflat(np.arange(nd) + 1.)
    step_size = np.random.rand(nchains, 1) * 0.1 + 1.

    @tf.function(autograph=False)
    def run_chain_and_get_summary(mu, scale_tril, step_size, nsamples, state):
      def target_log_prob_fn(event):
        with tf.name_scope('nuts_test_target_log_prob'):
          return tfd.MultivariateNormalTriL(
              loc=tf.cast(mu, dtype=tf.float64),
              scale_tril=tf.cast(scale_tril, dtype=tf.float64)).log_prob(event)
      nuts = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
          target_log_prob_fn,
          step_size=[step_size],
          max_tree_depth=4)

      def trace_fn(_, pkr):
        return (pkr.is_accepted, pkr.leapfrogs_taken)

      [x], (is_accepted, leapfrogs_taken) = tfp.mcmc.sample_chain(
          num_results=nsamples,
          num_burnin_steps=0,
          current_state=[tf.cast(state, dtype=tf.float64)],
          kernel=nuts,
          trace_fn=trace_fn,
          seed=strm())
      return (
          tf.shape(x),
          # We'll average over samples (dim=0) and chains (dim=1).
          tf.reduce_mean(x, axis=[0, 1]),
          tfp.stats.covariance(x, sample_axis=[0, 1]),
          leapfrogs_taken[is_accepted])

    sample_shape, sample_mean, sample_cov, leapfrogs_taken = self.evaluate(
        run_chain_and_get_summary(
            mu, np.linalg.cholesky(cov), step_size, nsamples, theta0))
    self.assertAllEqual(sample_shape, [nsamples, nchains, nd])
    self.assertAllClose(mu, sample_mean, atol=0.1, rtol=0.1)
    self.assertAllClose(cov, sample_cov, atol=0.15, rtol=0.15)
    # Test early stopping in tree building
    self.assertTrue(
        np.any(np.isin(np.asarray([5, 9, 11, 13]), np.unique(leapfrogs_taken))))

  def testCorrelated2dNormalwithinMCError(self):
    """Posterior-moment estimates must be within Monte Carlo error."""
    strm = test_util.test_seed_stream()
    dtype = np.float64
    # We run nreplica independent test to improve test robustness.
    nreplicas = 20
    nchains = 100
    num_steps = 1000
    mu = np.asarray([0., 3.], dtype=dtype)
    rho = 0.75
    sigma1 = 1.
    sigma2 = 2.
    cov = np.asarray([[sigma1 * sigma1, rho * sigma1 * sigma2],
                      [rho * sigma1 * sigma2, sigma2 * sigma2]],
                     dtype=dtype)
    true_param = np.hstack([mu, np.array([sigma1**2, sigma2**2, rho])])
    scale_tril = np.linalg.cholesky(cov)
    initial_state = np.zeros((nchains, nreplicas, 2), dtype)

    @tf.function(autograph=False)
    def run_chain_and_get_estimation_error():
      target_log_prob = tfd.MultivariateNormalTriL(
          loc=mu, scale_tril=scale_tril).log_prob
      nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
          target_log_prob,
          step_size=tf.constant([sigma1, sigma2], dtype))
      chain_state = tfp.mcmc.sample_chain(
          num_results=num_steps,
          num_burnin_steps=25,
          current_state=initial_state,
          kernel=tfp.mcmc.DualAveragingStepSizeAdaptation(nuts_kernel, 25, .8),
          seed=strm(),
          trace_fn=None)
      # Per-draw estimators of the variances and of the correlation.
      variance_est = tf.square(chain_state - mu)
      correlation_est = tf.reduce_prod(
          chain_state - mu, axis=-1, keepdims=True) / (sigma1 * sigma2)
      mcmc_samples = tf.concat([chain_state, variance_est, correlation_est],
                               axis=-1)
      expected = tf.reduce_mean(mcmc_samples, axis=[0, 1])
      ess = tf.reduce_sum(tfp.mcmc.effective_sample_size(mcmc_samples), axis=0)
      avg_monte_carlo_standard_error = tf.reduce_mean(
          tf.math.reduce_std(mcmc_samples, axis=0),
          axis=0) / tf.sqrt(ess)
      scaled_error = (
          tf.abs(expected - true_param) / avg_monte_carlo_standard_error)
      return tfd.Normal(
          loc=tf.zeros([], dtype), scale=1.).survival_function(scaled_error)

    # Run chains, compute the error, and compute the probability of getting
    # a more extreme error. `error_prob` has shape (nreplica * 5)
    error_prob = run_chain_and_get_estimation_error()
    # Check convergence using Markov chain central limit theorem, this is a
    # z-test at p=.01
    is_converged = error_prob > .005
    # Test at most 5% test fail out of total number of independent tests.
    n_total_tests = nreplicas * len(true_param)
    num_test_failed = self.evaluate(
        tf.math.reduce_sum(tf.cast(is_converged, dtype)))
    self.assertLessEqual(
        n_total_tests - num_test_failed, np.round(n_total_tests * .05))

  @parameterized.parameters(
      # (7, 5, 3, None),  TODO(b/182886159): Re-enable this test.
      (7, 5, 1, tf.TensorShape([None, 1])),
  )
  def testDynamicShape(self, nsample, batch_size, nd, dynamic_shape):
    """Sampler must work when the state shape is only known at runtime."""
    dtype = np.float32
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=tfd.Independent(
            tfd.Normal(tf.zeros(nd, dtype=dtype), 1.), 1).log_prob,
        step_size=.1)
    x_ = np.zeros([batch_size, nd], dtype=dtype)
    x = tf1.placeholder_with_default(x_, shape=dynamic_shape)
    mcmc_trace_ = tfp.mcmc.sample_chain(
        num_results=nsample,
        current_state=x,
        kernel=kernel,
        trace_fn=None,
        seed=test_util.test_seed())
    mcmc_trace = self.evaluate(mcmc_trace_)
    self.assertAllEqual(mcmc_trace.shape, [nsample, batch_size, nd])

  def testDivergence(self):
    """Neals funnel with large step size."""
    strm = test_util.test_seed_stream()
    neals_funnel = tfd.JointDistributionSequential(
        [
            tfd.Normal(loc=0., scale=3.),  # b0
            lambda y: tfd.Sample(  # pylint: disable=g-long-lambda
                tfd.Normal(loc=0., scale=tf.math.exp(y / 2)),
                sample_shape=9),
        ],
        validate_args=True
    )

    @tf.function(autograph=False)
    def run_chain_and_get_divergence():
      nchains = 5
      init_states = neals_funnel.sample(nchains, seed=strm())
      _, has_divergence = tfp.mcmc.sample_chain(
          num_results=100,
          kernel=tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
              target_log_prob_fn=lambda *args: neals_funnel.log_prob(args),
              step_size=[1., 1.]),
          current_state=init_states,
          trace_fn=lambda _, pkr: pkr.has_divergence,
          seed=strm())
      return tf.reduce_sum(tf.cast(has_divergence, dtype=tf.int32))

    divergence_count = self.evaluate(run_chain_and_get_divergence())
    # Test that we observe a fair amount of divergence.
    self.assertAllGreater(divergence_count, 100)

  def testSampleEndtoEndXLA(self):
    """An end-to-end test of sampling using NUTS."""
    if tf.executing_eagerly() or tf.config.experimental_functions_run_eagerly():
      self.skipTest('No need to test XLA under all execution regimes.')
    strm = test_util.test_seed_stream()
    predictors = tf.cast([
        201., 244., 47., 287., 203., 58., 210., 202., 198., 158., 165., 201.,
        157., 131., 166., 160., 186., 125., 218., 146.
    ], tf.float32)
    obs = tf.cast([
        592., 401., 583., 402., 495., 173., 479., 504., 510., 416., 393., 442.,
        317., 311., 400., 337., 423., 334., 533., 344.
    ], tf.float32)
    y_sigma = tf.cast([
        61., 25., 38., 15., 21., 15., 27., 14., 30., 16., 14., 25., 52., 16.,
        34., 31., 42., 26., 16., 22.
    ], tf.float32)
    # Robust linear regression model
    robust_lm = tfd.JointDistributionSequential(
        [
            tfd.Normal(loc=0., scale=1.),  # b0
            tfd.Normal(loc=0., scale=1.),  # b1
            tfd.HalfNormal(5.),  # df
            lambda df, b1, b0: tfd.Independent(  # pylint: disable=g-long-lambda
                tfd.StudentT(  # Likelihood
                    df=df[..., tf.newaxis],
                    loc=(b0[..., tf.newaxis] +
                         b1[..., tf.newaxis] * predictors[tf.newaxis]),
                    scale=y_sigma)),
        ],
        validate_args=True)
    log_prob = lambda b0, b1, df: robust_lm.log_prob([b0, b1, df, obs])
    init_step_size = [1., .2, .5]
    step_size0 = [tf.cast(x, dtype=tf.float32) for x in init_step_size]
    number_of_steps, burnin, nchain = 200, 50, 10

    @tf.function(autograph=False, jit_compile=True)
    def run_chain_and_get_diagnostic():
      # Ensure we're really in graph mode.
      assert hasattr(tf.constant([]), 'graph')
      # random initialization of the starting position of each chain
      b0, b1, df, _ = robust_lm.sample(nchain, seed=strm())
      # bijector to map constrained parameters to real
      unconstraining_bijectors = [
          tfb.Identity(),
          tfb.Identity(),
          tfb.Exp(),
      ]

      def trace_fn(_, pkr):
        return (pkr.inner_results.inner_results.step_size,
                pkr.inner_results.inner_results.log_accept_ratio)

      kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
          tfp.mcmc.TransformedTransitionKernel(
              inner_kernel=tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
                  target_log_prob_fn=log_prob,
                  step_size=step_size0),
              bijector=unconstraining_bijectors),
          target_accept_prob=.8,
          num_adaptation_steps=burnin,
      )
      # Sampling from the chain and get diagnostics
      mcmc_trace, (step_size, log_accept_ratio) = tfp.mcmc.sample_chain(
          num_results=number_of_steps,
          num_burnin_steps=burnin,
          current_state=[b0, b1, df],
          kernel=kernel,
          trace_fn=trace_fn,
          seed=strm())
      rhat = tfp.mcmc.potential_scale_reduction(mcmc_trace)
      return (
          [s[-1] for s in step_size],  # final step size
          tf.math.exp(tfp.math.reduce_logmeanexp(log_accept_ratio)),
          [tf.reduce_mean(rhat_) for rhat_ in rhat],  # average rhat
      )

    # Sample from posterior distribution and get diagnostic
    [
        final_step_size, average_accept_ratio, average_rhat
    ] = self.evaluate(run_chain_and_get_diagnostic())

    # Check that step size adaptation reduced the initial step size
    self.assertAllLess(
        np.asarray(final_step_size) - np.asarray(init_step_size), 0.)
    # Check that average acceptance ratio is close to target
    self.assertAllClose(
        average_accept_ratio,
        .8 * np.ones_like(average_accept_ratio),
        atol=0.1, rtol=0.1)
    # Check that mcmc sample quality is acceptable with tuning
    self.assertAllClose(
        average_rhat, np.ones_like(average_rhat), atol=0.05, rtol=0.05)

  def test_step_size_trace(self):
    """Final kernel results must expose the (unchanged) step size."""
    dist = tfd.Normal(0., 1.)
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        dist.log_prob, step_size=1.)
    _, _, fkr = tfp.mcmc.sample_chain(10, 0., kernel=kernel,
                                      return_final_kernel_results=True,
                                      seed=test_util.test_seed())
    self.assertAlmostEqual(1., self.evaluate(fkr.step_size))

  def test_zero_sized_event(self):
    """The sampler must tolerate a state part with a zero-sized dimension."""
    tlp_fn = lambda x, y: x[:, 0] + tf.pad(y, [[0, 0], [0, 1]])[:, 0]
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        tlp_fn, step_size=0.1)
    xy = [tf.ones([1, 1]), tf.ones([1, 0])]
    results = kernel.bootstrap_results(xy)
    self.evaluate(kernel.one_step(xy, results, seed=test_util.test_seed())[0])
# Allowed type of preconditioning schemes to use.
# See code for details.
PRECONDITION_SCHEMES = frozenset([
    'direct', 'precision_factor', 'sqrtm', 'scale',
    # `None` ==> No preconditioner. This is different than a "bad"
    # preconditioner. We will be able to check asymptotics with "None".
    'no_preconditioner'])


# Bundle of sampler outputs and tolerances computed for one preconditioning
# scheme (returned by PreconditionedNUTSCorrectnessTest helpers below).
RunNUTSResults = collections.namedtuple('RunNUTSResults', [
    'draws',
    'step_size',
    'final_step_size',
    'accept_prob',
    'mean_accept_prob',
    'min_ess',
    'sample_mean',
    'sample_cov',
    'sample_var',
    'mean_atol',
    'cov_atol',
    'var_rtol',
])
@test_util.test_graph_mode_only
class PreconditionedNUTSCorrectnessTest(test_util.TestCase):
  """More careful tests that sampling/preconditioning is actually working."""

  def _run_nuts_with_step_size(
      self,
      target_mvn,
      precondition_scheme,
      target_accept=0.75,
      num_results=2000,
      num_adaptation_steps=20,
  ):
    """Run NUTS with step_size adaptation, and return RunNUTSResults.

    Args:
      target_mvn: Multivariate normal distribution to sample from.
      precondition_scheme: One of PRECONDITION_SCHEMES; selects how the
        momentum distribution is built from the target covariance.
      target_accept: Target acceptance probability for step size adaptation.
      num_results: Number of post-burn-in draws.
      num_adaptation_steps: Number of dual-averaging adaptation steps
        (also used as the number of burn-in steps).

    Returns:
      A RunNUTSResults namedtuple of draws, diagnostics, and tolerances.
    """
    assert precondition_scheme in PRECONDITION_SCHEMES

    target_cov = target_mvn.covariance()

    cov_linop = tf.linalg.LinearOperatorFullMatrix(
        target_cov,
        is_self_adjoint=True,
        is_positive_definite=True)

    if precondition_scheme == 'no_preconditioner':
      momentum_distribution = None
    elif precondition_scheme == 'direct':
      momentum_distribution = tfd.MultivariateNormalLinearOperator(
          # The covariance of momentum is inv(covariance of position), and we
          # parameterize distributions by a square root of the covariance.
          scale=cov_linop.inverse().cholesky(),
      )
    elif precondition_scheme == 'precision_factor':
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          # The precision of momentum is the covariance of position.
          # The "factor" is the cholesky factor.
          precision_factor=cov_linop.cholesky(),
      )
    elif precondition_scheme == 'sqrtm':
      if JAX_MODE:
        self.skipTest('`sqrtm` is not yet implemented in JAX.')
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          # The symmetric square root is a perfectly valid "factor".
          precision_factor=tf.linalg.LinearOperatorFullMatrix(
              tf.linalg.sqrtm(target_cov)),
      )
    elif precondition_scheme == 'scale':
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          # Nothing wrong with using "scale", since the scale should be the
          # same as cov_linop.cholesky().
          precision_factor=target_mvn.scale,
      )
    else:
      raise RuntimeError(
          'Unhandled precondition_scheme: {}'.format(precondition_scheme))

    nuts_kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
        tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
            target_log_prob_fn=target_mvn.log_prob,
            momentum_distribution=momentum_distribution,
            max_tree_depth=4,
            step_size=1.),
        num_adaptation_steps=num_adaptation_steps,
        target_accept_prob=target_accept)

    def trace_fn(_, pkr):
      # Record per-step acceptance probability and the adapted step size.
      results = pkr.inner_results
      return {
          'accept_prob':
              tf.exp(tf.minimum(0., results.log_accept_ratio)),
          'step_size':
              results.step_size,
      }

    strm = test_util.test_seed_stream()

    @tf.function
    def do_run_run_run():
      """Do a run, return RunNUTSResults."""
      states, trace = tfp.mcmc.sample_chain(
          num_results,
          current_state=tf.identity(target_mvn.sample(seed=strm())),
          kernel=nuts_kernel,
          num_burnin_steps=num_adaptation_steps,
          seed=strm(),
          trace_fn=trace_fn)

      # If we had some number of chain dimensions, we would change sample_axis.
      sample_axis = 0

      sample_cov = tfp.stats.covariance(states, sample_axis=sample_axis)
      max_variance = tf.reduce_max(tf.linalg.diag_part(sample_cov))
      max_stddev = tf.sqrt(max_variance)
      min_ess = tf.reduce_min(tfp.mcmc.effective_sample_size(states))
      mean_accept_prob = tf.reduce_mean(trace['accept_prob'])

      return RunNUTSResults(
          draws=states,
          step_size=trace['step_size'],
          final_step_size=trace['step_size'][-1],
          accept_prob=trace['accept_prob'],
          mean_accept_prob=mean_accept_prob,
          min_ess=tf.reduce_min(tfp.mcmc.effective_sample_size(states)),
          sample_mean=tf.reduce_mean(states, axis=sample_axis),
          sample_cov=sample_cov,
          sample_var=tf.linalg.diag_part(sample_cov),
          # Standard error in variance estimation is related to standard
          # deviation of variance estimates. For a Normal, this is just Sqrt(2)
          # times variance divided by sqrt sample size (or so my old notes say).
          # So a relative tolerance is useful.
          # Add in a factor of 5 as a buffer.
          var_rtol=5 * tf.sqrt(2.) / tf.sqrt(min_ess),
          # For covariance matrix estimates, there can be terms that have
          # expectation = 0 (e.g. off diagonal entries). So the above doesn't
          # hold. So use an atol.
          cov_atol=5 * max_variance / tf.sqrt(min_ess),
          # Standard error in mean estimation is stddev divided by sqrt
          # sample size. This is an absolute tolerance.
          # Add in a factor of 5 as a buffer.
          mean_atol=5 * max_stddev / tf.sqrt(min_ess),
      )

    # Evaluate now, to ensure that states/accept_prob/etc... all match up with
    # the same graph evaluation. This is a gotcha about TFP MCMC in graph mode.
    return self.evaluate(do_run_run_run())

  def _check_correctness_of_moments_and_preconditioning(
      self,
      target_mvn,
      num_results,
      precondition_scheme,
  ):
    """Test that step size adaptation finds the theoretical optimal step size.

    See _caclulate_expected_step_size for formula details, but roughly, for a
    high dimensional Gaussian posterior, we can calculate the approximate step
    size to achieve a given target accept rate. For such a posterior,
    `PreconditionedNoUTurnSampler` mimics the dynamics of sampling from a
    standard normal distribution, and so should adapt to the step size where
    the scales are all ones.

    In the example below, `expected_step` is around 0.00002, so there is
    significantly different behavior when conditioning.

    Args:
      target_mvn: Multivariate normal instance to sample from.
      num_results: Number of samples to collect (post burn-in).
      precondition_scheme: String telling how to do preconditioning.
        Should be in PRECONDITION_SCHEMES.

    Returns:
      RunNUTSResults
    """
    results = self._run_nuts_with_step_size(
        target_mvn, precondition_scheme=precondition_scheme)

    # Sample moments must agree with the target within the statistically
    # derived tolerances computed by _run_nuts_with_step_size.
    self.assertAllClose(
        results.sample_mean, target_mvn.mean(), atol=results.mean_atol)
    self.assertAllClose(
        results.sample_var, target_mvn.variance(), rtol=results.var_rtol)
    self.assertAllClose(
        results.sample_cov, target_mvn.covariance(), atol=results.cov_atol)

    return results

  @parameterized.named_parameters(
      dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
      for scheme in PRECONDITION_SCHEMES)
  def test_correctness_with_2d_mvn_tril(self, precondition_scheme):
    # Low dimensional test to help people who want to step through and debug.
    target_mvn = tfd.MultivariateNormalTriL(
        loc=tf.constant([0., 0.]),
        scale_tril=[[1., 0.], [0.5, 2.]],
    )
    self._check_correctness_of_moments_and_preconditioning(
        target_mvn,
        # Lots of results, to test tight tolerance.
        # We're using a small dims here, so this isn't a big deal.
        num_results=2000,
        precondition_scheme=precondition_scheme)

  @parameterized.named_parameters(
      dict(testcase_name='_' + str(scheme), precondition_scheme=scheme)
      for scheme in PRECONDITION_SCHEMES)
  def test_correctness_with_20d_mvn_tril(self, precondition_scheme):
    # This is an almost complete check of the Gaussian case.
    dims = 20
    scale_wishart = tfd.WishartLinearOperator(
        # Important that df is just slightly bigger than dims. This makes the
        # scale_wishart ill conditioned. The result is that tests fail if we do
        # not handle transposes correctly.
        df=1.1 * dims,
        scale=tf.linalg.LinearOperatorIdentity(dims),
        input_output_cholesky=True,
        name='wishart_for_samples',
    )

    # evaluate right here to avoid working with a random target_mvn in graph
    # mode....that would cause issues, since we read off expected statistics
    # from looking at the mvn properties, so it would be bad if these properties
    # changed with every graph eval.
    scale_tril = self.evaluate(scale_wishart.sample(seed=test_util.test_seed()))

    target_mvn = tfd.MultivariateNormalTriL(
        # Non-trivial "loc" ensures we do not rely on being centered at 0.
        loc=tf.range(0., dims),
        scale_tril=scale_tril,
    )

    self._check_correctness_of_moments_and_preconditioning(
        target_mvn,
        # Lots of results, to test tight tolerance.
        num_results=1000,
        precondition_scheme=precondition_scheme)
@test_util.test_graph_mode_only
@parameterized.named_parameters(
    dict(testcase_name='_default', use_default=True),
    dict(testcase_name='_explicit', use_default=False))
class PreconditionedNUTSTest(test_util.TestCase):
  """Smoke/ESS tests of PreconditionedNoUTurnSampler.

  Each test is parameterized twice: `use_default=True` runs with no custom
  momentum distribution (no preconditioner), `use_default=False` supplies an
  explicit one matched to the target's scale.
  """

  def test_f64(self, use_default):
    """Sampling with float64 state should run without dtype errors."""
    if use_default:
      momentum_distribution = None
    else:
      momentum_distribution = tfd.Normal(0., tf.constant(.5, dtype=tf.float64))
      if not JAX_MODE:
        # In TF, wrap so the distribution is a CompositeTensor usable inside
        # tf.function / kernel results.
        momentum_distribution = tfp.experimental.as_composite(
            momentum_distribution)
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        lambda x: -x**2, step_size=.5, max_tree_depth=4,
        momentum_distribution=momentum_distribution)
    kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
    self.evaluate(tfp.mcmc.sample_chain(
        1, kernel=kernel, current_state=tf.ones([], tf.float64),
        num_burnin_steps=5, seed=test_util.test_seed(), trace_fn=None))

  # TODO(b/175787154): Enable this test
  def DISABLED_test_f64_multichain(self, use_default):
    """Multi-chain float64 variant of test_f64 (disabled, see bug)."""
    if use_default:
      momentum_distribution = None
    else:
      momentum_distribution = tfp.experimental.as_composite(
          tfd.Normal(0., tf.constant(.5, dtype=tf.float64)))
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        lambda x: -x**2, step_size=.5, max_tree_depth=2,
        momentum_distribution=momentum_distribution)
    kernel = tfp.mcmc.SimpleStepSizeAdaptation(kernel, num_adaptation_steps=3)
    nchains = 7
    self.evaluate(tfp.mcmc.sample_chain(
        1, kernel=kernel, current_state=tf.ones([nchains], tf.float64),
        num_burnin_steps=5, seed=test_util.test_seed(), trace_fn=None))

  def test_diag(self, use_default):
    """Test that a diagonal multivariate normal can be effectively sampled from.

    Args:
      use_default: bool, whether to use a custom momentum distribution, or
        the default.
    """
    mvn = tfd.MultivariateNormalDiag(
        loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])

    if use_default:
      momentum_distribution = None
      step_size = 0.1
    else:
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          precision_factor=mvn.scale,
      )
      step_size = 1.1
    nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size, max_tree_depth=4)
    draws = tfp.mcmc.sample_chain(
        110,
        tf.zeros(3),
        kernel=nuts_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    # Discard the first 10 draws as burn-in before measuring ESS.
    ess = tfp.mcmc.effective_sample_size(draws[-100:],
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)

    if not use_default:
      self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)

  def test_tril(self, use_default):
    """Sampling a correlated MVN; preconditioned run should mix better."""
    if tf.executing_eagerly():
      self.skipTest('b/169882656 Too many warnings are issued in eager logs')
    cov = 0.9 * tf.ones([3, 3]) + 0.1 * tf.eye(3)
    scale = tf.linalg.cholesky(cov)
    mv_tril = tfd.MultivariateNormalTriL(loc=[1., 2., 3.],
                                         scale_tril=scale)

    if use_default:
      momentum_distribution = None
      step_size = 0.3
    else:
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          # TODO(b/170015229) Don't use the covariance as inverse scale,
          # it is the wrong preconditioner.
          precision_factor=tf.linalg.LinearOperatorFullMatrix(cov),
      )
      step_size = 1.1
    nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=mv_tril.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size, max_tree_depth=4)
    draws = tfp.mcmc.sample_chain(
        120,
        tf.zeros(3),
        kernel=nuts_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    # Discard the first 20 draws as burn-in before measuring ESS.
    ess = tfp.mcmc.effective_sample_size(draws[-100:],
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)

    # TODO(b/170015229): These and other tests like it, which assert ess is
    # greater than some number, were all passing, even though the preconditioner
    # was the wrong one. Why is that? A guess is that since there are *many*
    # ways to have larger ess, these tests don't really test correctness.
    # Perhaps remove all tests like these.
    if not use_default:
      self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)

  def test_multi_state_part(self, use_default):
    """Sampling a joint distribution whose state has multiple parts."""
    mvn = tfd.JointDistributionSequential([
        tfd.Normal(1., 0.1),
        tfd.Normal(2., 1.),
        tfd.Independent(tfd.Normal(3 * tf.ones([2, 3, 4]), 10.), 3)
    ])

    if use_default:
      momentum_distribution = None
      step_size = 0.1
    else:
      # Build a per-part momentum distribution matched to each part's scale,
      # reshaping each MVN so its event shape matches the state part.
      reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
      reshape_to_234 = tfp.bijectors.Reshape(event_shape_out=[2, 3, 4])
      momentum_distribution = _CompositeJointDistributionSequential([
          reshape_to_scalar(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag([0.1]))),
          reshape_to_scalar(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag([1.]))),
          reshape_to_234(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag(
                      tf.fill([24], 10.))))
      ])
      step_size = 0.3

    nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size, max_tree_depth=4)

    draws = tfp.mcmc.sample_chain(
        100, [0., 0., tf.zeros((2, 3, 4))],
        kernel=nuts_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    ess = tfp.mcmc.effective_sample_size(draws,
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)
    if not use_default:
      self.assertGreaterEqual(
          self.evaluate(
              tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
          40.)
    else:
      self.assertLess(
          self.evaluate(
              tf.reduce_min(tf.nest.map_structure(tf.reduce_min, ess))),
          50.)

  def test_batched_state(self, use_default):
    """Sampling with a batch of chains ([2, 4] batch shape)."""
    mvn = tfd.MultivariateNormalDiag(
        loc=[1., 2., 3.], scale_diag=[0.1, 1., 10.])
    batch_shape = [2, 4]
    if use_default:
      step_size = 0.1
      momentum_distribution = None
    else:
      step_size = 1.0
      momentum_distribution = _CompositeMultivariateNormalPrecisionFactorLinearOperator(
          tf.zeros((2, 4, 3)), precision_factor=mvn.scale)

    nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size,
        max_tree_depth=5)

    draws = tfp.mcmc.sample_chain(
        110,
        tf.zeros(batch_shape + [3]),
        kernel=nuts_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    # Treat the two batch axes as chains for cross-chain ESS.
    ess = tfp.mcmc.effective_sample_size(draws[10:], cross_chain_dims=[1, 2],
                                         filter_threshold=0,
                                         filter_beyond_positive_pairs=False)

    if not use_default:
      self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)

  def test_batches(self, use_default):
    """Sampling a multi-part state with a batch of chains."""
    mvn = tfd.JointDistributionSequential(
        [tfd.Normal(1., 0.1),
         tfd.Normal(2., 1.),
         tfd.Normal(3., 10.)])
    n_chains = 10
    if use_default:
      momentum_distribution = None
      step_size = 0.1
    else:
      # One momentum distribution per state part, each batched over chains.
      reshape_to_scalar = tfp.bijectors.Reshape(event_shape_out=[])
      momentum_distribution = _CompositeJointDistributionSequential([
          reshape_to_scalar(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag(
                      tf.fill([n_chains, 1], 0.1)))),
          reshape_to_scalar(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag(
                      tf.fill([n_chains, 1], 1.)))),
          reshape_to_scalar(
              _CompositeMultivariateNormalPrecisionFactorLinearOperator(
                  precision_factor=tf.linalg.LinearOperatorDiag(
                      tf.fill([n_chains, 1], 10.)))),
      ])
      step_size = 1.1

    nuts_kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob_fn=mvn.log_prob,
        momentum_distribution=momentum_distribution,
        step_size=step_size, max_tree_depth=4)

    draws = tfp.mcmc.sample_chain(
        100, [tf.zeros([n_chains]) for _ in range(3)],
        kernel=nuts_kernel,
        seed=test_util.test_seed(),
        trace_fn=None)
    ess = tfp.mcmc.effective_sample_size(
        draws, cross_chain_dims=[1 for _ in draws],
        filter_threshold=0, filter_beyond_positive_pairs=False)
    if not use_default:
      self.assertGreaterEqual(self.evaluate(tf.reduce_min(ess)), 40.)
    else:
      self.assertLess(self.evaluate(tf.reduce_min(ess)), 100.)
@test_util.test_all_tf_execution_regimes
class DistributedNutsTest(distribute_test_lib.DistributedTest):
  """Tests of sharded-state behavior for PreconditionedNoUTurnSampler."""

  def test_pnuts_kernel_tracks_axis_names(self):
    """Shard axis names can be set at construction or via the setter."""
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        tfd.Normal(0, 1).log_prob, step_size=1.9)
    self.assertIsNone(kernel.experimental_shard_axis_names)
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        tfd.Normal(0, 1).log_prob,
        step_size=1.9,
        experimental_shard_axis_names=['a'])
    self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])
    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        tfd.Normal(0, 1).log_prob,
        step_size=1.9).experimental_with_shard_axes(['a'])
    self.assertListEqual(kernel.experimental_shard_axis_names, ['a'])

  def test_takes_same_number_leapfrog_steps_with_sharded_state(self):
    """Every replica must take the same number of leapfrog steps."""
    if not JAX_MODE:
      self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')

    def target_log_prob(a, b):
      return (tfd.Normal(0., 1.).log_prob(a) + distribute_lib.psum(
          tfd.Normal(distribute_lib.pbroadcast(a, 'foo'), 1.).log_prob(b),
          'foo'))

    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob, step_size=1.9)
    sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])

    def run(seed):
      state = [tf.convert_to_tensor(0.), tf.convert_to_tensor(0.)]
      kr = sharded_kernel.bootstrap_results(state)
      _, kr = sharded_kernel.one_step(state, kr, seed=seed)
      return kr.leapfrogs_taken

    leapfrogs_taken = self.evaluate(
        self.per_replica_to_tensor(
            self.strategy_run(
                run,
                args=(samplers.zeros_seed(),),
                in_axes=None,
                axis_name='foo'), 0))

    # All replicas should agree with replica 0.
    for i in range(distribute_test_lib.NUM_DEVICES):
      self.assertAllClose(leapfrogs_taken[i], leapfrogs_taken[0])

  def test_unsharded_state_remains_synchronized_across_devices(self):
    """The unsharded state part must stay identical on every replica."""
    if not JAX_MODE:
      self.skipTest('Test in TF runs into `merge_call` error: see b/178944108')

    def target_log_prob(a, b):
      return (tfd.Normal(0., 1.).log_prob(a) + distribute_lib.psum(
          tfd.Normal(distribute_lib.pbroadcast(a, 'foo'), 1.).log_prob(b),
          'foo'))

    kernel = tfp.experimental.mcmc.PreconditionedNoUTurnSampler(
        target_log_prob, step_size=1e-1)
    sharded_kernel = kernel.experimental_with_shard_axes([None, ['foo']])

    def run(seed):
      state = [tf.convert_to_tensor(-10.), tf.convert_to_tensor(-10.)]
      kr = sharded_kernel.bootstrap_results(state)
      state, _ = sharded_kernel.one_step(state, kr, seed=seed)
      return state

    state = self.evaluate(
        self.per_replica_to_tensor(
            self.strategy_run(
                run,
                args=(samplers.zeros_seed(),),
                in_axes=None,
                axis_name='foo'), 0))

    # The first (unsharded) state part should match replica 0 everywhere.
    for i in range(distribute_test_lib.NUM_DEVICES):
      self.assertAllClose(state[0][i], state[0][0])
if __name__ == '__main__':
  # Run all test cases in this file.
  test_util.main()
|
tensorflow/probability
|
tensorflow_probability/python/experimental/mcmc/pnuts_test.py
|
Python
|
apache-2.0
| 45,484
|
[
"Gaussian"
] |
670afe1b2bb008ee789f51d14a9bdce8ad147230d40033e38c8cb65180a2ee83
|
#! /usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2017, California Institute of Technology
# All rights reserved.
"""
This standalone python script can be used to convert force-field data
in FRC files (a.k.a. "MSI", "Accelrys", "BIOSYM", "DISCOVERY" files)
...into MOLTEMPLATE/LAMMPS compatible format (.LT files).
Once converted into moltemplate (.LT) format, users can use these files with
MOLTEMPLATE to prepare LAMMPS simulations of molecules using these force fields
(without needing any additional software such as msi2lmp).
There are several examples of MSI files in the "tools/msi2lmp/frc_files/"
directory which is distributed with LAMMPS.
Limitations:
Currently (2017-10) this script ignores the "template" information in .FRC files.
When defining a new type of molecule, the user must carefully choose the
complete atom type for each type of atom in the molecule. In other words,
MOLTEMPLATE will not attempt to determine (from local context) whether
a carbon atom somewhere in your molecule happens to be an SP3 carbon
(ie. "c4" in the COMPASS force-field), or an aromatic carbon ("c3a"),
or something else (for example). This information is typically contained
in the "templates" section of these files, and this script currently ignores
that information. Instead, the user must determine which type of carbon atom
it is manually, for all of the carbon atoms in that kind of molecule.
(This only needs to be done once per molecule definition.
Once a type of molecule is defined, it can be copied indefinitely.)
"""
__author__ = 'Andrew Jewett'
__version__ = '0.2.1'
__date__ = '2017-10-15'
import sys
import os
from collections import defaultdict, OrderedDict
from operator import itemgetter
# The name this script was invoked as (used in the usage message below).
g_program_name = __file__.split('/')[-1]

# Usage text shown to the user. Fixed typos in the original text:
# "comass.lt" -> "compass.lt" and "HBONDTYLE" -> "HBONDSTYLE".
doc_msg = \
    "Typical Usage:\n\n" + \
    " " + g_program_name + " -name COMPASS < compass_published.frc > compass.lt\n\n" + \
    " where \"compass_published.frc\" is a force-field file in MSI format.\n" + \
    " \"compass.lt\" is the corresponding file converted to moltemplate format\n" + \
    " and \"COMPASS\" is the name that future moltemplate users will use to refer\n" + \
    " to this force-field (optional).\n" + \
    "Optional Arguments\n" + \
    " -name FORCEFIELDNAME # Give the force-field a name\n" + \
    " -file FILE_NAME # Read force field parameters from a file\n" + \
    " -url URL # Read force field parameters from a file on the web\n" + \
    " -atoms \"QUOTED LIST\" # Restrict output to a subset of atom types\n" + \
    " Sometimes an FRC file contains multiple versions. In that case,\n"+\
    " you can select between them using these optional arguments:\n"+\
    " -pair-style \"PAIRSTYLE ARGS\" # LAMMPS pair style and cutoff arg(s)\n" + \
    " -bond-style BONDSTYLE # desired LAMMPS bond style (default: \"class2\")\n" + \
    " -angle-style ANGLESTYLE # desired LAMMPS angle style\n" + \
    " -dihedral-style DIHEDRALSTYLE # desired LAMMPS dihedral style\n" + \
    " -improper-style IMPROPERSTYLE # desired LAMMPS improper style\n" + \
    " -hbond-style \"HBONDSTYLE ARGS\" # LAMMPS hydrogen-bond style and args\n"
# " -auto # Consider auto_equivalences in the .frc file \n"+\
class InputError(Exception):
    """Generic exception carrying a message describing faulty user input.

    Raising this exception implies that the caller supplied a malformed
    input file or argument.
    """

    def __init__(self, err_msg):
        # Keep the message on a named attribute so callers can inspect it.
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    def __repr__(self):
        # repr and str are deliberately identical for this exception.
        return self.__str__()
# It seems like there are no ordered sets in python, (a set that remembers the
# order that you added elements), so I built one by wrapping OrderedDict()
class MyOrderedSet(object):
    """A minimal insertion-ordered set backed by an OrderedDict.

    Supports membership tests, iteration in insertion order, add/delete,
    plus python2/3-style wrapper methods.
    """

    def __init__(self, l):
        self.d = OrderedDict()
        for x in l:
            self.d[x] = True

    def __add__(self, x):
        # NOTE: unconventional use of __add__ -- behaves like set.add(),
        # mutating in place and returning None (preserved for compatibility).
        self.d[x] = True

    def __delitem__(self, x):
        del self.d[x]

    def __contains__(self, x):
        return x in self.d

    def __iter__(self):
        # NOTE(review): the iterator is stored on self, so two simultaneous
        # iterations over the same set would interfere -- preserved as-is.
        self.p = iter(self.d)
        return self

    def __next__(self):
        return next(self.p)

    # the following wrappers might be necessary for python2/3 compatibility:
    def add(self, x):
        self.__add__(x)

    def del_item(self, x):
        # BUGFIX: this previously called self.__del_item__(x), a method that
        # does not exist (raising AttributeError); the dunder defined above
        # is named __delitem__.
        self.__delitem__(x)

    def iter(self):
        return self.__iter__()

    def next(self):
        return self.__next__()

# no need to bother with set unions and intersections
def NSplitQuotedString(string,
                       nmax,
                       quotes,
                       delimiters=' \t\r\f\n',
                       escape='\\',
                       comment_char='#'):
    """
    Split a quoted & commented string into at most "nmax" tokens (if nmax>0),
    where each token is separated by one or more delimiter characters
    in the original string, and quoted substrings are not split.
    This function returns a list of strings.  Once the string is split nmax
    times, any remaining text will be appended to the last entry of the list.
    Comments are stripped from the string before splitting begins.
    """
    tokens = []
    token = ''
    reading_token = True
    escaped_state = False
    quote_state = None  # quote char we are currently inside, or None
    for c in string:
        # An unescaped comment character outside of quotes ends the parse.
        if (c in comment_char) and (not escaped_state) and (quote_state == None):
            if len(token) > 0:
                tokens.append(token)
            return tokens
        # An unescaped, unquoted delimiter ends the current token...
        elif (c in delimiters) and (not escaped_state) and (quote_state == None):
            if reading_token:
                # ...unless the nmax token limit has been reached, in which
                # case the delimiter (and all later text) joins the last token.
                if (nmax == 0) or (len(tokens) < nmax-1):
                    if len(token) > 0:
                        tokens.append(token)
                    token = ''
                    reading_token = False
                else:
                    token += c
        elif c in escape:
            if escaped_state:
                # A doubled escape character produces one literal escape char.
                token += c
                reading_token = True
                escaped_state = False
            else:
                escaped_state = True
                # and leave c (the '\' character) out of token
        elif (c in quotes) and (not escaped_state):
            if (quote_state != None):
                if (c == quote_state):
                    quote_state = None  # closing quote
            else:
                quote_state = c         # opening quote
            # Quote characters are kept as part of the token.
            token += c
            reading_token = True
        else:
            # Translate common escape sequences; any other escaped character
            # is kept literally.
            if (c == 'n') and (escaped_state == True):
                c = '\n'
            elif (c == 't') and (escaped_state == True):
                c = '\t'
            elif (c == 'r') and (escaped_state == True):
                c = '\r'
            elif (c == 'f') and (escaped_state == True):
                c = '\f'
            token += c
            reading_token = True
            escaped_state = False
    if len(token) > 0:
        tokens.append(token)
    return tokens
def SplitQuotedString(string,
                      quotes='\'\"',
                      delimiters=' \t\r\f\n',
                      escape='\\',
                      comment_char='#'):
    """Split a quoted, commented string into an unlimited number of tokens.

    Convenience wrapper around NSplitQuotedString() with nmax=0 (no limit).
    """
    return NSplitQuotedString(string,
                              nmax=0,
                              quotes=quotes,
                              delimiters=delimiters,
                              escape=escape,
                              comment_char=comment_char)
def RemoveOuterQuotes(text, quotes='\"\''):
    """Strip one matching pair of surrounding quote characters, if present."""
    has_matching_pair = (len(text) >= 2 and
                         text[0] in quotes and
                         text[-1] == text[0])
    return text[1:-1] if has_matching_pair else text
def SortByEnds(l_orig):
    """Return a copy of l_orig, reversed when its first entry exceeds its last.

    Convenient one-line macro for canonicalizing the order of an atom list.
    """
    result = list(l_orig)
    if result[0] > result[-1]:
        result.reverse()
    return result
#def Repl(tokens, a, b):
# return [(b if x==a else x) for x in tokens]
def DecodeAName(s):
    """Decode an MSI atom name: strip any 'auto' prefix and map 'X' -> '*'."""
    if s.startswith('auto'):
        s = s[4:]
    # 'X' is the encoded form of the wildcard atom name.
    return '*' if s == 'X' else s
def EncodeAName(s):
    """Encode an MSI atom name so moltemplate will not misinterpret it.

    Strips a leading 'auto' prefix.  A *leading* '*' (optionally followed by a
    priority integer, e.g. '*7') is a wildcard and becomes 'X'; the integer is
    discarded.  A '*' occurring later in the name is part of the name itself
    (MSI force fields use many strange characters), so it is spelled out as
    'star', and quote characters become 'prime'/'dblpr', since '*', '\'' and
    '"' all have special meaning in moltemplate.
    """
    if s.startswith('auto'):
        s = s[4:]
    if s.startswith('*'):
        # Wildcard at the start: replace with 'X', dropping any integer.
        return 'X'
    return (s.replace('*', 'star')
             .replace('\'', 'prime')
             .replace('"', 'dblpr'))
# # from interpreting the '*' as a wildcard
def DetermineAutoPriority(anames):
    """Compute the priority number encoded in a list of (wildcard) atom names.

    Patterns of the form '*n' (n an integer) encode an explicit priority n,
    which must be consistent across the list (otherwise InputError is raised).
    Each bare '*' wildcard adds HUGE_VAL, pushing the number up -- a higher
    number means *lower* priority, following the Cerius2 "auto_equivalences"
    convention ("Forcefield based simulations" PDF, Cerius2, p 87).
    Returns -1.0 when no '*n' pattern is present.
    """
    n = -1.0
    num_blank_wildcards = 0
    for aname in anames:
        # Some atom names carry an 'auto' prefix; strip it first.
        if aname.startswith('auto'):
            aname = aname[4:]
        if not aname.startswith('*'):
            continue
        if len(aname) == 1:
            # A "blank" wildcard ('*' with no integer) has very low priority.
            num_blank_wildcards += 1
        elif n == -1.0:
            n = float(aname[1:])
        elif n != float(aname[1:]):
            # The priority integer, if present, must be consistent.
            raise InputError(
                'Error: Inconsistent priority integers in the following interaction:\n'
                ' ' + ' '.join(anames) + '\n')
    HUGE_VAL = 1.0e5
    return n + num_blank_wildcards * HUGE_VAL
#def DeterminePriority(is_auto,
# anames,
# version):
# """
# Determine the priority of an interaction from
# 1) whether or not it is an "auto" interaction
# 2) what is the force-field "version" (a number)
# 3) what are the names of the atoms (for auto_equivalences only,
# some atom "names" are wildcards followed by integers. use the integer)
# """
#
# if is_auto:
# n = DetermineAutoPriority(anames)
# return (is_auto, n)
# else:
# return (is_auto, -version)
def DetermineNumericPriority(is_auto,
                             anames,
                             version):
    """Map an interaction to a numeric priority (lower number wins).

    Determined from:
      - the force-field "version" (a number), for regular interactions;
      - the wildcard integers in the atom names, for "auto" interactions.

    Auto interactions return the non-negative number encoded in their atom
    names, while regular interactions return minus the version, so later
    versions win and every auto interaction ranks below every regular one.
    (Inherited convention -- not pretty, but preserved.)
    """
    if is_auto:
        # low priority integers <--> high priority
        return DetermineAutoPriority(anames)
    # later version numbers <--> higher priority
    # (multiplying by -1 compensates for this)
    return -float(version)
def IsAutoAtom(atom_name):
    """An "auto equivalence" atom name ends in an underscore (e.g. 'c3m_')."""
    return atom_name.endswith('_')
#def PossibleAutoAtom(atom_name):
# """ Auto-equivalences are alternate atom names used in "auto"
# interactions. (These are low priority interactions used as a
# last resort when the interaction parameters could not be located
# by any other means). Each atom is given an alternate name which
# is used in this kind of interaction. These names typically end
# '_' followed by an optional integer. Example "auto" atom names
# are 'c3m_' and 'c=_3'. Unfortunately some ordinary atom names
# also end in an integer preceeded by a _ character. But they
# never end in a '_' character. Here we check for both."""
#
# i = atom_name.rfind('_')
# if (i == -1) or str.isdigit(atom_name[i:]):
# return True
# return False
def IsAutoInteraction(interaction_name):
    """Auto interactions are encoded with a literal 'auto' prefix."""
    return interaction_name.startswith('auto')
#def IsAutoInteraction(interaction_name):
# anames = ExtractAtomNames(interaction_name)
# for a in anames:
# if IsAutoAtom(a):
# return True
# if not PossibleAutoAtom(a):
# return False
# return True
def EncodeInteractionName(anames,
                          is_auto=False):
    """Join atom names into a canonical interaction-name string.

    When is_auto is (exactly) False, "auto-ness" is inferred from the atom
    names themselves: a line came from an "_auto" section of the FRC file iff
    any atom name ends in '_'.  Auto interactions get an 'auto' prefix.
    """
    # (The == False comparison, rather than truthiness, is deliberate and
    # preserved from the original.)
    if is_auto == False:
        # Infer from the atom names (auto-equivalence names end in '_').
        is_auto = any(IsAutoAtom(s) for s in anames)
    if is_auto:
        # Side effect only: validates the wildcard priority integers and may
        # raise InputError on inconsistency.  The value itself is unused.
        DetermineAutoPriority(anames)
        return 'auto' + ','.join(anames)
    return ','.join(anames)
def ExtractANames(interaction_name):
    """Split an interaction name back into its atom names, dropping 'auto'."""
    # Inlined IsAutoInteraction(): auto interactions carry an 'auto' prefix.
    if interaction_name.startswith('auto'):
        return interaction_name[4:].split(',')
    return interaction_name.split(',')
def OOPImproperNameSort(aorig):
    """Canonically order 4 atom names for an out-of-plane improper.

    If the first encoded name exceeds the last, the two outer atoms are
    swapped.  Returns (names, permutation), where permutation maps sorted
    positions back to original positions.
    """
    assert(len(aorig) == 4)
    # BUGFIX: map() returns a one-shot, non-subscriptable iterator in
    # Python 3, so the original `atom_names[0]` raised a TypeError.
    # Materialize it as a list first (as Class2ImproperNameSort does).
    atom_names = [EncodeAName(a) for a in aorig]
    if atom_names[0] < atom_names[3]:
        return (atom_names, [0, 1, 2, 3])
    else:
        return ([atom_names[3],
                 atom_names[1],
                 atom_names[2],
                 atom_names[0]],
                [3, 1, 2, 0])
def Class2ImproperNameSort(aorig):
    """Canonically order 4 atom names for a class2 ("wilson-out-of-plane") improper.

    The second atom is assumed to be the central ("hub") atom and stays in
    place; the other three names are sorted lexicographically.  Any '*' is
    encoded via EncodeAName (becoming 'X' when it is a wildcard).
    Returns (names, permutation).
    """
    assert(len(aorig) == 4)
    names = [EncodeAName(a) for a in aorig]
    # Pair each peripheral name with its original position, then sort
    # lexicographically (ties broken by position, exactly as list.sort did).
    peripheral = sorted(zip((names[0], names[2], names[3]), (0, 2, 3)))
    sorted_names = [peripheral[0][0], names[1], peripheral[1][0], peripheral[2][0]]
    permutation = [peripheral[0][1], 1, peripheral[1][1], peripheral[2][1]]
    return (sorted_names, permutation)
def Parity(p):
    """Return True iff the permutation p is even.

    Counts the cycles of the permutation; a permutation is even exactly when
    (length - number_of_cycles) is even.  (credit: "Weeble")
    """
    perm = list(p)
    seen = [False] * len(perm)
    cycles = 0
    for start, already_seen in enumerate(seen):
        if already_seen:
            continue
        cycles += 1
        cur = start
        # Walk the cycle containing `start`, marking each element visited.
        while not seen[cur]:
            seen[cur] = True
            cur = perm[cur]
    return (len(perm) - cycles) % 2 == 0
def ImCrossTermID(atom_names):
    """
    From a list of 4 atom names (corresponding to a pair of angles between
    atoms 3,2,1 and 3,2,4), build a canonical comma-delimited identifier
    which eliminates order ambiguity.  Swapping the first and last atoms
    (#1 and #4) leaves the -pair- of angles unchanged, so the two outer
    atoms are placed in lexicographic order.
    """
    outer_a, middle_a, middle_b, outer_b = atom_names
    if outer_a > outer_b:
        outer_a, outer_b = outer_b, outer_a
    return ','.join((outer_a, middle_a, middle_b, outer_b))
def AtomsMatchPattern(anames, pattern):
    """
    Check whether the list of atom names "anames" matches "pattern".
    (Both arguments are lists of strings, but some of the strings
    in pattern may contain a wildcard character ('*') followed by a
    "priority" number.  Matches with lower priority numbers are
    given preference whenever multiple distinct matches are found.)
    Returns an integer priority if every element matches, or None otherwise.
    (Note: This function does not check patterns in reverse order.)
    """
    #sys.stderr.write('DEBUG: checking whether '+str(anames)+' matches '+str(pattern)+'\n')
    assert(len(anames) == len(pattern))
    # BUGFIX: initialize "priority" so a zero-length pattern (which trivially
    # matches) no longer raises NameError ("priority" referenced before
    # assignment in the old code).
    priority = 0
    for d in range(0, len(pattern)):
        if (pattern[d] == anames[d]) or (pattern[d][0] == '*'):
            if pattern[d][0] == '*':
                priority = int(pattern[d][1:])
            else:
                # NOTE(review): an exact match resets "priority" to 0, so only
                # the priority from the LAST pattern element survives.  This
                # preserves the original behavior -- confirm it is intended.
                priority = 0
        else:
            # Mismatch: no need to examine the remaining elements.
            return None
    #sys.stderr.write('DEBUG: '+str(anames)+' matches '+str(pattern)+'\n')
    return priority
def LookupBondLength(a1, a2,
                     atom2equiv_bond,
                     bond2r0,
                     atom2auto_bond,
                     bond2r0_auto):
    """
    Try to find bond parameters between atoms whose original
    atom names (without equivalences) are a1 and a2.
    Then return both the equilibrium bond length for that bond,
    as well as the equivalent atom names used to lookup that bond.
    (These could be stored in either atom2equiv_bond or atom2auto_bond.)
    If a match was not found, return None.
    """
    anames = (atom2equiv_bond[a1], atom2equiv_bond[a2])
    bond_name = EncodeInteractionName(SortByEnds(anames))
    # First try the ordinary ("equivalence") bond parameters:
    if bond_name in bond2r0:
        return (bond2r0[bond_name],
                [anames[0], anames[1]],
                False)
    # If no bond between these atoms is defined, check the bonds in the
    # _auto section(s).  This is a lot messier: _auto interactions can
    # contain wildcards, so there can be multiple entries in bond2r0_auto[]
    # for the same pair of atom names.  All of them must be considered and
    # the one with the most priority wins (ie. whose priority number is
    # LOWEST -- the MSI file format uses low priority numbers to indicate
    # high priority.  Somewhat confusing.  For details, see
    # "Forcefield based simulations" PDF, Cerius2, p 87).
    if (a1 in atom2auto_bond) and (a2 in atom2auto_bond):
        anames = [atom2auto_bond[a1], atom2auto_bond[a2]]
        best_match = None
        best_priority = 2000000000  # (effectively infinity)
        for pattern, r0 in bond2r0_auto.items():
            # Try the atom type names in both forward and reverse order:
            for candidate in (anames, [anames[1], anames[0]]):
                priority = AtomsMatchPattern(candidate, pattern)
                if (priority is not None) and (priority < best_priority):
                    best_priority = priority
                    best_match = (r0, anames, True)
        #if best_match is not None:
        #    sys.stderr.write('DEBUG: For atoms '+str((a1,a2))+' ... bond_length, batom_names = '+str(best_match)+'\n')
        return best_match
    return None
def LookupBondAngle(a1, a2, a3,
                    atom2equiv_angle,
                    angle2theta0_or,
                    atom2auto_angle,
                    angle2theta0_auto_or):
    """
    Try to find angle parameters between atoms whose original atom
    names (without equivalences) are a1, a2, and a3.  Then return
    both the equilibrium rest angle for that 3body interaction
    as well as the equivalent atom names used to look it up.  (These
    could be stored in either atom2equiv_angle or atom2auto_angle.)
    If a match was not found, return None.
    """
    anames = (atom2equiv_angle[a1], atom2equiv_angle[a2], atom2equiv_angle[a3])
    angle_name = EncodeInteractionName(SortByEnds(anames))
    # First try the ordinary ("equivalence") angle parameters:
    if angle_name in angle2theta0_or:
        return (angle2theta0_or[angle_name],
                [anames[0], anames[1], anames[2]],
                False)
    # If no angle between these atoms is defined, check the angles in the
    # _auto section(s).  This is a lot messier: _auto interactions can
    # contain wildcards, so there can be multiple entries in
    # angle2theta0_auto_or[] for the same list of atom names.  All of them
    # must be considered and the one with the most priority wins (ie. whose
    # priority number is LOWEST -- the MSI file format uses low priority
    # numbers to indicate high priority.  Somewhat confusing.)
    # Note that end atoms and the central atom use separate lookup tables.
    if ((a1 in atom2auto_angle[0]) and
        (a2 in atom2auto_angle[1]) and
        (a3 in atom2auto_angle[2])):
        anames = [atom2auto_angle[0][a1],
                  atom2auto_angle[1][a2],
                  atom2auto_angle[2][a3]]
        #sys.stderr.write('DEBUG: LookupBondAngle(): a1,a2,a3=('+
        #                 a1+','+a2+','+a3+'), anames='+str(anames)+'\n')
        best_match = None
        best_priority = 2000000000  # (effectively infinity)
        for pattern, theta0 in angle2theta0_auto_or.items():
            # Try the atom type names in both forward and reverse order:
            for candidate in (anames, [anames[2], anames[1], anames[0]]):
                priority = AtomsMatchPattern(candidate, pattern)
                if (priority is not None) and (priority < best_priority):
                    best_priority = priority
                    best_match = (theta0, anames, True)
        #if best_match is not None:
        #    sys.stderr.write('DEBUG: For atoms '+str((a1,a2,a3))+' ... rest_angle, anames = '+str(best_match)+'\n')
        return best_match
    return None
def Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper):
    """
    This function reads a list of lines containing "equivalences" and
    "auto_equivalences" from an MSI-formatted .FRC file.
    Then, for each atom type, it generates a long string which includes the
    original atom type name as well as all of the equivalences it belongs to.
    Later on, when it is time to generate angles, dihedrals, or impropers,
    moltemplate will search for patterns contained in these strings to decide
    which type of interaction to generate.
    This function returns a dictionary that converts the original atom type name
    into these strings.
    (The five atom2equiv_... dictionary arguments are also filled in.)
    """
    for line in lines_equivalences:
        # Strip off trailing comments (which begin at the first '!' or '>'
        # character, whichever comes first).
        # BUGFIX: the old code ignored a '>' comment whenever the line
        # contained no '!' character (because "ic2 < ic1" is False when
        # ic1 == -1), contradicting the intent to skip past both characters.
        ic1 = line.find('!')
        ic2 = line.find('>')
        if ic1 == -1:
            ic = ic2
        elif ic2 == -1:
            ic = ic1
        else:
            ic = min(ic1, ic2)
        if ic != -1:
            line = line[:ic]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        #sys.stderr.write('DEBUG Equivalences2ffids():\n'
        #                 '      tokens = '+str(tokens)+'\n')
        # Columns 0 and 1 hold the version and reference numbers.
        # Column 2 holds the atom type.  Columns 3-7 hold the equivalent
        # atom types for pair, bond, angle, dihedral, and improper
        # interactions (respectively).
        atype = EncodeAName(tokens[2])
        atom2equiv_pair[atype] = EncodeAName(tokens[3])
        atom2equiv_bond[atype] = EncodeAName(tokens[4])
        atom2equiv_angle[atype] = EncodeAName(tokens[5])
        atom2equiv_dihedral[atype] = EncodeAName(tokens[6])
        atom2equiv_improper[atype] = EncodeAName(tokens[7])
    # Combine each atom type name with all of its equivalences into one
    # long (searchable) string:
    atom2ffid = OrderedDict()
    for atom in atom_types:
        atom2ffid[atom] = (atom +
                           ',p'+atom2equiv_pair.get(atom,'') +
                           ',b'+atom2equiv_bond.get(atom,'') +
                           ',a'+atom2equiv_angle.get(atom,'') +
                           ',d'+atom2equiv_dihedral.get(atom,'') +
                           ',i'+atom2equiv_improper.get(atom,''))
    return atom2ffid
def AutoEquivalences2ffids(lines_equivalences,
                           lines_auto_equivalences,
                           atom_types,
                           atom2equiv_pair,
                           atom2equiv_bond,
                           atom2equiv_angle,
                           atom2equiv_dihedral,
                           atom2equiv_improper,
                           atom2auto_pair,
                           atom2auto_bondincr,
                           atom2auto_bond,
                           atom2auto_angleend,
                           atom2auto_anglecenter,
                           atom2auto_dihedralend,
                           atom2auto_dihedralcenter,
                           atom2auto_improperend,
                           atom2auto_impropercenter):
    """
    This function is a variant of Equivalences2ffids() which also considers
    "auto_equivalences".
    This function returns a dictionary that converts the original atom type name
    into a string that includes that atom's "equivalences",
    as well as its "auto_equivalences".
    moltemplate will search for patterns contained in these strings to decide
    which type of interaction to generate.
    (The atom2equiv_... and atom2auto_... dictionary arguments are filled in.)
    """
    # First process the ordinary equivalences (fills the atom2equiv_... dicts;
    # the returned dictionary is discarded and rebuilt below with the
    # auto_equivalences appended).
    Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper)
    # ------ The following lines are for processing "auto_equivalences" -----
    #
    # What is the difference between "equivalences" and "auto_equivalences"?
    #
    # equivalences:
    #    Here is an excerpt from the Discover manual describing "equivalences":
    #   "Chemically distinct atoms often differ in some, but not all,
    #    of their forcefield parameters. For example, the bond parameters
    #    for the C-C bonds in ethene and in benzene are quite different,
    #    but the nonbond parameters for the carbon atoms are essentially
    #    the same.  Rather than duplicating the nonbond parameters in the
    #    forcefield parameter file, the Discover program uses atom type
    #    equivalences to simplify the problem. In the example, the phenyl
    #    carbon atom type is equivalent to the pure sp2 carbons of ethene
    #    insofar as the nonbond parameters are concerned. The Discover
    #    program recognizes five types of equivalences for each atom
    #    type: nonbond, bond, angle, torsion, and out-of-plane.
    #    Cross terms such as bond-bond terms have the same equivalences
    #    (insofar as atom types are concerned) as the diagonal term of
    #    the topology of all the atoms defining the internal coordinates.
    #    For the bond-bond term, this means that the atom type
    #    equivalences for angles would be used
    #
    # auto_equivalences:
    #    Are similar to equivalences, but apparently with lower priority.
    #    In addition, it seems that, when looking up some of the class2 terms
    #    in the interaction according to atom type using "auto_equivalences"
    #    a distinction is made between end atoms and central atoms.
    #    The parameters for these interactions are also stored in different
    #    tables in the .frc file, with different comments/tags.
    #    (for example, "cff91_auto" as opposed to "cff91")
    #    An excerpt from the Discover manual is somewhat vague:
    #   "A forcefield may include automatic parameters for use when
    #    better-quality explicit parameters are not defined for a
    #    particular bond, angle, torsion, or out-of-plane interaction.
    #    These parameters are intended as temporary patches, to allow
    #    you to begin calculations immediately."
    for line in lines_auto_equivalences:
        # Strip off trailing comments (which begin at the first '!' or '>'
        # character, whichever comes first).
        # BUGFIX: the old code ignored a '>' comment whenever the line
        # contained no '!' character (because "ic2 < ic1" is False when
        # ic1 == -1), contradicting the intent to skip past both characters.
        ic1 = line.find('!')
        ic2 = line.find('>')
        if ic1 == -1:
            ic = ic2
        elif ic2 == -1:
            ic = ic1
        else:
            ic = min(ic1, ic2)
        if ic != -1:
            line = line[:ic]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        #sys.stderr.write('DEBUG Equivalences2ffids():\n'
        #                 '      tokens = '+str(tokens)+'\n')
        # Columns 0 and 1 hold the version and reference numbers.
        # Column 2 holds the atom type.  Columns 3-11 hold the
        # auto-equivalent atom types for pair, bond-increment, bond,
        # angle-end, angle-center, dihedral-end, dihedral-center,
        # improper-end, and improper-center lookups (respectively).
        atype = EncodeAName(tokens[2])
        atom2auto_pair[atype] = EncodeAName(tokens[3])
        atom2auto_bondincr[atype] = EncodeAName(tokens[4])
        atom2auto_bond[atype] = EncodeAName(tokens[5])
        atom2auto_angleend[atype] = EncodeAName(tokens[6])
        atom2auto_anglecenter[atype] = EncodeAName(tokens[7])
        atom2auto_dihedralend[atype] = EncodeAName(tokens[8])
        atom2auto_dihedralcenter[atype] = EncodeAName(tokens[9])
        atom2auto_improperend[atype] = EncodeAName(tokens[10])
        atom2auto_impropercenter[atype] = EncodeAName(tokens[11])
    # Combine each atom type name with all of its equivalences and
    # auto_equivalences into one long (searchable) string:
    atom2ffid = OrderedDict()
    for atom in atom_types:
        atom2ffid[atom] = (atom +
                           ',p'+atom2equiv_pair.get(atom,'') +
                           ',b'+atom2equiv_bond.get(atom,'') +
                           ',a'+atom2equiv_angle.get(atom,'') +
                           ',d'+atom2equiv_dihedral.get(atom,'') +
                           ',i'+atom2equiv_improper.get(atom,'') +
                           ',ap'+atom2auto_pair.get(atom,'') +
                           ',aq'+atom2auto_bondincr.get(atom,'') +
                           ',ab'+atom2auto_bond.get(atom,'') +
                           ',aae'+atom2auto_angleend.get(atom,'') +
                           ',aac'+atom2auto_anglecenter.get(atom,'') +
                           ',ade'+atom2auto_dihedralend.get(atom,'') +
                           ',adc'+atom2auto_dihedralcenter.get(atom,'') +
                           ',aie'+atom2auto_improperend.get(atom,'') +
                           ',aic'+atom2auto_impropercenter.get(atom,'') +
                           ''
                           )
    return atom2ffid
def main():
try:
sys.stderr.write(g_program_name + ", version " +
__version__ + ", " + __date__ + "\n")
if sys.version < '2.6':
raise InputError('Error: Using python ' + sys.version + '\n' +
' Alas, your version of python is too old.\n'
' You must upgrade to a newer version of python (2.6 or later).')
if sys.version < '2.7':
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# defaults:
ffname = 'BIOSYM_MSI_FORCE_FIELD'
type_subset = set([])
filename_in = ''
file_in = sys.stdin
#file_in = open('pcff_repaired.frc','r') #CONTINUEHERE
include_auto_equivalences = False
#pair_style_name = 'lj/class2/coul/long'
#pair_style_params = "10.0 10.0"
pair_style2docs = {}
pair_style2args = defaultdict(str)
pair_style2docs['lj/cut/coul/long'] = 'http://lammps.sandia.gov/doc/pair_lj.html'
pair_style2args['lj/cut/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/long'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/cut'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/cut'] = '10.0'
bond_style2docs = {}
#bond_style2args = defaultdict(str)
bond_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
bond_style2docs['class2'] = 'http://lammps.sandia.gov/doc/bond_class2.html'
bond_style2docs['morse'] = 'http://lammps.sandia.gov/doc/bond_morse.html'
bond_symmetry_subgraph = '' # default
angle_style2docs = {}
#angle_style2args = defaultdict(str)
angle_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
angle_style2docs['class2'] = 'http://lammps.sandia.gov/doc/angle_class2.html'
angle_symmetry_subgraph = '' # default
dihedral_style2docs = {}
#dihedral_style2args = defaultdict(str)
dihedral_style2docs['charmm'] = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
dihedral_style2docs['class2'] = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
dihedral_symmetry_subgraph = '' # default
improper_style2docs = {}
#improper_style2args = defaultdict(str)
improper_style2docs['cvff'] = 'http://lammps.sandia.gov/doc/improper_cvff.html'
improper_style2docs['class2'] = 'http://lammps.sandia.gov/doc/improper_class2.html'
improper_symmetry_subgraph = {} #'cenJsortIKL'
pair_mixing_style = 'sixthpower tail yes'
special_bonds_command = 'special_bonds lj/coul 0.0 0.0 1.0 dihedral yes'
# Thanks to Paul Saxe for is suggestions
# http://lammps.sandia.gov/threads/msg11270.html
kspace_style = 'kspace_style pppm 0.0001'
pair_styles_selected = set([])
#pair_style_link = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style_args = {}
pair_cutoff = '10.0'
#pair_style_command = " pair_style hybrid " + \
# pair_style_name + " " + pair_style_args + "\n"
bond_styles_selected = set([])
#bond_style_link = bond_style2docs[bond_style_name]
#bond_style_args = ''
angle_styles_selected = set([])
#angle_style_link = angle_style2docs[angle_style_name]
#angle_style_args = ''
dihedral_styles_selected = set([])
#dihedral_style_link = dihedral_style2docs[dihedral_style_name]
#dihedral_style_args = ''
improper_styles_selected = set([])
#improper_style_link = improper_style2docs[improper_style_name]
#improper_style_args = ''
hbond_style_name = ''
hbond_style_link = ''
hbond_style_args = ''
lines_templates = []
lines_references = defaultdict(list)
lines_warnings = []
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-atoms':
if i + 1 >= len(argv):
raise InputError('Error: the \"' + argv[i] + '\" argument should be followed by a quoted string\n'
' which contains a space-delimited list of of a subset of atom types\n'
' you want to use from the original force-field.\n'
' Make sure you enclose the entire list in quotes.\n')
type_subset = set(argv[i + 1].strip('\"\'').strip().split())
del argv[i:i + 2]
elif argv[i] == '-name':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of the force-field\n')
ffname = argv[i + 1]
del argv[i:i + 2]
elif argv[i] in ('-file', '-in-file'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of a force-field file\n')
filename_in = argv[i + 1]
try:
file_in = open(filename_in, 'r')
except IOError:
sys.stderr.write('Error: Unable to open file\n'
' \"' + filename_in + '\"\n'
' for reading.\n')
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-pair-cutoff':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a number'
' (the distance cutoff for non-bonded (pair) interactions)\n')
pair_style_cutoff = argv[i+1]
del argv[i:i + 2]
elif argv[i] == '-pair-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by either \"lj/class2/coul/cut\" or \"lj/class2/coul/long\"\n')
pair_style_list = argv[i + 1].split(',')
for pair_style in pair_style_list:
if pair_style == '9-6':
pair_style = 'lj/class2/coul/long'
elif pair_style in ('12-6', 'lj', 'LJ'):
pair_style = 'lj/cut/coul/long'
if pair_style.find('lj/class2/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/cut/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/class2/coul/cut') == 0:
pass
#kspace_style = ''
elif pair_style.find('lj/cut') == 0:
pass
#kspace_style = ''
else:
raise InputError('Error: ' + argv[i] + ' ' + pair_style + ' not supported.\n'
' The following pair_styles are supported:\n'
' lj/class2/coul/cut\n'
' lj/class2/coul/long\n'
' lj/cut\n'
' lj/cut/coul/long\n')
pair_styles_selected.add(pair_style)
del argv[i:i + 2]
elif argv[i] == '-bond-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible bond_style.\n')
bond_styles = argv[i + 1].split(',')
for bond_style in bond_styles:
bond_styles_selected.add(bond_style)
#bond_style2args[bond_style] = argv[i + 1].split()[1:]
#if bond_style_name.find('harmonic') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
#elif bond_style_name.find('morse') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_morse.html'
#elif bond_style_name.find('class2') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\", \"class2\", or \"morse\".\n')
del argv[i:i + 2]
elif argv[i] == '-angle-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible angle_style.\n')
angle_styles = argv[i + 1].split(',')
for angle_style in angle_styles:
angle_styles_selected.add(angle_style)
#if angle_style_name.find('harmonic') == 0:
# pass
# #angle_style_link = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
#elif angle_style_name.find('class2') == 0:
# pass
# #angle_style_link = 'http://lammps.sandia.gov/doc/angle_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-dihedral-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible dihedral_style.\n')
dihedral_styles = argv[i + 1].split(',')
for dihedral_style in dihedral_styles:
dihedral_styles_selected.add(dihedral_style)
#if dihedral_style_name.find('charmm') == 0:
# pass
# #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
#elif dihedral_style_name.find('class2') == 0:
# pass
# #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-improper-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible impropoer_style.\n')
improper_styles = argv[i + 1].split(',')
for improper_style in improper_styles:
improper_styles_selected.add(improper_style)
#if impropoer_style_name.find('harmonic') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_harmonic.html'
#elif impropoer_style_name.find('class2') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-hbond-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' ' + hbond_style_name + '\n'
' should be followed by a compatible pair_style.\n')
hbond_style_name = argv[i + 1]
hbond_style_link = 'http://lammps.sandia.gov/doc/pair_hbond_dreiding.html'
if hbond_style_name.find('none') == 0:
hbond_style_name = ''
hbond_style_args = ''
elif hbond_style_name.find('hbond/dreiding/lj') == 0:
n = len('hbond/dreiding/lj')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
elif hbond_style_name.find('hbond/dreiding/morse') == 0:
n = len('hbond/dreiding/morse')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
else:
raise InputError('Error: ' + argv[i] + ' flag should be followed by either\n'
' \"hbond/dreiding/lj\" or \"hbond/dreiding/morse"\n')
del argv[i:i + 2]
elif argv[i] in ('-url', '-in-url'):
import urllib2
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a URL pointing to\n'
' a file containing force-field information in msi/frc format.\n')
url = argv[i + 1]
try:
request = urllib2.Request(url)
file_in = urllib2.urlopen(request)
except urllib2.URLError:
sys.stdout.write("Error: Unable to open link:\n" + url + "\n")
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-auto':
include_auto_equivalences = True
del argv[i:i + 1]
elif argv[i] in ('-help', '--help', '-?', '--?'):
sys.stderr.write(doc_msg)
sys.exit(0)
del argv[i:i + 1]
else:
i += 1
if len(argv) != 1:
raise InputError('Error: Unrecongized arguments: ' + ' '.join(argv[1:]) +
'\n\n' + doc_msg)
# Default styles:
if len(bond_styles_selected) == 0:
bond_styles_selected.add('class2')
if len(angle_styles_selected) == 0:
angle_styles_selected.add('class2')
if len(dihedral_styles_selected) == 0:
dihedral_styles_selected.add('class2')
if len(improper_styles_selected) == 0:
improper_styles_selected.add('class2')
if len(pair_styles_selected) == 0:
pair_styles_selected.add('lj/class2/coul/long')
#sys.stderr.write("Reading parameter file...\n")
lines = file_in.readlines()
atom2charge = OrderedDict() # lookup charge from atom type
atom2mass = OrderedDict() # lookup mass from atom type
# equivalences lookup
atom2ffid = OrderedDict() # lookup "force-field-ID" a string containing
# equivalences to lookup bonded interactions
atom2equiv_pair = OrderedDict() # lookup the equivalent symbol used for
# looking up pair interactions
atom2equiv_bond = OrderedDict()
atom2equiv_angle = OrderedDict()
atom2equiv_dihedral = OrderedDict()
atom2equiv_improper = OrderedDict()
# inverse equivalences lookup
equiv_pair2atom = defaultdict(set)
equiv_bond2atom = defaultdict(set)
equiv_angle2atom = defaultdict(set)
equiv_dihedral2atom = defaultdict(set)
equiv_improper2atom = defaultdict(set)
# auto equivalences lookup
atom2auto_pair = OrderedDict()
atom2auto_bondincr = OrderedDict()
atom2auto_bond = OrderedDict()
atom2auto_angleend = OrderedDict()
atom2auto_anglecenter = OrderedDict()
atom2auto_dihedralend = OrderedDict()
atom2auto_dihedralcenter = OrderedDict()
atom2auto_improperend = OrderedDict()
atom2auto_impropercenter = OrderedDict()
# inverse auto equivalences lookup
auto_pair2atom = defaultdict(set)
auto_bondincr2atom = defaultdict(set)
auto_bond2atom = defaultdict(set)
auto_angleend2atom = defaultdict(set)
auto_anglecenter2atom = defaultdict(set)
auto_dihedralend2atom = defaultdict(set)
auto_dihedralcenter2atom = defaultdict(set)
auto_improperend2atom = defaultdict(set)
auto_impropercenter2atom = defaultdict(set)
atom2element = OrderedDict() # Optional:
# which element (eg 'C', 'O') ? (Note this
# is different from atom type: 'C1', 'Oh')
atom2numbonds = OrderedDict() # Optional: how many bonds emanate from
atom2descr = OrderedDict() # Optional: a brief description
atom2ver = OrderedDict() # atoms introduced in different versions of ff
atom2ref = OrderedDict() # reference to paper where atom introduced
lines_equivalences = [] # equivalences for force-field lookup
lines_auto_equivalences = [] # auto_equivalences have lower priority
pair2params = OrderedDict()
pair2style = OrderedDict()
pair_styles = set([])
pair2ver = OrderedDict()
pair2ref = OrderedDict()
bond2chargepair = OrderedDict() # a.k.a "bond increments"
charge_pair_priority = OrderedDict() # priority in case multiple entries
# exist for the same pair of atoms
charge_pair_ver = OrderedDict() # which version of the force field?
charge_pair_ref = OrderedDict() # paper introducing this chargepair
bond2params = OrderedDict() # store a tuple with the 2-body bond
# interaction type, and its parameters
# for every type of bond
bond2priority = OrderedDict() # What is the priority of this interaction?
bond2style = OrderedDict() # What LAMMPS bond style (formula)
# is used for a given interaction?
bond_styles = set([]) # Contains all bond styles used.
bond2ver = OrderedDict()
bond2ref = OrderedDict()
bond2r0 = OrderedDict()
bond2r0_auto = OrderedDict()
angle2params = OrderedDict() # store a tuple with the 3-body angle
# interaction type, and its parameters
# for every type of angle
angle2params_or = OrderedDict()
# http://lammps.sandia.gov/doc/angle_class2.html
#angle2class2_a = OrderedDict() # params for the "a" class2 terms
angle2class2_bb = OrderedDict() # params for the "bb" class2 terms
angle2class2_bb_or = OrderedDict()
angle2class2_ba = OrderedDict() # params for the "ba" class2 terms
angle2class2_ba_or = OrderedDict()
angle2priority = OrderedDict() # What is the priority of this interaction?
angle2priority_or = OrderedDict()
angle_is_secondary_or = OrderedDict()
angle2style = OrderedDict() # What LAMMPS angle style (formula)
# is used for a given interaction?
angle2style_or = OrderedDict()
angle_styles = set([]) # Contains all angle styles used.
angle2ref = OrderedDict()
angle2ver = OrderedDict()
angle2ref_or = OrderedDict()
angle2ver_or = OrderedDict()
angle2ver_bb = OrderedDict()
angle2ver_bb_or = OrderedDict()
angle2ref_bb = OrderedDict()
angle2ref_bb_or = OrderedDict()
angle2ver_ba = OrderedDict()
angle2ver_ba_or = OrderedDict()
angle2ref_ba = OrderedDict()
angle2ref_ba_or = OrderedDict()
angle2theta0_or = OrderedDict()
angle2theta0_auto_or = OrderedDict()
# http://lammps.sandia.gov/doc/dihedral_class2.html
dihedral2params = OrderedDict() # store a tuple with the 4-body dihedral
# interaction type, and its parameters
# for every type of dihedral
dihedral2params_or = OrderedDict()
#dihedral2class2_d = OrderedDict() # params for the "d" class2 term
dihedral2class2_mbt = OrderedDict() # params for the "mbt" class2 term
dihedral2class2_mbt_or = OrderedDict()
dihedral2class2_ebt = OrderedDict() # params for the "ebt" class2 term
dihedral2class2_ebt_or = OrderedDict()
#dihedral2sym_ebt = OrderedDict()
dihedral2class2_at = OrderedDict() # params for the "at" class2 term
dihedral2class2_at_or = OrderedDict()
#dihedral2sym_at = OrderedDict()
dihedral2class2_aat = OrderedDict() # params for the "aat" class2 term
dihedral2class2_aat_or = OrderedDict()
#dihedral2sym_aat = OrderedDict()
dihedral2class2_bb13 = OrderedDict() # params for the "bb13" class2 term
dihedral2class2_bb13_or = OrderedDict()
#dihedral2sym_bb13 = OrderedDict()
dihedral2priority = OrderedDict() # What is the priority of this interaction?
dihedral2priority_or = OrderedDict()
dihedral_is_secondary_or = OrderedDict()
dihedral2style = OrderedDict() # What LAMMPS dihedral style (formula)
# is used for a given interaction?
dihedral2style_or = OrderedDict()
dihedral_styles = set([]) # Contains all dihedral styles used.
dihedral2ref = OrderedDict()
dihedral2ver = OrderedDict()
dihedral2ver_or = OrderedDict()
dihedral2ref_or = OrderedDict()
dihedral2ver_mbt = OrderedDict()
dihedral2ver_mbt_or = OrderedDict()
dihedral2ref_mbt = OrderedDict()
dihedral2ref_mbt_or = OrderedDict()
dihedral2ver_ebt = OrderedDict()
dihedral2ver_ebt_or = OrderedDict()
dihedral2ref_ebt = OrderedDict()
dihedral2ref_ebt_or = OrderedDict()
dihedral2ver_at = OrderedDict()
dihedral2ver_at_or = OrderedDict()
dihedral2ref_at = OrderedDict()
dihedral2ref_at_or = OrderedDict()
dihedral2ver_aat = OrderedDict()
dihedral2ver_aat_or = OrderedDict()
dihedral2ref_aat = OrderedDict()
dihedral2ref_aat_or = OrderedDict()
dihedral2ver_bb13 = OrderedDict()
dihedral2ver_bb13_or = OrderedDict()
dihedral2ref_bb13 = OrderedDict()
dihedral2ref_bb13_or = OrderedDict()
# http://lammps.sandia.gov/doc/improper_class2.html
improper2params = OrderedDict() # store a tuple with the 4-body improper
# interaction type, and its parameters
# for every type of imporpoer
improper2params_or = OrderedDict()
improper2class2_aa = OrderedDict() # params for the "aa" class2 term
improper2class2_aa_or = OrderedDict()
improper2cross = defaultdict(dict)
# improper2cross[imp_name][atoms] stores the
# coefficient (K) for the angle-angle ("aa")
# improper interactions between a pair of
# neighboring 3-body angles (in the .FRC file).
# "imp_name" is the name of the improper interaction
# (which is a concatination of the central atom and
# the 3 surrounding leaf atoms (which are sorted))
# "atoms" indicates, for that K value, the list of
# leaf atoms for that K value as they appear in the
# corresponding line of the .frc file (however the
# and last atom names are swapped if the first
# atom name is lexicographically > the last, to
# eliminate redundancy and ambiguity.)
improper2sym = defaultdict(set)
# improper2sym[imp_name] indicates which subset of
# leaf atoms (from 0 to 2) are equivalent and can
# tolerate having their order rearranged without
# effecting the energy. Later on this will be used
# to reduce the number of improper interactions that
# will be generated by moltemplate.
improper2priority = OrderedDict() # What is the priority of this interaction?
improper2priority_or = OrderedDict()
improper_is_secondary_or = OrderedDict()
improper2style = OrderedDict() # What LAMMPS improper style (formula)
# is used for a given interaction?
improper2style_or = OrderedDict()
improper_styles = set([]) # Contains all improper styles used.
improper2ver = OrderedDict()
improper2ver_or = OrderedDict()
improper2ref = OrderedDict()
improper2ref_or = OrderedDict()
improper2ver_aa = OrderedDict()
improper2ver_aa_or = OrderedDict()
improper2ref_aa = OrderedDict()
improper2ref_aa_or = OrderedDict()
# Warn users if force field contains terms which cannot yet
# be simulated with LAMMPS (as of 2017-10-13)
display_OOP_OOP_warning = False
display_torsion_torsion_1_warning = False
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete this code in a later version
hbond2params = OrderedDict() # lookup hbond parameters and atom types
hbond2donors = OrderedDict() # according to the identifier in the 2nd
hbond2acceptors = OrderedDict() # column of the "#hbond_definition"
hbond2hydrogens = OrderedDict() # section of an .frc file.
"""
allowed_section_names = set(['#define',
# sections used in all MSI force-fields
'#atom_types',
'#equivalence',
'#auto_equivalence',
'#nonbond(9-6)',
'#nonbond(12-6)',
'#quadratic_bond',
'#quartic_bond',
'#morse_bond',
'#quadratic_angle',
'#quartic_angle',
'#bond-bond',
'#bond-angle',
'#torsion_1',
'#torsion_3',
'#middle_bond-torsion_3',
'#end_bond-torsion_3',
'#angle-torsion_3',
'#angle-angle-torsion_1',#(class2 dihedral)
'#bond-bond_1_3', #(a class2 dihedral term)
'#out_of_plane',
'#wilson_out_of_plane',
'#angle-angle', #(a class2 improper term)
'#out_of_plane-out_of_plane', # UNSUPPORTED
'#torsion-torsion_1', # UNSUPPORTED
'#bond_increments',
'#hbond_definition', # irrelevant?
'#templates',
'#reference',
'#end'
])
icol_type = icol_mass = icol_elem = icol_nbonds = icol_comment = icol_ver = icol_ref = -1
section_name = ''
section_is_auto = False
sys.stderr.write("parsing file pass1: look for atom types and equivalences...")
    # Pass 1: scan every line of the file, collecting atom types (name,
    # mass, element, #bonds, version, reference) and saving the raw lines
    # of the "#equivalence" / "#auto_equivalence" sections for later use.
    for iline in range(0, len(lines)):
        line = lines[iline]
        sys.stderr.write('line=\"' + line.strip() + '\"\n')
        tokens = SplitQuotedString(line.strip(),
                                   quotes='',
                                   comment_char='>')
        #sys.stderr.write('tokens = ' + str(tokens) + '\n')
        # Skip '!' comment lines, except the '!Ver' column-header line
        # (which tells us the column layout of the #atom_types section).
        if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
            continue
        if line.lstrip(' ').find('#') == 0:
            #sys.stderr.write('allowed_section_names = ' +
            #                 str(allowed_section_names) + '\n')
            if tokens[0] in allowed_section_names:
                section_name = tokens[0]
                section_is_auto = tokens[-1].endswith('_auto')
                tokens_after_section_name = tokens[1:]
                sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
                continue
            elif not tokens[0] in ('#version',
                                   '#define'):
                raise InputError('Error: Line# '+str(iline) +'\n'
                                 '       Unrecognized section name:\n'
                                 '       \"' + tokens[0] + '\"\n')
        elif (len(tokens) == 8) and (section_name == '#equivalence'):
            if line.lstrip().find('!') == 0:
                continue
            lines_equivalences.append(line)
        elif (len(tokens) == 12) and (section_name == '#auto_equivalence'):
            if line.lstrip().find('!') == 0:
                continue
            lines_auto_equivalences.append(line)
        elif (len(tokens) > 0) and (section_name == '#atom_types'):
            # Different FRC files put this information in different
            # columns. Column order is stored in the !Ver comment line:
            if line.lstrip().find('!Ver') == 0:
                tokens = line.strip().split()
                for i in range(0, len(tokens)):
                    if tokens[i].lower() == 'type':
                        icol_type = i
                    elif tokens[i].lower() == 'mass':
                        icol_mass = i
                    elif tokens[i].lower() == 'element':
                        icol_elem = i
                    elif tokens[i].lower() == 'connections':
                        icol_nbonds = i
                    elif tokens[i].lower() == 'comment':
                        icol_comment = i
                    elif tokens[i].lower() == '!ver': #(version of ff)
                        icol_ver = i
                    elif tokens[i].lower() == 'ref':
                        icol_ref = i
                # The version number is expected in the first column.
                assert(icol_ver == 0)
                if -1 in (icol_type, icol_mass):
                    raise InputError('Error: Invalid #atom_types section.\n'
                                     '       The meaning of each column cannot be determined.\n'
                                     '       This file needs a valid "!Ver..." comment.\n')
                if icol_comment == -1:
                    # Assume the comment occupies the column after the last
                    # recognized data column.
                    icol_comment = max(icol_type, icol_mass,
                                       icol_elem, icol_nbonds) + 1
                sys.stderr.write('icol_ver = '+str(icol_ver)+'\n')
                sys.stderr.write('icol_ref = '+str(icol_ref)+'\n')
                sys.stderr.write('icol_mass = '+str(icol_mass)+'\n')
                sys.stderr.write('icol_nelem = '+str(icol_elem)+'\n')
                sys.stderr.write('icol_nbonds = '+str(icol_nbonds)+'\n')
                sys.stderr.write('icol_comment = '+str(icol_comment)+'\n')
                continue
            # Split into at most icol_comment+1 fields so that the
            # free-form comment (with spaces) stays in one token.
            tokens = map(RemoveOuterQuotes,
                         NSplitQuotedString(line.strip(),
                                            icol_comment+1,
                                            quotes='',
                                            comment_char='>'))
            tokens = list(tokens)
            if (len(tokens) > 4):
                if ((len(type_subset) == 0) or (tokens[1] in type_subset)):
                    aname = EncodeAName(tokens[icol_type])
                    atom2mass[aname] = str(max(float(tokens[icol_mass]), 1.0e-06))
                    # Some atoms in cvff.prm have zero mass. Unfortunately this
                    # causes LAMMPS to crash, even if these atoms are never used,
                    # so I give the mass a non-zero value instead.
                    if icol_elem != -1:
                        atom2element[aname] = tokens[icol_elem]
                    if icol_nbonds != -1:
                        atom2numbonds[aname] = int(tokens[icol_nbonds])
                    atom2descr[aname] = tokens[icol_comment]
                    atom2ver[aname] = tokens[icol_ver]
                    atom2ref[aname] = tokens[icol_ref]
            elif len(tokens) > 0:
                raise InputError('Error: Invalid atom line: (line#'+str(iline)+')\n' +
                                 '\"'+line.strip()+'\"')
    # All atom type names discovered during pass1 (keys of atom2mass).
    atom_types = [x for x in atom2mass]
    # Now construct the lookup tables and inverse tables
    # we will need to understand the remainder of the file:
    if not include_auto_equivalences:
        atom2ffid = Equivalences2ffids(lines_equivalences,
                                       atom_types,
                                       atom2equiv_pair,
                                       atom2equiv_bond,
                                       atom2equiv_angle,
                                       atom2equiv_dihedral,
                                       atom2equiv_improper)
    else:
        # Also consider the "#auto_equivalence" rules when generating
        # force-field IDs for each atom type.
        atom2ffid = AutoEquivalences2ffids(lines_equivalences,
                                           lines_auto_equivalences,
                                           atom_types,
                                           atom2equiv_pair,
                                           atom2equiv_bond,
                                           atom2equiv_angle,
                                           atom2equiv_dihedral,
                                           atom2equiv_improper,
                                           atom2auto_pair,
                                           atom2auto_bondincr,
                                           atom2auto_bond,
                                           atom2auto_angleend,
                                           atom2auto_anglecenter,
                                           atom2auto_dihedralend,
                                           atom2auto_dihedralcenter,
                                           atom2auto_improperend,
                                           atom2auto_impropercenter)
    # Build the inverse maps: equivalence-name -> set of atom types.
    for a,e in atom2equiv_pair.items():
        equiv_pair2atom[e].add(a)
    for a,e in atom2equiv_bond.items():
        equiv_bond2atom[e].add(a)
    for a,e in atom2equiv_angle.items():
        equiv_angle2atom[e].add(a)
    for a,e in atom2equiv_dihedral.items():
        equiv_dihedral2atom[e].add(a)
    for a,e in atom2equiv_improper.items():
        equiv_improper2atom[e].add(a)
    # the inverse lookup for '*' matches all atom types
    for a in atom_types:
        #equiv_pair2atom['*'].add(EncodeAName(a))
        equiv_pair2atom['X'].add(EncodeAName(a))
        #equiv_bond2atom['*'].add(EncodeAName(a))
        equiv_bond2atom['X'].add(EncodeAName(a))
        #equiv_angle2atom['*'].add(EncodeAName(a))
        equiv_angle2atom['X'].add(EncodeAName(a))
        #equiv_dihedral2atom['*'].add(EncodeAName(a))
        equiv_dihedral2atom['X'].add(EncodeAName(a))
        #equiv_improper2atom['*'].add(EncodeAName(a))
        equiv_improper2atom['X'].add(EncodeAName(a))
    # Same inverse maps, for the "auto" equivalence tables:
    for a,e in atom2auto_pair.items():
        auto_pair2atom[e].add(a)
    for a,e in atom2auto_bondincr.items():
        auto_bondincr2atom[e].add(a)
    for a,e in atom2auto_bond.items():
        auto_bond2atom[e].add(a)
    for a,e in atom2auto_angleend.items():
        auto_angleend2atom[e].add(a)
        #auto_angle[0][e].add(a)
        #auto_angle[2][e].add(a)
    for a,e in atom2auto_anglecenter.items():
        auto_anglecenter2atom[e].add(a)
        #auto_angle[1][e].add(a)
    for a,e in atom2auto_dihedralend.items():
        auto_dihedralend2atom[e].add(a)
        #auto_dihedral2atom[0][e].add(a)
        #auto_dihedral2atom[3][e].add(a)
    for a,e in atom2auto_dihedralcenter.items():
        auto_dihedralcenter2atom[e].add(a)
        #auto_dihedral2atom[1][e].add(a)
        #auto_dihedral2atom[2][e].add(a)
    for a,e in atom2auto_improperend.items():
        auto_improperend2atom[e].add(a)
    for a,e in atom2auto_impropercenter.items():
        auto_impropercenter2atom[e].add(a)
    # the inverse lookup for '*' matches all atom types
    for a in atom_types:
        #auto_pair2atom['*'].add(EncodeAName(a))
        auto_pair2atom['X'].add(EncodeAName(a))
        #auto_bondincr2atom['*'].add(EncodeAName(a))
        auto_bondincr2atom['X'].add(EncodeAName(a))
        #auto_bond2atom['*'].add(EncodeAName(a))
        auto_bond2atom['X'].add(EncodeAName(a))
        #auto_angleend2atom['*'].add(EncodeAName(a))
        auto_angleend2atom['X'].add(EncodeAName(a))
        #auto_anglecenter2atom['*'].add(EncodeAName(a))
        auto_anglecenter2atom['X'].add(EncodeAName(a))
        #auto_dihedralend2atom['*'].add(EncodeAName(a))
        auto_dihedralend2atom['X'].add(EncodeAName(a))
        #auto_dihedralcenter2atom['*'].add(EncodeAName(a))
        auto_dihedralcenter2atom['X'].add(EncodeAName(a))
        #auto_improperend2atom['*'].add(EncodeAName(a))
        auto_improperend2atom['X'].add(EncodeAName(a))
        #auto_impropercenter2atom['*'].add(EncodeAName(a))
        auto_impropercenter2atom['X'].add(EncodeAName(a))
sys.stderr.write("parsing file pass2: look for bonds, bond_increments and nonbonded (pair) interactions...")
for iline in range(0, len(lines)):
line = lines[iline]
sys.stderr.write('line=\"' + line.strip() + '\"\n')
tokens = SplitQuotedString(line.strip(),
quotes='',
comment_char='>')
#sys.stderr.write('tokens = ' + str(tokens) + '\n')
if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
continue
if line.lstrip(' ').find('#') == 0:
#sys.stderr.write('allowed_section_names = ' +
# str(allowed_section_names) + '\n')
if (tokens[0] in allowed_section_names):
section_name = tokens[0]
section_is_auto = tokens[-1].endswith('_auto')
tokens_after_section_name = tokens[1:]
sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
continue
elif (not tokens[0] in ('#version','#define')):
raise InputError('Error: Line# '+str(iline) +'\n'
' Unrecognized section name:\n'
' \"' + tokens[0] + '\"\n')
elif ((len(tokens) > 4) and (section_name == '#nonbond(12-6)')
and (pair_styles_selected & set(['lj','lj/cut','lj/cut/coul/long',
'lj/cut/coul/cut','lj/cut/coul/debye',
'lj/cut/coul/dsf','lj/cut/coul/msm',
'12-6','nonbond(12-6)']))):
if line.lstrip().find('!') == 0:
continue
atom_name = EncodeAName(tokens[2])
pair2ver[atom_name] = tokens[0]
pair2ref[atom_name] = tokens[1]
A = float(tokens[3])
B = float(tokens[4])
epsilon = B*B/(4*A)
sigma = pow(B/A, 1.0/6)
if sigma == 0.0:
sigma = 1.0 #(non-zero to avoid nan error later)
pair_styles.add('lj/cut/coul/long')
pair_style_args['lj/cut/coul/long'] = pair_cutoff
pair2style[atom_name] = 'lj/cut/coul/long'
pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
pair_mixing_style = 'geometric tail yes'
#if pair_style_name.find('lj/cut') == 0:
# pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
# pair_mixing_style = 'geometric tail yes'
elif ((len(tokens) > 4) and (section_name == '#nonbond(9-6)')
and (pair_styles_selected &
set(['class2', '9-6', 'nonbond(9-6)',
'lj/class2/coul/long']))):
if line.lstrip().find('!') == 0:
continue
atom_name = EncodeAName(tokens[2])
pair2ver[atom_name] = tokens[0]
pair2ref[atom_name] = tokens[1]
sigma = tokens[3]
epsilon = tokens[4]
pair_styles.add('lj/class2/coul/long')
pair_style_args['lj/class2/coul/long'] = pair_cutoff
pair2style[atom_name] = 'lj/class2/coul/long'
pair2params[atom_name] = (epsilon+' '+sigma)
pair_mixing_style = 'sixthpower tail yes'
#if pair_style_name.find('lj/class2') == 0:
# pair2params[atom_name] = (epsilon+' '+sigma)
# pair_mixing_style = 'sixthpower tail yes'
elif (len(tokens) == 6) and (section_name == '#bond_increments'):
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:4])]
delta_q = tokens[4:6]
atom_names = [a for a in aorig]
# swap the order of the atoms?
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
delta_q.reverse()
atom_names.reverse()
bond_name = EncodeInteractionName(atom_names, section_is_auto)
charge_pair_ver[bond_name] = tokens[0]
charge_pair_ref[bond_name] = tokens[1]
charge_pair_priority[bond_name] = \
(section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(charge_pair_ver[bond_name])))
bond2chargepair[bond_name] = (delta_q[0] + ' ' + delta_q[1])
elif ((len(tokens) > 5) and (section_name == '#quadratic_bond')
and (bond_styles_selected & set(['harmonic','quadratic','quadratic_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('harmonic')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
k = tokens[5]
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
bond2style[bond_name] = 'harmonic'
bond2params[bond_name] = (k+' '+r0)
elif ((len(tokens) > 6) and (section_name == '#morse_bond')
and (bond_styles_selected & set(['morse','morse_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('morse')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
D = tokens[5]
alpha = tokens[6]
sys.stderr.write('DEBUG: morse: atom_names = '+str(atom_names)+'\n')
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
bond2style[bond_name] = 'morse'
bond2params[bond_name] = (D+' '+alpha+' '+r0)
elif ((len(tokens) > 7) and (section_name == '#quartic_bond')
and (bond_styles_selected & set(['class2','quartic','quartic_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('class2')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
K2 = tokens[5]
K3 = tokens[6]
K4 = tokens[7]
bond2style[bond_name] = 'class2'
bond2params[bond_name] = (r0+' '+K2+' '+K3+' '+K4)
    # Pass 3: read the 3-body (angle) sections, including the class2
    # bond-bond and bond-angle cross terms that share the same 3 atoms.
    sys.stderr.write("parsing file pass3: look for (3-body) angle interactions...")
    for iline in range(0, len(lines)):
        line = lines[iline]
        sys.stderr.write('line=\"' + line.strip() + '\"\n')
        tokens = SplitQuotedString(line.strip(),
                                   quotes='',
                                   comment_char='>')
        #sys.stderr.write('tokens = ' + str(tokens) + '\n')
        if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
            continue
        if line.lstrip(' ').find('#') == 0:
            #sys.stderr.write('allowed_section_names = ' +
            #                 str(allowed_section_names) + '\n')
            if (tokens[0] in allowed_section_names):
                section_name = tokens[0]
                section_is_auto = tokens[-1].endswith('_auto')
                tokens_after_section_name = tokens[1:]
                sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
                continue
            elif (not tokens[0] in ('#version','#define')):
                raise InputError('Error: Line# '+str(iline) +'\n'
                                 '       Unrecognized section name:\n'
                                 '       \"' + tokens[0] + '\"\n')
        elif (len(tokens) > 6) and (section_name == '#quadratic_angle'):
            if line.lstrip().find('!') == 0:
                continue
            atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
            angle_name = EncodeInteractionName(atom_names, section_is_auto)
            angle2ver[angle_name] = tokens[0]
            angle2ref[angle_name] = tokens[1]
            angle2priority_or[angle_name] = \
                DetermineNumericPriority(section_is_auto,
                                         tokens[2:5],
                                         float(angle2ver[angle_name]))
            angle_is_secondary_or[angle_name] = False
            angle2priority[angle_name] = \
                (section_is_auto,
                 angle_is_secondary_or[angle_name],
                 angle2priority_or[angle_name])
            theta0 = tokens[5]
            k = tokens[6]
            # Remember theta0 (equilibrium angle); needed later for
            # angle-dependent cross terms.
            if not section_is_auto:
                angle2theta0_or[angle_name] = theta0
                sys.stderr.write('angle2theta0_or['+angle_name+'] = ' + str(theta0) + '\n')
            else:
                angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
                sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
            if (angle_styles_selected & set(['harmonic',
                                             'quadratic',
                                             'quadratic_angle'])):
                angle_styles.add('harmonic')
                angle2style[angle_name] = 'harmonic'
                angle2params[angle_name] = (k+' '+theta0)
            elif (angle_styles_selected & set(['class2',
                                               'quartic',
                                               'quartic_angle'])):
                # Then this is a special case of the class2 angle where
                # the (theta-theta0)^3 and (theta-theta0)^4 terms = 0
                angle_styles.add('class2')
                angle2style_or[angle_name] = 'class2'
                angle2params_or[angle_name] = (theta0+' '+k+' 0 0')
        elif ((len(tokens) > 8) and (section_name == '#quartic_angle')
              and (angle_styles_selected & set(['class2','quartic','quartic_angle']))):
            if line.lstrip().find('!') == 0:
                continue
            angle_styles.add('class2')
            atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
            ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            version = tokens[0]
            reference = tokens[1]
            angle2ver_or[ang_name_orig] = version
            angle2ref_or[ang_name_orig] = reference
            angle2priority_or[ang_name_orig] = \
                DetermineNumericPriority(section_is_auto,
                                         tokens[2:5],
                                         float(angle2ver_or[ang_name_orig]))
            angle_is_secondary_or[ang_name_orig] = False
            #angle2priority[ang_name_orig] = \
            #    (section_is_auto,
            #     angle_is_secondary_or[ang_name_orig],
            #     angle2priority_or[ang_name_orig])
            theta0 = tokens[5]
            if not section_is_auto:
                angle2theta0_or[ang_name_orig] = theta0
                sys.stderr.write('angle2theta0_or['+ang_name_orig+'] = ' + str(theta0) + '\n')
            else:
                angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
                sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
            K2 = tokens[6]
            K3 = tokens[7]
            K4 = tokens[8]
            angle2style_or[ang_name_orig] = 'class2'
            angle2params_or[ang_name_orig] = [theta0, K2, K3, K4]
            # If the cross terms for this angle have not been seen yet,
            # initialize them to zero (they may be filled in below).
            if not ang_name_orig in angle2class2_bb_or:
                angle2class2_bb_or[ang_name_orig] = '0.0' # default value
                angle2ver_bb_or[ang_name_orig] = version # default value
                angle2ref_bb_or[ang_name_orig] = reference # default value
            if not ang_name_orig in angle2class2_ba_or:
                angle2class2_ba_or[ang_name_orig] = ['0.0', '0.0'] # default value
                angle2ver_ba_or[ang_name_orig] = version # default value
                angle2ref_ba_or[ang_name_orig] = reference # default value
        elif ((len(tokens) > 5) and
              (section_name in ('#bond-bond', '#bond-angle')) and
              (angle_styles_selected &
               set(['class2', 'quartic', 'quartic_angle']))):
            if line.lstrip().find('!') == 0:
                continue
            version = tokens[0]
            reference = tokens[1]
            if line.lstrip().find('!') == 0:
                continue
            aorig = [a for a in map(EncodeAName, tokens[2:5])]
            atom_names = SortByEnds(aorig)
            ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            # K[0] and K[1] are the two bond-angle coupling constants;
            # if only one is given it applies to both bonds.
            K = ['', '']
            K[0] = tokens[5]
            K[1] = K[0]
            if len(tokens) > 6:
                K[1] = tokens[6]
            # If the atom order was flipped during sorting, swap the two
            # coupling constants to keep them paired with the right bond.
            order_reversed = aorig[0] > aorig[-1]
            if order_reversed:
                K.reverse()
            if (section_name == '#bond-bond'):
                angle2class2_bb_or[ang_name_orig] = K[0]
                angle2ver_bb_or[ang_name_orig] = version
                angle2ref_bb_or[ang_name_orig] = reference
            elif (section_name == '#bond-angle'):
                angle2class2_ba_or[ang_name_orig] = [k for k in K]
                angle2ver_ba_or[ang_name_orig] = version
                angle2ref_ba_or[ang_name_orig] = reference
            if not ang_name_orig in angle2params_or:
                angle_is_secondary_or[ang_name_orig] = True #only cross terms have been defined so far
                angle2params_or[ang_name_orig] = ['0.0', '0.0', '0.0', '0.0'] # default value
                angle2ver_or[ang_name_orig] = version
                angle2ref_or[ang_name_orig] = reference
                angle2priority_or[ang_name_orig] = 0.0
    # Pass 4: read the 4-body sections: dihedrals (torsions) and
    # impropers (out-of-plane), including all class2 cross terms.
    sys.stderr.write("parsing file pass4: look for dihedrals(torsions) and impropers(out_of_plane)...")
    for iline in range(0, len(lines)):
        line = lines[iline]
        sys.stderr.write('line=\"' + line.strip() + '\"\n')
        tokens = SplitQuotedString(line.strip(),
                                   quotes='',
                                   comment_char='>')
        #sys.stderr.write('tokens = ' + str(tokens) + '\n')
        if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
            continue
        if line.lstrip(' ').find('#') == 0:
            #sys.stderr.write('allowed_section_names = ' +
            #                 str(allowed_section_names) + '\n')
            if (tokens[0] in allowed_section_names):
                section_name = tokens[0]
                section_is_auto = tokens[-1].endswith('_auto')
                tokens_after_section_name = tokens[1:]
                sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
                continue
            elif (not tokens[0] in ('#version','#define')):
                raise InputError('Error: Line# '+str(iline) +'\n'
                                 '       Unrecognized section name:\n'
                                 '       \"' + tokens[0] + '\"\n')
        elif (len(tokens) > 8) and (section_name == '#torsion_1'):
            if line.lstrip().find('!') == 0:
                continue
            atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
            dihedral_name = EncodeInteractionName(atom_names, section_is_auto)
            dihedral2ver[dihedral_name] = tokens[0]
            dihedral2ref[dihedral_name] = tokens[1]
            dihedral2priority_or[dihedral_name] = \
                DetermineNumericPriority(section_is_auto,
                                         tokens[2:6],
                                         float(dihedral2ver[dihedral_name]))
            dihedral_is_secondary_or[dihedral_name] = False
            dihedral2priority[dihedral_name] = \
                (section_is_auto,
                 dihedral_is_secondary_or[dihedral_name],
                 dihedral2priority_or[dihedral_name])
            # torsion_1 line: K (force const), n (periodicity), d (phase)
            K = tokens[6]
            n = tokens[7]
            d = tokens[8]
            w = '0.0' #ignore: this is only used by the CHARMM force field
            if (dihedral_styles_selected & set(['charmm','torsion_1'])):
                dihedral_styles.add('charmm')
                dihedral2style[dihedral_name] = 'charmm'
                #dihedral2params_or[dihedral_name] = [K,n,d,w]
                dihedral2params[dihedral_name] = (K+' '+n+' '+d+' '+w)
            elif (dihedral_styles_selected & set(['class2','torsion_3'])):
                # Then this is a special case of the class2 angle
                # lacking the higher terms in the Fourier series
                dihedral_styles.add('class2')
                dihedral2style[dihedral_name] = 'class2'
                # NOTE(review): integer 0 entries here, vs. string '0.0'
                # defaults used elsewhere for this table — confirm
                # downstream code tolerates the mixed types.
                dihedral2params_or[dihedral_name] = [K,d,0,0,0,0]
        elif ((len(tokens) > 7) and (section_name == '#torsion_3')
              and (dihedral_styles_selected & set(['class2','torsion_3']))):
            if line.lstrip().find('!') == 0:
                continue
            dihedral_styles.add('class2')
            atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
            dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            version = tokens[0]
            reference = tokens[1]
            dihedral2priority_or[dih_name_orig] = \
                DetermineNumericPriority(section_is_auto,
                                         tokens[2:6],
                                         float(version))
            dihedral_is_secondary_or[dih_name_orig] = False
            #dihedral2priority[dih_name_orig] = \
            #    (section_is_auto,
            #     dihedral_is_secondary_or[dih_name_orig],
            #     dihedral2priority_or[dih_name_orig])
            # Up to 3 Fourier terms: (V1,phi0_1), (V2,phi0_2), (V3,phi0_3);
            # missing terms default to zero.
            V1 = tokens[6]
            phi0_1 = tokens[7]
            V2 = phi0_2 = V3 = phi0_3 = '0.0'
            if len(tokens) > 9:
                V2 = tokens[8]
                phi0_2 = tokens[9]
            if len(tokens) > 11:
                V3 = tokens[10]
                phi0_3 = tokens[11]
            dihedral2style_or[dih_name_orig] = 'class2'
            dihedral2ver_or[dih_name_orig] = version
            dihedral2ref_or[dih_name_orig] = reference
            dihedral2params_or[dih_name_orig] = [V1, phi0_1, V2, phi0_2, V3, phi0_3]
            # default values for cross terms:
            if not dih_name_orig in dihedral2class2_mbt_or:
                dihedral2class2_mbt_or[dih_name_orig] = ['0.0','0.0','0.0'] # default value
                dihedral2ver_mbt_or[dih_name_orig] = version
                dihedral2ref_mbt_or[dih_name_orig] = reference
            if not dih_name_orig in dihedral2class2_ebt_or:
                dihedral2class2_ebt_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
                dihedral2ver_ebt_or[dih_name_orig] = version
                dihedral2ref_ebt_or[dih_name_orig] = reference
            if not dih_name_orig in dihedral2class2_bb13_or:
                dihedral2class2_bb13_or[dih_name_orig] = '0.0' # default value
                dihedral2ver_bb13_or[dih_name_orig] = version
                dihedral2ref_bb13_or[dih_name_orig] = reference
            if not dih_name_orig in dihedral2class2_at_or:
                dihedral2class2_at_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
                dihedral2ver_at_or[dih_name_orig] = version
                dihedral2ref_at_or[dih_name_orig] = reference
            if not dih_name_orig in dihedral2class2_aat_or:
                dihedral2class2_aat_or[dih_name_orig] = '0.0' # default value
                dihedral2ver_aat_or[dih_name_orig] = version
                dihedral2ref_aat_or[dih_name_orig] = reference
        elif ((len(tokens) > 6) and (section_name == '#middle_bond-torsion_3')
              and (dihedral_styles_selected & set(['class2','torsion_3']))):
            if line.lstrip().find('!') == 0:
                continue
            dihedral_styles.add('class2')
            version = tokens[0]
            reference = tokens[1]
            if line.lstrip().find('!') == 0:
                continue
            aorig = [a for a in map(EncodeAName, tokens[2:6])]
            atom_names = SortByEnds(aorig)
            # Up to 3 middle-bond-torsion coefficients; missing ones
            # default to zero.
            Fmbt = [tokens[6], '0.0', '0.0']
            if len(tokens) > 7:
                Fmbt[1] = tokens[7]
            if len(tokens) > 8:
                Fmbt[2] = tokens[8]
            dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            #sys.stderr.write('DEBUG: (a2,a3) = '+str((a2,a3))+', '
            #                 ' (b1,b2) = '+str(batoms)+'\n')
            dihedral2style[dih_name_orig] = 'class2'
            dihedral2class2_mbt_or[dih_name_orig] = [F for F in Fmbt]
            dihedral2ver_mbt_or[dih_name_orig] = version
            dihedral2ref_mbt_or[dih_name_orig] = reference
            if not dih_name_orig in dihedral2params_or:
                dihedral_is_secondary_or[dih_name_orig] = True #only cross terms have been defined so far
                dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
                dihedral2ver_or[dih_name_orig] = version
                dihedral2ref_or[dih_name_orig] = reference
                dihedral2priority_or[dih_name_orig] = 0.0
        elif ((len(tokens) > 6) and
              (section_name in ('#end_bond-torsion_3',
                                '#bond-bond_1_3')) and
              (dihedral_styles_selected &
               set(['class2', 'torsion_3']))):
            if line.lstrip().find('!') == 0:
                continue
            dihedral_styles.add('class2')
            version = tokens[0]
            reference = tokens[1]
            if line.lstrip().find('!') == 0:
                continue
            aorig = [a for a in map(EncodeAName, tokens[2:6])]
            atom_names = SortByEnds(aorig)
            dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            dihedral2style[dih_name_orig] = 'class2'
            if section_name == '#end_bond-torsion_3':
                # Febt[0] applies to the left end bond, Febt[1] to the
                # right; if only one set of coefficients is given it is
                # used for both ends.
                Febt = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
                Febt[0][0] = tokens[6]
                if len(tokens) > 7:
                    Febt[0][1] = tokens[7]
                if len(tokens) > 8:
                    Febt[0][2] = tokens[8]
                Febt[1][0] = Febt[0][0]
                Febt[1][1] = Febt[0][1]
                Febt[1][2] = Febt[0][2]
                if len(tokens) > 9:
                    Febt[1][0] = tokens[9]
                if len(tokens) > 10:
                    Febt[1][1] = tokens[10]
                if len(tokens) > 11:
                    Febt[1][2] = tokens[11]
                # If sorting flipped the atom order, swap the two ends.
                order_reversed = aorig[0] > aorig[-1]
                if order_reversed:
                    Febt.reverse()
                dihedral2class2_ebt_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Febt] #deep copy of Febt[][]
                dihedral2ver_ebt_or[dih_name_orig] = version
                dihedral2ref_ebt_or[dih_name_orig] = reference
            elif section_name == '#bond-bond_1_3':
                Kbb13 = tokens[6]
                #dihedral2ver_bb13[dih_name_orig] = version
                dihedral2class2_bb13_or[dih_name_orig] = Kbb13
                dihedral2ver_bb13_or[dih_name_orig] = version
                dihedral2ref_bb13_or[dih_name_orig] = reference
            else:
                assert(False)
            if not dih_name_orig in dihedral2params_or:
                dihedral_is_secondary_or[dih_name_orig] = True #only cross terms have been defined so far
                dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
                dihedral2ver_or[dih_name_orig] = version
                dihedral2ref_or[dih_name_orig] = reference
                dihedral2priority_or[dih_name_orig] = 0.0
        elif ((len(tokens) > 6) and
              (section_name in ('#angle-torsion_3',
                                '#angle-angle-torsion_1')) and
              (dihedral_styles_selected &
               set(['class2', 'torsion_3']))):
            if line.lstrip().find('!') == 0:
                continue
            dihedral_styles.add('class2')
            version = tokens[0]
            reference = tokens[1]
            if line.lstrip().find('!') == 0:
                continue
            aorig = [a for a in map(EncodeAName, tokens[2:6])]
            atom_names = SortByEnds(aorig)
            dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            dihedral2style[dih_name_orig] = 'class2'
            if section_name == '#angle-torsion_3':
                # Fat[0] applies to the left (ijk) angle, Fat[1] to the
                # right (jkl) angle; a single set of coefficients is
                # used for both if only one is given.
                Fat = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
                Fat[0][0] = tokens[6]
                if len(tokens) > 7:
                    Fat[0][1] = tokens[7]
                if len(tokens) > 8:
                    Fat[0][2] = tokens[8]
                Fat[1][0] = Fat[0][0]
                Fat[1][1] = Fat[0][1]
                Fat[1][2] = Fat[0][2]
                if len(tokens) > 9:
                    Fat[1][0] = tokens[9]
                if len(tokens) > 10:
                    Fat[1][1] = tokens[10]
                if len(tokens) > 11:
                    Fat[1][2] = tokens[11]
                order_reversed = aorig[0] > aorig[-1]
                if order_reversed:
                    Fat.reverse()
                    Fat[0].reverse()
                    Fat[1].reverse()
                dihedral2class2_at_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Fat] #deep copy of Fat
                dihedral2ver_at_or[dih_name_orig] = version
                dihedral2ref_at_or[dih_name_orig] = reference
            elif section_name == '#angle-angle-torsion_1':
                Kaat = tokens[6]
                dihedral2class2_aat_or[dih_name_orig] = Kaat
                dihedral2ver_aat_or[dih_name_orig] = version
                dihedral2ref_aat_or[dih_name_orig] = reference
            else:
                assert(False)
            if not dih_name_orig in dihedral2params_or:
                dihedral_is_secondary_or[dih_name_orig] = True #only cross terms have been defined so far
                dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0'] # default value
                dihedral2ver_or[dih_name_orig] = version
                dihedral2ref_or[dih_name_orig] = reference
                dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 8) and (section_name == '#out_of_plane')
and (improper_styles_selected & set(['cvff','out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('cvff')
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names,_ignore = OOPImproperNameSort(tokens[2:6])
improper_name = EncodeInteractionName(atom_names, section_is_auto)
imsym = improper_symmetry_subgraph[improper_name] = 'cenJflipIL'
subgraph2impname['cenJflipIL'].add(improper_name) CONTINUEHERE
improper2ver[imsym][improper_name] = tokens[0]
improper2ref[imsym][improper_name] = tokens[1]
improper2priority_or[imsym][improper_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver[imsym][improper_name]))
improper_is_secondary_or[imsym][imp_name_orig] = False
improper2priority[imsym][improper_name] = \
(section_is_auto,
improper_is_secondary_or[imsym][imp_name_orig],
improper2priority_or[imsym][improper_name])
K = tokens[6]
n = tokens[7]
chi0 = tokens[8]
improper2style[imsym][improper_name] = 'cvff'
improper2params[imsym][improper_name] = (Kchi+' '+n+' '+chi0)
#if improper_style_name == 'cvff':
# improper2params[improper_name] = (Kchi+' '+n+' '+chi0)
# improper_symmetry_subgraph[improper_name] = 'cenJswapIL'
elif ((len(tokens) > 7) and (section_name == '#wilson_out_of_plane')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('class2')
sys.stderr.write('tokens = ' + str(tokens) + '\n')
version = tokens[0]
reference = tokens[1]
aorig = [a for a in map(EncodeAName, tokens[2:6])]
# To avoid redundancy, it is necessary to order the atoms
# in the interaction so that two equivalent ways of ordering
# the atoms in an improper interaction do not get misinterpreted
# as two different types of improper interactions. So we sort
# the 3 "leaf" atoms surrounding the central "hub" by name.
atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
# This will effect the formula for the energy.
# (specifically the "chi0" parameter)
# When we lookup the various cross-term interactions for that
# same improper interaction, we will be sure to sort them
# in the same way to make sure those interactions are
# associated with the same improper interaction.
imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
#improper_symmetry_subgraph_or[improper_name] = 'dihedrals_nosym' (<--no)
imsym = improper_symmetry_subgraph_or[imp_name_orig] = 'cenJsortIKL'
improper2ver_or[imsym][imp_name_orig] = version
improper2ref_or[imsym][imp_name_orig] = reference
improper2priority_or[imsym][imp_name_orig] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver_or[imp_name_orig]))
improper_is_secondary_or[imsym][imp_name_orig] = False
#improper2priority[imp_name_orig] = \
# (section_is_auto,
# improper_is_secondary_or[imp_name_orig],
# improper2priority_or[imp_name_orig])
K = tokens[6]
chi0 = tokens[7]
if Parity(permutation) != 0:
# Each time the order of a pair of atoms is swapped in
# the interaction, all 3 of the "X" (chi) angles change sign
# The formula for the ordinary term in the improper
# interaction is Ei = K*((Xijkl + Xkjli + Xljik)/3 - chi0)^2
# This formula is invariant if we change the sign of all
# Xijkl, Xkjli, Xljik, chi0
# Hence, we can account for a change in atom order by
# changing the sign of the "chi0" parameter.
# We calculate the "Parity" of the permutation (ie whether
# the permutation has an even or odd number of swaps)
# and multiply chi0 by -1 for each swap.
# It's not clear if this is necessary since in practice
# the "chi0" parameter is usually zero.
chi0 = str(-1.0*float(chi0)) # same as ('-' + chi0)
improper2style_or[imsym][imp_name_orig] = 'class2'
improper2params_or[imsym][imp_name_orig] = [K, chi0]
#improper2params[imp_name_orig] = K + ' ' + chi0
# default values for cross terms:
if not imp_name_orig in improper2class2_aa_or:
improper2class2_aa_or[imsym][imp_name_orig] = '0.0' #(default)
improper2ver_aa_or[imsym][imp_name_orig] = version
improper2ref_aa_or[imsym][imp_name_orig] = reference
# Initially, set all of the angle-angle cross terms to zero
# Start with the first cross term between aorig[0],aorig[1],aorig[2] & aorig[2],aorig[1],aorig[3]
improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
# ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
elif ((len(tokens) > 6) and (section_name == '#angle-angle')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
            # Parse one line of the "#angle-angle" (improper cross-term) section.
            # Skip comment lines (those whose first non-blank character is '!').
            if line.lstrip().find('!') == 0:
                continue
            improper_styles.add('class2')
            version = tokens[0]
            reference = tokens[1]
            # Encoded names of the 4 atoms in the order they appear in the file.
            aorig = [a for a in map(EncodeAName, tokens[2:6])]
            atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
            imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
            imsym = improper_symmetry_subgraph_or[imp_name_orig] = 'cenJsortIKL'
            improper2ver_aa_or[imsym][imp_name_orig] = version
            improper2ref_aa_or[imsym][imp_name_orig] = reference
            # K = the angle-angle cross-term force constant for this ordering.
            K = tokens[6]
            improper2style_or[imsym][imp_name_orig] = 'class2'
            # NOTE(review): membership is tested on the outer dict (keyed by
            # imsym) rather than improper2params_or[imsym] -- confirm intended.
            if not imp_name_orig in improper2params_or:
                improper_is_secondary_or[imsym][imp_name_orig] = True #only cross terms have been defined so far
                improper2params_or[imsym][imp_name_orig] = ['0.0', '0.0']
                improper2ver_or[imsym][imp_name_orig] = version
                improper2ref_or[imsym][imp_name_orig] = reference
                improper2priority_or[imsym][imp_name_orig] = 0.0
            if not imp_name_orig in improper2cross:
                # then initialize all of the cross terms to zero
                improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
                # ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
                improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
                improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
            #improper2class2_aa_or[imp_name_orig] = K (not needed)
            # Store K under the cross-term ID corresponding to the file ordering.
            improper2cross[imp_name_orig][ImCrossTermID(aorig)] = K
        # The next two sections describe interactions LAMMPS cannot represent;
        # we only remember that we saw them so a warning can be emitted later.
        elif (len(tokens) > 0) and (section_name == '#out_of_plane-out_of_plane'):
            if line.lstrip().find('!') == 0:
                continue
            display_OOP_OOP_warning = True
        elif (len(tokens) > 0) and (section_name == '#torsion-torsion_1'):
            if line.lstrip().find('!') == 0:
                continue
            display_torsion_torsion_1_warning = True
        elif section_name == '#templates':
            #if line.lstrip().find('!') == 0:
            #    continue
            # Template lines are collected verbatim for later processing.
            lines_templates.append(line)
        elif section_name == '#reference':
            if line.lstrip().find('!') == 0:
                continue
            # A reference section starts with its citation number; subsequent
            # non-blank lines are appended under that number.
            if len(tokens_after_section_name) > 0:
                ref_number = int(tokens_after_section_name[0])
            if len(line.strip()) > 0:
                lines_references[ref_number].append(line)
        # Dead code below: a bare string literal kept verbatim from the original
        # author (it is evaluated and discarded at runtime).
        """
        --- these next few lines of code appear to be unnecessary.
        --- I'll probably delete this code in a later version
        elif (len(tokens) > 3) and (section_name == '#hbond_definition'):
            hbondID = tokens[1]
            if tokens[2] == 'distance':
                hbond2distance[hbondID] = tokens[3]
            if tokens[2] == 'angle':
                hbond2angle[hbondID] = tokens[3]
            if tokens[2] == 'donors':
                hbond2donors[hbondID] = map(EncodeAName, tokens[2:])
            if tokens[2] == 'acceptors':
                hbond2acceptors[hbondID] = map(EncodeAname(),tokens[2:])
        """
    # After parsing: queue warnings for interaction classes we had to ignore.
    if display_OOP_OOP_warning:
        lines_warnings.append('###########################################################\n'
                              '# WARNING\n'
                              '# ALL \"out-of-plane_out-of_plane\" INTERACTIONS ARE IGNORED.\n'
                              '# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
                              '# \"out-of-plane_out-of_plane\" interactions are not yet supported in LAMMPS\n'
                              '# (...as of 2017-10-13) There is no way that moltemplate can produce\n'
                              '# LAMMPS compatible parameter files for these interactions.\n'
                              '###########################################################\n')
    if display_torsion_torsion_1_warning:
        lines_warnings.append('###########################################################\n'
                              '# WARNING\n'
                              '# ALL \"torsion_torsion_1\" INTERACTIONS ARE IGNORED.\n'
                              '# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
                              '# \"torsion_torsion_1\" interactions are not yet supported in LAMMPS\n'
                              '# (...as of 2017-10-13) There is no way that moltemplate can produce\n'
                              '# LAMMPS compatible parameter files for these interactions.\n'
                              '###########################################################\n')
    sys.stderr.write(' done.\n'
                     'building lookup tables...')
    # Dead code below: a bare string literal kept verbatim from the original.
    """
    --- these next few lines of code appear to be unnecessary.
    --- I'll probably delete them eventually
    if len(hbond2params) > 0:
        sys.stdout.write('\n\n write_once("In Settings") {\n')
        if hbond_style == 'hbond/dreiding/lj':
            for hbondID, angle in hbond2angle:
                hbond2params[hbondID] = hbond2distance[hbondID]+' '+hbond2angle[hbondID] ##<--this is not correct
            for hbondID, params in hbond2params:
                for donor in hbond2donors[hbondID]:
                    for acceptor in hbond2acceptors[hbondID]:
                        for hydrogen in hbond2hydrogens[hbondID]:
                            sys.stdout.write('pair_coeff @atom:'+donor+' @atom:'+acceptor+' '+hbond_style+' @atom:'+hydrogen+' i '+params+'\n')
        sys.stdout.write(' } # (DREIDING style H-bond parameters)\n\n\n')
    """
    sys.stderr.write(" done.\n")
    sys.stderr.write("Trying all combinations of atom types...")
    ##################### POST-PROCESSING ########################
    # For every angle type parsed from the .frc file, enumerate all atom-type
    # combinations consistent with its equivalences and emit the class2
    # cross terms (bond-bond "bb" and bond-angle "ba") for each combination.
    for ang_name_orig in angle2params_or:
        # "auto" equivalence entries are distinguished by an 'auto_' prefix.
        is_auto = (ang_name_orig.find('auto_') == 0)
        atom_names = ExtractANames(ang_name_orig)
        num_angles = 0
        # Candidate atom types for each of the 3 positions in the angle.
        atom_combos = [set([]), set([]), set([])]
        # We must consider every possible combination of atom types
        # which satisfy BOTH angle_equivalences and bond_equivalences.
        # ...AND we must consider BOTH regular AND auto equivalences.
        # For each combination generate a separate @angle interaction.
        # (I fear this will make the resulting .LT file large.)
        # Use different auto equivalence lookup tables for different
        # atoms in the interaction. (ie the "center" and "end" atoms)
        auto_angle2atom = [auto_angleend2atom,
                           auto_anglecenter2atom,
                           auto_angleend2atom]
for i in range(0, 3):
angle_atom_name = atom_names[i]
sys.stderr.write('DEBUG: angle_atom_name = '+angle_atom_name+'\n')
if not is_auto:
assert(angle_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_angle2atom['+angle_atom_name+'] = '+
str(equiv_angle2atom[angle_atom_name])+'\n')
for a in equiv_angle2atom[angle_atom_name]:
atom_combos[i].add(a)
else:
#assert((angle_atom_name[-1] == '_') or (angle_atom_name[0] == '*')) (<--some exceptions. don't assert this)
# assume "auto" equivalences when looking up atom types
sys.stderr.write('DEBUG: auto_angle2atom['+str(i)+']['+angle_atom_name+'] = \n'
' '+str(equiv_angle2atom[i][angle_atom_name])+'\n')
for a in auto_angle2atom[i][angle_atom_name]:
atom_combos[i].add(a)
        # Enumerate all (a1,a2,a3) combinations; a combination is usable only
        # if both bonds (a1-a2 and a2-a3) have a known rest length.
        found_at_least_one = False
        #for a1 in atom_combos[0]:
        for a1 in sorted(list(atom_combos[0])):
            #for a2 in atom_combos[1]:
            for a2 in sorted(list(atom_combos[1])):
                #sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
                bond_data1 = LookupBondLength(a1, a2,
                                              atom2equiv_bond,
                                              bond2r0,
                                              atom2auto_bond,
                                              bond2r0_auto)
                if bond_data1 == None: # Save time by continuing only if a
                    continue           # bond was defined between a1 and a2
                #for a3 in atom_combos[2]:
                for a3 in sorted(list(atom_combos[2])):
                    bond_data2 = LookupBondLength(a2, a3,
                                                  atom2equiv_bond,
                                                  bond2r0,
                                                  atom2auto_bond,
                                                  bond2r0_auto)
                    if bond_data2 == None:
                        continue
                    #bond lengths:
                    r0s = [0.0, 0.0]
                    #equivalent atom names used to lookup the bonds:
                    batoms = [['', ''], ['', '']]
                    #were "auto" equivalences needed to lookup the bond length?
                    b_is_auto = [False, False]
                    r0s[0], batoms[0], b_is_auto[0] = bond_data1
                    r0s[1], batoms[1], b_is_auto[1] = bond_data2
                    # NOTE(review): 'aorig' is left over from the file-parsing
                    # loop above; it is never assigned inside this loop.
                    # Confirm whether the current angle's atom names were
                    # intended here instead.
                    order_reversed = aorig[0] > aorig[-1]
                    if order_reversed:
                        batoms.reverse()
                        batoms[0].reverse()
                        batoms[1].reverse()
                        b_is_auto.reverse()
                    # Full name = original angle name + the bond lookups used.
                    ang_name_full = (ang_name_orig + ',' +
                                     EncodeInteractionName(batoms[0], b_is_auto[0]) + ',' +
                                     EncodeInteractionName(batoms[1], b_is_auto[1]))
                    #sys.stderr.write('DEBUG: (a1,a2,a3) = '+str((a1,a2,a3))+', '
                    #                 ' (b11,b12,b21,b22) = '+str(batoms)+'\n')
                    angle2ref_or[ang_name_full] = reference
                    angle2style_or[ang_name_full] = 'class2'
                    theta0_K_params = angle2params_or[ang_name_orig]
                    angle2params[ang_name_full] = ' '.join(theta0_K_params)
                    # ----- bond-bond ("bb") cross term -----
                    if ang_name_orig in angle2class2_bb_or:
                        Kbb = angle2class2_bb_or[ang_name_orig]
                        assert(ang_name_orig in angle2ver_bb_or)
                        assert(ang_name_orig in angle2ref_bb_or)
                    else: #(use default values)
                        Kbb = '0.0'
                        angle2class2_bb_or[ang_name_orig] = Kbb
                        angle2ver_bb_or[ang_name_orig] = angle2ver_or[ang_name_orig]
                        angle2ref_bb_or[ang_name_orig] = angle2ref_or[ang_name_orig]
                    angle2class2_bb[ang_name_full] = (Kbb+' '+r0s[0]+' '+r0s[1])
                    angle2priority_bb = \
                        DetermineNumericPriority(is_auto,
                                                 batoms[0] + batoms[1],
                                                 float(angle2ver_bb_or[ang_name_orig]))
                    angle2ver_bb[ang_name_full] = angle2ver_bb_or[ang_name_orig]
                    angle2ref_bb[ang_name_full] = angle2ref_bb_or[ang_name_orig]
                    # ----- bond-angle ("ba") cross term -----
                    if ang_name_orig in angle2class2_ba_or:
                        Kba = angle2class2_ba_or[ang_name_orig]
                        assert(ang_name_orig in angle2ver_ba_or)
                        assert(ang_name_orig in angle2ref_ba_or)
                    else: #(use default values)
                        Kba = ['0.0', '0.0']
                        angle2class2_ba_or[ang_name_orig] = Kba
                        angle2ver_ba_or[ang_name_orig] = angle2ver_or[ang_name_orig]
                        angle2ref_ba_or[ang_name_orig] = angle2ref_or[ang_name_orig]
                    angle2class2_ba[ang_name_full] = (Kba[0]+' '+Kba[1]+' '+r0s[0]+' '+r0s[1])
                    # The ba term is symmetric iff both coefficients match.
                    angle2sym_ba = (Kba[0] == Kba[1])
                    angle2priority_ba = \
                        DetermineNumericPriority(is_auto,
                                                 batoms[0] + batoms[1],
                                                 angle2ver_ba_or[ang_name_orig])
                    angle2ver_ba[ang_name_full] = angle2ver_ba_or[ang_name_orig]
                    angle2ref_ba[ang_name_full] = angle2ref_ba_or[ang_name_orig]
                    # The overall version is the newest of the three versions.
                    version = max((angle2ver_or[ang_name_orig],
                                   angle2ver_bb_or[ang_name_orig],
                                   angle2ver_ba_or[ang_name_orig]))
                    angle2ver[ang_name_full] = version
                    angle2ref[ang_name_full] = angle2ref_or[ang_name_orig]
                    angle2style[ang_name_full] = 'class2'
                    angle2priority[ang_name_full] = \
                        (is_auto,
                         angle_is_secondary_or[ang_name_orig],
                         angle2priority_or[ang_name_orig],
                         angle2priority_bb,
                         angle2priority_ba)
                    # Debug output only when a new entry was actually added.
                    if num_angles < len(angle2params):
                        sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
                        sys.stderr.write('DEBUG: len(angle2class2_bb) = '+str(len(angle2class2_bb))+'\n')
                        sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
                        #sys.stderr.write('DEBUG: len(angle2class2_ba) = '+str(len(angle2class2_ba))+'\n')
                        num_angles = len(angle2params)
                    # Asymmetric ba parameters with identical end-atom names
                    # cannot be represented; refuse loudly.
                    if ((not angle2sym_ba)
                        and
                        (atom_names[0] == atom_names[2])):
                        raise InputError('Error: Unsupported angle interaction: \"@angle:'+str(ang_name_orig)+'\"\n'
                                         ' This interaction has symmetric atom names:\n'
                                         ', '.join(atom_names)+'\n'
                                         ' and yet it lacks symmetry in the corresponding force field parameters.\n'
                                         ' (If this is not a mistake in the .frc file, then explain\n'
                                         ' why to andrew so he can fix this.)\n')
                    found_at_least_one = True
        # Fallback: no usable combination (typically an incomplete .FRC file).
        if not found_at_least_one:
            lines_warnings.append('# WARNING: Undefined bond length (r0) in angle: ' +
                                  ' '.join(atom_names)+'\n')
            # Then we were unable to define cross terms for this interaction
            # because at least one of the bond lengths could not be determined.
            # This usually occurs because most of the .FRC files which are
            # in circulation are incomplete. We have to handle this gracefully.
            ang_name_full = (ang_name_orig + ',X,X,X,X,X,X')
            version = angle2ver_or[ang_name_orig]
            reference = angle2ref_or[ang_name_orig]
            angle2ref[ang_name_full] = reference
            angle2ver[ang_name_full] = version
            angle2style[ang_name_full] = 'class2'
            angle2params[ang_name_full] = ' '.join(angle2params_or[ang_name_orig])
            # substitute zeros for all the cross term interactions
            angle2priority[ang_name_full] = angle2priority_or[ang_name_orig]
            angle2class2_bb[ang_name_full] = '0.0 1.0 1.0'
            angle2ref_bb[ang_name_full] = reference
            angle2ver_bb[ang_name_full] = version
            angle2class2_ba[ang_name_full] = '0.0 0.0 1.0 1.0'
            angle2ref_ba[ang_name_full] = reference
            angle2ver_ba[ang_name_full] = version
    #sys.stderr.write('bond_names = ' + str(bond_names) + '\n')
    ############ POST-PROCESSING DIHEDRALS ###########
    # Same strategy as the angles above, but a dihedral involves 4 atoms,
    # 3 bonds and 2 angles, all of which feed the class2 cross terms.
    for dih_name_orig in dihedral2params_or:
        #assert(dih_name_orig in dihedral2class2_mbt_or)
        #assert(dih_name_orig in dihedral2class2_ebt_or)
        #assert(dih_name_orig in dihedral2class2_bb13_or)
        #assert(dih_name_orig in dihedral2class2_at_or)
        #assert(dih_name_orig in dihedral2class2_aat_or)
        is_auto = (dih_name_orig.find('auto_') == 0)
        atom_names = ExtractANames(dih_name_orig)
        num_dihedrals = 0
        # Candidate atom types for each of the 4 positions in the dihedral.
        atom_combos = [set([]), set([]), set([]), set([])]
        # We must consider every possible combination of atom types
        # which satisfy all three:
        # dihedral_equivalences
        # bond_equivalences
        # angle_equivalences
        # ...AND we must consider BOTH regular AND auto equivalences.
        # For each combination generate a separate @dihedral interaction.
        # (I fear this will make the resulting .LT file large.)
        # Use different auto equivalence lookup tables for different
        # atoms in the interaction. (ie the "center" and "end" atoms)
        auto_dihedral2atom = [auto_dihedralend2atom,
                              auto_dihedralcenter2atom,
                              auto_dihedralcenter2atom,
                              auto_dihedralend2atom]
for i in range(0, 4):
dihedral_atom_name = atom_names[i]
sys.stderr.write('DEBUG: dihedral_atom_name = '+dihedral_atom_name+'\n')
if not is_auto:
assert(dihedral_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_dihedral2atom['+dihedral_atom_name+'] = '+
str(equiv_dihedral2atom[dihedral_atom_name])+'\n')
for a in equiv_dihedral2atom[dihedral_atom_name]:
atom_combos[i].add(a)
else:
assert((dihedral_atom_name[-1] == '_') or (ange_atom_name[0] == '*'))
# assume "auto" equivalences when looking up atom types
sys.stderr.write('DEBUG: auto_dihedral2atom['+str(i)+']['+dihedral_atom_name+'] = \n'
' '+str(equiv_dihedral2atom[i][dihedral_atom_name])+'\n')
for a in auto_dihedral2atom[i][dihedral_atom_name]:
atom_combos[i].add(a)
        # Enumerate (a1,a2,a3,a4); a combination is usable only if all three
        # bonds and both interior angles have known rest values.
        found_at_least_one = False
        #for a1 in atom_combos[0]:
        for a1 in sorted(list(atom_combos[0])):
            #for a2 in atom_combos[1]:
            for a2 in sorted(list(atom_combos[1])):
                #sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
                bond_data12 = LookupBondLength(a1, a2,
                                               atom2equiv_bond,
                                               bond2r0,
                                               atom2auto_bond,
                                               bond2r0_auto)
                if bond_data12 == None:
                    # Save time by only continuing if a bond was
                    # found between a1 and a2
                    continue
                #for a3 in atom_combos[2]:
                for a3 in sorted(list(atom_combos[2])):
                    bond_data23 = LookupBondLength(a2, a3,
                                                   atom2equiv_bond,
                                                   bond2r0,
                                                   atom2auto_bond,
                                                   bond2r0_auto)
                    if bond_data23 == None:
                        # Save time by only continuing if a bond was
                        # found between a2 and a3
                        continue
                    # Rest angle for the (a1,a2,a3) triplet (None if undefined).
                    angle_data123 = LookupBondAngle(a1, a2, a3,
                                                    atom2equiv_angle,
                                                    angle2theta0_or,
                                                    [atom2auto_angleend,
                                                     atom2auto_anglecenter,
                                                     atom2auto_anglecenter],
                                                    angle2theta0_auto_or)
                    if angle_data123 == None:
                        # Save time by only continuing if an angle was
                        # found between a1, a2, a3
                        continue
                    #for a4 in atom_combos[3]:
                    for a4 in sorted(list(atom_combos[3])):
                        bond_data34 = LookupBondLength(a3, a4,
                                                       atom2equiv_bond,
                                                       bond2r0,
                                                       atom2auto_bond,
                                                       bond2r0_auto)
                        if bond_data34 == None:
                            # Save time by only continuing if a bond was
                            # found between a3 and a4
                            continue
#rest bond lengths:
r0s = [0.0, 0.0, 0,0]
#equivalent atom names used to lookup the bonds:
batoms = [['', ''], ['', ''], ['','']]
#are these bond interactions "auto" interactions?
#were "auto" equivalences needed to lookup the bond length?
b_is_auto = [False, False, False]
r0s[0], batoms[0], b_is_auto[0] = bond_data12
r0s[1], batoms[1], b_is_auto[1] = bond_data23
r0s[2], batoms[2], b_is_auto[2] = bond_data34
                        # Rest angle for the (a2,a3,a4) triplet (None if undefined).
                        angle_data234 = LookupBondAngle(a2, a3, a4,
                                                        atom2equiv_angle,
                                                        angle2theta0_or,
                                                        [atom2auto_angleend,
                                                         atom2auto_anglecenter,
                                                         atom2auto_anglecenter],
                                                        angle2theta0_auto_or)
                        if angle_data234 == None:
                            # Save time by only continuing if an angle was
                            # found between a2, a3, a4
                            continue
                        #rest angles:
                        theta0s = [0.0, 0.0]
                        #equivalent atom names used to lookup angles:
                        aatoms = [['', '',''], ['', '','']]
                        #were "auto" equivalences needed to lookup the bond-angle?
                        a_is_auto = [False, False]
                        theta0s[0], aatoms[0], a_is_auto[0] = angle_data123
                        theta0s[1], aatoms[1], a_is_auto[1] = angle_data234
                        # NOTE(review): 'aorig' is left over from the parsing
                        # loop above; confirm the current dihedral's atom names
                        # were not intended here instead.
                        order_reversed = aorig[0] > aorig[-1]
                        if order_reversed:
                            batoms.reverse()
                            batoms[0].reverse()
                            batoms[1].reverse()
                            batoms[2].reverse()
                            b_is_auto.reverse()
                            theta0s.reverse()
                            aatoms.reverse()
                            aatoms[0].reverse()
                            aatoms[1].reverse()
                            a_is_auto.reverse()
                        #if is_auto:
                        # Full name = original dihedral name + the 3 bond and
                        # 2 angle lookups used to build its cross terms.
                        dih_name_full = (dih_name_orig + ',' +
                                         EncodeInteractionName(batoms[0], b_is_auto[0]) + ',' +
                                         EncodeInteractionName(batoms[1], b_is_auto[1]) + ',' +
                                         EncodeInteractionName(batoms[2], b_is_auto[2]) + ',' +
                                         EncodeInteractionName(aatoms[0], a_is_auto[0]) + ',' +
                                         EncodeInteractionName(aatoms[1], a_is_auto[1]))
                        #else:
                        #    assert(batoms[0][1] == batoms[1][0])
                        #    assert(batoms[1][1] == batoms[2][0])
                        #    assert(aatoms[0][1] == aatoms[1][0])
                        #    assert(aatoms[0][2] == aatoms[1][1])
                        #    dih_name_full = dih_name_orig + ',' + \
                        #        EncodeInteractionName([batoms[0][0], batoms[0][1]
                        #                               batoms[2][0], batoms[2][1],
                        #                               aatoms[0][0], aatoms[0][1],
                        #                               aatoms[0][2], aatoms[1][0]],
                        #                              False)
                        ########### Fourier terms ###########
                        #if dih_name_orig in dihedral2param_or:
                        V_phi0_params = dihedral2params_or[dih_name_orig]
                        dihedral2params[dih_name_full] = ' '.join(V_phi0_params)
                        #else:
                        #    dihedral2params[dih_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
                        ########### "mbt", "ebt", and "aat" terms ###########
                        # "mbt" terms: (middle-bond-torsion; uses r0 of the
                        # central bond, r0s[1])
                        if dih_name_orig in dihedral2class2_mbt_or:
                            Fmbt = dihedral2class2_mbt_or[dih_name_orig]
                        else:
                            # default to zeros and register version/reference
                            Fmbt = ['0.0', '0.0', '0.0']
                            dihedral2class2_mbt_or[dih_name_orig] = Fmbt
                            dihedral2ver_mbt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
                            dihedral2ref_mbt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
                        dihedral2class2_mbt[dih_name_full] = \
                            (Fmbt[0]+' '+Fmbt[1]+' '+Fmbt[2]+' '+r0s[1])
                        dihedral2priority_mbt = \
                            DetermineNumericPriority(is_auto,
                                                     batoms[1],
                                                     float(dihedral2ver_mbt_or[dih_name_orig]))
                        dihedral2ver_mbt[dih_name_full] = dihedral2ver_mbt_or[dih_name_orig]
                        dihedral2ref_mbt[dih_name_full] = dihedral2ref_mbt_or[dih_name_orig]
                        # "ebt" terms: (end-bond-torsion; uses r0s[0] and r0s[2])
                        if dih_name_orig in dihedral2class2_ebt_or:
                            Febt = dihedral2class2_ebt_or[dih_name_orig]
                            # symmetric iff both triples of coefficients match
                            dihedral2sym_ebt = ((Febt[0][0] == Febt[1][0]) and
                                                (Febt[0][1] == Febt[1][1]) and
                                                (Febt[0][2] == Febt[1][2]))
                            #and (r0s[0] == r0s[2]))
                        else:
                            Febt = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
                            dihedral2class2_ebt_or[dih_name_orig] = Febt
                            dihedral2ver_ebt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
                            dihedral2ref_ebt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
                            dihedral2sym_ebt = True
                        dihedral2class2_ebt[dih_name_full]= (Febt[0][0] + ' ' +
                                                             Febt[0][1] + ' ' +
                                                             Febt[0][2] + ' ' +
                                                             Febt[1][0] + ' ' +
                                                             Febt[1][1] + ' ' +
                                                             Febt[1][2] + ' ' +
                                                             r0s[0]+' '+r0s[2])
                        dihedral2priority_ebt = \
                            DetermineNumericPriority(is_auto,
                                                     batoms[0] + batoms[2],
                                                     float(dihedral2ver_ebt_or[dih_name_orig]))
                        dihedral2ver_ebt[dih_name_full] = dihedral2ver_ebt_or[dih_name_orig]
                        dihedral2ref_ebt[dih_name_full] = dihedral2ref_ebt_or[dih_name_orig]
                        #(Note: large atom_priority number <==> low priority
                        # Only one of the atom priority numbers should be > 0)
                        # "bb13" terms: (bond-bond-1-3; uses r0s[0] and r0s[2])
                        if dih_name_orig in dihedral2class2_bb13_or:
                            Kbb13 = dihedral2class2_bb13_or[dih_name_orig]
                            #dihedral2sym_bb13 = (r0s[0] == r0s[2])
                            dihedral2sym_bb13 = True
                        else:
                            Kbb13 = '0.0'
                            dihedral2class2_bb13_or[dih_name_orig] = Kbb13
                            dihedral2ver_bb13_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
                            dihedral2ref_bb13_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
                            dihedral2sym_bb13 = True
                        dihedral2class2_bb13[dih_name_full] = (Kbb13+' '+r0s[0]+' '+r0s[2])
                        dihedral2priority_bb13 = \
                            DetermineNumericPriority(is_auto,
                                                     batoms[0] + batoms[2],
                                                     float(dihedral2ver_bb13_or[dih_name_orig]))
                        dihedral2ver_bb13[dih_name_full] = dihedral2ver_bb13_or[dih_name_orig]
                        dihedral2ref_bb13[dih_name_full] = dihedral2ref_bb13_or[dih_name_orig]
########### "at" and "aat" terms ###########
# "at" terms:
if dih_name_orig in dihedral2class2_at_or:
Fat = dihedral2class2_at_or[dih_name_orig]
dihedral2sym_at = ((Fat[0][0] == Fat[1][0]) and
(Fat[0][1] == Fat[1][1]) and
(Fat[0][2] == Fat[1][2]))
#and (theta0[0] == theta0[1]))
else:
Fat = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
dihedral2class2_at_or[dih_name_orig] = Fat
dihedral2ver_at_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_at_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_at = True
dihedral2class2_at[dih_name_full] = \
(Fat[0][0] + ' ' +
Fat[0][1] + ' ' +
Fat[0][2] + ' ' +
Fat[1][0] + ' ' +
Fat[1][1] + ' ' +
Fat[1][2] + ' ' +
theta0s[0] + ' ' +
theta0s[1])
dihedral2priority_at = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_at_or[dih_name_orig]))
dihedral2ver_at[dih_name_full] = dihedral2ver_at_or[dih_name_orig]
dihedral2ref_at[dih_name_full] = dihedral2ref_at_or[dih_name_orig]
# "aat" terms:
if dih_name_orig in dihedral2class2_aat_or:
Kaat = dihedral2class2_aat_or[dih_name_orig]
#dihedral2sym_aat = (theta0[0] == theta0[1])
dihedral2sym_aat = True
else:
Kaat = '0.0'
dihedral2class2_aat_or[dih_name_orig] = Kaat
dihedral2ver_aat_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_aat_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_aat = True
dihedral2class2_aat[dih_name_full] = \
(Kaat+' '+theta0s[0]+' '+theta0s[1])
dihedral2priority_aat = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_aat_or[dih_name_orig]))
dihedral2ver_aat[dih_name_full] = dihedral2ver_aat_or[dih_name_orig]
dihedral2ref_aat[dih_name_full] = dihedral2ref_aat_or[dih_name_orig]
if len(dihedral2params) > num_dihedrals:
sys.stderr.write('DEBUG: dihedral['+dih_name_full+']:\n'
'(r12,r23,r34) = ('
+r0s[0]+','+r0s[1]+','+r0s[2]+') \n'
'(theta123,theta234) = ('
+theta0s[0]+','+theta0s[1]+') \n')
                            sys.stderr.write('DEBUG: num_dihedrals = len(dihedral2params) = '
                                             +str(len(dihedral2params))+'\n')
                        # The overall version is the newest of all term versions.
                        version = max((dihedral2ver_or[dih_name_orig],
                                       dihedral2ver_mbt_or[dih_name_orig],
                                       dihedral2ver_ebt_or[dih_name_orig],
                                       dihedral2ver_bb13_or[dih_name_orig],
                                       dihedral2ver_at_or[dih_name_orig],
                                       dihedral2ver_aat_or[dih_name_orig]))
                        dihedral2style[dih_name_full] = 'class2'
                        dihedral2ver[dih_name_full] = version
                        dihedral2ref[dih_name_full] = dihedral2ref_or[dih_name_orig]
                        dihedral2priority[dih_name_full] = \
                            (is_auto,
                             dihedral_is_secondary_or[dih_name_orig],
                             dihedral2priority_or[dih_name_orig],
                             dihedral2priority_mbt,
                             dihedral2priority_ebt,
                             dihedral2priority_bb13,
                             dihedral2priority_at,
                             dihedral2priority_aat)
                        num_dihedrals = len(dihedral2params)
                        # Asymmetric cross terms with palindromic atom names
                        # cannot be represented; refuse loudly.
                        if ((not (dihedral2sym_ebt and
                                  #dihedral2sym_mbt and
                                  # (note: symmetry doesn't make sense for mbt)
                                  dihedral2sym_at and
                                  dihedral2sym_aat and
                                  dihedral2sym_bb13))
                            and
                            ((atom_names[0] == atom_names[3]) and
                             (atom_names[1] == atom_names[2]))):
                            raise InputError('Error: Unsupported dihedral interaction: \"@dihedral:'+str(dih_name_orig)+'\"\n'
                                             ' This interaction has symmetric atom names:\n'+
                                             ', '.join(atom_names)+'\n'+
                                             ' and yet it lacks symmetry in the corresponding force field parameters.\n'+
                                             ' (If this is not a mistake in the .frc file, then explain\n'+
                                             ' why to andrew so he can fix this.)\n')
                        found_at_least_one = True
                #sys.stderr.write('DEBUG: number of interactions = '+str(len(dihedral2class2_bb))+'\n')
        # Fallback: no usable combination (typically an incomplete .FRC file).
        if not found_at_least_one:
            lines_warnings.append('# WARNING: Undefined bond length (r0) or rest angle (theta0) in dihedral: ' +
                                  #'# the dihedral interaction between: ' +
                                  ' '.join(atom_names)+'\n')
            # Then we were unable to define cross terms for this interaction because
            # at least one of the bond lengths or bond angles could not be determined.
            # This usually occurs because most of the .FRC files which are
            # in circulation are incomplete. We have to handle this gracefully.
            dih_name_full = (dih_name_orig + ',X,X,X,X,X,X,X,X,X,X,X,X')
            reference = dihedral2ref_or[dih_name_orig]
            version = dihedral2ver_or[dih_name_orig]
            dihedral2ref[dih_name_full] = reference
            dihedral2ver[dih_name_full] = version
            dihedral2style[dih_name_full] = 'class2'
            dihedral2priority[dih_name_full] = dihedral2priority_or[dih_name_orig]
            dihedral2params[dih_name_full] = ' '.join(dihedral2params_or[dih_name_orig])
            # substitute zeros for all the cross term interactions
            dihedral2class2_mbt[dih_name_full] = '0.0 0.0 0.0 1.0'
            dihedral2ref_mbt[dih_name_full] = reference
            dihedral2ver_mbt[dih_name_full] = version
            dihedral2class2_ebt[dih_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0 1.0 1.0'
            dihedral2ref_ebt[dih_name_full] = reference
            dihedral2ver_ebt[dih_name_full] = version
            dihedral2class2_bb13[dih_name_full] = '0.0 1.0 1.0'
            dihedral2ref_bb13[dih_name_full] = reference
            dihedral2ver_bb13[dih_name_full] = version
            dihedral2class2_at[dih_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0 120.0 120.0'
            dihedral2ref_at[dih_name_full] = reference
            dihedral2ver_at[dih_name_full] = version
            dihedral2class2_aat[dih_name_full] = '0.0 120.0 120.0'
            dihedral2ref_aat[dih_name_full] = reference
            dihedral2ver_aat[dih_name_full] = version
    ############ POST-PROCESSING IMPROPERS ###########
    # Only the 'cenJsortIKL' symmetry class (class2 hub-and-3-leaves impropers)
    # is handled here.
    imsym = 'cenJsortIKL'
    for imp_name_orig in improper2cross[imsym]:
        if improper2style_or[imsym][imp_name_orig] != 'class2':
            continue
        assert(imp_name_orig in improper2params_or[imsym])
        assert(imp_name_orig in improper2class2_aa_or[imsym])
        is_auto = (imp_name_orig.find('auto') == 0)
        atom_names = ExtractANames(imp_name_orig)
        num_impropers = 0
        # Candidate atom types for each of the 4 positions in the improper.
        atom_combos = [set([]), set([]), set([]), set([])]
        # We must consider every possible combination of atom types
        # which satisfy both:
        # improper_equivalences
        # angle_equivalences
        # ...AND we must consider BOTH regular AND auto equivalences.
        # For each combination generate a separate @improper interaction.
        # (I fear this will make the resulting .LT file large.)
        # Use different auto equivalence lookup tables for different
        # atoms in the interaction. (ie the "center" and "end" atoms)
        auto_improper2atom = [auto_improperend2atom,
                              auto_impropercenter2atom,
                              auto_improperend2atom,
                              auto_improperend2atom]
        # Fill atom_combos[i] with every atom type equivalent to position i.
        for i in range(0, 4):
            improper_atom_name = atom_names[i]
            sys.stderr.write('DEBUG: improper_atom_name = '+improper_atom_name+'\n')
            if not is_auto:
                assert(improper_atom_name[-1] != '_')
                # assume regular equivalences when looking up atom types
                sys.stderr.write('DEBUG: equiv_improper2atom['+improper_atom_name+'] = '+
                                 str(equiv_improper2atom[improper_atom_name])+'\n')
                for a in equiv_improper2atom[improper_atom_name]:
                    atom_combos[i].add(a)
            else:
                assert((improper_atom_name[-1] == '_') or (improper_atom_name[0] == 'X'))
                # assume "auto" equivalences when looking up atom types
                sys.stderr.write('DEBUG: auto_improper2atom['+str(i)+']['+improper_atom_name+'] = \n'
                                 ' '+str(auto_improper2atom[i][improper_atom_name])+'\n')
                for a in auto_improper2atom[i][improper_atom_name]:
                    atom_combos[i].add(a)
        # (Redundant: is_auto and atom_names were already computed above.)
        is_auto = IsAutoInteraction(imp_name_orig) # is this an "auto" interaction?
        atom_names = ExtractANames(imp_name_orig) # names of all 4 atoms
        lnames = [atom_names[0], atom_names[2], atom_names[3]] # names of "leaf" atoms
        #M1 = improper2cross[imp_name_orig][ 2 ]
        #M2 = improper2cross[imp_name_orig][ 0 ]
        #M3 = improper2cross[imp_name_orig][ 3 ]
        # NOTE(review): the lookups below index improper2cross[imp_name_orig]
        # directly, while the enclosing loop iterates improper2cross[imsym] --
        # confirm both spellings address the same table.
        #try:
        M1 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
                                                          atom_names[1],
                                                          atom_names[2],
                                                          atom_names[3]])]
        #except KeyError:
        #    M1 = '0.0'
        #try:
        M2 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[2],
                                                          atom_names[1],
                                                          atom_names[0],
                                                          atom_names[3]])]
        #except KeyError:
        #    M2 = '0.0'
        #try:
        M3 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
                                                          atom_names[1],
                                                          atom_names[3],
                                                          atom_names[2]])]
        #except KeyError:
        #    M3 = '0.0'
        # ###### Symmetry: ######
        # Unfortunately, it's time to wade into the messy issue of symmetry.
        # We desire a way to detect whether an improper interaction
        # between 4 atoms is invariant with respect to atom reordering
        # of the 3 peripheral "leaf" atoms which surround the central atom.
        # In principle, any rearrangement of atoms would require a separate
        # class2 improper interaction. However, in some cases, when the
        # parameters for these rearrangements are symmetric, we can detect
        # that and warn moltemplate that it is not necessary to generate new
        # improper interactions for every conceivable permutation of these
        # atoms. Figuring out when it is safe to do that is a headache.
        # (...but it's necessary. Otherwise each junction in the molecule
        # will generate 3*2*1=6 improper interactions which are usually
        # redundant. This will slow down the simulation significantly
        # and may make it difficult to compare the resulting LAMMPS
        # input files with those generated by other tools like msi2lmp.)
        #
        # To make this easier, I store the parameters in arrays which
        # are arranged in a more symmetric way
        M = [0.0, 0.0, 0.0]
        theta0 = [0.0, 0.0, 0.0]
        # noti3[i] = the sorted tuple of integers from the
        # set {0,1,2} which remain after deleting i
        noti3 = ((1,2), (0,2), (0,1))
        i_neigh = [ ([0,2,3][ noti3[i][0] ], # neighbor leaves of ith leaf
                     [0,2,3][ noti3[i][1] ]) for i in range(0,3)]
        for i in range(0, 3):
            # You will notice the pattern "[0,2,3][i]" appears often in the
            # code below because for class 2 force-fields, the second atom
            # (with index 1) is the central atom ("hub" atom), and the three
            # that surround it ("leaf" atoms) have indices 0,2,3. I want
            # to skip over the central atoms and loop over the leaf atoms
            imTermID = ImCrossTermID([atom_names[ i_neigh[i][0] ],
                                      atom_names[ 1 ],
                                      atom_names[ [0,2,3][i] ],
                                      atom_names[ i_neigh[i][1] ]])
            M[i] = float(improper2cross[imp_name_orig][imTermID])
            ##i_leaf = [0,2,3][i]
            ##M[i] = float(improper2cross[imp_name_orig][ i_leaf ])
            #angle_name_l = SortByEnds([atom_names[i_neigh[i][0]],
            #                           atom_names[ 1 ],
            #                           atom_names[i_neigh[i][1]]])
            #angle_name = EncodeInteractionName(angle_name_l, is_auto)
            #theta0[i] = float(angle2theta0_or[angle_name])
        # Record which pairs of leaf positions carry identical M parameters;
        # those pairs may be safely swapped during lookup.
        for i in range(0, 3):
            if (M[ noti3[i][0] ] == M[ noti3[i][1] ]):
                #and (theta0[ noti3[i][0] ] == theta0[ noti3[i][1] ])):
                # Then it is safe to swap the order of these two atoms in
                # the list of atoms when looking up force-field parameters
                improper2sym[imp_name_orig].add(i_neigh[i][0])
                improper2sym[imp_name_orig].add(i_neigh[i][1])
                # Later, I can use these to decide whether or not I need to
                # change the default script with symmetry rules. (I'm hoping
                # that "cenJsortIKL.py" should work in most cases.)
                # CONTINUEHERE: FIGURE OUT WHETHER TO WORRY ABOUT improper2sym
            else:
                if atom_names[i_neigh[i][0]] == atom_names[i_neigh[i][1]]:
                    raise InputError('Error: Unsupported improper interaction: \"@improper:'+str(imp_name_orig)+'\"\n'
                                     ' This interaction has matching atom aliases:\n'
                                     ' (@atom:'+str(atom_names[i_neigh[i][0]])+
                                     ', @atom:'+str(atom_names[i_neigh[i][1]])+')\n'
                                     ' and yet it lacks symmetry in the corresponding force field parameters.\n'
                                     ' (If this is not a mistake in the .frc file, then ask andrew to\n'
                                     ' fix this limitation.)\n')
        # Enumerate (a1,a2,a3,a4); a combination is usable only if all three
        # leaf-hub-leaf angles have known rest values (theta0).
        found_at_least_one = False
        for a1 in sorted(list(atom_combos[0])):
            for a2 in sorted(list(atom_combos[1])):
                sys.stderr.write('DEBUG: improper '+imp_name_orig+' substitutions: '+a1+','+a2+',...\n')
                for a3 in sorted(list(atom_combos[2])):
                    #(Note: sorting "atom_combos" makes it faster and easier
                    # to follow the loop's progress. This nested loop can be very slow.)
                    theta0s = ['0.0', '0.0', '0.0']
                    aatoms = [['', '',''], ['', '',''], ['', '', '']]
                    #were "auto" equivalences needed to lookup the bond-angle?
                    a_is_auto = [False, False, False]
                    # Collect information from the different terms in a class2 improper:
                    # http://lammps.sandia.gov/doc/improper_class2.html
                    # Loop over the neighbors of the central atom in each improper
                    # interaction and collect all the Mi and Ti parameters. Collect
                    # them in the order they appear in the formula for the Eaa
                    # term as it appears in the documentation for improper_style class2:
                    #
                    # http://lammps.sandia.gov/doc/improper_class2.html
                    #
                    # Eaa = M1 (Tijk - T0)(Tkjl - T2) + #common leaf node: k (index 2)
                    #       M2 (Tijk - T0)(Tijl - T1) + #common leaf node: i (index 0)
                    #       M3 (Tijl - T1)(Tkjl - T2) #common leaf node: l (index 3)
                    # (I'm trying to match the variable names used in this web page
                    # I wish the author had chosen the M1,M2,M3, T1,T2,T3 order in more
                    # symmetric way, or at least in a way that makes more sense to me.)
                    #angle_name_l = SortByEnds([atom_names[0], atom_names[1], atom_names[2]])
                    #angle_name = EncodeInteractionName(angle_name_l, is_auto)
                    #theta01 = angle2theta0_or[angle_name]
                    angle_data = LookupBondAngle(a1, a2, a3,
                                                 atom2equiv_angle,
                                                 angle2theta0_or,
                                                 [atom2auto_improperend,
                                                  atom2auto_impropercenter,
                                                  atom2auto_improperend],
                                                 angle2theta0_auto_or)
                    if angle_data == None:
                        # Save time by only continuing if an angle was
                        # found between a1, a2, a3
                        continue
                    theta0s[0], aatoms[0], a_is_auto[0] = angle_data
                    for a4 in sorted(list(atom_combos[3])):
                        # Reset the two remaining angle slots for this a4.
                        theta0s[1] = theta0s[2] = '0.0'
                        aatoms[1] = aatoms[2] = ['', '','']
                        #angle_name_l = SortByEnds(aatoms[0])
                        #angle_name = EncodeInteractionName(angle_name_l[0], is_auto)
                        #theta02 = angle2theta0_or[angle_name]
                        angle_data = LookupBondAngle(a1, a2, a4,
                                                     atom2equiv_angle,
                                                     angle2theta0_or,
                                                     [atom2auto_improperend,
                                                      atom2auto_impropercenter,
                                                      atom2auto_improperend],
                                                     angle2theta0_auto_or)
                        if angle_data == None:
                            # Save time by only continuing if an angle was
                            # found between a1, a2, a4
                            continue
                        theta0s[1], aatoms[1], a_is_auto[1] = angle_data
                        #angle_name_l = SortByEnds(aatoms[1])
                        #angle_name = EncodeInteractionName(angle_name_l, is_auto)
                        #theta03 = angle2theta0_or[angle_name]
                        angle_data = LookupBondAngle(a3, a2, a4,
                                                     atom2equiv_angle,
                                                     angle2theta0_or,
                                                     [atom2auto_improperend,
                                                      atom2auto_impropercenter,
                                                      atom2auto_improperend],
                                                     angle2theta0_auto_or)
                        if angle_data == None:
                            # Save time by only continuing if an angle was
                            # found between a3, a2, a4
                            continue
                        theta0s[2], aatoms[2], a_is_auto[2] = angle_data
                        # The following asserts checks that the two theta0s
                        # are defined whenever the corresponding M is defined.
                        # (Note: The order is LAMMPS-implementation specific.
                        # See http://lammps.sandia.gov/doc/improper_class2.html)
                        assert((float(theta0s[0]) != 0) or (float(M1) == 0))
                        assert((float(theta0s[2]) != 0) or (float(M1) == 0))
                        assert((float(theta0s[0]) != 0) or (float(M2) == 0))
                        assert((float(theta0s[1]) != 0) or (float(M2) == 0))
                        assert((float(theta0s[1]) != 0) or (float(M3) == 0))
                        assert((float(theta0s[2]) != 0) or (float(M3) == 0))
                        #angle_name_l = SortByEnds(aatoms[2])
                        #angle_name = EncodeInteractionName(angle_name_l, is_auto)
                        # Full name = original improper name + the 3 angle lookups used.
                        imp_name_full = (imp_name_orig + ',' +
                                         EncodeInteractionName(aatoms[0], a_is_auto[0]) + ',' +
                                         EncodeInteractionName(aatoms[1], a_is_auto[1]) + ',' +
                                         EncodeInteractionName(aatoms[2], a_is_auto[2]))
                        #if imp_name_orig in improper2params_or[imsym][imp_name_orig]:
                        improper2params[imsym][imp_name_full] = ' '.join(improper2params_or[imsym][imp_name_orig])
                        #else:
                        #    improper2params[imsym][imp_name_full] = '0.0 0.0'
                        #if imp_name_orig in improper2cross:
                        improper2class2_aa[imsym][imp_name_full] = \
                            (str(M1)+' '+str(M2)+' '+str(M3)+' '+
                             str(theta0s[0])+' '+str(theta0s[1])+' '+str(theta0s[2]))
                        #else:
                        #    improper2class2_aa[imsym][imp_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
                        #    improper2ver_aa_or[imsym][imp_name_orig] = improper2ver_or[imsym][imp_name_orig]
                        #    improper2ref_aa_or[imsym][imp_name_orig] = improper2ref_or[imsym][imp_name_orig]
                        improper2priority_aa = \
                            DetermineNumericPriority(is_auto,
                                                     aatoms[0] + aatoms[1] + aatoms[2],
                                                     float(improper2ver_aa_or[imsym][imp_name_orig]))
                        improper2ver_aa[imsym][imp_name_full] = improper2ver_aa_or[imsym][imp_name_orig]
                        improper2ref_aa[imsym][imp_name_full] = improper2ref_aa_or[imsym][imp_name_orig]
                        version = max((improper2ver_or[imsym][imp_name_orig],
                                       improper2ver_aa_or[imsym][imp_name_orig]))
                        improper2style[imsym][imp_name_full] = 'class2'
                        improper2ref[imsym][imp_name_full] = improper2ref_or[imsym][imp_name_orig]
                        improper2ver[imsym][imp_name_full] = version
                        improper2priority[imsym][imp_name_full] = \
                            (is_auto,
                             improper_is_secondary_or[imsym][imp_name_orig],
                             improper2priority_or[imsym][imp_name_orig],
                             improper2priority_aa)
                        # Debug output only when a new entry was actually added.
                        if len(improper2params) > num_impropers:
                            sys.stderr.write('DEBUG: improper['+imp_name_full+']:\n'
                                             'theta0 = ('
                                             +theta0s[0]+','+theta0s[1]+','+theta0s[2]+')\n')
                            sys.stderr.write('DEBUG: num_impropers = len(improper2params) = '
                                             +str(len(improper2params))+'\n')
                        num_impropers = len(improper2params)
                        found_at_least_one = True
if not found_at_least_one:
lines_warnings.append('# WARNING: Undefined rest angle (theta0) in improper: ' +
#'# the improper interaction between: ' +
' '.join(atom_names)+'\n')
# Then we were unable to define cross terms for this interaction because
# at least one of the equilibrium rest angles could not be determined.
# This usually occurs because most of the .FRC files which are
# in circulation are incomplete. We have to handle this gracefully.
imp_name_full = (imp_name_orig + ',X,X,X,X,X,X,X,X,X')
reference = improper2ref_or[imsym][imp_name_orig]
version = improper2ver_or[imsym][imp_name_orig]
improper2ref[imsym][imp_name_full] = reference
improper2ver[imsym][imp_name_full] = version
improper2params[imsym][imp_name_full] = ' '.join(improper2params_or[imp_name_orig])
CONTINUEHERE
improper2style[imp_name_full] = 'class2'
improper2priority[imp_name_full] = improper2priority_or[imp_name_orig]
# substitute zeros for the cross term interactions
improper2class2_aa[imp_name_full] = '0.0 0.0 0.0 120.0 120.0 120.0'
improper2ref_aa[imp_name_full] = reference
improper2ver_aa[imp_name_full] = version
sys.stderr.write("done\n")
sys.stderr.write("Converting to moltemplate format...\n")
##################### BEGIN WRITING FILE #####################
sys.stdout.write("# This file was generated automatically using:\n")
sys.stdout.write("# " + g_program_name + " " + " ".join(sys.argv[1:]) + "\n")
sys.stdout.write("\n\n")
sys.stdout.write(ffname + " {\n\n")
sys.stdout.write("\n"
" # AtomType Mass # \"Description\" (version, reference)\n\n")
sys.stdout.write(" write_once(\"Data Masses\") {\n")
for atype in atom2mass:
sys.stdout.write(" @atom:" + atype + " " + str(atom2mass[atype]))
sys.stdout.write(" # ")
if atype in atom2element:
sys.stdout.write(atom2element[atype] + ", ")
#sys.stdout.write(atom2descr[atype])
sys.stdout.write("\"" + atom2descr[atype] + "\"")
sys.stdout.write(" (")
if atype in atom2numbonds:
sys.stdout.write("nbonds="+str(atom2numbonds[atype])+", ")
sys.stdout.write("ver=" + atom2ver[atype] +
", ref=" + atom2ref[atype])
sys.stdout.write(")\n")
sys.stdout.write(" } #(end of atom masses)\n\n\n")
sys.stdout.write(" # ---------- EQUIVALENCE CATEGORIES for bonded interaction lookup ----------\n"
" # Each type of atom has a separate ID used for looking up bond parameters\n"
" # and a separate ID for looking up 3-body angle interaction parameters\n"
" # and a separate ID for looking up 4-body dihedral interaction parameters\n"
" # and a separate ID for looking up 4-body improper interaction parameters\n"
#" # (This is because there are several different types of sp3 carbon atoms\n"
#" # which have the same torsional properties when within an alkane molecule,\n"
#" # for example. If they share the same dihedral-ID, then this frees us\n"
#" # from being forced define separate dihedral interaction parameters\n"
#" # for all of them.)\n"
" # The complete @atom type name includes ALL of these ID numbers. There's\n"
" # no need to force the end-user to type the complete name of each atom.\n"
" # The \"replace\" command used below informs moltemplate that the short\n"
" # @atom names we have been using abovee are equivalent to the complete\n"
" # @atom names used below:\n\n")
for atype in atom2ffid:
#ffid = atype + "_ffid" + atom2ffid[atype]
sys.stdout.write(" replace{ @atom:" + atype +
" @atom:" + atom2ffid[atype] + " }\n")
sys.stdout.write("\n\n\n\n")
sys.stdout.write(" # --------------- Non-Bonded Interactions: ---------------------\n"
" # Syntax:\n"
" # pair_coeff AtomType1 AtomType2 pair_style_name parameters...\n\n")
sys.stdout.write(" write_once(\"In Settings\") {\n")
for atype in pair2params:
assert(atype in pair2style)
if IsAutoInteraction(bond_name):
assert(atype in atom2auto_pair)
if include_auto_equivalences:
sys.stdout.write(' pair_coeff @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
else:
continue
else:
assert(atype in atom2equiv_pair)
sys.stdout.write(' pair_coeff ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
sys.stdout.write(" } #(end of pair_coeffs)\n\n\n\n")
################# Print Charge By Bond Interactions ##################
charge_pair_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(charge_pair_priority.items())],
key=itemgetter(1),
reverse=True)]
if len(charge_pair_priority) > 0:
sys.stdout.write(" # ---------- Charge By Bond (a.k.a. \"bond equivalences\") ----------\n")
# Print rules for generating (2-body) "bond" interactions:
sys.stdout.write('\n\n\n'
' write_once("Data Charge By Bond") {\n')
for bond_name in charge_pair_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @atom:*,ap*,aq' + anames[0] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq' + anames[1] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
else:
continue
else:
sys.stdout.write(' @atom:*,p*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,p*,b' + anames[1] + ',a*,d*,i* ' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
sys.stdout.write(' } #(end of Charge by Bond (bond equivalences))\n\n'
'\n\n\n\n')
################# Print 2-body Bond Interactions ##################
bond_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(bond2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(bond2priority) > 0:
sys.stdout.write(" # --------------- Bond Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (2-body) "bond" interactions: --\n'
' # BondType AtomType1 AtomType2\n')
sys.stdout.write('\n'
' write_once("Data Bonds By Type')
if bond_symmetry_subgraph != '':
sys.stdout.write(' ('+bond_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for bond_name in bond_names_priority_high_to_low:
if not (bond2style[bond_name] in
bond_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,ap*,aq*,ab' + anames[0] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab' + anames[1] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,b' + anames[1] + ',a*,d*,i* ' +
'\n')
sys.stdout.write(' } # end of "Data Bonds By Type" section\n'
'\n')
# Print the force-field parameters for these bond interactions:
sys.stdout.write('\n\n'
' # ------------ Bond Parameters: ----------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # bond_coeff BondTypeName BondStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for bond_name in bond_names_priority_high_to_low:
if not (bond2style[bond_name] in
bond_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(bond_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' bond_coeff @bond:'+bond_name+' '+
bond2style[bond_name] + ' ' +
bond2params[bond_name] +
" # (ver=" + bond2ver[bond_name] +
", ref=" +bond2ref[bond_name] + ")\n")
sys.stdout.write(' } # end of bond_coeff commands\n'
'\n\n')
################# Print 3-body Angle Interactions ##################
ang_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(angle2priority.items())],
key=itemgetter(1),
reverse=True)]
ang_name_abbr = {} #optional abbreviated name for each interaction
ang_name_abbr_used = set([]) #make sure we don't reuse these abbreviated names
if len(angle2priority) > 0:
sys.stdout.write(" # --------------- Angle Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (3-body) "angle" interactions: --\n'
' # AngleType AtomType1 AtomType2 AtomType3 [BondType1 BondType2]\n')
sys.stdout.write('\n'
' write_once("Data Angles By Type')
if angle_symmetry_subgraph != '':
sys.stdout.write(' ('+angle_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for angle_name in ang_names_priority_high_to_low:
if not (angle2style[angle_name] in
angle_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
angle_is_auto = IsAutoInteraction(angle_name)
if angle2style[angle_name] == 'class2':
anm = [a for a in map(DecodeAName, anames)]
bnames = [[a for a in map(DecodeAName, anames[3:5])],
[a for a in map(DecodeAName, anames[5:7])]]
bond_is_auto1 = IsAutoInteraction(anames[3])
bond_is_auto2 = IsAutoInteraction(anames[5])
if ((angle_is_auto or bond_is_auto1 or bond_is_auto2) and
(not include_auto_equivalences)):
continue
# Can we ignore "auto" interactions?
# (If so, life is much easier)
if not (angle_is_auto or bond_is_auto1 or bond_is_auto2):
if angle2style[angle_name] == 'class2':
assert(bnames[0][1] == bnames[1][0])
# Optional: Shorten the angle name since some of the atom's bond names are redundant:
ang_name_abbr[angle_name] = EncodeInteractionName(map(EncodeAName,
anm[0:3] +
#[anm[3],anm[4],anm[6]],
[bnames[0][0],bnames[0][1],bnames[1][1]]),
angle_is_auto)
sys.stdout.write(' @angle:' + ang_name_abbr[angle_name] + ' ' +
' @atom:*,p*,b'+bnames[0][0]+',a'+anames[0]+',d*,i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+anames[1]+',d*,i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+anames[2]+',d*,i*'
'\n')
else:
ang_name_abbr[angle_name] = angle_name
sys.stdout.write(' @angle:' + ang_name_abbr[angle_name] + ' ' +
' @atom:*,p*,b*,a'+anames[0]+',d*,i* ' +
' @atom:*,p*,b*,a'+anames[1]+',d*,i* ' +
' @atom:*,p*,b*,a'+anames[2]+',d*,i*'
'\n')
else:
# Consider "auto" interactions and "auto" atom equivalences
ang_name_abbr[angle_name] = angle_name #(full name)
sys.stdout.write(' @angle:' + ang_name_abbr[angle_name] + ' ')
if angle2style[angle_name] == 'class2':
bshared = 'b*' #(default. overidden below)
abshared = 'ab*' #(default. overidden below)
if angle_is_auto:
a1 = a2 = a3 = 'a*' #Then, dont use regular equivalences for these atoms.
aa1 = 'aae' + anames[0] + ',aac*' #Instead use the corresponding "auto" equivalence names
aa2 = 'aae*,aac*' + anames[1] #for these atoms. (There are different auto equivalence names depending
aa3 = 'aae' + anames[2] + ',aac*' #on if the atom appears in the center (c) or end(e) of the 3-body angle)
else:
a1 = 'a' + anames[0] #In this case, use use (regular) equivalence names
a2 = 'a' + anames[1] #for these atoms
a3 = 'a' + anames[2]
aa1 = aa2 = aa3 = 'aae*,aac*'
if not bond_is_auto1:
b11 = 'b' + bnames[0][0] #(bond atom equivalent name)
b12 = 'b' + bnames[0][1] #(bond atom equivalent name)
bshared = 'b' + bnames[0][1] #(bond atom equivalent name)
ab11 = ab12 = 'ab*'
else:
b11 = b12 = 'b*'
ab11 = 'ab' + bnames[0][0] #(auto bond atom name)
ab12 = 'ab' + bnames[0][1] #(auto bond atom name)
abshared = 'ab' + bnames[0][1] #(auto bond atom name)
# print atom 1 information:
sys.stdout.write(' @atom:*,p*,'+b11+','+a1+',d*,i*,' +
'ap*,aq*,'+ab11+','+aa1+
',ade*,adc*,aie*,aic*')
if not bond_is_auto2:
b21 = 'b' + bnames[1][0] #(bond atom equivalent name)
b22 = 'b' + bnames[1][1] #(bond atom equivalent name)
assert((bshared == 'b*') or (bshared == 'b' + bnames[1][0]))
bshared = 'b' + bnames[1][0]
ab21 = ab22 = 'ab*'
else:
b21 = b22 = 'b*'
ab21 = 'ab' + bnames[1][0] #(auto bond atom name)
ab22 = 'ab' + bnames[1][1] #(auto bond atom name)
assert((abshared == 'ab*') or (abshared == 'ab' + bnames[1][0]))
abshared = 'ab' + bnames[1][0]
# print atom 2 information:
sys.stdout.write(' @atom:*,p*,'+bshared+','+a2+',d*,i*,' +
'ap*,aq*,'+abshared+','+aa2+
',ade*,adc*,aie*,aic*')
# print atom 3 information:
sys.stdout.write(' @atom:*,p*,'+b22+','+a3+',d*,i*,' +
'ap*,aq*,'+ab22+','+aa3+
',ade*,adc*,aie*,aic*')
sys.stdout.write('\n')
else:
sys.stdout.write(' @angle:' + ang_name_abbr[angle_name] + ' ' +
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae'+anames[0]+'aac*,ade*,adc*,aie*,aic* '
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac'+anames[1]+',ade*,adc*,aie*,aic* '
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae'+anames[2]+'aac*,ade*,adc*,aie*,aic* '
'\n')
assert(ang_name_abbr[angle_name] not in ang_name_abbr_used)
ang_name_abbr_used.add(ang_name_abbr[angle_name])
sys.stdout.write(' } # end of "Data Angles By Type" section\n'
'\n')
# Print the force-field parameters for these angle interactions:
sys.stdout.write('\n\n'
' # ------- Angle Force Field Parameters: -------')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # angle_coeff AngleTypeName AngleStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for angle_name in ang_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
if not (angle2style[angle_name] in
angle_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
#if (IsAutoInteraction(angle_name) and
# (not include_auto_equivalences)):
# continue
# the if statement above is covered by the following:
if angle_name not in ang_name_abbr:
continue
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr[angle_name]+' '+
angle2style[angle_name] + ' ' +
angle2params[angle_name] +
" # (ver=" + angle2ver[angle_name] +
", ref=" + angle2ref[angle_name] + ")\n")
if angle_name in angle2class2_bb:
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr[angle_name]+' '+
angle2style[angle_name] + ' bb ' +
angle2class2_bb[angle_name] +
" # (ver=" + angle2ver_bb[angle_name] +
", ref=" + angle2ref_bb[angle_name] + ")\n")
assert(angle_name in angle2class2_ba)
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr[angle_name]+' '+
angle2style[angle_name] + ' ba ' +
angle2class2_ba[angle_name] +
" # (ver=" + angle2ver_ba[angle_name] +
", ref=" + angle2ref_ba[angle_name] + ")\n")
sys.stdout.write(' } # end of angle_coeff commands\n'
'\n\n')
################# Print 4-body Dihedral Interactions ##################
dih_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(dihedral2priority.items())],
key=itemgetter(1),
reverse=True)]
dih_name_abbr = {} #optional abbreviated name for each interaction
dih_name_abbr_used = set([]) #make sure we don't reuse these abbreviated names
if len(dih_names_priority_high_to_low) > 0:
sys.stdout.write(' # --------------- Dihedral Interactions: ---------------------\n')
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "dihedral" interactions: --\n'
' # DihedralType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n\n'
' write_once("Data Dihedrals By Type')
if dihedral_symmetry_subgraph != '':
sys.stdout.write(' ('+dihedral_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for dihedral_name in dih_names_priority_high_to_low:
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
dihedral_is_auto = IsAutoInteraction(dihedral_name)
if dihedral2style[dihedral_name] == 'class2':
anm = [a for a in map(DecodeAName, anames)]
bnames = [[a for a in map(DecodeAName, anames[4:6])],
[a for a in map(DecodeAName, anames[6:8])],
[a for a in map(DecodeAName, anames[8:10])]]
bond_is_auto1 = IsAutoInteraction(anames[4])
bond_is_auto2 = IsAutoInteraction(anames[6])
bond_is_auto3 = IsAutoInteraction(anames[8])
ang_names = [[a for a in map(DecodeAName, anames[10:13])],
[a for a in map(DecodeAName, anames[13:16])]]
angle_is_auto1 = IsAutoInteraction(anames[10])
angle_is_auto2 = IsAutoInteraction(anames[13])
if ((dihedral_is_auto or
angle_is_auto1 or angle_is_auto2 or
bond_is_auto1 or bond_is_auto2 or bond_is_auto3) and
(not include_auto_equivalences)):
continue
# Can we ignore "auto" interactions?
# (If so, life is much easier)
if not (dihedral_is_auto or
angle_is_auto1 or angle_is_auto2 or
bond_is_auto1 or bond_is_auto2 or bond_is_auto3):
if dihedral2style[dihedral_name] == 'class2':
assert(bnames[0][1] == bnames[1][0])
assert(bnames[1][1] == bnames[2][0])
assert(ang_names[0][1] == ang_names[1][0])
assert(ang_names[0][2] == ang_names[1][1])
# Optional: Shorten the dihedral name since some of the atom's bond names are redundant:
dih_name_abbr[dihedral_name] = EncodeInteractionName(map(EncodeAName,
anm[0:4] +
#[bnames[0][0], bnames[0][1],
# bnames[1][1], bnames[2][1]]
[anm[4],anm[5],anm[7],anm[9]]+
#[ang_names[0][0],
# ang_names[0][1],
# ang_names[0][2],
# ang_names[1][2]]
[anm[10],anm[11],anm[12],anm[15]]),
is_auto)
sys.stdout.write(' @dihedral:' + dih_name_abbr[dihedral_name] + ' ' +
' @atom:*,p*,b'+bnames[0][0]+',a'+ang_names[0][0]+',d'+anames[0]+',i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+ang_names[0][1]+',d'+anames[1]+',i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+ang_names[0][2]+',d'+anames[2]+',i* '
' @atom:*,p*,b'+bnames[2][1]+',a'+ang_names[1][2]+',d'+anames[3]+',i*'
'\n')
else:
dih_name_abbr[dihedral_name] = dihedral_name
sys.stdout.write(' @dihedral:' + dih_name_abbr[dihedral_name] + ' ' +
' @atom:*,p*,b*,a*,d'+anames[0]+',i* ' +
' @atom:*,p*,b*,a*,d'+anames[1]+',i* ' +
' @atom:*,p*,b*,a*,d'+anames[2]+',i* '
' @atom:*,p*,b*,a*,d'+anames[3]+',i*' +
'\n')
else:
# Consider "auto" interactions and "auto" atom equivalences
dih_name_abbr[dihedral_name] = dihedral_name #(full name)
sys.stdout.write(' @dihedral:' + dih_name_abbr[dihedral_name] + ' ')
if dihedral2style[dihedral_name] == 'class2':
# equivalent names of atoms shared by more than one bond:
# (names ending in * mean they were unspecified for this
# dihedral interaction. By default, this is the case.)
bshared1 = 'b*' #(default. overidden below)
bshared2 = 'b*' #(default. overidden below)
abshared1 = 'ab*' #(default. overidden below)
abshared2 = 'ab*' #(default. overidden below)
# equivalent names of atoms shared by more than one angle interaction:
# (names ending in * mean they were unspecified for this
# dihedral interaction. By default, this is the case.)
ashared1 = 'a*' #(default. overidden below)
ashared2 = 'a*' #(default. overidden below)
aac_shared1 = 'aac*' #(default. overidden below)
aae_shared1 = 'aae*' #(default. overidden below)
aac_shared2 = 'aac*' #(default. overidden below)
aae_shared2 = 'aae*' #(default. overidden below)
if dihedral_is_auto:
d1 = d2 = d3 = d4 = 'd*' #Then, dont use regular equivalences for these atoms.
ad1 = 'ade' + anames[0] + ',adc*' #Instead use the corresponding "auto"
ad2 = 'ade*,adc*' + anames[1] #equivalence names for these atoms.
ad3 = 'ade*,adc*' + anames[1] #(There are different auto equivalence names depending upon
ad4 = 'ade' + anames[2] + ',adc*' # if the atom appears in the center (c) or end(e) of the dihedral)
else:
d1 = 'd' + anames[0] # In this case, use use (regular) equivalence names
d2 = 'd' + anames[1] # for these atoms
d3 = 'd' + anames[2]
d4 = 'd' + anames[3]
ad1 = ad2 = ad3 = ad4 = 'ade*,adc*'
if not bond_is_auto1:
b11 = 'b' + bnames[0][0] #(bond atom equivalent name)
b12 = 'b' + bnames[0][1] #(bond atom equivalent name)
bshared1 = 'b' + bnames[0][1] #(bond atom equivalent name)
ab11 = ab12 = 'ab*'
else:
b11 = b12 = 'b*'
ab11 = 'ab' + bnames[0][0] #(auto bond atom name)
ab12 = 'ab' + bnames[0][1] #(auto bond atom name)
abshared1 = 'ab' + bnames[0][1] #(auto bond atom name)
if not bond_is_auto2:
b21 = 'b' + bnames[1][0] #(bond atom equivalent name)
b22 = 'b' + bnames[1][1] #(bond atom equivalent name)
assert((bshared1 == 'b*') or (bshared1 == 'b' + bnames[1][0]))
bshared1 = 'b' + bnames[1][0] #(bond atom equivalent name)
assert((bshared2 == 'b*') or (bshared2 == 'b' + bnames[1][1]))
bshared2 = 'b' + bnames[1][1] #(bond atom equivalent name)
ab21 = ab22 = 'ab*'
else:
b21 = b22 = 'b*'
ab21 = 'ab' + bnames[1][0] #(auto bond atom name)
ab22 = 'ab' + bnames[1][1] #(auto bond atom name)
assert((abshared1 == 'ab*') or (abshared1 == 'ab' + bnames[1][0]))
abshared1 = 'ab' + bnames[1][0] #(auto bond atom name)
assert((abshared2 == 'ab*') or (abshared2 == 'ab' + bnames[1][1]))
abshared2 = 'ab' + bnames[1][1] #(auto bond atom name)
if not bond_is_auto3:
b31 = 'b' + bnames[2][0] #(bond atom equivalent name)
b32 = 'b' + bnames[2][1] #(bond atom equivalent name)
assert((bshared2 == 'b*') or (bshared2 == 'b' + bnames[2][0]))
bshared2 = 'b' + bnames[2][0] #(bond atom equivalent name)
ab31 = ab32 = 'ab*'
else:
b31 = b32 = 'b*'
ab31 = 'ab' + bnames[2][0] #(auto bond atom name)
ab32 = 'ab' + bnames[2][1] #(auto bond atom name)
assert((abshared2 == 'ab*') or (abshared2 == 'ab' + bnames[2][0]))
abshared2 = 'ab' + bnames[2][0] #(auto bond atom name)
if not angle_is_auto1:
a11 = 'a' + ang_names[0][0] #(angle atom equivalent name)
a12 = 'a' + ang_names[0][1] #(angle atom equivalent name)
a13 = 'a' + ang_names[0][2] #(angle atom equivalent name)
ashared1 = 'a' + ang_names[0][1] #(angle atom equivalent name)
ashared2 = 'a' + ang_names[0][2] #(angle atom equivalent name)
aa11 = 'aae*,aac*'
aa12 = 'aae*,aac*'
aa13 = 'aae*,aac*'
else:
a11 = a12 = a13 = 'a*'
aa11 = 'aae'+ang_names[0][0]+'aac*' #(auto angle atom name)
aa12 = 'aae*,aac'+ang_names[0][1] #(auto angle atom name)
aa13 = 'aae'+ang_names[0][2]+'aac*' #(auto angle atom name)
aac_shared1 = 'aac'+ang_names[0][1] #(auto angle atom name)
aae_shared2 = 'aae'+ang_names[0][2] #(auto angle atom name)
if not angle_is_auto2:
a21 = 'a' + ang_names[1][0] #(angle atom equivalent name)
a22 = 'a' + ang_names[1][1] #(angle atom equivalent name)
a23 = 'a' + ang_names[1][2] #(angle atom equivalent name)
assert((ashared1 == 'a*') or (ashared1 == 'a' + ang_names[1][0]))
ashared1 = 'a' + ang_names[1][0] #(angle atom equivalent name)
assert((ashared2 == 'a*') or (ashared2 == 'a' + ang_names[1][1]))
ashared2 = 'a' + ang_names[1][1] #(angle atom equivalent name)
aa21 = 'aae*,aac*'
aa22 = 'aae*,aac*'
aa23 = 'aae*,aac*'
else:
a21 = a22 = a23 = 'a*'
aa21 = 'aae'+ang_names[1][0]+',aac*' #(auto angle atom name)
aa22 = 'aae*,aac'+ang_names[1][1] #(auto angle atom name)
aa23 = 'aae'+ang_names[1][2]+',aac*' #(auto angle atom name)
aae_shared1 = 'aae'+ang_names[1][0] #(auto angle atom name)
aac_shared2 = 'aac'+ang_names[1][1] #(auto angle atom name)
# print atom 1 information:
sys.stdout.write(' @atom:*,p*,'+b11+','+a11+','+d1+',i*,' +
'ap*,aq*,'+ab11+','+aa11+',' +
ad1+',aie*,aic*')
# print atom 2 information:
sys.stdout.write(' @atom:*,p*,'+bshared1+','+ashared1+','+d2+',i*,' +
'ap*,aq*,'+abshared1+','+aae_shared1+','+aac_shared1+',' +
ad2+',aie*,aic*')
# print atom 3 information:
sys.stdout.write(' @atom:*,p*,'+bshared2+','+ashared2+','+d3+',i*,' +
'ap*,aq*,'+abshared2+','+aae_shared2+','+aac_shared2+',' +
ad3+',aie*,aic*')
# print atom 4 information:
sys.stdout.write(' @atom:*,p*,'+b32+','+a23+','+d4+',i*,' +
'ap*,aq*,'+ab32+','+aa23+',' +
ad4+',aie*,aic*')
sys.stdout.write('\n')
else:
assert(dihedral_is_auto) #(so we should use "auto" equivalence names for these atoms)
sys.stdout.write(' @dihedral:' + dih_name_abbr[dihedral_name] + ' ' +
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade'+anames[0]+',adc*,aie*,aic* '
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc'+anames[1]+',aie*,aic* '
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc'+anames[2]+',aie*,aic* '
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade'+anames[3]+',adc*,aie*,aic* '
'\n')
assert(dih_name_abbr[dihedral_name] not in dih_name_abbr_used)
dih_name_abbr_used.add(dih_name_abbr[dihedral_name])
sys.stdout.write(' } # end of "Data Dihedrals By Type" section\n'
'\n')
# Print the force-field parameters for these dihedral interactions:
sys.stdout.write('\n\n'
' # ------- Dihedral Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # dihedral_coeff DihedralTypeName DihedralStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for dihedral_name in dih_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
#if (len(anames) == 4) and dihedral2style[dihedral_name] == 'class2':
# continue
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
#if (IsAutoInteraction(dihedral_name) and
# (not include_auto_equivalences)):
# continue
# the if statement above is covered by the following:
if dihedral_name not in dih_name_abbr:
continue
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' ' +
dihedral2params[dihedral_name] +
" # (ver=" + dihedral2ver[dihedral_name] +
", ref=" + dihedral2ref[dihedral_name] + ")\n")
if dihedral_name in dihedral2class2_mbt:
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' mbt ' +
dihedral2class2_mbt[dihedral_name] +
" # (ver=" + dihedral2ver_mbt[dihedral_name] +
", ref=" + dihedral2ref_mbt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_ebt)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' ebt ' +
dihedral2class2_ebt[dihedral_name] +
" # (ver=" + dihedral2ver_ebt[dihedral_name] +
", ref=" + dihedral2ref_ebt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_at)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' at ' +
dihedral2class2_at[dihedral_name] +
" # (ver=" + dihedral2ver_at[dihedral_name] +
", ref=" + dihedral2ref_at[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_aat)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' aat ' +
dihedral2class2_aat[dihedral_name] +
" # (ver=" + dihedral2ver_aat[dihedral_name] +
", ref=" + dihedral2ref_aat[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_bb13)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr[dihedral_name]+' '+
dihedral2style[dihedral_name] + ' bb13 ' +
dihedral2class2_bb13[dihedral_name] +
" # (ver=" + dihedral2ver_bb13[dihedral_name] +
", ref=" + dihedral2ref_bb13[dihedral_name] + ")\n")
sys.stdout.write(' } # end of dihedral_coeff commands\n'
'\n\n')
################# Print 4-body Improper Interactions ##################
imp_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(improper2priority.items())],
key=itemgetter(1),
reverse=True)]
imp_name_abbr = {} #optional abbreviated name for each interaction
imp_name_abbr_used = set([]) #make sure we don't reuse these abbreviated names
if len(imp_names_priority_high_to_low) > 0:
sys.stdout.write(" # --------------- Improper Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "improper" interactions: --\n'
' # ImproperType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n'
' write_once("Data Impropers By Type')
if improper_symmetry_subgraph != '':
sys.stdout.write(' ('+improper_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for improper_name in imp_names_priority_high_to_low:
if not (improper2style[improper_name] in
improper_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
ang_names = [[a for a in map(DecodeAName, anames[4:7])],
[a for a in map(DecodeAName, anames[7:10])],
[a for a in map(DecodeAName, anames[10:13])]]
anm = [a for a in map(DecodeAName, anames)]
improper_is_auto = IsAutoInteraction(improper_name)
if improper2style[improper_name] == 'class2':
angle_is_auto1 = IsAutoInteraction(anames[4])
angle_is_auto2 = IsAutoInteraction(anames[7])
angle_is_auto3 = IsAutoInteraction(anames[10])
if ((improper_is_auto or
angle_is_auto1 or
angle_is_auto2 or
angle_is_auto3) and
(not include_auto_equivalences)):
continue
# Can we ignore "auto" interactions?
# (If so, life is much easier)
if not (improper_is_auto or
angle_is_auto1 or
angle_is_auto2 or
angle_is_auto3):
if improper2style[improper_name] == 'class2':
# NOTE: atom orderings here are LAMMPS implementation specific.
# http://lammps.sandia.gov/doc/improper_class2.html
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
assert(ang_names[0][1] == ang_names[1][1] == ang_names[2][1])
assert(ang_names[0][0] == ang_names[1][0])
assert(ang_names[1][2] == ang_names[2][2])
assert(ang_names[2][0] == ang_names[0][2])
# Optional: Shorten the improper name since some of the atom's bond names are redundant:
imp_name_abbr[improper_name] = EncodeInteractionName(map(EncodeAName, anm[0:4] +
[ang_names[0][0],
ang_names[0][1],
ang_names[0][2],
ang_names[1][2]]),
#[anm[4],anm[5],anm[6],
#anm[9]],
improper_is_auto)
sys.stdout.write(' @improper:' + imp_name_abbr[improper_name] + ' ' +
' @atom:*,p*,b*,a'+ang_names[0][0]+',d*,i' + anames[0] +
' @atom:*,p*,b*,a'+ang_names[0][1]+',d*,i' + anames[1] +
' @atom:*,p*,b*,a'+ang_names[0][2]+',d*,i' + anames[2] +
' @atom:*,p*,b*,a'+ang_names[1][2]+',d*,i' + anames[3] +
'\n')
else:
imp_name_abbr[improper_name] = improper_name
sys.stdout.write(' @improper:' + imp_name_abbr[improper_name] + ' ' +
' @atom:*,p*,b*,a*,d*,i' + anames[0] +
' @atom:*,p*,b*,a*,d*,i' + anames[1] +
' @atom:*,p*,b*,a*,d*,i' + anames[2] +
' @atom:*,p*,b*,a*,d*,i' + anames[3] +
'\n')
else:
# Consider "auto" interactions and "auto" atom equivalences
imp_name_abbr[improper_name] = improper_name #(full name)
sys.stdout.write(' @improper:' + imp_name_abbr[improper_name] + ' ')
if improper2style[improper_name] == 'class2':
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
# default angle atom equivalence names:
ashared1 = 'a*' #(default for a1 <-> ang_names[0][0], ang_names[1][0])
ashared2 = 'a*' #(default for a2 <-> ang_names[0][1], ang_names[1][1], ang_names[2][1])
ashared3 = 'a*' #(default for a3 <-> ang_names[2][0], ang_names[0][2])
ashared4 = 'a*' #(default for a4 <-> ang_names[1][2], ang_names[2][2])
# default auto angle atom equivalence names:
aashared1 = 'aae*,aac*' #(default for a1 <-> ang_names[0][0], ang_names[1][0])
aashared2 = 'aae*,aac*' #(default for a2 <-> ang_names[0][1], ang_names[1][1], ang_names[2][1])
aashared3 = 'aae*,aac*' #(default for a3 <-> ang_names[2][0], ang_names[0][2])
aashared4 = 'aae*,aac*' #(default for a4 <-> ang_names[1][2], ang_names[2][2])
if improper_is_auto:
i1 = i2 = i3 = i4 = 'i*' #Then, dont use regular equivalences for these atoms.
ai1 = 'aie' + anames[0] + ',aic*' #Instead use the corresponding "auto" equivalence names
ai2 = 'aie*,aic*' + anames[1] #for these atoms. (There are different auto equivalence names depending
ai3 = 'aie' + anames[2] + ',aic*' #on if the atom appears in the center (c) or end(e)
ai4 = 'aie' + anames[3] + ',aic*'
else:
i1 = 'i' + anames[0] #In this case, use use (regular) equivalence names
i2 = 'i' + anames[1] #for these atoms
i3 = 'i' + anames[2]
i4 = 'i' + anames[3]
ai1 = ai2 = ai3 = 'aie*,aic*'
#For reference, LAMMPS-specific atom ordering:
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
if not angle_is_auto1:
ashared1 = 'a' + ang_names[0][0]
ashared2 = 'a' + ang_names[0][1]
ashared3 = 'a' + ang_names[0][2]
else:
aashared1 = 'aae' + ang_names[0][0] + ',aac*'
aashared2 = 'aae*,aac' + ang_names[0][1]
aashared3 = 'aae' + ang_names[0][2] + ',aac*'
#For reference, LAMMPS-specific atom ordering:
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
if not angle_is_auto2:
assert((ashared1 == 'a*') or (ashared1 == 'a' + ang_names[1][0]))
ashared1 = 'a' + ang_names[1][0]
assert((ashared2 == 'a*') or (ashared2 == 'a' + ang_names[1][1]))
ashared2 = 'a' + ang_names[1][1]
ashared4 = 'a' + ang_names[1][2]
else:
assert((aashared1 == 'aae*,aac*') or (aashared1 == 'aae' + ang_names[1][0] + ',aac*'))
aashared1 = 'aae' + ang_names[1][0] + ',aac*'
assert((aashared2 == 'aae*,aac*') or (aashared2 == 'aae*,aac' + ang_names[1][1]))
aashared2 = 'aae*,aac' + ang_names[1][1]
aashared4 = 'aae' + ang_names[1][2] + ',aac*'
#For reference, LAMMPS-specific atom ordering:
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
if not angle_is_auto3:
assert((ashared3 == 'a*') or (ashared3 == 'a' + ang_names[2][0]))
ashared3 = 'a' + ang_names[2][0]
assert((ashared2 == 'a*') or (ashared2 == 'a' + ang_names[2][1]))
ashared2 = 'a' + ang_names[2][1]
assert((ashared4 == 'a*') or (ashared4 == 'a' + ang_names[2][2]))
ashared4 = 'a' + ang_names[2][2]
else:
assert((aashared3 == 'aae*,aac*') or (aashared3 == 'aae' + ang_names[2][0] + ',aac*'))
aashared3 = 'aae' + ang_names[2][0] + ',aac*'
assert((aashared2 == 'aae*,aac*') or (aashared2 == 'aae*,aac' + ang_names[2][1]))
aashared2 = 'aae*,aac' + ang_names[2][1]
assert((aashared4 == 'aae*,aac*') or (aashared4 == 'aae' + ang_names[2][2] + ',aac*'))
aashared4 = 'aae' + ang_names[2][2] + ',aac*'
# print atom 1 information:
sys.stdout.write(' @atom:*,p*,b*,'+ashared1+',d*,'+i1+','+
'ap*,aq*,ab*,'+aashared1+',ad*,'+ai1)
# print atom 2 information:
sys.stdout.write(' @atom:*,p*,b*,'+ashared2+',d*,'+i2+','+
'ap*,aq*,ab*,'+aashared2+',ad*,'+ai2)
# print atom 3 information:
sys.stdout.write(' @atom:*,p*,b*,'+ashared3+',d*,'+i3+','+
'ap*,aq*,ab*,'+aashared3+',ad*,'+ai3)
# print atom 4 information:
sys.stdout.write(' @atom:*,p*,b*,'+ashared4+',d*,'+i4+','+
'ap*,aq*,ab*,'+aashared4+',ad*,'+ai4)
sys.stdout.write('\n')
else:
sys.stdout.write(' @improper:' + imp_name_abbr[improper_name] + ' ' +
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aie'+anames[0]+',aic*'
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aie*,aic'+anames[1]+
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aie'+anames[2]+',aic*'
' @atom:*,p*,b*,d*,i*,' +
'ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aie'+anames[3]+',aic*'
'\n')
assert(imp_name_abbr[improper_name] not in imp_name_abbr_used)
imp_name_abbr_used.add(imp_name_abbr[improper_name])
sys.stdout.write(' } # end of "Data Impropers By Type" section\n'
'\n')
# Print the force-field parameters for these improper interactions:
sys.stdout.write('\n\n'
' # ------- Improper Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n'
'# Syntax: \n'
' # improper_coeff ImproperTypeName ImproperStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for improper_name in imp_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(improper_name)
if not (improper2style[improper_name] in
improper_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
#if (IsAutoInteraction(improper_name) and
# (not include_auto_equivalences)):
# continue
# the if statement above is covered by the following:
if improper_name not in imp_name_abbr:
continue
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr[improper_name]+' '+
improper2style[improper_name] + ' ' +
improper2params[improper_name] +
" # (ver=" + improper2ver[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
if improper_name in improper2class2_aa:
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr[improper_name]+' '+
improper2style[improper_name] + ' aa ' +
improper2class2_aa[improper_name] +
" # (ver=" + improper2ver_aa[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
sys.stdout.write(' } # end of improper_coeff commands\n'
'\n\n')
sys.stdout.write('\n\n\n\n'
' # -------------------- Select LAMMPS style(s) ------------------\n'
'\n')
sys.stdout.write('\n'
' # LAMMPS supports many different kinds of bonded and non-bonded\n'
' # interactions which can be selected at run time. Eventually\n'
' # we must inform LAMMPS which of them we will need. We specify\n'
' # this in the "In Init" section: \n\n')
sys.stdout.write(' write_once("In Init") {\n')
sys.stdout.write(' units real\n')
sys.stdout.write(' atom_style full\n')
if len(bond_styles) > 0:
sys.stdout.write(' bond_style hybrid')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' ' + bond_style)
sys.stdout.write('\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n')
if len(angle_styles) > 0:
sys.stdout.write(' angle_style hybrid')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' ' + angle_style)
sys.stdout.write('\n')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n')
if len(dihedral_styles) > 0:
sys.stdout.write(' dihedral_style hybrid')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' ' + dihedral_style)
sys.stdout.write('\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n')
if len(improper_styles) > 0:
sys.stdout.write(' improper_style hybrid')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' ' + improper_style)
sys.stdout.write('\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n')
if len(pair_styles) > 0:
sys.stdout.write(' pair_style hybrid')
for pair_style in pair_styles:
if not (pair_style in pair_styles_selected):
continue
sys.stdout.write(' ' + pair_style +
' ' + pair_style_args[pair_style])
sys.stdout.write('\n')
for pair_style in pair_styles:
sys.stdout.write(' # '+pair_style2docs[pair_style]+'\n')
sys.stdout.write('\n')
sys.stdout.write(' pair_modify mix ' + pair_mixing_style + '\n')
sys.stdout.write(' ' + special_bonds_command + '\n')
sys.stdout.write(' ' + kspace_style + '\n')
sys.stdout.write(' } #end of init parameters\n\n')
sys.stdout.write('} # ' + ffname + '\n\n')
sys.stdout.write("#\n"
"# WARNING: The following 1-2, 1-3, and 1-4 weighting parameters were ASSUMED:\n")
sys.stdout.write("# " + special_bonds_command + "\n")
sys.stdout.write("# (See http://lammps.sandia.gov/doc/special_bonds.html for details)\n")
#sys.stderr.write(' done.\n')
if len(lines_templates) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- templates from the original .frc file used for atom type selection: ---\n')
for line in lines_templates:
sys.stdout.write('# '+line)
if len(lines_references) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- references from the original .frc file: ----\n\n')
for ref_number,lines in sorted(lines_references.items()):
sys.stdout.write('# reference '+str(ref_number)+'\n')
for line in lines:
sys.stdout.write('# '+line)
sys.stdout.write('\n')
if len(lines_warnings) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- additional warnings: ----\n')
for line in lines_warnings:
sys.stdout.write(line)
if filename_in != '':
file_in.close()
except InputError as err:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(1)
# Standard script entry point: run the converter only when this file is
# executed directly, not when it is imported as a module.
if __name__ == '__main__':
    main()
|
quang-ha/lammps
|
tools/moltemplate/moltemplate/force_fields/convert_MSI_files_to_LT_files/msifrc2lt.py
|
Python
|
gpl-2.0
| 234,432
|
[
"CHARMM",
"LAMMPS",
"VisIt"
] |
9b302abb35dc07e4b1b05e6b8ba1c91f9021ca715eceeb41c98c6009d6d92bed
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
from PyQt4 import QtCore, QtGui
from openlp.core.lib import build_icon, translate
from openlp.core.lib.ui import create_button_box
from openlp.plugins.bibles.lib import LanguageSelection, BibleStrings
from openlp.plugins.bibles.lib.db import BiblesResourcesDB
class Ui_EditBibleDialog(object):
    """Designer-style UI builder for the Bible editor dialog.

    ``setupUi`` constructs the widget hierarchy (a meta-data tab and a
    custom book-name tab inside a tab widget) and ``retranslateUi``
    applies the translated strings to those widgets.
    """
    def setupUi(self, editBibleDialog):
        """Build all widgets and layouts on *editBibleDialog*."""
        editBibleDialog.setObjectName(u'editBibleDialog')
        editBibleDialog.resize(520, 400)
        editBibleDialog.setWindowIcon(build_icon(u':/icon/openlp-logo-16x16.png'))
        editBibleDialog.setModal(True)
        # Top-level vertical layout holding the tab widget and button box.
        self.dialogLayout = QtGui.QVBoxLayout(editBibleDialog)
        self.dialogLayout.setSpacing(8)
        self.dialogLayout.setContentsMargins(8, 8, 8, 8)
        self.dialogLayout.setObjectName(u'dialog_layout')
        self.bibleTabWidget = QtGui.QTabWidget(editBibleDialog)
        self.bibleTabWidget.setObjectName(u'BibleTabWidget')
        # Meta tab
        self.metaTab = QtGui.QWidget()
        self.metaTab.setObjectName(u'metaTab')
        self.metaTabLayout = QtGui.QVBoxLayout(self.metaTab)
        self.metaTabLayout.setObjectName(u'metaTabLayout')
        # License details: version name / copyright / permissions rows.
        self.licenseDetailsGroupBox = QtGui.QGroupBox(self.metaTab)
        self.licenseDetailsGroupBox.setObjectName(u'licenseDetailsGroupBox')
        self.licenseDetailsLayout = QtGui.QFormLayout(self.licenseDetailsGroupBox)
        self.licenseDetailsLayout.setObjectName(u'licenseDetailsLayout')
        self.versionNameLabel = QtGui.QLabel(self.licenseDetailsGroupBox)
        self.versionNameLabel.setObjectName(u'versionNameLabel')
        self.versionNameEdit = QtGui.QLineEdit(self.licenseDetailsGroupBox)
        self.versionNameEdit.setObjectName(u'versionNameEdit')
        self.versionNameLabel.setBuddy(self.versionNameEdit)
        self.licenseDetailsLayout.addRow(self.versionNameLabel, self.versionNameEdit)
        self.copyrightLabel = QtGui.QLabel(self.licenseDetailsGroupBox)
        self.copyrightLabel.setObjectName(u'copyrightLabel')
        self.copyrightEdit = QtGui.QLineEdit(self.licenseDetailsGroupBox)
        self.copyrightEdit.setObjectName(u'copyrightEdit')
        self.copyrightLabel.setBuddy(self.copyrightEdit)
        self.licenseDetailsLayout.addRow(self.copyrightLabel, self.copyrightEdit)
        self.permissionsLabel = QtGui.QLabel(self.licenseDetailsGroupBox)
        self.permissionsLabel.setObjectName(u'permissionsLabel')
        self.permissionsEdit = QtGui.QLineEdit(self.licenseDetailsGroupBox)
        self.permissionsEdit.setObjectName(u'permissionsEdit')
        self.permissionsLabel.setBuddy(self.permissionsEdit)
        self.licenseDetailsLayout.addRow(self.permissionsLabel, self.permissionsEdit)
        self.metaTabLayout.addWidget(self.licenseDetailsGroupBox)
        # Default language selection for book names in search/display.
        self.languageSelectionGroupBox = QtGui.QGroupBox(self.metaTab)
        self.languageSelectionGroupBox.setObjectName(u'languageSelectionGroupBox')
        self.languageSelectionLayout = QtGui.QVBoxLayout(self.languageSelectionGroupBox)
        self.languageSelectionLabel = QtGui.QLabel(self.languageSelectionGroupBox)
        self.languageSelectionLabel.setObjectName(u'languageSelectionLabel')
        self.languageSelectionComboBox = QtGui.QComboBox(self.languageSelectionGroupBox)
        self.languageSelectionComboBox.setObjectName(u'languageSelectionComboBox')
        # Four placeholder items; the real texts are set in retranslateUi.
        self.languageSelectionComboBox.addItems([u'', u'', u'', u''])
        self.languageSelectionLayout.addWidget(self.languageSelectionLabel)
        self.languageSelectionLayout.addWidget(self.languageSelectionComboBox)
        self.metaTabLayout.addWidget(self.languageSelectionGroupBox)
        self.metaTabLayout.addStretch()
        self.bibleTabWidget.addTab(self.metaTab, u'')
        # Book name tab
        self.bookNameTab = QtGui.QWidget()
        self.bookNameTab.setObjectName(u'bookNameTab')
        self.bookNameTabLayout = QtGui.QVBoxLayout(self.bookNameTab)
        self.bookNameTabLayout.setObjectName(u'bookNameTabLayout')
        self.bookNameNotice = QtGui.QLabel(self.bookNameTab)
        self.bookNameNotice.setObjectName(u'bookNameNotice')
        self.bookNameNotice.setWordWrap(True)
        self.bookNameTabLayout.addWidget(self.bookNameNotice)
        self.scrollArea = QtGui.QScrollArea(self.bookNameTab)
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName(u'scrollArea')
        self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.bookNameWidget = QtGui.QWidget(self.scrollArea)
        self.bookNameWidget.setObjectName(u'bookNameWidget')
        self.bookNameWidgetLayout = QtGui.QFormLayout(self.bookNameWidget)
        self.bookNameWidgetLayout.setObjectName(u'bookNameWidgetLayout')
        # One label/edit row per Bible book, keyed by book abbreviation.
        self.bookNameLabel = {}
        self.bookNameEdit= {}
        for book in BiblesResourcesDB.get_books():
            self.bookNameLabel[book[u'abbreviation']] = QtGui.QLabel(self.bookNameWidget)
            self.bookNameLabel[book[u'abbreviation']].setObjectName(u'bookNameLabel[%s]' % book[u'abbreviation'])
            self.bookNameEdit[book[u'abbreviation']] = QtGui.QLineEdit(self.bookNameWidget)
            self.bookNameEdit[book[u'abbreviation']].setObjectName(u'bookNameEdit[%s]' % book[u'abbreviation'])
            self.bookNameWidgetLayout.addRow(
                self.bookNameLabel[book[u'abbreviation']],
                self.bookNameEdit[book[u'abbreviation']])
        self.scrollArea.setWidget(self.bookNameWidget)
        self.bookNameTabLayout.addWidget(self.scrollArea)
        self.bookNameTabLayout.addStretch()
        self.bibleTabWidget.addTab(self.bookNameTab, u'')
        # Last few bits
        self.dialogLayout.addWidget(self.bibleTabWidget)
        self.button_box = create_button_box(editBibleDialog, u'button_box', [u'cancel', u'save'])
        self.dialogLayout.addWidget(self.button_box)
        self.retranslateUi(editBibleDialog)
        QtCore.QMetaObject.connectSlotsByName(editBibleDialog)
    def retranslateUi(self, editBibleDialog):
        """Apply translated strings to every widget created in setupUi."""
        self.book_names = BibleStrings().BookNames
        editBibleDialog.setWindowTitle(translate('BiblesPlugin.EditBibleForm', 'Bible Editor'))
        # Meta tab
        # NOTE(review): the translation context 'SongsPlugin.EditBibleForm'
        # below looks like a copy/paste slip -- every other call here uses
        # 'BiblesPlugin.EditBibleForm'. Changing it would orphan existing
        # translations, so it is flagged rather than fixed.
        self.bibleTabWidget.setTabText(self.bibleTabWidget.indexOf(self.metaTab),
            translate('SongsPlugin.EditBibleForm', 'Meta Data'))
        self.licenseDetailsGroupBox.setTitle(translate('BiblesPlugin.EditBibleForm', 'License Details'))
        self.versionNameLabel.setText(translate('BiblesPlugin.EditBibleForm', 'Version name:'))
        self.copyrightLabel.setText(translate('BiblesPlugin.EditBibleForm', 'Copyright:'))
        self.permissionsLabel.setText(translate('BiblesPlugin.EditBibleForm', 'Permissions:'))
        self.languageSelectionGroupBox.setTitle(translate('BiblesPlugin.EditBibleForm', 'Default Bible Language'))
        self.languageSelectionLabel.setText(translate('BiblesPlugin.EditBibleForm',
            'Book name language in search field, search results and on display:'))
        # Combo box index 0 is "Global Settings"; the LanguageSelection enum
        # values are offset by +1 to fill the remaining items.
        self.languageSelectionComboBox.setItemText(0, translate('BiblesPlugin.EditBibleForm', 'Global Settings'))
        self.languageSelectionComboBox.setItemText(LanguageSelection.Bible + 1,
            translate('BiblesPlugin.EditBibleForm', 'Bible Language'))
        self.languageSelectionComboBox.setItemText(LanguageSelection.Application + 1,
            translate('BiblesPlugin.EditBibleForm', 'Application Language'))
        self.languageSelectionComboBox.setItemText(LanguageSelection.English + 1,
            translate('BiblesPlugin.EditBibleForm', 'English'))
        # Book name tab
        # NOTE(review): same inconsistent 'SongsPlugin.EditBibleForm' context
        # as above -- verify intended context before changing.
        self.bibleTabWidget.setTabText(self.bibleTabWidget.indexOf(self.bookNameTab),
            translate('SongsPlugin.EditBibleForm', 'Custom Book Names'))
        for book in BiblesResourcesDB.get_books():
            self.bookNameLabel[book[u'abbreviation']].setText(u'%s:' % unicode(self.book_names[book[u'abbreviation']]))
|
marmyshev/transitions
|
openlp/plugins/bibles/forms/editbibledialog.py
|
Python
|
gpl-2.0
| 10,020
|
[
"Brian"
] |
692720cb09bb87680664884a30e6be82296182c0bdd9c111c0c9b1fda7923e90
|
"""
Application-class that implements pyFoamConvertMixingPlaneToNewSyntax.py
Adjust the mixingPlane interface definition in the boundary
file to the latest supported syntax.
Author:
Martin Beaudoin, Hydro-Quebec, 2012. All rights reserved
"""
from PyFoamApplication import PyFoamApplication
from PyFoam.RunDictionary.ParsedParameterFile import ParsedParameterFile
from os import path
import sys
# ------> Start of Python code snippet copied from an external source
#
# Author : Brian Beck
# http://code.activestate.com/recipes/410692-readable-switch-construction-without-lambdas-or-di/
#
# License for this Python code snippet:
#
# This code was deposited before July 15, 2008 on aspn.activestate.com.
# It is governed by the Python license (http://www.python.org/psf/license/)
# in accordance with the Python Cookbook agreement.
#
# Description:
# Python's lack of a 'switch' statement has garnered much discussion and even
# a PEP. The most popular substitute uses dictionaries to map cases to
# functions, which requires lots of defs or lambdas. While the approach shown
# here may be O(n) for cases, it aims to duplicate C's original 'switch'
# functionality and structure with reasonable accuracy.
#
# This class provides the functionality we want. You only need to look at
# this if you want to know how this works. It only needs to be defined
# once, no need to muck around with its internals.
#
class switch(object):
    """Readable C-style ``switch`` construct (ActiveState recipe 410692).

    Usage::

        for case in switch(value):
            if case('a'):
                ...
                break
            if case():  # default -- always matches
                ...

    The ``for`` loop iterates a generator that yields the :meth:`match`
    method exactly once; after the loop body runs (with or without a
    ``break``) iteration ends naturally.
    """

    def __init__(self, value):
        # The value being switched on.
        self.value = value
        # Becomes True once a case has matched, enabling C-style
        # fall-through on subsequent case() calls.
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # BUG FIX: the original ended with ``raise StopIteration`` here.
        # That is redundant in a generator (returning ends iteration) and
        # under PEP 479 (Python 3.7+) it is converted into a RuntimeError
        # that crashes any switch whose taken case lacks a ``break``.
        # Simply falling off the end is correct on all Python versions.

    def match(self, *args):
        """Indicate whether or not to enter a case suite.

        Called with no arguments it is the default case and always
        matches.  Once any case has matched, later calls also match
        (fall-through), mirroring C switch semantics.
        """
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
# ------> End of Python code snippet copied from an external source
####################################################################
#
# The rest of this source code was written by me.
# Martin Beaudoin, June 2012
class ConvertMixingPlaneBoundaryToNewSyntax(PyFoamApplication):
    """PyFoam application that updates mixingPlane patch definitions in a
    case's ``constant/polyMesh/boundary`` file to the newer syntax: the
    deprecated ``assembly`` and ``orientation`` keywords are replaced by
    an equivalent ``ribbonPatch`` sub-dictionary.

    Note: this is Python 2 code (``print`` statements, ``dict.has_key``).
    """
    def __init__(self, args=None):
        # Short help text shown by the command-line interface.
        description="""
Change MixingPlane boundary condition parameters
        """
        PyFoamApplication.__init__(self,
                                   args=args,
                                   description=description,
                                   usage="%prog <caseDirectory>",
                                   interspersed=True,
                                   changeVersion=False,
                                   nr=1)
    def addOptions(self):
        # --test: dry-run mode; print the converted boundary file instead
        # of writing it back to disk.
        self.parser.add_option("--test",
                               action="store_true",
                               default=False,
                               dest="test",
                               help="Only print the new boundary file")
    def run(self):
        """Read the boundary file, convert every mixingPlane patch in
        place, then write the result back (or just print it with --test).
        """
        fName=self.parser.getArgs()[0]
        boundary=ParsedParameterFile(path.join(".",fName,"constant","polyMesh","boundary"),debug=False,boundaryDict=True)
        bnd=boundary.content
        if type(bnd)!=list:
            print "Problem with boundary file (not a list)"
            sys.exit(-1)
        # NOTE(review): 'found' is assigned but never used afterwards.
        found=False
        # With boundaryDict=True the file parses as a flat list that
        # alternates patch names and patch dictionaries:
        # [name0, dict0, name1, dict1, ...]; step by 2 over the names.
        for index in range(0, len(bnd), 2):
            indexDefPatch=index+1
            oldAssembly=""
            oldOrientation=""
            if bnd[indexDefPatch]["type"]=="mixingPlane":
                # Remove the deprecated keywords, remembering their values
                # so they can be translated into ribbonPatch entries below.
                if bnd[indexDefPatch].has_key("assembly"):
                    print "    Replacing the parameter 'assembly' for patch", bnd[index]
                    oldAssembly=bnd[indexDefPatch]["assembly"]
                    del bnd[indexDefPatch]["assembly"]
                if bnd[indexDefPatch].has_key("orientation"):
                    print "    Replacing the parameter 'orientation' for patch", bnd[index]
                    oldOrientation=bnd[indexDefPatch]["orientation"]
                    del bnd[indexDefPatch]["orientation"]
                # Create the new-style entries only when absent, so values
                # already present in the file are never clobbered.
                if bnd[indexDefPatch].has_key("ribbonPatch")==False:
                    bnd[indexDefPatch]["ribbonPatch"]={}
                if bnd[indexDefPatch].has_key("zone")==False:
                    bnd[indexDefPatch]["zone"]=bnd[index] + "Zone"
                if oldAssembly != "":
                    # Converting "assembly" to ribbonPatch/discretisation
                    for case in switch(oldAssembly):
                        if case('master'):
                            bnd[indexDefPatch]["ribbonPatch"]["discretisation"]="masterPatch"
                            break
                        if case('slave'):
                            bnd[indexDefPatch]["ribbonPatch"]["discretisation"]="slavePatch"
                            break
                        if case('both'):
                            bnd[indexDefPatch]["ribbonPatch"]["discretisation"]="bothPatches"
                            break
                        if case('userdefined'):
                            bnd[indexDefPatch]["ribbonPatch"]["discretisation"]="userDefined"
                            break
                        if case(): # default
                            print "Unsupported assembly type: ", oldAssembly
                if oldOrientation != "":
                    # Converting "orientation" to ribbonPatch/ribbonPatchSweepAxis and
                    # ribbonPatch/ribbonPatchStackAxis
                    # Each old 'dirA_spanB' value maps to stackAxis=A, sweepAxis=B.
                    for case in switch(oldOrientation):
                        if case('dirX_spanY'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="X"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Y"
                            break
                        if case('dirX_spanZ'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="X"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Z"
                            break
                        if case('dirY_spanX'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Y"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="X"
                            break
                        if case('dirY_spanZ'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Y"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Z"
                            break
                        if case('dirZ_spanX'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Z"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="X"
                            break
                        if case('dirZ_spanY'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Z"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Y"
                            break
                        if case('dirR_spanTheta'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="R"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Theta"
                            break
                        if case('dirR_spanZ'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="R"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Z"
                            break
                        if case('dirTheta_spanZ'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Theta"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Z"
                            break
                        if case('dirTheta_spanR'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Theta"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="R"
                            break
                        if case('dirZ_spanTheta'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Z"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="Theta"
                            break
                        if case('dirZ_spanR'):
                            bnd[indexDefPatch]["ribbonPatch"]["stackAxis"]="Z"
                            bnd[indexDefPatch]["ribbonPatch"]["sweepAxis"]="R"
                            break
                        if case(): # default
                            print "Unsupported orientation type: ", oldOrientation
        if self.parser.getOptions().test:
            print boundary
        else:
            boundary.writeFile()
|
Unofficial-Extend-Project-Mirror/openfoam-extend-foam-extend-3.1
|
ThirdParty/LocalDev/Hydro-Quebec/PyFoam/ConvertMixingPlaneBoundaryToNewSyntax.py
|
Python
|
gpl-3.0
| 8,738
|
[
"Brian"
] |
576cc0f4a0fb5677b4b3f52e1046c696abcdd98cb6f9e0f606132e0090cb4c74
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2022 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import datetime
import difflib
import glob
import os
import re
import shelve
import shutil
import subprocess
import sys

from psi4.driver.p4util.exceptions import *
def run_cfour_module(xmod):
    """Run a single Cfour module executable (e.g. ``xjoda``) in the
    current working directory and return its captured stdout.

    Parameters
    ----------
    xmod : str
        Name of (or path to) the Cfour module executable to launch.

    Returns
    -------
    str
        The accumulated stdout text produced by the module.

    Raises
    ------
    ValidationError
        If the executable cannot be found or fails to launch.
    """
    # Find environment by merging PSIPATH and PATH environment variables,
    # then appending Psi4's basis-set directories so Cfour can locate GENBAS.
    lenv = {
        'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) +
                ':' + os.environ.get('PATH', '') +  # default '' so an unset PATH cannot raise TypeError
                ':' + core.get_datadir() + '/basis' +
                ':' + core.psi_top_srcdir() + '/share/basis',
        'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
        'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
    }
    # Filter out None values as subprocess will fault on them
    lenv = {k: v for k, v in lenv.items() if v is not None}
    # Call executable xmod, capturing its stdout.  universal_newlines=True
    # decodes the pipe to str -- the original accumulated raw bytes into a
    # str, which raises TypeError on Python 3 at the first line of output.
    try:
        retcode = subprocess.Popen([xmod], bufsize=0, stdout=subprocess.PIPE,
                                   env=lenv, universal_newlines=True)
    except OSError as e:
        # BUG FIX: the error paths previously referenced the undefined name
        # `cfour_executable` (a NameError if ever reached); report `xmod`.
        message = ('Program %s not found in path or execution failed: %s\n' % (xmod, e.strerror))
        sys.stderr.write(message)
        raise ValidationError(message)
    # Stream the module's output line by line until EOF.
    c4out = ''
    while True:
        data = retcode.stdout.readline()
        if not data:
            break
        c4out += data
    return c4out
def vpt2(name, **kwargs):
"""Perform vibrational second-order perturbation computation through
Cfour to get anharmonic frequencies. This version uses c4 for the disp
and pt2 but gets gradients from p4.
:type c4full: :ref:`boolean <op_py_boolean>`
:param c4full: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether when *name* indicates a Cfour method and *mode*
indicates a sow/reap approach, sown files are direct ZMAT files
and FJOBARC files are expected to reap, so that Cfour only, not
Cfour-through-Psi4, is needed for distributed jobs.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Presently uses all gradients. Could mix in analytic 2nd-derivs.
- Collect resutls.
- Manage scratch / subdir better.
- Allow CFOUR_BASIS
- Consider forcing some tighter convcrit, c4 and p4
- mixed ang/bohr signals
- error by converting to ang in psi?
- Expand CURRENT DIPOLE XYZ beyond SCF
- Remember additional FJOBARC record TOTENER2 if EXCITE .ne. NONE
- switch C --> S/R with recovery using shelf
"""
lowername = name.lower()
kwargs = p4util.kwargs_lower(kwargs)
optstash = p4util.OptionsState(
['BASIS'])
# Option mode of operation- whether vpt2 run in one job or files farmed out
if not('vpt2_mode' in kwargs):
if ('mode' in kwargs):
kwargs['vpt2_mode'] = kwargs['mode']
del kwargs['mode']
else:
kwargs['vpt2_mode'] = 'continuous'
# Switches for route through code- S/R or continuous & Psi4 or Cfour gradients
isSowReap = True if kwargs['vpt2_mode'].lower() == 'sowreap' else False
isC4notP4 = bool(re.match('cfour', lowername)) or bool(re.match('c4-', lowername))
isC4fully = True if ('c4full' in kwargs and yes.match(str(kwargs['c4full'])) and isC4notP4 and isSowReap) else False
# Save submission directory and basis set
current_directory = os.getcwd()
user_basis = core.get_global_option('BASIS')
# Open data persistence shelf- vital for sowreap, checkpoint for continuouw
shelf = shelve.open(current_directory + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf', writeback=True)
# Cfour keywords to request vpt2 analysis through findif gradients
core.set_local_option('CFOUR', 'CFOUR_VIBRATION', 'FINDIF')
core.set_local_option('CFOUR', 'CFOUR_FREQ_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANH_ALGORITHM', 'PARALLEL')
core.set_local_option('CFOUR', 'CFOUR_ANHARMONIC', 'VPT2')
core.set_local_option('CFOUR', 'CFOUR_FD_PROJECT', 'OFF')
# When a Psi4 method is requested for vpt2, a skeleton of
# computations in Cfour is still required to hang the gradients
# upon. The skeleton is as cheap as possible (integrals only
# & sto-3g) and set up here.
if isC4notP4:
skelname = lowername
else:
skelname = 'c4-scf'
core.set_global_option('BASIS', 'STO-3G')
# P4 'c4-scf'/'cfour'CALC_LEVEL lowername # temporary
# C4 lowername cfour{} # temporary
if 'status' not in shelf:
shelf['status'] = 'initialized'
shelf['linkage'] = os.getpid()
shelf['zmat'] = {} # Cfour-generated ZMAT files with finite difference geometries
shelf['fjobarc'] = {} # Cfour- or Psi4-generated ascii files with packaged gradient results
shelf.sync()
else:
pass
# how decide whether to use. keep precedent of intco.dat in mind
# Construct and move into directory job scratch / cfour scratch / harm
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path()) # psi_scratch
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir) # psi_scratch/cfour
if not os.path.exists('harm'):
os.mkdir('harm')
os.chdir('harm') # psi_scratch/cfour/harm
psioh.set_specific_retention(32, True) # temporary, to track p4 scratch
#shelf['status'] = 'anharm_jobs_sown' # temporary to force backtrack
print('STAT', shelf['status']) # temporary
# Generate the ZMAT input file in scratch
with open('ZMAT', 'w') as handle:
cfour_infile = write_zmat(skelname, 1)
handle.write(cfour_infile)
print('\n====== Begin ZMAT input for CFOUR ======')
print(open('ZMAT', 'r').read())
print('======= End ZMAT input for CFOUR =======\n')
shelf['genbas'] = open('GENBAS', 'r').read()
# Check existing shelf consistent with generated ZMAT, store
if ('000-000' in shelf['zmat']) and (shelf['zmat']['000-000'] != cfour_infile):
diff = difflib.Differ().compare(shelf['zmat']['000-000'].splitlines(), cfour_infile.splitlines())
raise ValidationError("""Input file translated to Cfour ZMAT does not match ZMAT stored in shelf.\n\n""" +
'\n'.join(list(diff)))
shelf['zmat']['000-000'] = cfour_infile
shelf.sync()
# Reset basis after Cfour skeleton seeded
core.set_global_option('BASIS', user_basis)
if shelf['status'] == 'initialized':
p4util.banner(' VPT2 Setup: Harmonic ')
# Generate the displacements that will form the harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the harmonic freq
zmats0N = ['000-' + item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s-%s has been read\n' % ('zmat' + zm2, zm1, zm2))
core.print_out('%s\n' % shelf['zmat'][zm12])
# S/R: Write distributed input files for harmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
print(msg)
shelf['status'] = 'harm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'harm_jobs_sown':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('harmonic', current_directory, zmats0N)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmats0N, reap_job_validate,
shelf['linkage'], ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the harmonic freq
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'harm_jobs_reaped'
if shelf['status'] == 'harm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir + '/harm') # psi_scratch/cfour/harm
harmout = run_cfour_module('xjoda')
harmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
harmout += run_cfour_module('xja2fja')
harmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
try:
os.remove('zmat' + zm2)
except OSError:
pass
harmout += run_cfour_module('xjoda')
harmout += run_cfour_module('xcubic')
core.print_out(harmout)
with open('harm.out', 'w') as handle:
handle.write(harmout)
# Generate displacements along harmonic normal modes
zmatsN0 = [item[-3:] for item in sorted(glob.glob('zmat*'))]
os.chdir('..') # psi_scratch/cfour
for zm1 in zmatsN0:
zm12 = zm1 + '-000'
with open(psioh.get_default_path() + cfour_tmpdir + '/harm/zmat' + zm1, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm1, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
# Collect displacements along the normal coordinates generated by the harmonic freq.
# Further harmonic freqs are to be run at each of these to produce quartic force field.
# To carry these out, generate displacements for findif by gradient at each displacement.
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm12])
shutil.copy2('../harm/GENBAS', 'GENBAS') # ln -s $ecpdir/ECPDATA $j/ECPDATA
with open('partial.out', 'w') as handle:
handle.write(run_cfour_module('xjoda'))
handle.write(run_cfour_module('xsymcor'))
# Read the displacements that will form the anharmonic freq
zmatsNN = [item[-3:] for item in sorted(glob.glob('zmat*'))]
for zm2 in zmatsNN:
zm12 = zm1 + '-' + zm2
with open(psioh.get_default_path() + cfour_tmpdir + '/' + zm1 + '/zmat' + zm2, 'r') as handle:
shelf['zmat'][zm12] = handle.read()
shelf.sync()
core.print_out(' CFOUR scratch file %s for %s has been read\n' % ('zmat' + zm2, zm12))
core.print_out('%s\n' % shelf['zmat'][zm12])
os.chdir('..') # psi_scratch/cfour
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Write distributed input files for anharmonic freq
if isSowReap:
os.chdir(current_directory)
inputSansMol = p4util.format_currentstate_for_input(gradient, lowername, allButMol=True, **kwargs)
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
ifile = vpt2_sow_files(zm12, shelf['linkage'], isC4notP4, isC4fully,
shelf['zmat'][zm12], inputSansMol, shelf['genbas'])
# GENBAS needed here
with open('VPT2-' + zm12 + '.in', 'w') as handle:
handle.write(ifile)
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
print(msg)
shelf['status'] = 'anharm_jobs_sown'
# S/R: Pause for distributed calculations
if isSowReap:
shelf.close()
return 0.0
if shelf['status'] == 'anharm_jobs_sown':
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
# S/R: Check that distributed calcs all completed correctly
if isSowReap:
msg = vpt2_instructions('anharmonic', current_directory, zmatsNN)
core.print_out(msg)
isOk, msg = sown_jobs_status(current_directory, 'VPT2', zmatsNN,
reap_job_validate, shelf['linkage'],
['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
core.print_out(msg)
print(msg)
if not isOk:
shelf.close()
return 0.0
# Collect all results from gradients forming the anharmonic freq
for zm12 in zmatsNN:
zm1, zm2 = zm12.split('-')
if zm12 not in shelf['fjobarc']:
p4util.banner(' VPT2 Computation: %s ' % (zm12))
print(' VPT2 Computation: %s ' % (zm12))
fjobarc = vpt2_reaprun_files(zm12, shelf['linkage'], isSowReap, isC4notP4, isC4fully,
shelf['zmat'][zm12], current_directory, psioh.get_default_path(), cfour_tmpdir,
lowername, kwargs)
shelf['fjobarc'][zm12] = fjobarc
shelf.sync()
shelf['status'] = 'anharm_jobs_reaped'
if shelf['status'] == 'anharm_jobs_reaped':
zmats0N = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] == '000' and item[-3:] != '000')]
zmatsN0 = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] == '000')]
zmatsNN = [item for item in sorted(shelf['zmat'].keys()) if (item[:3] != '000' and item[-3:] != '000')]
p4util.banner(' VPT2 Results: Harmonic ')
# Process the gradients into harmonic freq
os.chdir(psioh.get_default_path() + cfour_tmpdir) # psi_scratch/cfour
if os.path.exists('anharm'):
shutil.rmtree('anharm')
os.mkdir('anharm')
os.chdir('harm') # psi_scratch/cfour/harm
run_cfour_module('xclean')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm12 in zmats0N:
zm1, zm2 = zm12.split('-')
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xcubic')
core.print_out(anharmout)
with open('harm.out', 'w') as handle:
handle.write(anharmout)
# Process the gradients into harmonic freq at each normco displaced point
os.chdir('..') # psi_scratch/cfour
for zm11 in zmatsN0:
zm1 = zm11[:3]
if os.path.exists(zm1):
shutil.rmtree(zm1)
os.mkdir(zm1)
os.chdir(zm1) # psi_scratch/cfour/004
run_cfour_module('xclean')
with open('ZMAT', 'w') as handle:
handle.write(shelf['zmat'][zm11])
shutil.copy2('../harm/GENBAS', 'GENBAS')
anharmout = run_cfour_module('xjoda')
anharmout += run_cfour_module('xsymcor')
for zm22 in [item for item in zmatsNN if (item[:3] == zm1 and item[-3:] != '000')]:
zm2 = zm22[-3:]
zm12 = zm1 + '-' + zm2
print(zm12)
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout += run_cfour_module('xja2fja')
anharmout += run_cfour_module('xsymcor')
shutil.move('FJOBARC', 'fja.' + zm12)
anharmout += run_cfour_module('xjoda')
anharmout += run_cfour_module('xja2fja')
with open('FJOBARC', 'r') as handle:
shelf['fjobarc'][zm11] = handle.read()
shelf.sync()
core.print_out(anharmout)
with open('partial.out', 'w') as handle:
handle.write(anharmout)
os.chdir('..') # psi_scratch/cfour
# Process the harmonic freqs at normco displacements into anharmonic freq
p4util.banner(' VPT2 Results: Anharmonic ')
os.chdir('anharm') # psi_scratch/cfour/anharm
shutil.copy2('../harm/JOBARC', 'JOBARC')
shutil.copy2('../harm/JAINDX', 'JAINDX')
for zm12 in zmatsN0:
with open('FJOBARC', 'w') as handle:
handle.write(shelf['fjobarc'][zm12])
anharmout = run_cfour_module('xja2fja')
anharmout += run_cfour_module('xcubic')
shutil.move('FJOBARC', 'fja.' + zm12)
core.print_out(anharmout)
with open('anharm.out', 'w') as handle:
handle.write(anharmout)
shelf['status'] = 'vpt2_completed'
# Finish up
os.chdir(current_directory)
shelf.close()
optstash.restore()
def vpt2_sow_files(item, linkage, isC4notP4, isC4fully, zmat, inputSansMol, inputGenbas):
    """Assemble the text of a single sow input file for displacement *item*.

    *zmat* holds the Cfour ZMAT for the displacement, *linkage* tags the
    sown job so reaped results can be matched back, *inputSansMol* carries
    the common molecule-free Psi4 input, and *inputGenbas* the GENBAS basis
    file contents. The mode flags pick which program computes the gradient.
    Returns the complete input-file string to be written out.
    """
    # Orders appended to every sown input so its output carries greppable
    # 'VPT2 RESULT' lines for energy, gradient and dipole.
    reap_orders = r"""
print_variables()
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT ENERGY being %r\n' % (variable('CURRENT ENERGY')))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT GRADIENT being %r\n' % (p4util.mat2arr(core.get_gradient())))
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT DIPOLE being [%r, %r, %r]\n' % (variable('CURRENT DIPOLE X'), variable('CURRENT DIPOLE Y'), variable('CURRENT DIPOLE Z')))
""".format(linkage, item)

    # Direct Cfour: the ZMAT itself is the sown input; stash GENBAS alongside.
    if isC4fully:
        with open('VPT2-GENBAS', 'w') as handle:
            handle.write(inputGenbas)
        return zmat

    # Cfour-via-Psi4: embed GENBAS (blank lines marked) and the ZMAT, and
    # additionally report the active molecule for back-transformation.
    if isC4notP4:
        pieces = ['extracted_genbas = """\n' + inputGenbas.replace('\n\n', '\nblankline\n') + '\n"""\n\n']
        pieces.append("""cfour {\n%s\n}\n\nenergy('cfour', genbas=extracted_genbas)\n\n""" % (zmat))
        pieces.append(reap_orders)
        pieces.append(r"""
print_out('VPT2 RESULT: linkage {0} for item {1} yields CURRENT MOLECULE being %r\n' % (get_active_molecule().create_psi4_string_from_molecule()))
""".format(linkage, item))
        return ''.join(pieces)

    # Psi4 gradients: molecule block harvested from the ZMAT, then the
    # common input and the reap orders.
    return (p4util.format_molecule_for_input(
                qcdb.cfour.harvest_zmat(zmat).create_psi4_string_from_molecule(),
                name='disp' + item[:3] + item[-3:])
            + inputSansMol
            + reap_orders)
def vpt2_reaprun_files(item, linkage, isSowReap, isC4notP4, isC4fully, zmat, outdir, scrdir, c4scrdir, lowername, kwargs):
    """Provided with the particular displacement number *item* and the
    associated *zmat* file with geometry and *linkage*, returns the
    FJOBARC contents. Depending on the mode settings of *isC4notP4*,
    *isSowReap*, and *isC4fully*, either runs (using *lowername* and
    *kwargs*) or reaps contents. *outdir* is where psi4 was invoked,
    *scrdir* is the psi4 scratch directory, and *c4scrdir* is Cfour
    scratch directory within.
    """
    os.chdir(outdir)  # current_directory
    # Extract qcdb.Molecule at findif orientation
    zmmol = qcdb.cfour.harvest_zmat(zmat)
    # Cfour S/R Direct for gradients: results were delivered as a ready-made
    # FJOBARC file by the sown job, so just read it back in.
    if isC4fully:
        with open('VPT2-' + item + '.fja', 'r') as handle:
            fjobarc = handle.read()
    # Cfour for gradients
    elif isC4notP4:
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT', 'CURRENT MOLECULE'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # Dipole converted from Debye to a.u.
            # NOTE(review): the comprehension variable shadows the *item*
            # parameter; harmless in Py3 where comprehensions have own scope.
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
            c4mol = qcdb.Molecule(results['CURRENT MOLECULE'])
            c4mol.update_geometry()
        # C: Run the job and collect results
        else:
            # Prepare Cfour skeleton calc directory
            os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
            if os.path.exists('scr.' + item):
                shutil.rmtree('scr.' + item)
            os.mkdir('scr.' + item)
            os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
            with open('ZMAT', 'w') as handle:
                handle.write(zmat)
            shutil.copy2('../harm/GENBAS', 'GENBAS')
            #os.chdir(scrdir + '/scr.' + item)
            #run_cfour_module('xja2fja')
            #with open('FJOBARC', 'r') as handle:
            #    fjobarc = handle.read()
            # Run Cfour calc using ZMAT & GENBAS in scratch, outdir redirects to outfile
            os.chdir(outdir)  # current_directory
            core.get_active_molecule().set_name('blank_molecule_psi4_yo')
            energy('cfour', path=c4scrdir + '/scr.' + item)
            # os.chdir(scrdir + '/scr.' + item)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            # Dipole components converted from Debye to a.u.
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
            c4mol = qcdb.Molecule(core.get_active_molecule().create_psi4_string_from_molecule())
            c4mol.update_geometry()
        # Get map btwn ZMAT and C4 orientation, then use it, grad and dipole to forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol), gradient=fjgrd, dipole=fjdip)
    # Psi4 for gradients
    else:
        # Prepare Cfour skeleton calc directory
        os.chdir(scrdir + c4scrdir)  # psi_scratch/cfour
        if os.path.exists('scr.' + item):
            shutil.rmtree('scr.' + item)
        os.mkdir('scr.' + item)
        os.chdir('scr.' + item)  # psi_scratch/cfour/scr.000-004
        with open('ZMAT', 'w') as handle:
            handle.write(zmat)
        shutil.copy2('../harm/GENBAS', 'GENBAS')
        # Run Cfour skeleton calc and extract qcdb.Molecule at needed C4 orientation
        with open('partial.out', 'w') as handle:
            handle.write(run_cfour_module('xjoda'))
            handle.write(run_cfour_module('xvmol'))
            handle.write(run_cfour_module('xvmol2ja'))
        core.print_out('  CFOUR scratch file %s for %s has been read\n' % ('JOBARC (binary)', item))
        c4mol = qcdb.cfour.jajo2mol(qcdb.jajo.getrec(['COORD ', 'ATOMCHRG', 'MAP2ZMAT', 'IFLAGS ']))
        # S/R: Reap results from output file
        if isSowReap:
            isOk, msg, results = reap_job_validate(outdir, 'VPT2', item, linkage,
                ['CURRENT ENERGY', 'CURRENT DIPOLE', 'CURRENT GRADIENT'])
            if not isOk:
                raise ValidationError(msg)
            fje = results['CURRENT ENERGY']
            fjgrd = results['CURRENT GRADIENT']
            # NOTE(review): comprehension variable shadows *item* (see above).
            fjdip = [item / constants.dipmom_au2debye for item in results['CURRENT DIPOLE']]
        # C: Run the job and collect results
        else:
            core.IO.set_default_namespace(item)
            molecule = geometry(zmmol.create_psi4_string_from_molecule(), 'disp-' + item)
            molecule.update_geometry()
            gradient(lowername, **kwargs)
            fje = core.variable('CURRENT ENERGY')
            fjgrd = p4util.mat2arr(core.get_gradient())
            fjdip = [core.variable('CURRENT DIPOLE X') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Y') / constants.dipmom_au2debye,
                     core.variable('CURRENT DIPOLE Z') / constants.dipmom_au2debye]
        # Transform results into C4 orientation (defined by c4mol) & forge FJOBARC file
        fjobarc = qcdb.cfour.format_fjobarc(fje,
            *qcdb.cfour.backtransform(chgeMol=zmmol, permMol=c4mol, chgeGrad=fjgrd, chgeDip=fjdip))
    return fjobarc
def vpt2_instructions(stage, dir, zmats):
    """Stores all the instructions to the user for running
    :py:func:`~wrappers_cfour.vpt2` in sowreap mode. Depending on the
    *stage* ('harmonic' or 'anharmonic'), pieces together instruction
    strings for the appropriate *stage* individualized by working
    directory *dir* and sown inputs *zmats* information.
    """
    # NOTE(review): parameter *dir* shadows the builtin; kept as-is for
    # interface compatibility with existing callers.
    # One "psi4 <in> <out>" line per sown job; reused by both sow stages.
    stepFiles = ''
    for zm12 in sorted(zmats):
        stepFiles += """ psi4 %-27s %-27s\n""" % ('VPT2-' + zm12 + '.in', 'VPT2-' + zm12 + '.out')
    # Step (0): the three files the user must keep untouched across invocations.
    step0 = """
The vpt2 sow/reap procedure has been selected through mode='sowreap'. This
output file, the corresponding input file, and the data persistence file
must not be edited by the user over the course of the sow/reap procedure.
Throughout, psi4 can be invoked to move to the next stage of the procedure
or to tally up the 'sown' jobs. This output file is overwritten each time
psi4 is invoked, but all results and instructions accumulate.
This procedure involves two stages of distributed calculations, harmonic and
anharmonic, and a mimimum of three invokations of psi4 on the original input
file (including the one that initially generated this text). From the input
geometry (0), displacements are generated for which gradients are required.
Input files for these are 'sown' in the current directory (1). Upon
completion, their output files are 'reaped' into a harmonic force field (2).
At displacements along the normal coordinates, further displacements are
generated for which gradients are required. Input files for these are again
'sown' in the current directory (3). Upon completion, their output files are
'reaped' into an anharmonic force field (4), terminating the vpt2 procedure.
Follow the instructions below to continue.
(0) Read Only
--------------
%s
%s
%s
""" % (dir + '/' + os.path.splitext(core.outfile_name())[0] + '.in',
       dir + '/' + core.outfile_name(),
       dir + '/' + os.path.splitext(core.outfile_name())[0] + '.shelf')
    step1 = """
(1) Sow
--------
Run all of the VPT2-000-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step2 = """
(2) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic frequency stage in this output file. It
will also supply the next set of instructions.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    step3 = """
(3) Sow
--------
Run all of the VPT2-*-*.in input files on any variety of computer
architecture. The output file names must be as given below (default).
"""
    step4 = """
(4) Reap
---------
Gather all the resulting output files in this directory along with the
three read-only files from (0). Invoke psi4 again. The job will be
trivial in length (unless sto-3g integrals on the molecule are costly)
and give results for the harmonic and anharmonic frequency stages in this
output file.
psi4 %-27s %-27s
""" % (os.path.splitext(core.outfile_name())[0] + '.in', core.outfile_name())
    if stage == 'harmonic':
        instructions = step0 + step1 + stepFiles + step2
    elif stage == 'anharmonic':
        instructions = step0 + step3 + stepFiles + step4
    # NOTE(review): any other *stage* value leaves `instructions` unbound and
    # raises NameError below; all internal callers pass one of the two values.
    return instructions
def sown_jobs_status(dir, prefix, zmats, validate_func=None, linkage=None, keys=None):
    """Evaluate the output file status of jobs in *zmats* which should
    exist at *dir* + '/' + prefix + '-' + job + '.out'. Returns string with
    formatted summary of job status and boolean of whether all complete.
    Return boolean *isOk* signals whether all *zmats* have completed and,
    if *validate_func* present, are validated (via
    ``validate_func(dir, prefix, job, linkage, keys)``).
    """
    # NOTE(review): parameter *dir* shadows the builtin; kept for interface
    # compatibility.
    isOk = True
    msgError = ''
    instructions = '\n'
    instructions += p4util.banner(prefix + ' Status: ' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), strNotOutfile=True)
    instructions += '\n'
    for job in sorted(zmats):
        outfile = dir + '/' + prefix + '-' + job + '.out'
        fjafile = dir + '/' + prefix + '-' + job + '.fja'
        # Columns: job name, Waiting, Running, Completed, validation verdict
        formatArgs = [prefix + '-' + job, '', '', '', '']
        if os.path.isfile(outfile):
            with open(outfile, 'r') as handle:
                for line in handle:
                    # Psi4's end-of-output tagline marks a finished job.
                    if line.find('Buy a developer a beer!') > -1:
                        formatArgs[3] = 'Completed'
                        # Fixed: honor the *validate_func* argument rather than
                        # hard-coding the module-level reap_job_validate, which
                        # silently ignored whatever the caller passed in.
                        if validate_func is not None:
                            isOkJob, msg, temp = validate_func(dir, prefix, job, linkage, keys)
                            if isOkJob:
                                formatArgs[4] = '& Validated'
                            else:
                                isOk = False
                                msgError += msg
                                formatArgs[4] = 'INVALID'
                        break
                else:
                    # No completion tagline anywhere in the file: still running.
                    isOk = False
                    formatArgs[2] = 'Running'
        elif os.path.isfile(fjafile):
            # A pre-supplied FJOBARC counts as complete (no validation pass).
            formatArgs[3] = 'Completed'
        else:
            isOk = False
            formatArgs[1] = 'Waiting'
        instructions += """ {0:<27} {1:^10} {2:^10} {3:^10} {4:^10}\n""".format(*formatArgs)
    instructions += '\n' + msgError + '\n\n'
    return isOk, instructions
def reap_job_validate(dir, prefix, item, linkage, keys):
    """For a given output file whose path is constructed with
    *dir* + '/' + *prefix* + '-' + *item* + '.out', tests that the file
    exists and has *prefix* RESULTS lines for each piece of information
    requested in list *keys* and that those lines correspond to the
    appropriate *linkage* and *item*. Returns *keys* along with their
    scanned values in dict *reapings*, along with error and success
    messages in *instructions* and a boolean *isOk* indicating whether
    all *keys* reaped successfully.
    """
    isOk = True
    instructions = ''
    reapings = {}
    outfile = dir + '/' + prefix + '-' + item + '.out'
    try:
        with open(outfile, 'r') as handle:
            for line in handle:
                # Result lines have the shape:
                #   "<prefix> RESULT: linkage <L> for item <I> yields <KEY> being <VALUE>"
                if line.find(prefix + ' RESULT:') == 0:
                    sline = line.split()
                    # Reject results sown by a different run or displacement.
                    if sline[2:7] == ['linkage', str(linkage), 'for', 'item', item]:
                        yieldsAt = line.find('yields')
                        beingAt = line.find('being')
                        if beingAt > yieldsAt > -1:
                            key = line[yieldsAt + 6:beingAt].strip()
                            val = line[beingAt + 5:].strip()
                            if key in keys:
                                # SECURITY NOTE(review): eval of text read from
                                # the output file; tolerated only because sown
                                # outputs are produced by this same workflow.
                                # Do not reuse on untrusted files.
                                reapings[key] = eval(val)
                            #core.print_out('  CFOUR scratch file %s for %s has been read\n' % ('JOBARC', zm12))
                        else:
                            isOk = False
                            instructions += """Outfile file %s
has corrupted sowreap result line:\n%s\n\n""" % (outfile, line)
                    else:
                        isOk = False
                        instructions += """Outfile file %s
has sowreap result of either incompatible linkage (observed: %s, expected: %s)
or incompatible job affiliation (observed: %s, expected: %s).\n\n""" % \
                            (outfile, sline[3], linkage, sline[6], item)
            else:
                # for/else: the loop never breaks, so this completeness check
                # always runs once the whole file has been scanned.
                if len(reapings) != len(keys):
                    isOk = False
                    instructions += """Output file %s
has missing results (observed: %s, expected: %s).\n\n""" % \
                        (outfile, reapings.keys(), keys)
    except IOError:
        isOk = False
        instructions += """Output file %s
that was judged present and complete at the beginning of this
job is now missing. Replace it and invoke psi4 again.\n\n""" % (outfile)
    # return file contents in instructions
    return isOk, instructions, reapings
|
psi4/psi4
|
psi4/driver/procrouting/wrappers_cfour.py
|
Python
|
lgpl-3.0
| 36,339
|
[
"CFOUR",
"Psi4"
] |
5491964fc66758d5c9f2ab33ffe32e746d4d1001dc0112cb0743491b12482547
|
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import time
import ipaddr

# Compatibility function to cast str to bytes objects
if issubclass(ipaddr.Bytes, str):
    # Python 2: ipaddr.Bytes is str itself, so no conversion is needed.
    _cb = ipaddr.Bytes
else:
    # Python 3: encode via 'charmap' so each code point maps to one byte.
    _cb = lambda bytestr: bytes(bytestr, 'charmap')
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
    # Fixtures shared by most tests: a /24 IPv4 network, an IPv4 network
    # given in hostmask notation, and a /64 IPv6 network.
    self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
    self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
    self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
def tearDown(self):
    """Drop the fixtures created in setUp."""
    # Fixed: the original also did `del(self)`, which only unbinds the
    # local name `self` inside this method and has no effect on the
    # instance; removed as dead code.
    del self.ipv4
    del self.ipv4_hostmask
    del self.ipv6
def testRepr(self):
    """repr() of a single-host network shows the maximal prefix length."""
    cases = (
        (ipaddr.IPv4Network, '1.2.3.4', "IPv4Network('1.2.3.4/32')"),
        (ipaddr.IPv6Network, '::1', "IPv6Network('::1/128')"),
    )
    for factory, arg, expected in cases:
        self.assertEqual(expected, repr(factory(arg)))
def testAutoMasking(self):
    """masked() zeroes out the host bits of the network address."""
    cases = (
        (ipaddr.IPv4Network, '1.1.1.255/24', '1.1.1.0/24'),
        (ipaddr.IPv6Network, '2000:cafe::efac:100/96', '2000:cafe::/96'),
    )
    for factory, raw, masked in cases:
        self.assertEqual(factory(masked), factory(raw).masked())
# issue57
def testAddressIntMath(self):
    """Adding/subtracting an int moves an address by that many hosts."""
    v4 = ipaddr.IPv4Address('1.1.1.1')
    self.assertEqual(v4 + 255, ipaddr.IPv4Address('1.1.2.0'))
    self.assertEqual(v4 - 256, ipaddr.IPv4Address('1.1.0.1'))
    span = 2**16 - 2
    self.assertEqual(ipaddr.IPv6Address('::1') + span,
                     ipaddr.IPv6Address('::ffff'))
    self.assertEqual(ipaddr.IPv6Address('::ffff') - span,
                     ipaddr.IPv6Address('::1'))
def testInvalidStrings(self):
    """Malformed address strings are rejected by every constructor."""
    # Strings IPAddress must reject with ValueError.
    bad_addresses = [
        "", "016.016.016.016", "016.016.016", "016.016", "016",
        "000.000.000.000", "000",
        "0x0a.0x0a.0x0a.0x0a", "0x0a.0x0a.0x0a", "0x0a.0x0a", "0x0a",
        "42.42.42.42.42", "42.42.42", "42.42", "42",
        "42..42.42", "42..42.42.42", "42.42.42.42.", "42.42.42.42...",
        ".42.42.42.42", "...42.42.42.42",
        "42.42.42.-0", "42.42.42.+0", ".", "...",
        "bogus", "bogus.com", "192.168.0.1.com",
        "12345.67899.-54321.-98765", "257.0.0.0", "42.42.42.-42",
        "3ffe::1.net", "3ffe::1::1", "1::2::3::4:5",
        "::7:6:5:4:3:2:", ":6:5:4:3:2:1::", "2001::db:::1",
        "FEDC:9878", "+1.+2.+3.4", "1.2.3.4e0",
        "::7:6:5:4:3:2:1:0", "7:6:5:4:3:2:1:0::", "9:8:7:6:5:4:3::2:1",
        "0:1:2:3::4:5:6:7", "3ffe:0:0:0:0:0:0:0:1",
        "3ffe::10000", "3ffe::goog", "3ffe::-0", "3ffe::+0", "3ffe::-1",
        ":", ":::", "::1.2.3", "::1.2.3.4.5", "::1.2.3.4:",
        "1.2.3.4::", "2001:db8::1:", ":2001:db8::1",
        ":1:2:3:4:5:6:7", "1:2:3:4:5:6:7:", ":1:2:3:4:5:6:",
        "192.0.2.1/32", "2001:db8::1/128",
    ]
    for ip_str in bad_addresses:
        self.assertRaises(ValueError, ipaddr.IPAddress, ip_str)
    # Version-specific constructors raise AddressValueError instead.
    for bad_v4_net in ('', 'google.com', '::1.2.3.4'):
        self.assertRaises(ipaddr.AddressValueError,
                          ipaddr.IPv4Network, bad_v4_net)
    for bad_v6_net in ('', 'google.com', '1.2.3.4',
                       'cafe:cafe::/128/190', '1234:axy::b'):
        self.assertRaises(ipaddr.AddressValueError,
                          ipaddr.IPv6Network, bad_v6_net)
    for bad_v6_addr in ('1234:axy::b', '2001:db8:::1', '2001:888888::1'):
        self.assertRaises(ipaddr.AddressValueError,
                          ipaddr.IPv6Address, bad_v6_addr)
    # Internal helpers reject junk too.
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Address(1)._ip_int_from_string,
                      '1.a.2.3')
    self.assertEqual(False, ipaddr.IPv4Network(1)._is_hostmask('1.a.2.3'))
def testGetNetwork(self):
    """.network/.hostmask reflect the masked base address."""
    self.assertEqual((int(self.ipv4.network), str(self.ipv4.network)),
                     (16909056, '1.2.3.0'))
    self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
    self.assertEqual(
        (int(self.ipv6.network), str(self.ipv6.network)),
        (42540616829182469433403647294022090752, '2001:658:22a:cafe::'))
    self.assertEqual(str(self.ipv6.hostmask), '::ffff:ffff:ffff:ffff')
def testBadVersionComparison(self):
    """Ordering comparisons across IP versions must raise TypeError."""
    # These should always raise TypeError
    v4addr = ipaddr.IPAddress('1.1.1.1')
    v4net = ipaddr.IPNetwork('1.1.1.1')
    v6addr = ipaddr.IPAddress('::1')
    # Fixed: was ipaddr.IPAddress('::1'), so the net-vs-net comparisons
    # below were really addr-vs-net and never exercised network ordering.
    v6net = ipaddr.IPNetwork('::1')
    self.assertRaises(TypeError, v4addr.__lt__, v6addr)
    self.assertRaises(TypeError, v4addr.__gt__, v6addr)
    self.assertRaises(TypeError, v4net.__lt__, v6net)
    self.assertRaises(TypeError, v4net.__gt__, v6net)
    self.assertRaises(TypeError, v6addr.__lt__, v4addr)
    self.assertRaises(TypeError, v6addr.__gt__, v4addr)
    self.assertRaises(TypeError, v6net.__lt__, v4net)
    self.assertRaises(TypeError, v6net.__gt__, v4net)
def testMixedTypeComparison(self):
    """Address-vs-network ordering is undefined; sorting needs a key."""
    v4addr = ipaddr.IPAddress('1.1.1.1')
    v4net = ipaddr.IPNetwork('1.1.1.1/32')
    v6addr = ipaddr.IPAddress('::1')
    v6net = ipaddr.IPNetwork('::1/128')

    self.assertFalse(v4net.__contains__(v6net))
    self.assertFalse(v6net.__contains__(v4net))

    # Any < or > between an address and a same-version network raises.
    for addr, net in ((v4addr, v4net), (v6addr, v6net)):
        self.assertRaises(TypeError, lambda: addr < net)
        self.assertRaises(TypeError, lambda: addr > net)
        self.assertRaises(TypeError, lambda: net < addr)
        self.assertRaises(TypeError, lambda: net > addr)

    # with get_mixed_type_key, you can sort addresses and network.
    self.assertEqual([v4addr, v4net],
                     sorted([v4net, v4addr], key=ipaddr.get_mixed_type_key))
    self.assertEqual([v6addr, v6net],
                     sorted([v6net, v6addr], key=ipaddr.get_mixed_type_key))
def testIpFromInt(self):
    # Networks built from the integer form keep the same .ip; ints outside
    # the version's range are rejected.
    self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Network, 2**32)
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Network, -1)
    ipv4 = ipaddr.IPNetwork('1.2.3.4')
    ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
    # Round-trip: IPNetwork(int(net)) reproduces the network.
    self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
    self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
    v6_int = 42540616829182469433547762482097946625
    self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv6Network, 2**128)
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv6Network, -1)
    # Version is inferred from the address object handed to IPNetwork.
    self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
    self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
def testIpFromPacked(self):
    # Construct networks from packed (binary) representations; _cb maps a
    # str of code points onto the equivalent bytes object (see module top).
    ip = ipaddr.IPNetwork
    self.assertEqual(self.ipv4.ip,
                     ip(_cb('\x01\x02\x03\x04')).ip)
    self.assertEqual(ip('255.254.253.252'),
                     ip(_cb('\xff\xfe\xfd\xfc')))
    # Packed input must be exactly 4 bytes (v4) or 16 bytes (v6).
    self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 3))
    self.assertRaises(ValueError, ipaddr.IPNetwork, _cb('\x00' * 5))
    self.assertEqual(self.ipv6.ip,
                     ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
                            '\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
    self.assertEqual(ip('ffff:2:3:4:ffff::'),
                     ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
                            '\xff\xff' + '\x00' * 6)))
    self.assertEqual(ip('::'),
                     ip(_cb('\x00' * 16)))
    self.assertRaises(ValueError, ip, _cb('\x00' * 15))
    self.assertRaises(ValueError, ip, _cb('\x00' * 17))
def testGetIp(self):
    """.ip exposes the host address, as both int and string."""
    self.assertEqual(int(self.ipv4.ip), 16909060)
    self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
    self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
    self.assertEqual(int(self.ipv6.ip),
                     42540616829182469433547762482097946625)
    self.assertEqual(str(self.ipv6.ip),
                     '2001:658:22a:cafe:200::1')

def testGetNetmask(self):
    """.netmask converts between int/string forms; IPv6 reports prefixlen."""
    self.assertEqual(int(self.ipv4.netmask), 4294967040L)
    self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
    self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
    self.assertEqual(int(self.ipv6.netmask),
                     340282366920938463444927863358058659840)
    self.assertEqual(self.ipv6.prefixlen, 64)

def testZeroNetmask(self):
    """A /0 network has an all-zero netmask and '0' is a valid mask string."""
    ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
    self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
    self.assertTrue(ipv4_zero_netmask._is_valid_netmask(str(0)))
    ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
    self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
    self.assertTrue(ipv6_zero_netmask._is_valid_netmask(str(0)))

def testGetBroadcast(self):
    """.broadcast is the highest address in the network."""
    self.assertEqual(int(self.ipv4.broadcast), 16909311L)
    self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
    self.assertEqual(int(self.ipv6.broadcast),
                     42540616829182469451850391367731642367)
    self.assertEqual(str(self.ipv6.broadcast),
                     '2001:658:22a:cafe:ffff:ffff:ffff:ffff')

def testGetPrefixlen(self):
    """.prefixlen reflects the CIDR length of the fixtures (/24 and /64)."""
    self.assertEqual(self.ipv4.prefixlen, 24)
    self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
    """supernet() widens the prefix by one bit; a /0 supernets to itself."""
    self.assertEqual(self.ipv4.supernet().prefixlen, 23)
    self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
    self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
                     ipaddr.IPv4Network('0.0.0.0/0'))
    self.assertEqual(self.ipv6.supernet().prefixlen, 63)
    self.assertEqual(str(self.ipv6.supernet().network),
                     '2001:658:22a:cafe::')
    self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
                     ipaddr.IPv6Network('::0/0'))

def testGetSupernet3(self):
    """supernet(3) widens the prefix by three bits."""
    self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
    self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
    self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
    self.assertEqual(str(self.ipv6.supernet(3).network),
                     '2001:658:22a:caf8::')

def testGetSupernet4(self):
    """prefixlen_diff and new_prefix are mutually exclusive; new_prefix
    must be shorter than the current prefix when supernetting."""
    self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
                      new_prefix=1)
    self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
    self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
                     self.ipv4.supernet(new_prefix=22))
    self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
                      new_prefix=1)
    self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
    self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
                     self.ipv6.supernet(new_prefix=62))

def testIterSubnets(self):
    """iter_subnets() yields the same networks subnet() returns as a list."""
    self.assertEqual(self.ipv4.subnet(), list(self.ipv4.iter_subnets()))
    self.assertEqual(self.ipv6.subnet(), list(self.ipv6.iter_subnets()))

def testIterHosts(self):
    """iterhosts() on a /31 yields both addresses of the pair."""
    self.assertEqual([ipaddr.IPv4Address('2.0.0.0'),
                      ipaddr.IPv4Address('2.0.0.1')],
                     list(ipaddr.IPNetwork('2.0.0.0/31').iterhosts()))

def testFancySubnetting(self):
    """subnet() accepts new_prefix as an alternative to prefixlen_diff;
    passing both, or a shorter new_prefix, raises ValueError."""
    self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
                     sorted(self.ipv4.subnet(new_prefix=27)))
    self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
    self.assertRaises(ValueError, self.ipv4.subnet,
                      prefixlen_diff=3, new_prefix=27)
    self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
                     sorted(self.ipv6.subnet(new_prefix=68)))
    self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
    self.assertRaises(ValueError, self.ipv6.subnet,
                      prefixlen_diff=4, new_prefix=68)
def testGetSubnet(self):
    """subnet() splits the network into two halves with prefixlen + 1."""
    self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
    self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
    self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
    self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)

def testGetSubnetForSingle32(self):
    """A /32 subnets to itself regardless of the requested diff."""
    ip = ipaddr.IPv4Network('1.2.3.4/32')
    subnets1 = [str(x) for x in ip.subnet()]
    subnets2 = [str(x) for x in ip.subnet(2)]
    self.assertEqual(subnets1, ['1.2.3.4/32'])
    self.assertEqual(subnets1, subnets2)

def testGetSubnetForSingle128(self):
    """A /128 subnets to itself regardless of the requested diff."""
    ip = ipaddr.IPv6Network('::1/128')
    subnets1 = [str(x) for x in ip.subnet()]
    subnets2 = [str(x) for x in ip.subnet(2)]
    self.assertEqual(subnets1, ['::1/128'])
    self.assertEqual(subnets1, subnets2)

def testSubnet2(self):
    """subnet(2) yields the four quarter-networks, in address order."""
    ips = [str(x) for x in self.ipv4.subnet(2)]
    self.assertEqual(
        ips,
        ['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
    ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
    self.assertEqual(
        ipsv6,
        ['2001:658:22a:cafe::/66',
         '2001:658:22a:cafe:4000::/66',
         '2001:658:22a:cafe:8000::/66',
         '2001:658:22a:cafe:c000::/66'])

def testSubnetFailsForLargeCidrDiff(self):
    """A diff that would exceed max_prefixlen raises ValueError."""
    self.assertRaises(ValueError, self.ipv4.subnet, 9)
    self.assertRaises(ValueError, self.ipv6.subnet, 65)

def testSupernetFailsForLargeCidrDiff(self):
    """A diff that would go below /0 raises ValueError."""
    self.assertRaises(ValueError, self.ipv4.supernet, 25)
    self.assertRaises(ValueError, self.ipv6.supernet, 65)

def testSubnetFailsForNegativeCidrDiff(self):
    """Negative prefixlen_diff is rejected."""
    self.assertRaises(ValueError, self.ipv4.subnet, -1)
    self.assertRaises(ValueError, self.ipv6.subnet, -1)

def testGetNumHosts(self):
    """.numhosts is 2**(max_prefixlen - prefixlen)."""
    self.assertEqual(self.ipv4.numhosts, 256)
    self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
    self.assertEqual(self.ipv4.supernet().numhosts, 512)
    self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
    self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
    self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
def testContains(self):
    """`in` accepts sub-networks and bare addresses; a network is in itself."""
    self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
    self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
    self.assertTrue(self.ipv4 in self.ipv4)
    self.assertTrue(self.ipv6 in self.ipv6)
    # We can test addresses and string as well.
    addr1 = ipaddr.IPv4Address('1.2.3.37')
    self.assertTrue(addr1 in self.ipv4)
    # issue 61, bad network comparison on like-ip'd network objects
    # with identical broadcast addresses.
    self.assertFalse(ipaddr.IPv4Network('1.1.0.0/16').__contains__(
        ipaddr.IPv4Network('1.0.0.0/15')))

def testBadAddress(self):
    """Malformed address strings raise AddressValueError."""
    self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv4Network,
                      'poop')
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Network, '1.2.3.256')
    self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
                      'poopv6')
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Network, '1.2.3.4/32/24')
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv4Network, '10/8')
    self.assertRaises(ipaddr.AddressValueError,
                      ipaddr.IPv6Network, '10/8')

def testBadNetMask(self):
    """Malformed or out-of-range masks raise NetmaskValueError."""
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv4Network, '1.2.3.4/')
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv4Network, '1.2.3.4/33')
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
    # Non-contiguous masks are rejected too.
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv4Network, '1.1.1.1/240.255.0.0')
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv6Network, '::1/')
    self.assertRaises(ipaddr.NetmaskValueError,
                      ipaddr.IPv6Network, '::1/129')
def testNth(self):
    """net[n] returns the n-th address; past-the-end raises IndexError."""
    self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
    self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
    self.assertEqual(str(self.ipv6[5]),
                     '2001:658:22a:cafe::5')

def testGetitem(self):
    """Indexing (including negative) matches iteration order."""
    # http://code.google.com/p/ipaddr-py/issues/detail?id=15
    addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
    self.assertEqual(28, addr.prefixlen)
    addr_list = list(addr)
    self.assertEqual('172.31.255.128', str(addr_list[0]))
    self.assertEqual('172.31.255.128', str(addr[0]))
    self.assertEqual('172.31.255.143', str(addr_list[-1]))
    self.assertEqual('172.31.255.143', str(addr[-1]))
    self.assertEqual(addr_list[-1], addr[-1])
def testEqual(self):
    """Network equality: same address+prefix, or a network whose prefix
    covers a bare address of the same value; unrelated types are unequal."""
    self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
    self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
    self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
    self.assertFalse(self.ipv4 == '')
    self.assertFalse(self.ipv4 == [])
    self.assertFalse(self.ipv4 == 2)
    self.assertTrue(ipaddr.IPNetwork('1.1.1.1/32') ==
                    ipaddr.IPAddress('1.1.1.1'))
    self.assertTrue(ipaddr.IPNetwork('1.1.1.1/24') ==
                    ipaddr.IPAddress('1.1.1.1'))
    self.assertFalse(ipaddr.IPNetwork('1.1.1.0/24') ==
                     ipaddr.IPAddress('1.1.1.1'))
    self.assertTrue(self.ipv6 ==
                    ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
    self.assertTrue(ipaddr.IPNetwork('::1/128') ==
                    ipaddr.IPAddress('::1'))
    self.assertTrue(ipaddr.IPNetwork('::1/127') ==
                    ipaddr.IPAddress('::1'))
    self.assertFalse(ipaddr.IPNetwork('::0/127') ==
                     ipaddr.IPAddress('::1'))
    self.assertFalse(self.ipv6 ==
                     ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
    self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
    self.assertFalse(self.ipv6 == '')
    self.assertFalse(self.ipv6 == [])
    self.assertFalse(self.ipv6 == 2)
def testNotEqual(self):
    """__ne__ is the exact negation of __eq__ for networks.

    Mirrors testEqual: same-version networks differing only in prefix
    length are unequal, and comparison against unrelated types
    ('', [], int) yields inequality rather than raising.
    """
    self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
    self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
    self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
    self.assertTrue(self.ipv4 != '')
    self.assertTrue(self.ipv4 != [])
    self.assertTrue(self.ipv4 != 2)
    # NOTE(review): removed an unused local (addr2, an IPAddress that
    # was never referenced by any assertion in this method).
    self.assertFalse(self.ipv6 !=
                     ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
    self.assertTrue(self.ipv6 !=
                    ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
    self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
    self.assertTrue(self.ipv6 != '')
    self.assertTrue(self.ipv6 != [])
    self.assertTrue(self.ipv6 != 2)
def testSlash32Constructor(self):
    """Dotted-quad netmasks normalize to CIDR in str()."""
    self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
                     '1.2.3.4/32')

def testSlash128Constructor(self):
    """A /128 round-trips through str() unchanged."""
    self.assertEqual(str(ipaddr.IPv6Network('::1/128')),
                     '::1/128')

def testSlash0Constructor(self):
    """An all-zero dotted mask normalizes to /0."""
    self.assertEqual(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
                     '1.2.3.4/0')
def testCollapsing(self):
    """collapse_address_list merges adjacent/duplicate addresses and
    networks into the smallest covering set; mixed versions raise."""
    # test only IP addresses including some duplicates
    ip1 = ipaddr.IPv4Address('1.1.1.0')
    ip2 = ipaddr.IPv4Address('1.1.1.1')
    ip3 = ipaddr.IPv4Address('1.1.1.2')
    ip4 = ipaddr.IPv4Address('1.1.1.3')
    ip5 = ipaddr.IPv4Address('1.1.1.4')
    ip6 = ipaddr.IPv4Address('1.1.1.0')
    # check that addresses are subsumed properly.
    collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
    self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
                                 ipaddr.IPv4Network('1.1.1.4/32')])
    # test a mix of IP addresses and networks including some duplicates
    ip1 = ipaddr.IPv4Address('1.1.1.0')
    ip2 = ipaddr.IPv4Address('1.1.1.1')
    ip3 = ipaddr.IPv4Address('1.1.1.2')
    ip4 = ipaddr.IPv4Address('1.1.1.3')
    ip5 = ipaddr.IPv4Network('1.1.1.4/30')
    ip6 = ipaddr.IPv4Network('1.1.1.4/30')
    # check that addresses are subsumed properly.
    collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
    self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
    # test only IP networks
    ip1 = ipaddr.IPv4Network('1.1.0.0/24')
    ip2 = ipaddr.IPv4Network('1.1.1.0/24')
    ip3 = ipaddr.IPv4Network('1.1.2.0/24')
    ip4 = ipaddr.IPv4Network('1.1.3.0/24')
    ip5 = ipaddr.IPv4Network('1.1.4.0/24')
    # stored in no particular order b/c we want CollapseAddr to call [].sort
    ip6 = ipaddr.IPv4Network('1.1.0.0/22')
    # check that addresses are subsumed properly.
    collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
    self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
                                 ipaddr.IPv4Network('1.1.4.0/24')])
    # test that two addresses are supernet'ed properly
    collapsed = ipaddr.collapse_address_list([ip1, ip2])
    self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
    # test same IP networks
    ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
    self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
                     [ip_same1])
    # test same IP addresses
    ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
    self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
                     [ipaddr.IPNetwork('1.1.1.1/32')])
    ip1 = ipaddr.IPv6Network('::2001:1/100')
    ip2 = ipaddr.IPv6Network('::2002:1/120')
    ip3 = ipaddr.IPv6Network('::2001:1/96')
    # test that ipv6 addresses are subsumed properly.
    collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
    self.assertEqual(collapsed, [ip3])
    # the toejam test: mixing v4 and v6 raises TypeError.
    ip1 = ipaddr.IPAddress('1.1.1.1')
    ip2 = ipaddr.IPAddress('::1')
    self.assertRaises(TypeError, ipaddr.collapse_address_list,
                      [ip1, ip2])
def testSummarizing(self):
    """summarize_address_range converts [first, last] into a minimal list
    of networks; misordered, non-address or mixed-version input raises."""
    summarize = ipaddr.summarize_address_range
    ip1 = ipaddr.IPAddress('1.1.1.0')
    ip2 = ipaddr.IPAddress('1.1.1.255')
    # test a /24 is summarized properly
    self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
    # test an IPv4 range that isn't on a network byte boundary
    ip2 = ipaddr.IPAddress('1.1.1.8')
    self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
                                           ipaddr.IPNetwork('1.1.1.8')])
    ip1 = ipaddr.IPAddress('1::')
    ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
    # test a IPv6 is summarized properly
    self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
    # test an IPv6 range that isn't on a network byte boundary
    ip2 = ipaddr.IPAddress('2::')
    self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
                                           ipaddr.IPNetwork('2::/128')])
    # test exception raised when first is greater than last
    self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
                      ipaddr.IPAddress('1.1.0.0'))
    # test exception raised when first and last aren't IP addresses
    self.assertRaises(TypeError, summarize,
                      ipaddr.IPNetwork('1.1.1.0'),
                      ipaddr.IPNetwork('1.1.0.0'))
    self.assertRaises(TypeError, summarize,
                      ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0'))
    # test exception raised when first and last are not same version
    self.assertRaises(TypeError, summarize, ipaddr.IPAddress('::'),
                      ipaddr.IPNetwork('1.1.0.0'))

def testAddressComparison(self):
    """Addresses order numerically within a version; <= is reflexive."""
    self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
                    ipaddr.IPAddress('1.1.1.1'))
    self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
                    ipaddr.IPAddress('1.1.1.2'))
    self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
    self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
def testNetworkComparison(self):
    """Ordering of networks: by network address, then by prefix length.

    Also checks that cross-version comparison raises TypeError, that a
    network never compares less/greater than itself (issues 19/28), and
    that comparing a network against a bare address raises TypeError.
    """
    # ip1 and ip2 have the same network address
    ip1 = ipaddr.IPv4Network('1.1.1.0/24')
    ip2 = ipaddr.IPv4Network('1.1.1.1/24')
    ip3 = ipaddr.IPv4Network('1.1.2.0/24')
    self.assertTrue(ip1 < ip3)
    self.assertTrue(ip3 > ip2)
    self.assertEqual(ip1.compare_networks(ip2), 0)
    self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
    self.assertEqual(ip1.compare_networks(ip3), -1)
    self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
    ip1 = ipaddr.IPv6Network('2001::2000/96')
    ip2 = ipaddr.IPv6Network('2001::2001/96')
    ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
    self.assertTrue(ip1 < ip3)
    self.assertTrue(ip3 > ip2)
    self.assertEqual(ip1.compare_networks(ip2), 0)
    self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
    self.assertEqual(ip1.compare_networks(ip3), -1)
    self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
    # Test comparing different protocols.
    # Should always raise a TypeError.
    ipv6 = ipaddr.IPv6Network('::/0')
    ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
    self.assertRaises(TypeError, ipv4.__lt__, ipv6)
    self.assertRaises(TypeError, ipv4.__gt__, ipv6)
    self.assertRaises(TypeError, ipv6.__lt__, ipv4)
    self.assertRaises(TypeError, ipv6.__gt__, ipv4)
    # Regression test for issue 19.
    ip1 = ipaddr.IPNetwork('10.1.2.128/25')
    self.assertFalse(ip1 < ip1)
    self.assertFalse(ip1 > ip1)
    ip2 = ipaddr.IPNetwork('10.1.3.0/24')
    self.assertTrue(ip1 < ip2)
    self.assertFalse(ip2 < ip1)
    self.assertFalse(ip1 > ip2)
    self.assertTrue(ip2 > ip1)
    ip3 = ipaddr.IPNetwork('10.1.3.0/25')
    self.assertTrue(ip2 < ip3)
    self.assertFalse(ip3 < ip2)
    self.assertFalse(ip2 > ip3)
    self.assertTrue(ip3 > ip2)
    # Regression test for issue 28.
    ip1 = ipaddr.IPNetwork('10.10.10.0/31')
    ip2 = ipaddr.IPNetwork('10.10.10.0')
    ip3 = ipaddr.IPNetwork('10.10.10.2/31')
    ip4 = ipaddr.IPNetwork('10.10.10.2')
    # Renamed from `sorted`, which shadowed the sorted() builtin.
    expected = [ip1, ip2, ip3, ip4]
    unsorted = [ip2, ip4, ip1, ip3]
    unsorted.sort()
    self.assertEqual(expected, unsorted)
    unsorted = [ip4, ip1, ip3, ip2]
    unsorted.sort()
    self.assertEqual(expected, unsorted)
    self.assertRaises(TypeError, ip1.__lt__, ipaddr.IPAddress('10.10.10.0'))
    self.assertRaises(TypeError, ip2.__lt__, ipaddr.IPAddress('10.10.10.0'))
    # <=, >=
    self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
                    ipaddr.IPNetwork('1.1.1.1'))
    self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
                    ipaddr.IPNetwork('1.1.1.2'))
    self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
                     ipaddr.IPNetwork('1.1.1.1'))
    self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
    self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
    self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
def testStrictNetworks(self):
    """strict=True rejects a network string with host bits set."""
    self.assertRaises(ValueError, ipaddr.IPNetwork, '192.168.1.1/24',
                      strict=True)
    self.assertRaises(ValueError, ipaddr.IPNetwork, '::1/120', strict=True)

def testOverlaps(self):
    """overlaps() is true iff the address ranges intersect."""
    other = ipaddr.IPv4Network('1.2.3.0/30')
    other2 = ipaddr.IPv4Network('1.2.2.0/24')
    other3 = ipaddr.IPv4Network('1.2.2.64/26')
    self.assertTrue(self.ipv4.overlaps(other))
    self.assertFalse(self.ipv4.overlaps(other2))
    self.assertTrue(other2.overlaps(other3))

def testEmbeddedIpv4(self):
    """IPv4-compatible/mapped forms parse; malformed embeddings raise."""
    ipv4_string = '192.168.0.1'
    ipv4 = ipaddr.IPv4Network(ipv4_string)
    v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
    self.assertEqual(int(v4compat_ipv6.ip), int(ipv4.ip))
    v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
    self.assertNotEqual(v4mapped_ipv6.ip, ipv4.ip)
    self.assertRaises(ipaddr.AddressValueError, ipaddr.IPv6Network,
                      '2001:1.1.1.1:1.1.1.1')

# Issue 67: IPv6 with embedded IPv4 address not recognized.
def testIPv6AddressTooLarge(self):
    """Embedded-IPv4 forms equal their pure-hex equivalents.

    NOTE(review): the method name does not match the assertions, which
    exercise RFC 4291 embedded-IPv4 notation, not oversized addresses.
    """
    # RFC4291 2.5.5.2
    self.assertEqual(ipaddr.IPAddress('::FFFF:192.0.2.1'),
                     ipaddr.IPAddress('::FFFF:c000:201'))
    # RFC4291 2.2 (part 3) x::d.d.d.d
    self.assertEqual(ipaddr.IPAddress('FFFF::192.0.2.1'),
                     ipaddr.IPAddress('FFFF::c000:201'))

def testIPVersion(self):
    """.version is 4 or 6 according to the fixture."""
    self.assertEqual(self.ipv4.version, 4)
    self.assertEqual(self.ipv6.version, 6)

def testMaxPrefixLength(self):
    """.max_prefixlen is 32 for IPv4 and 128 for IPv6."""
    self.assertEqual(self.ipv4.max_prefixlen, 32)
    self.assertEqual(self.ipv6.max_prefixlen, 128)
def testPacked(self):
    """.packed returns the address in network byte order."""
    self.assertEqual(self.ipv4.packed,
                     _cb('\x01\x02\x03\x04'))
    self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed,
                     _cb('\xff\xfe\xfd\xfc'))
    self.assertEqual(self.ipv6.packed,
                     _cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
                         '\x02\x00\x00\x00\x00\x00\x00\x01'))
    self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed,
                     _cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
                         + '\x00' * 6))
    self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed,
                     _cb('\x00' * 6 + '\x00\x01' + '\x00' * 8))

def testIpStrFromPrefixlen(self):
    """_ip_string_from_prefix renders a prefix length as a dotted mask;
    with no argument it uses the network's own prefixlen."""
    ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
    self.assertEqual(ipv4._ip_string_from_prefix(), '255.255.255.0')
    self.assertEqual(ipv4._ip_string_from_prefix(28), '255.255.255.240')

def testIpType(self):
    """The IPNetwork/IPAddress factories return version-specific types."""
    ipv4net = ipaddr.IPNetwork('1.2.3.4')
    ipv4addr = ipaddr.IPAddress('1.2.3.4')
    ipv6net = ipaddr.IPNetwork('::1.2.3.4')
    ipv6addr = ipaddr.IPAddress('::1.2.3.4')
    self.assertEqual(ipaddr.IPv4Network, type(ipv4net))
    self.assertEqual(ipaddr.IPv4Address, type(ipv4addr))
    self.assertEqual(ipaddr.IPv6Network, type(ipv6net))
    self.assertEqual(ipaddr.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
    """is_multicast/is_private/is_link_local/is_loopback/is_unspecified
    boundaries for IPv4, on both networks and bare addresses."""
    # test networks
    self.assertEqual(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
    self.assertEqual(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('192.169.0.0').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('10.255.255.255').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('11.0.0.0').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('172.31.255.255').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
    self.assertEqual(False,
                     ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
    self.assertEqual(True,
                     ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
    # test addresses
    self.assertEqual(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
    self.assertEqual(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('192.168.1.1').is_private)
    self.assertEqual(False, ipaddr.IPAddress('192.169.0.0').is_private)
    self.assertEqual(True, ipaddr.IPAddress('10.255.255.255').is_private)
    self.assertEqual(False, ipaddr.IPAddress('11.0.0.0').is_private)
    self.assertEqual(True, ipaddr.IPAddress('172.31.255.255').is_private)
    self.assertEqual(False, ipaddr.IPAddress('172.32.0.0').is_private)
    self.assertEqual(True,
                     ipaddr.IPAddress('169.254.100.200').is_link_local)
    self.assertEqual(False,
                     ipaddr.IPAddress('169.255.100.200').is_link_local)
    self.assertEqual(True,
                     ipaddr.IPAddress('127.100.200.254').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
    self.assertEqual(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('0.0.0.0').is_unspecified)

def testReservedIpv6(self):
    """Reserved-range predicate boundaries for IPv6 networks and
    addresses (multicast, site/link local, private, loopback, etc.)."""
    self.assertEqual(True, ipaddr.IPNetwork('ffff::').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork(2**128-1).is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('ff00::').is_multicast)
    self.assertEqual(False, ipaddr.IPNetwork('fdff::').is_multicast)
    self.assertEqual(True, ipaddr.IPNetwork('fecf::').is_site_local)
    self.assertEqual(True, ipaddr.IPNetwork(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPNetwork('ff00::').is_site_local)
    self.assertEqual(True, ipaddr.IPNetwork('fc00::').is_private)
    self.assertEqual(True, ipaddr.IPNetwork(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPNetwork('fe00::').is_private)
    self.assertEqual(True, ipaddr.IPNetwork('fea0::').is_link_local)
    self.assertEqual(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPNetwork('fec0::').is_link_local)
    self.assertEqual(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
    # A multi-address network is not "the loopback address".
    self.assertEqual(False, ipaddr.IPNetwork('::1/127').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('::').is_loopback)
    self.assertEqual(False, ipaddr.IPNetwork('::2').is_loopback)
    self.assertEqual(True, ipaddr.IPNetwork('0::0').is_unspecified)
    self.assertEqual(False, ipaddr.IPNetwork('::1').is_unspecified)
    self.assertEqual(False, ipaddr.IPNetwork('::/127').is_unspecified)
    # test addresses
    self.assertEqual(True, ipaddr.IPAddress('ffff::').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress(2**128-1).is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('ff00::').is_multicast)
    self.assertEqual(False, ipaddr.IPAddress('fdff::').is_multicast)
    self.assertEqual(True, ipaddr.IPAddress('fecf::').is_site_local)
    self.assertEqual(True, ipaddr.IPAddress(
        'feff:ffff:ffff:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
    self.assertEqual(False, ipaddr.IPAddress('ff00::').is_site_local)
    self.assertEqual(True, ipaddr.IPAddress('fc00::').is_private)
    self.assertEqual(True, ipaddr.IPAddress(
        'fc00:ffff:ffff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPAddress('fbff:ffff::').is_private)
    self.assertEqual(False, ipaddr.IPAddress('fe00::').is_private)
    self.assertEqual(True, ipaddr.IPAddress('fea0::').is_link_local)
    self.assertEqual(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
    self.assertEqual(False, ipaddr.IPAddress('fec0::').is_link_local)
    self.assertEqual(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('::1').is_loopback)
    self.assertEqual(False, ipaddr.IPAddress('::2').is_loopback)
    self.assertEqual(True, ipaddr.IPAddress('0::0').is_unspecified)
    self.assertEqual(False, ipaddr.IPAddress('::1').is_unspecified)
    # some generic IETF reserved addresses
    self.assertEqual(True, ipaddr.IPAddress('100::').is_reserved)
    self.assertEqual(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
def testIpv4Mapped(self):
    """.ipv4_mapped extracts the IPv4 address from ::ffff:a.b.c.d forms
    and is None for non-mapped addresses."""
    self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped,
                     ipaddr.IPAddress('192.168.1.1'))
    self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None)
    self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped,
                     ipaddr.IPAddress('192.168.1.1'))

def testAddrExclude(self):
    """address_exclude returns the pieces of self outside the argument;
    disjoint networks raise ValueError, bare addresses TypeError."""
    addr1 = ipaddr.IPNetwork('10.1.1.0/24')
    addr2 = ipaddr.IPNetwork('10.1.1.0/26')
    addr3 = ipaddr.IPNetwork('10.2.1.0/24')
    addr4 = ipaddr.IPAddress('10.1.1.0')
    self.assertEqual(addr1.address_exclude(addr2),
                     [ipaddr.IPNetwork('10.1.1.64/26'),
                      ipaddr.IPNetwork('10.1.1.128/25')])
    self.assertRaises(ValueError, addr1.address_exclude, addr3)
    self.assertRaises(TypeError, addr1.address_exclude, addr4)
    self.assertEqual(addr1.address_exclude(addr1), [])

def testHash(self):
    """Equal networks/addresses hash equal and work as dict keys."""
    self.assertEqual(hash(ipaddr.IPNetwork('10.1.1.0/24')),
                     hash(ipaddr.IPNetwork('10.1.1.0/24')))
    self.assertEqual(hash(ipaddr.IPAddress('10.1.1.0')),
                     hash(ipaddr.IPAddress('10.1.1.0')))
    # i70: hash must agree between string- and long-constructed addresses.
    self.assertEqual(hash(ipaddr.IPAddress('1.2.3.4')),
                     hash(ipaddr.IPAddress(
                         long(ipaddr.IPAddress('1.2.3.4')._ip))))
    ip1 = ipaddr.IPAddress('10.1.1.0')
    ip2 = ipaddr.IPAddress('1::')
    dummy = {}
    dummy[self.ipv4] = None
    dummy[self.ipv6] = None
    dummy[ip1] = None
    dummy[ip2] = None
    self.assertTrue(self.ipv4 in dummy)
    self.assertTrue(ip2 in dummy)

def testCopyConstructor(self):
    """Constructing from an existing network/address yields an equal one."""
    addr1 = ipaddr.IPNetwork('10.1.1.0/24')
    addr2 = ipaddr.IPNetwork(addr1)
    addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')
    addr4 = ipaddr.IPNetwork(addr3)
    addr5 = ipaddr.IPv4Address('1.1.1.1')
    addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
    self.assertEqual(addr1, addr2)
    self.assertEqual(addr3, addr4)
    self.assertEqual(addr5, ipaddr.IPv4Address(addr5))
    self.assertEqual(addr6, ipaddr.IPv6Address(addr6))
def testCompressIPv6Address(self):
    """str() of an IPv6 network produces the canonical compressed form.

    Maps input strings (uncompressed, partially compressed, or with
    embedded IPv4) to the expected str(IPv6Network(...)) output.
    """
    test_addresses = {
        '1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
        '2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
        '2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
        # The original dict listed '2001:0:3:4:5:6:7:8' twice; duplicate
        # dict keys silently collapse, so the redundant entry was removed.
        '2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
        '0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
        '0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
        '0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
        '1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
        '0:0:0:0:0:0:0:0': '::/128',
        '0:0:0:0:0:0:0:0/0': '::/0',
        '0:0:0:0:0:0:0:1': '::1/128',
        '2001:0658:022a:cafe:0000:0000:0000:0000/66':
            '2001:658:22a:cafe::/66',
        '::1.2.3.4': '::102:304/128',
        '1:2:3:4:5:ffff:1.2.3.4': '1:2:3:4:5:ffff:102:304/128',
        '::7:6:5:4:3:2:1': '0:7:6:5:4:3:2:1/128',
        '::7:6:5:4:3:2:0': '0:7:6:5:4:3:2:0/128',
        '7:6:5:4:3:2:1::': '7:6:5:4:3:2:1:0/128',
        '0:6:5:4:3:2:1::': '0:6:5:4:3:2:1:0/128',
    }
    for uncompressed, compressed in test_addresses.items():
        self.assertEqual(compressed, str(ipaddr.IPv6Network(uncompressed)))
def testExplodeShortHandIpStr(self):
    """.exploded expands an address to full 8-group, zero-padded form."""
    addr1 = ipaddr.IPv6Network('2001::1')
    addr2 = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
    self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001/128',
                     addr1.exploded)
    self.assertEqual('0000:0000:0000:0000:0000:0000:0000:0001/128',
                     ipaddr.IPv6Network('::1/128').exploded)
    # issue 77
    self.assertEqual('2001:0000:5ef5:79fd:0000:059d:a0e5:0ba1',
                     addr2.exploded)

def testIntRepresentation(self):
    """int(network) returns the host address as an integer."""
    self.assertEqual(16909060, int(self.ipv4))
    self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))

def testHexRepresentation(self):
    """hex(network) matches the hex of the integer address."""
    self.assertEqual(hex(0x1020304),
                     hex(self.ipv4))
    self.assertEqual(hex(0x20010658022ACAFE0200000000000001),
                     hex(self.ipv6))

# backwards compatibility
def testBackwardsCompability(self):
    """Legacy CamelCase API (CollapseAddrList, Subnet, IsRFC1918, ...)
    still works.  (NOTE(review): method name misspells 'Compatibility';
    left as-is to preserve the test id.)"""
    self.assertEqual(ipaddr.CollapseAddrList(
        [ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]),
        [ipaddr.IPNetwork('1.1.0.0/23')])
    self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude(
        ipaddr.IPNetwork('::42:8000/113')),
        [ipaddr.IPNetwork('::42:0/113')])
    self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
        ipaddr.IPNetwork('2::/9')) < 0)
    self.assertEqual(ipaddr.IPNetwork('1::/16').Contains(
        ipaddr.IPNetwork('2::/16')), False)
    self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(),
                     [ipaddr.IPNetwork('0.0.0.0/1'),
                      ipaddr.IPNetwork('128.0.0.0/1')])
    self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(),
                     [ipaddr.IPNetwork('::/128'),
                      ipaddr.IPNetwork('::1/128')])
    self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(),
                     ipaddr.IPNetwork('1.0.0.0/31'))
    self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(),
                     ipaddr.IPNetwork('::/120'))
    self.assertEqual(ipaddr.IPNetwork('10.0.0.2').IsRFC1918(), True)
    self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False)
    self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True)
    self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(),
                     False)
def testForceVersion(self):
    """version= overrides the default small-int-means-IPv4 inference."""
    self.assertEqual(ipaddr.IPNetwork(1).version, 4)
    self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6)

def testWithStar(self):
    """with_prefixlen/with_netmask/with_hostmask render the three
    'address/mask' spellings."""
    self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24")
    self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0")
    self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255")
    self.assertEqual(str(self.ipv6.with_prefixlen),
                     '2001:658:22a:cafe:200::1/64')
    # rfc3513 sec 2.3 says that ipv6 only uses cidr notation for
    # subnets
    self.assertEqual(str(self.ipv6.with_netmask),
                     '2001:658:22a:cafe:200::1/64')
    # this probably don't make much sense, but it's included for
    # compatibility with ipv4
    self.assertEqual(str(self.ipv6.with_hostmask),
                     '2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
    """network/broadcast/hostmask are computed lazily and memoized.

    Verifies the private _cache dict is empty before first access and
    populated after, for both IPv4 and IPv6 fixtures.  Uses the `in`
    operator instead of the deprecated dict.has_key() — identical
    semantics in Python 2, and also valid on Python 3.
    """
    # V4 - make sure we're empty
    self.assertFalse('network' in self.ipv4._cache)
    self.assertFalse('broadcast' in self.ipv4._cache)
    self.assertFalse('hostmask' in self.ipv4._cache)
    # V4 - populate and test
    self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
    self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
    self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
    # V4 - check we're cached
    self.assertTrue('network' in self.ipv4._cache)
    self.assertTrue('broadcast' in self.ipv4._cache)
    self.assertTrue('hostmask' in self.ipv4._cache)
    # V6 - make sure we're empty
    self.assertFalse('network' in self.ipv6._cache)
    self.assertFalse('broadcast' in self.ipv6._cache)
    self.assertFalse('hostmask' in self.ipv6._cache)
    # V6 - populate and test
    self.assertEqual(self.ipv6.network,
                     ipaddr.IPv6Address('2001:658:22a:cafe::'))
    self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
        '2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
    self.assertEqual(self.ipv6.hostmask,
                     ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
    # V6 - check we're cached
    self.assertTrue('network' in self.ipv6._cache)
    self.assertTrue('broadcast' in self.ipv6._cache)
    self.assertTrue('hostmask' in self.ipv6._cache)
def testTeredo(self):
    """.teredo decodes (server, client) from 2001:0::/32 addresses and
    is falsy for addresses outside that prefix."""
    # stolen from wikipedia
    server = ipaddr.IPv4Address('65.54.227.120')
    client = ipaddr.IPv4Address('192.0.2.45')
    teredo_addr = '2001:0000:4136:e378:8000:63bf:3fff:fdd2'
    self.assertEqual((server, client),
                     ipaddr.IPAddress(teredo_addr).teredo)
    bad_addr = '2000::4136:e378:8000:63bf:3fff:fdd2'
    self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
    bad_addr = '2001:0001:4136:e378:8000:63bf:3fff:fdd2'
    self.assertFalse(ipaddr.IPAddress(bad_addr).teredo)
    # i77
    teredo_addr = ipaddr.IPv6Address('2001:0:5ef5:79fd:0:59d:a0e5:ba1')
    self.assertEqual((ipaddr.IPv4Address('94.245.121.253'),
                      ipaddr.IPv4Address('95.26.244.94')),
                     teredo_addr.teredo)

def testsixtofour(self):
    """.sixtofour extracts the embedded IPv4 from 2002::/16 addresses
    and is falsy otherwise."""
    sixtofouraddr = ipaddr.IPAddress('2002:ac1d:2d64::1')
    bad_addr = ipaddr.IPAddress('2000:ac1d:2d64::1')
    self.assertEqual(ipaddr.IPv4Address('172.29.45.100'),
                     sixtofouraddr.sixtofour)
    self.assertFalse(bad_addr.sixtofour)


if __name__ == '__main__':
    unittest.main()
|
mozilla/stoneridge
|
wpr/third_party/ipaddr/ipaddr_test.py
|
Python
|
mpl-2.0
| 50,368
|
[
"FEFF"
] |
e82e46a274e313ae19036614f0f5ab4dc8c1c7a55564e043f3f1630a237b4c47
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Bunch is a subclass of dict with attribute-style access.
>>> b = Bunch()
>>> b.hello = 'world'
>>> b.hello
'world'
>>> b['hello'] += "!"
>>> b.hello
'world!'
>>> b.foo = Bunch(lol=True)
>>> b.foo.lol
True
>>> b.foo is b['foo']
True
It is safe to import * from this module:
__all__ = ('Bunch', 'bunchify','unbunchify')
un/bunchify provide dictionary conversion; Bunches can also be
converted via Bunch.to/fromDict().
"""
__version__ = '1.0.1'
# Version as a comparable tuple of ints, e.g. (1, 0, 1).
VERSION = tuple(map(int, __version__.split('.')))
# Names exported by `from bunch import *`.
__all__ = ('Bunch', 'bunchify','unbunchify',)
class Bunch(dict):
    """ A dictionary that provides attribute-style access.
        >>> b = Bunch()
        >>> b.hello = 'world'
        >>> b.hello
        'world'
        >>> b['hello'] += "!"
        >>> b.hello
        'world!'
        >>> b.foo = Bunch(lol=True)
        >>> b.foo.lol
        True
        >>> b.foo is b['foo']
        True
        A Bunch is a subclass of dict; it supports all the methods a dict does...
        >>> sorted(b.keys())
        ['foo', 'hello']
        Including update()...
        >>> b.update({ 'ponies': 'are pretty!' }, hello=42)
        >>> repr(b)
        "Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')"
        As well as iteration...
        >>> sorted([ (k,b[k]) for k in b ])
        [('foo', Bunch(lol=True)), ('hello', 42), ('ponies', 'are pretty!')]
        And "splats".
        >>> "The {knights} who say {ni}!".format(**Bunch(knights='lolcats', ni='can haz'))
        'The lolcats who say can haz!'
        See unbunchify/Bunch.toDict, bunchify/Bunch.fromDict for notes about conversion.
    """
    def __contains__(self, k):
        """ >>> b = Bunch(ponies='are pretty!')
            >>> 'ponies' in b
            True
            >>> 'foo' in b
            False
            >>> b['foo'] = 42
            >>> 'foo' in b
            True
            >>> b.hello = 'hai'
            >>> 'hello' in b
            True
        """
        try:
            return hasattr(self, k) or dict.__contains__(self, k)
        except Exception:
            # Narrowed from a bare ``except:`` so that SystemExit and
            # KeyboardInterrupt are not swallowed; a non-string key can
            # make hasattr() raise TypeError, which still means "not in".
            return False
    # only called if k not found in normal places
    def __getattr__(self, k):
        """ Gets key if it exists, otherwise throws AttributeError.
            nb. __getattr__ is only called if key is not found in normal places.
            >>> b = Bunch(bar='baz', lol={})
            >>> b.foo
            Traceback (most recent call last):
                ...
            AttributeError: foo
            >>> b.bar
            'baz'
            >>> getattr(b, 'bar')
            'baz'
            >>> b['bar']
            'baz'
            >>> b.lol is b['lol']
            True
            >>> b.lol is getattr(b, 'lol')
            True
        """
        try:
            # Throws exception if not in prototype chain
            return object.__getattribute__(self, k)
        except AttributeError:
            try:
                return self[k]
            except KeyError:
                raise AttributeError(k)
    def __setattr__(self, k, v):
        """ Sets attribute k if it exists, otherwise sets key k. A KeyError
            raised by set-item (only likely if you subclass Bunch) will
            propagate as an AttributeError instead.
            >>> b = Bunch(foo='bar', this_is='useful when subclassing')
            >>> b.values                            #doctest: +ELLIPSIS
            <built-in method values of Bunch object at 0x...>
            >>> b.values = 'uh oh'
            >>> b.values
            'uh oh'
            >>> b['values']
            Traceback (most recent call last):
                ...
            KeyError: 'values'
        """
        try:
            # Throws exception if not in prototype chain
            object.__getattribute__(self, k)
        except AttributeError:
            try:
                self[k] = v
            except Exception:
                # Narrowed from a bare ``except:``; any failure in the
                # subclass item-setter surfaces as an AttributeError.
                raise AttributeError(k)
        else:
            object.__setattr__(self, k, v)
    def __delattr__(self, k):
        """ Deletes attribute k if it exists, otherwise deletes key k. A KeyError
            raised by deleting the key--such as when the key is missing--will
            propagate as an AttributeError instead.
            >>> b = Bunch(lol=42)
            >>> del b.values
            Traceback (most recent call last):
                ...
            AttributeError: 'Bunch' object attribute 'values' is read-only
            >>> del b.lol
            >>> b.lol
            Traceback (most recent call last):
                ...
            AttributeError: lol
        """
        try:
            # Throws exception if not in prototype chain
            object.__getattribute__(self, k)
        except AttributeError:
            try:
                del self[k]
            except KeyError:
                raise AttributeError(k)
        else:
            object.__delattr__(self, k)
    def toDict(self):
        """ Recursively converts a bunch back into a dictionary.
            >>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
            >>> b.toDict() == {'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
            True
            See unbunchify for more info.
        """
        return unbunchify(self)
    def __repr__(self):
        """ Invertible* string-form of a Bunch.
            >>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
            >>> repr(b)
            "Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')"
            >>> eval(repr(b))
            Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
            (*) Invertible so long as collection contents are each repr-invertible.
        """
        # sorted() works on both Python 2 and 3; the original
        # ``self.keys(); keys.sort()`` fails on Python 3 dict views.
        keys = sorted(self.keys())
        args = ', '.join(['%s=%r' % (key, self[key]) for key in keys])
        return '%s(%s)' % (self.__class__.__name__, args)
    @staticmethod
    def fromDict(d):
        """ Recursively transforms a dictionary into a Bunch via copy.
            >>> b = Bunch.fromDict({'urmom': {'sez': {'what': 'what'}}})
            >>> b.urmom.sez.what
            'what'
            See bunchify for more info.
        """
        return bunchify(d)
# While we could convert abstract types like Mapping or Iterable, I think
# bunchify is more likely to "do what you mean" if it is conservative about
# casting (ex: isinstance(str,Iterable) == True ).
#
# Should you disagree, it is not difficult to duplicate this function with
# more aggressive coercion to suit your own purposes.
def bunchify(x):
    """ Recursively transforms a dictionary into a Bunch via copy.
        >>> b = bunchify({'urmom': {'sez': {'what': 'what'}}})
        >>> b.urmom.sez.what
        'what'
        bunchify can handle intermediary dicts, lists and tuples (as well as
        their subclasses), but ymmv on custom datatypes.
        >>> b = bunchify({ 'lol': ('cats', {'hah':'i win again'}),
        ...         'hello': [{'french':'salut', 'german':'hallo'}] })
        >>> b.hello[0].french
        'salut'
        >>> b.lol[1].hah
        'i win again'
        nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
    """
    if isinstance(x, dict):
        # dict.items() works on Python 2 and 3 alike; iteritems() is
        # Python-2-only and the results are identical here.
        return Bunch( (k, bunchify(v)) for k,v in x.items() )
    elif isinstance(x, (list, tuple)):
        # Preserve the concrete sequence type (list/tuple or subclass).
        return type(x)( bunchify(v) for v in x )
    else:
        return x
def unbunchify(x):
    """ Recursively converts a Bunch into a dictionary.
        >>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
        >>> unbunchify(b) == {'ponies': 'are pretty!', 'foo': {'lol': True}, 'hello': 42}
        True
        unbunchify will handle intermediary dicts, lists and tuples (as well as
        their subclasses), but ymmv on custom datatypes.
        >>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42,
        ...         ponies=('are pretty!', Bunch(lies='are trouble!')))
        >>> unbunchify(b) == {'ponies': ('are pretty!', {'lies': 'are trouble!'}),
        ...                   'foo': ['bar', {'lol': True}], 'hello': 42}
        True
        nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
    """
    if isinstance(x, dict):
        # dict.items() works on Python 2 and 3 alike; iteritems() is
        # Python-2-only and the results are identical here.
        return dict( (k, unbunchify(v)) for k,v in x.items() )
    elif isinstance(x, (list, tuple)):
        # Preserve the concrete sequence type (list/tuple or subclass).
        return type(x)( unbunchify(v) for v in x )
    else:
        return x
### Serialization
# JSON support: ``json`` is stdlib from Python 2.6; older interpreters
# fall back to the third-party ``simplejson``.  If neither import
# succeeds the outer try swallows the ImportError and Bunch simply
# gains no toJSON() method.
try:
    try:
        import json
    except ImportError:
        import simplejson as json
    def toJSON(self, **options):
        """ Serializes this Bunch to JSON. Accepts the same keyword options as `json.dumps()`.
            >>> b = Bunch(foo=Bunch(lol=True), hello=42, ponies='are pretty!')
            >>> json.dumps(b)
            '{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
            >>> b.toJSON()
            '{"ponies": "are pretty!", "foo": {"lol": true}, "hello": 42}'
        """
        return json.dumps(self, **options)
    # Attach the helper as an instance method on Bunch.
    Bunch.toJSON = toJSON
except ImportError:
    pass
try:
    # Attempt to register ourself with PyYAML as a representer
    import yaml
    from yaml.representer import Representer, SafeRepresenter
    def from_yaml(loader, node):
        """ PyYAML support for Bunches using the tag `!bunch` and `!bunch.Bunch`.
            >>> import yaml
            >>> yaml.load('''
            ... Flow style: !bunch.Bunch { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
            ... Block style: !bunch
            ... Clark : Evans
            ... Brian : Ingerson
            ... Oren : Ben-Kiki
            ... ''') #doctest: +NORMALIZE_WHITESPACE
            {'Flow style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki'),
             'Block style': Bunch(Brian='Ingerson', Clark='Evans', Oren='Ben-Kiki')}
            This module registers itself automatically to cover both Bunch and any
            subclasses. Should you want to customize the representation of a subclass,
            simply register it with PyYAML yourself.
        """
        data = Bunch()
        # Two-step construction (yield the empty mapping first, then fill
        # it) is PyYAML's protocol for supporting recursive references.
        yield data
        value = loader.construct_mapping(node)
        data.update(value)
    def to_yaml_safe(dumper, data):
        """ Converts Bunch to a normal mapping node, making it appear as a
            dict in the YAML output.
            >>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
            >>> import yaml
            >>> yaml.safe_dump(b, default_flow_style=True)
            '{foo: [bar, {lol: true}], hello: 42}\\n'
        """
        return dumper.represent_dict(data)
    def to_yaml(dumper, data):
        """ Converts Bunch to a representation node.
            >>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
            >>> import yaml
            >>> yaml.dump(b, default_flow_style=True)
            '!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
        """
        return dumper.represent_mapping(u'!bunch.Bunch', data)
    # Register the constructor for both tag spellings, and representers
    # for both the safe and the full dumper.  The multi-representer
    # variants also cover Bunch subclasses.
    yaml.add_constructor(u'!bunch', from_yaml)
    yaml.add_constructor(u'!bunch.Bunch', from_yaml)
    SafeRepresenter.add_representer(Bunch, to_yaml_safe)
    SafeRepresenter.add_multi_representer(Bunch, to_yaml_safe)
    Representer.add_representer(Bunch, to_yaml)
    Representer.add_multi_representer(Bunch, to_yaml)
    # Instance methods for YAML conversion
    def toYAML(self, **options):
        """ Serializes this Bunch to YAML, using `yaml.safe_dump()` if
            no `Dumper` is provided. See the PyYAML documentation for more info.
            >>> b = Bunch(foo=['bar', Bunch(lol=True)], hello=42)
            >>> import yaml
            >>> yaml.safe_dump(b, default_flow_style=True)
            '{foo: [bar, {lol: true}], hello: 42}\\n'
            >>> b.toYAML(default_flow_style=True)
            '{foo: [bar, {lol: true}], hello: 42}\\n'
            >>> yaml.dump(b, default_flow_style=True)
            '!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
            >>> b.toYAML(Dumper=yaml.Dumper, default_flow_style=True)
            '!bunch.Bunch {foo: [bar, !bunch.Bunch {lol: true}], hello: 42}\\n'
        """
        opts = dict(indent=4, default_flow_style=False)
        opts.update(options)
        if 'Dumper' not in opts:
            return yaml.safe_dump(self, **opts)
        else:
            return yaml.dump(self, **opts)
    def fromYAML(*args, **kwargs):
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; confirm callers only feed it trusted documents.
        return bunchify( yaml.load(*args, **kwargs) )
    Bunch.toYAML = Bunch.__str__ = toYAML
    Bunch.fromYAML = staticmethod(fromYAML)
except ImportError:
    pass
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
Nexenta/s3-tests
|
virtualenv/lib/python2.7/site-packages/bunch/__init__.py
|
Python
|
mit
| 13,203
|
[
"Brian"
] |
1517e0070937d695bdb8fc3391cecf30eeabb2386dd42f91f2076429b1646388
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
from espresso import Real3D, infinity
import espresso.unittest
from espresso.interaction.SoftCosine import *
class TestSoftCosine(espresso.unittest.TestCase):
    """Checks for the SoftCosine pair-potential wrapper."""
    def testDefaults(self):
        # A freshly constructed potential carries the documented defaults.
        pot = SoftCosine()
        self.assertEqual(pot.A, 1.0)
        self.assertEqual(pot.cutoff, infinity)
        self.assertEqual(pot.shift, 0.0)
    def testEnergy(self):
        # With A=2.0 the energy at zero separation equals the
        # precomputed reference value 4.0.
        pot = SoftCosine(A=2.0)
        self.assertAlmostEqual(pot.computeEnergy(0.0), 4.0)
    def testForce(self):
        pot = SoftCosine(A=1.0, cutoff=2.0, shift=0.0)
        # Force near the minimum, compared against a precomputed
        # reference value for its squared magnitude.
        delta = pot.computeForce(0.1, 0.2, 0.3) - Real3D(0.0, 0.0, 0.0)
        self.assertAlmostEqual(delta.sqr(), 0.87097538776667)
    def testProperties(self):
        pot = SoftCosine()
        pot.A = 2.0
        pot.cutoff = 1.1
        pot.shift = 0.0
        # Energies are evaluated rather than the raw properties, since a
        # plain attribute round-trip would succeed trivially.
        self.assertAlmostEqual(pot.computeEnergy(0.0), 4.0)
        self.assertAlmostEqual(pot.computeEnergy(1.1), 0.0)
        self.assertAlmostEqual(pot.computeEnergy(2.5), 0.0)
# Run the SoftCosine test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
|
BackupTheBerlios/espressopp
|
src/interaction/unittest/PTestSoftCosine.py
|
Python
|
gpl-3.0
| 2,031
|
[
"ESPResSo"
] |
391e172d7227991397d4f7049ca4ad6df63b0a8399f702a70cae8c25927f0e6e
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from xml.dom import minidom
from nova.api.openstack import xmlutil
from nova import exception
from nova import test
from nova.tests import utils as tests_utils
class SelectorTest(test.NoDBTestCase):
    """Unit tests for xmlutil.Selector and friends: chain construction,
    dict/list/datum lookup, item iteration and constant selectors."""
    # Shared fixture: a nested dict exercising every selector flavor.
    obj_for_test = {
        'test': {
            'name': 'test',
            'values': [1, 2, 3],
            'attrs': {
                'foo': 1,
                'bar': 2,
                'baz': 3,
            },
        },
    }
    def test_repr(self):
        sel = xmlutil.Selector()
        self.assertEqual(repr(sel), "Selector()")
    def test_empty_selector(self):
        sel = xmlutil.EmptyStringSelector()
        self.assertEqual(len(sel.chain), 0)
        self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
        # NOTE(review): this expected string depends on CPython 2's dict
        # key ordering; it will not hold on Python 3 — confirm the
        # intended runtime before porting.
        self.assertEqual(
            repr(self.obj_for_test),
            "{'test': {'values': [1, 2, 3], 'name': 'test', 'attrs': "
            "{'baz': 3, 'foo': 1, 'bar': 2}}}")
    def test_dict_selector(self):
        sel = xmlutil.Selector('test')
        self.assertEqual(len(sel.chain), 1)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel(self.obj_for_test),
                         self.obj_for_test['test'])
    def test_datum_selector(self):
        sel = xmlutil.Selector('test', 'name')
        self.assertEqual(len(sel.chain), 2)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'name')
        self.assertEqual(sel(self.obj_for_test), 'test')
    def test_list_selector(self):
        sel = xmlutil.Selector('test', 'values', 0)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[0], 'test')
        self.assertEqual(sel.chain[1], 'values')
        self.assertEqual(sel.chain[2], 0)
        self.assertEqual(sel(self.obj_for_test), 1)
    def test_items_selector(self):
        sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
        self.assertEqual(len(sel.chain), 3)
        self.assertEqual(sel.chain[2], xmlutil.get_items)
        for key, val in sel(self.obj_for_test):
            self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
    def test_missing_key_selector(self):
        sel = xmlutil.Selector('test2', 'attrs')
        self.assertIsNone(sel(self.obj_for_test))
        # With the second argument True, a missing key raises instead.
        self.assertRaises(KeyError, sel, self.obj_for_test, True)
    def test_constant_selector(self):
        sel = xmlutil.ConstantSelector('Foobar')
        self.assertEqual(sel.value, 'Foobar')
        self.assertEqual(sel(self.obj_for_test), 'Foobar')
        self.assertEqual(repr(sel), "'Foobar'")
class TemplateElementTest(test.NoDBTestCase):
    """Unit tests for xmlutil.TemplateElement: attribute handling, child
    management, (sub)selectors, text, rendering and tree dumps."""
    def test_element_initial_attributes(self):
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
                                       c=4, d=5, e=6)
        # Verify all the attributes are as expected
        # (keyword arguments override the attrib dict, hence c=4)
        expected = dict(a=1, b=2, c=4, d=5, e=6)
        for k, v in expected.items():
            self.assertEqual(elem.attrib[k].chain[0], v)
        self.assertTrue(repr(elem))
    def test_element_get_attributes(self):
        expected = dict(a=1, b=2, c=3)
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Verify that get() retrieves the attributes
        for k, v in expected.items():
            self.assertEqual(elem.get(k).chain[0], v)
    def test_element_set_attributes(self):
        attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
        # Create a bare template element with no attributes
        elem = xmlutil.TemplateElement('test')
        # Set the attribute values
        for k, v in attrs.items():
            elem.set(k, v)
        # Now verify what got set
        self.assertEqual(len(elem.attrib['a'].chain), 1)
        self.assertEqual(elem.attrib['a'].chain[0], 'a')
        self.assertEqual(len(elem.attrib['b'].chain), 1)
        self.assertEqual(elem.attrib['b'].chain[0], 'foo')
        self.assertEqual(elem.attrib['c'], attrs['c'])
    def test_element_attribute_keys(self):
        attrs = dict(a=1, b=2, c=3, d=4)
        expected = set(attrs.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Now verify keys
        self.assertEqual(set(elem.keys()), expected)
    def test_element_attribute_items(self):
        expected = dict(a=xmlutil.Selector(1),
                        b=xmlutil.Selector(2),
                        c=xmlutil.Selector(3))
        keys = set(expected.keys())
        # Create a template element with some attributes
        elem = xmlutil.TemplateElement('test', attrib=expected)
        # Now verify items
        for k, v in elem.items():
            self.assertEqual(expected[k], v)
            keys.remove(k)
        # Did we visit all keys?
        self.assertEqual(len(keys), 0)
    def test_element_selector_none(self):
        # Create a template element with no selector
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(len(elem.selector.chain), 0)
    def test_element_selector_string(self):
        # Create a template element with a string selector
        elem = xmlutil.TemplateElement('test', selector='test')
        self.assertEqual(len(elem.selector.chain), 1)
        self.assertEqual(elem.selector.chain[0], 'test')
    def test_element_selector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit selector
        elem = xmlutil.TemplateElement('test', selector=sel)
        self.assertEqual(elem.selector, sel)
    def test_element_subselector_none(self):
        # Create a template element with no subselector
        elem = xmlutil.TemplateElement('test')
        self.assertIsNone(elem.subselector)
    def test_element_subselector_string(self):
        # Create a template element with a string subselector
        elem = xmlutil.TemplateElement('test', subselector='test')
        self.assertEqual(len(elem.subselector.chain), 1)
        self.assertEqual(elem.subselector.chain[0], 'test')
    def test_element_subselector(self):
        sel = xmlutil.Selector('a', 'b')
        # Create a template element with an explicit subselector
        elem = xmlutil.TemplateElement('test', subselector=sel)
        self.assertEqual(elem.subselector, sel)
    def test_element_append_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        # Verify that the child was added
        self.assertEqual(len(elem), 1)
        self.assertEqual(elem[0], child)
        self.assertIn('child', elem)
        self.assertEqual(elem['child'], child)
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child')
        self.assertRaises(KeyError, elem.append, child2)
    def test_element_extend_children(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Verify that the children were added
        self.assertEqual(len(elem), 3)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        children2 = [
            xmlutil.TemplateElement('child4'),
            xmlutil.TemplateElement('child1'),
        ]
        self.assertRaises(KeyError, elem.extend, children2)
        # Also ensure that child4 was not added
        self.assertEqual(len(elem), 3)
        self.assertEqual(elem[-1].tag, 'child3')
    def test_element_insert_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a child to insert
        child = xmlutil.TemplateElement('child4')
        # Insert it
        elem.insert(1, child)
        # Ensure the child was inserted in the right place
        self.assertEqual(len(elem), 4)
        children.insert(1, child)
        for idx in range(len(elem)):
            self.assertEqual(children[idx], elem[idx])
            self.assertIn(children[idx].tag, elem)
            self.assertEqual(elem[children[idx].tag], children[idx])
        # Ensure that multiple children of the same name are rejected
        child2 = xmlutil.TemplateElement('child2')
        self.assertRaises(KeyError, elem.insert, 2, child2)
    def test_element_remove_child(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Make sure the element starts off empty
        self.assertEqual(len(elem), 0)
        # Create a few children
        children = [
            xmlutil.TemplateElement('child1'),
            xmlutil.TemplateElement('child2'),
            xmlutil.TemplateElement('child3'),
        ]
        # Extend the parent by those children
        elem.extend(children)
        # Create a test child to remove
        child = xmlutil.TemplateElement('child2')
        # Try to remove it (an equal-named but distinct element must fail)
        self.assertRaises(ValueError, elem.remove, child)
        # Ensure that no child was removed
        self.assertEqual(len(elem), 3)
        # Now remove a legitimate child
        elem.remove(children[1])
        # Ensure that the child was removed
        self.assertEqual(len(elem), 2)
        self.assertEqual(elem[0], children[0])
        self.assertEqual(elem[1], children[2])
        self.assertEqual('child2' in elem, False)
        # Ensure the child cannot be retrieved by name
        def get_key(elem, key):
            return elem[key]
        self.assertRaises(KeyError, get_key, elem, 'child2')
    def test_element_text(self):
        # Create an element
        elem = xmlutil.TemplateElement('test')
        # Ensure that it has no text
        self.assertIsNone(elem.text)
        # Try setting it to a string and ensure it becomes a selector
        elem.text = 'test'
        self.assertEqual(hasattr(elem.text, 'chain'), True)
        self.assertEqual(len(elem.text.chain), 1)
        self.assertEqual(elem.text.chain[0], 'test')
        # Try resetting the text to None
        elem.text = None
        self.assertIsNone(elem.text)
        # Now make up a selector and try setting the text to that
        sel = xmlutil.Selector()
        elem.text = sel
        self.assertEqual(elem.text, sel)
        # Finally, try deleting the text and see what happens
        del elem.text
        self.assertIsNone(elem.text)
    def test_apply_attrs(self):
        # Create a template element
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2))
        tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the correct attributes were set
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
    def test_apply_text(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.ConstantSelector(1)
        # Create an etree element
        elem = etree.Element('test')
        # Apply the template to the element
        tmpl_elem.apply(elem, None)
        # Now, verify the text was set
        self.assertEqual(str(tmpl_elem.text.value), elem.text)
    def test__render(self):
        attrs = dict(attr1=xmlutil.ConstantSelector(1),
                     attr2=xmlutil.ConstantSelector(2),
                     attr3=xmlutil.ConstantSelector(3))
        # Create a master template element
        master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
        # Create a couple of slave template element
        slave_elems = [
            xmlutil.TemplateElement('test', attr2=attrs['attr2']),
            xmlutil.TemplateElement('test', attr3=attrs['attr3']),
        ]
        # Try the render
        elem = master_elem._render(None, None, slave_elems, None)
        # Verify the particulars of the render
        self.assertEqual(elem.tag, 'test')
        self.assertEqual(len(elem.nsmap), 0)
        for k, v in elem.items():
            self.assertEqual(str(attrs[k].value), v)
        # Create a parent for the element to be rendered
        parent = etree.Element('parent')
        # Try the render again...
        elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
        # Verify the particulars of the render
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], elem)
        self.assertEqual(len(elem.nsmap), 1)
        self.assertEqual(elem.nsmap['a'], 'foo')
    def test_render(self):
        # Create a template element
        tmpl_elem = xmlutil.TemplateElement('test')
        tmpl_elem.text = xmlutil.Selector()
        # Create the object we're going to render
        obj = ['elem1', 'elem2', 'elem3', 'elem4']
        # Try a render with no object
        elems = tmpl_elem.render(None, None)
        self.assertEqual(len(elems), 0)
        # Try a render with one object
        elems = tmpl_elem.render(None, 'foo')
        self.assertEqual(len(elems), 1)
        self.assertEqual(elems[0][0].text, 'foo')
        self.assertEqual(elems[0][1], 'foo')
        # Now, try rendering an object with multiple entries
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
        # Check the results
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].text, obj[idx])
            self.assertEqual(elems[idx][1], obj[idx])
        # Check with a subselector
        tmpl_elem = xmlutil.TemplateElement(
            'test',
            subselector=xmlutil.ConstantSelector('foo'))
        parent = etree.Element('parent')
        # Try a render with no object
        elems = tmpl_elem.render(parent, obj)
        self.assertEqual(len(elems), 4)
    def test_subelement(self):
        # Try the SubTemplateElement constructor
        parent = xmlutil.SubTemplateElement(None, 'parent')
        self.assertEqual(parent.tag, 'parent')
        self.assertEqual(len(parent), 0)
        # Now try it with a parent element
        child = xmlutil.SubTemplateElement(parent, 'child')
        self.assertEqual(child.tag, 'child')
        self.assertEqual(len(parent), 1)
        self.assertEqual(parent[0], child)
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        self.assertEqual(elem.unwrap(), elem)
        self.assertEqual(elem.wrap().root, elem)
    def test_dyntag(self):
        obj = ['a', 'b', 'c']
        # Create a template element with a dynamic tag
        tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
        # Try the render
        parent = etree.Element('parent')
        elems = tmpl_elem.render(parent, obj)
        # Verify the particulars of the render
        self.assertEqual(len(elems), len(obj))
        for idx in range(len(obj)):
            self.assertEqual(elems[idx][0].tag, obj[idx])
    def test_tree(self):
        # Create a template element
        elem = xmlutil.TemplateElement('test', attr3='attr3')
        elem.text = 'test'
        self.assertEqual(elem.tree(),
                         "<test !selector=Selector() "
                         "!text=Selector('test',) "
                         "attr3=Selector('attr3',)"
                         "/>")
        # Create a template element
        elem = xmlutil.TemplateElement('test2')
        # Create a child element
        child = xmlutil.TemplateElement('child')
        # Append the child to the parent
        elem.append(child)
        self.assertEqual(elem.tree(),
                         "<test2 !selector=Selector()>"
                         "<child !selector=Selector()/></test2>")
class TemplateTest(test.NoDBTestCase):
    """Unit tests for xmlutil.Template, MasterTemplate and SlaveTemplate:
    sibling/nsmap computation, slave attachment, copying, version
    applicability and full serialization."""
    def test_tree(self):
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertTrue(tmpl.tree())
    def test_wrap(self):
        # These are strange methods, but they make things easier
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        self.assertEqual(tmpl.unwrap(), elem)
        self.assertEqual(tmpl.wrap(), tmpl)
    def test__siblings(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem)
        # Check that we get the right siblings
        siblings = tmpl._siblings()
        self.assertEqual(len(siblings), 1)
        self.assertEqual(siblings[0], elem)
    def test__nsmap(self):
        # Set up a basic template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
        # Check out that we get the right namespace dictionary
        # (_nsmap must return a copy, not the template's own dict)
        nsmap = tmpl._nsmap()
        self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
        self.assertEqual(len(nsmap), 1)
        self.assertEqual(nsmap['a'], 'foo')
    def test_master_attach(self):
        # Set up a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1)
        # Make sure it has a root but no slaves
        self.assertEqual(tmpl.root, elem)
        self.assertEqual(len(tmpl.slaves), 0)
        self.assertTrue(repr(tmpl))
        # Try to attach an invalid slave
        bad_elem = xmlutil.TemplateElement('test2')
        self.assertRaises(ValueError, tmpl.attach, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an invalid and a valid slave
        # (attach must be all-or-nothing: the valid one is not kept)
        good_elem = xmlutil.TemplateElement('test')
        self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
        self.assertEqual(len(tmpl.slaves), 0)
        # Try to attach an inapplicable template
        class InapplicableTemplate(xmlutil.Template):
            def apply(self, master):
                return False
        inapp_tmpl = InapplicableTemplate(good_elem)
        tmpl.attach(inapp_tmpl)
        self.assertEqual(len(tmpl.slaves), 0)
        # Now try attaching an applicable template
        tmpl.attach(good_elem)
        self.assertEqual(len(tmpl.slaves), 1)
        self.assertEqual(tmpl.slaves[0].root, good_elem)
    def test_master_copy(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
        # Give it a slave
        slave = xmlutil.TemplateElement('test')
        tmpl.attach(slave)
        # Construct a copy
        copy = tmpl.copy()
        # Check to see if we actually managed a copy
        # (root/version/nsmap are shared; the slaves list is duplicated)
        self.assertNotEqual(tmpl, copy)
        self.assertEqual(tmpl.root, copy.root)
        self.assertEqual(tmpl.version, copy.version)
        self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
        self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
        self.assertEqual(len(tmpl.slaves), len(copy.slaves))
        self.assertEqual(tmpl.slaves[0], copy.slaves[0])
    def test_slave_apply(self):
        # Construct a master template
        elem = xmlutil.TemplateElement('test')
        master = xmlutil.MasterTemplate(elem, 3)
        # Construct a slave template with applicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 2)
        self.assertEqual(slave.apply(master), True)
        self.assertTrue(repr(slave))
        # Construct a slave template with equal minimum version
        slave = xmlutil.SlaveTemplate(elem, 3)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with inapplicable minimum version
        slave = xmlutil.SlaveTemplate(elem, 4)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with applicable version range
        slave = xmlutil.SlaveTemplate(elem, 2, 4)
        self.assertEqual(slave.apply(master), True)
        # Construct a slave template with low version range
        slave = xmlutil.SlaveTemplate(elem, 1, 2)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with high version range
        slave = xmlutil.SlaveTemplate(elem, 4, 5)
        self.assertEqual(slave.apply(master), False)
        # Construct a slave template with matching version range
        slave = xmlutil.SlaveTemplate(elem, 3, 3)
        self.assertEqual(slave.apply(master), True)
    def test__serialize(self):
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'values': [1, 2, 3, 4],
                'attrs': {
                    'a': 1,
                    'b': 2,
                    'c': 3,
                    'd': 4,
                },
                'image': {
                    'name': 'image_foobar',
                    'id': 42,
                },
            },
        }
        # Set up our master template
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        value = xmlutil.SubTemplateElement(root, 'value', selector='values')
        value.text = xmlutil.Selector()
        attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
        xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
                                   key=0, value=1)
        master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
        # Set up our slave template
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                           selector='image', id='id')
        image.text = xmlutil.Selector('name')
        slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
        # Attach the slave to the master...
        master.attach(slave)
        # Try serializing our object
        siblings = master._siblings()
        nsmap = master._nsmap()
        result = master._serialize(None, obj, siblings, nsmap)
        # Now we get to manually walk the element tree...
        # (idx tracks the position past the last checked child)
        self.assertEqual(result.tag, 'test')
        self.assertEqual(len(result.nsmap), 2)
        self.assertEqual(result.nsmap['f'], 'foo')
        self.assertEqual(result.nsmap['b'], 'bar')
        self.assertEqual(result.get('name'), obj['test']['name'])
        for idx, val in enumerate(obj['test']['values']):
            self.assertEqual(result[idx].tag, 'value')
            self.assertEqual(result[idx].text, str(val))
        idx += 1
        self.assertEqual(result[idx].tag, 'attrs')
        for attr in result[idx]:
            self.assertEqual(attr.tag, 'attr')
            self.assertEqual(attr.get('value'),
                             str(obj['test']['attrs'][attr.get('key')]))
        idx += 1
        self.assertEqual(result[idx].tag, 'image')
        self.assertEqual(result[idx].get('id'),
                         str(obj['test']['image']['id']))
        self.assertEqual(result[idx].text, obj['test']['image']['name'])
        templ = xmlutil.Template(None)
        self.assertEqual(templ.serialize(None), '')
    def test_serialize_with_colon_tagname_support(self):
        # Our test object to serialize
        obj = {'extra_specs': {'foo:bar': '999'}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
                         '</extra_specs>'))
        # Set up our master template
        root = xmlutil.TemplateElement('extra_specs', selector='extra_specs',
                                       colon_ns=True)
        value = xmlutil.SubTemplateElement(root, 'foo:bar', selector='foo:bar',
                                           colon_ns=True)
        value.text = xmlutil.Selector()
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)
    def test__serialize_with_empty_datum_selector(self):
        # Our test object to serialize
        obj = {
            'test': {
                'name': 'foobar',
                'image': ''
            },
        }
        root = xmlutil.TemplateElement('test', selector='test',
                                       name='name')
        master = xmlutil.MasterTemplate(root, 1)
        root_slave = xmlutil.TemplateElement('test', selector='test')
        image = xmlutil.SubTemplateElement(root_slave, 'image',
                                          selector='image')
        image.set('id')
        xmlutil.make_links(image, 'links')
        slave = xmlutil.SlaveTemplate(root_slave, 1)
        master.attach(slave)
        siblings = master._siblings()
        result = master._serialize(None, obj, siblings)
        self.assertEqual(result.tag, 'test')
        self.assertEqual(result[0].tag, 'image')
        self.assertEqual(result[0].get('id'), str(obj['test']['image']))
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.NoDBTestCase):
    """Verify the caching/copying semantics of TemplateBuilder subclasses.

    Master templates are cached but a fresh copy is returned per call
    (callers mutate them); slave templates are cached and shared.
    """

    def test_master_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(MasterTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = MasterTemplateBuilder()
        # Make sure that there is a template cached...
        self.assertIsNotNone(MasterTemplateBuilder._tmpl)
        # Make sure it wasn't what was returned...
        self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        cached = MasterTemplateBuilder._tmpl
        tmpl2 = MasterTemplateBuilder()
        self.assertEqual(MasterTemplateBuilder._tmpl, cached)
        # Make sure we're always getting fresh copies
        self.assertNotEqual(tmpl1, tmpl2)
        # Make sure we can override the copying behavior
        tmpl3 = MasterTemplateBuilder(False)
        self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)

    def test_slave_template_builder(self):
        # Make sure the template hasn't been built yet
        self.assertIsNone(SlaveTemplateBuilder._tmpl)
        # Now, construct the template
        tmpl1 = SlaveTemplateBuilder()
        # Make sure there is a template cached...
        self.assertIsNotNone(SlaveTemplateBuilder._tmpl)
        # Make sure it was what was returned...
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure it doesn't get rebuilt
        tmpl2 = SlaveTemplateBuilder()
        self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
        # Make sure we're always getting the cached copy
        self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.NoDBTestCase):
    """Grab-bag of tests for module-level xmlutil helper functions."""

    def test_validate_schema(self):
        xml = '''<?xml version='1.0' encoding='UTF-8'?>
<metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
<meta key="key6">value6</meta><meta key="key4">value4</meta>
</metadata>
'''
        xmlutil.validate_schema(xml, 'metadata')
        # No way to test the return value of validate_schema.
        # It just raises an exception when something is wrong.
        self.assertTrue(True)

    def test_make_links(self):
        # make_links returns the created element; just check it is truthy.
        elem = xmlutil.TemplateElement('image', selector='image')
        self.assertTrue(repr(xmlutil.make_links(elem, 'links')))

    def test_make_flat_dict(self):
        # Un-namespaced flat dict serialization.
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<wrapper><a>foo</a><b>bar</b></wrapper>')
        root = xmlutil.make_flat_dict('wrapper')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)
        # Same structure but with an XML namespace this time.
        expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
                        '<ns0:wrapper xmlns:ns0="ns"><ns0:a>foo</ns0:a><ns0:b>bar</ns0:b>'
                        "</ns0:wrapper>")
        root = xmlutil.make_flat_dict('wrapper', ns='ns')
        tmpl = xmlutil.MasterTemplate(root, 1)
        result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
        self.assertEqual(result, expected_xml)

    def test_make_flat_dict_with_colon_tagname_support(self):
        # Our test object to serialize
        obj = {'extra_specs': {'foo:bar': '999'}}
        expected_xml = (("<?xml version='1.0' encoding='UTF-8'?>\n"
                         '<extra_specs><foo:bar xmlns:foo="foo">999</foo:bar>'
                         '</extra_specs>'))
        # Set up our master template
        root = xmlutil.make_flat_dict('extra_specs', colon_ns=True)
        master = xmlutil.MasterTemplate(root, 1)
        result = master.serialize(obj)
        self.assertEqual(expected_xml, result)

    def test_safe_parse_xml(self):
        normal_body = ('<?xml version="1.0" ?>'
                       '<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
        dom = xmlutil.safe_minidom_parse_string(normal_body)
        # Some versions of minidom inject extra newlines so we ignore them
        result = str(dom.toxml()).replace('\n', '')
        self.assertEqual(normal_body, result)
        # Malicious payloads (entity-expansion bombs) must be rejected.
        self.assertRaises(exception.MalformedRequestBody,
                          xmlutil.safe_minidom_parse_string,
                          tests_utils.killer_xml_body())
class SafeParserTestCase(test.NoDBTestCase):
    """Check that ProtectedExpatParser rejects dangerous XML constructs."""

    def test_external_dtd(self):
        # An external DTD reference must be refused even when plain DTDs
        # are allowed (forbid_dtd=False).
        xml_string = ("""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head/>
<body>html with dtd</body>
</html>""")
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)

    def test_external_file(self):
        # An entity expanding to a local file must be refused.
        # BUGFIX: the entity reference was mis-encoded as the single
        # character 'ⅇ' (U+2147); restored to the intended '&ee;' so the
        # payload actually references the declared external entity.
        xml_string = """<!DOCTYPE external [
<!ENTITY ee SYSTEM "file:///PATH/TO/root.xml">
]>
<root>&ee;</root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)

    def test_notation(self):
        # NOTATION declarations in the internal DTD must be refused.
        xml_string = """<?xml version="1.0" standalone="no"?>
<!-- comment data -->
<!DOCTYPE x [
<!NOTATION notation SYSTEM "notation.jpeg">
]>
<root attr1="value1">
</root>"""
        parser = xmlutil.ProtectedExpatParser(forbid_dtd=False,
                                              forbid_entities=True)
        self.assertRaises(ValueError,
                          minidom.parseString,
                          xml_string, parser)
|
sacharya/nova
|
nova/tests/api/openstack/test_xmlutil.py
|
Python
|
apache-2.0
| 33,261
|
[
"VisIt"
] |
7483912ce0ea6341c76809c30e8ffdfd6866837583b889d28946356ad97645da
|
from bsPlugins import *
from bein import execution
from bbcflib.track import track, convert
from bbcflib.mapseq import bam_to_density
from bbcflib.gfminer.stream import merge_scores
import os, sys, pysam
__requires__ = ["pysam"]
output_opts=["sql", "bedGraph", "bigWig"]
meta = {'version': "1.0.0",
'author': "BBCF",
'contact': "webmaster-bbcf@epfl.ch"}
in_parameters = [{'id': 'sample', 'type': 'bam', 'required': True, 'multiple': True, 'label': ' Test BAMs: ', 'help_text': 'Select main bam file(s)'},
{'id': 'control', 'type': 'bam', 'label': 'Control BAM: ', 'help_text': 'Select control bam file to compute enrichment' },
{'id': 'output', 'type': 'listing', 'label': 'Output format: ', 'help_text': 'Format of the output file', 'options': output_opts, 'prompt_text': None},
{'id': 'normalization', 'type': 'int', 'label': 'Normalization: ', 'help_text': 'Normalization factor, default is total number of reads'},
{'id': 'merge_strands', 'type': 'int', 'label': 'Shift and merge strands: ', 'help_text': 'Shift value (in bp) if you want to merge strand-specific densities (will not merge if negative)', 'value': -1},
{'id': 'read_extension', 'type': 'int', 'label': 'Read extension: ','help_text': 'Read extension (in bp) to be applied when constructing densities (will use read length if negative)', 'value': -1 },
{'id': 'no_nh_flag', 'type':'boolean', 'required':True, 'label': 'Do not use NH flag: ', 'help_text': 'Do not use NH (multiple mapping counts) as weights', 'value': False},
{'id': 'single_end', 'type':'boolean', 'required':True, 'label': 'As single end: ', 'help_text': 'Considered a paired-end bam as single-end (default: False, namely whole-fragment densities instead of read densities)', 'value': False},
{'id': 'stranded', 'type':'boolean', 'required':True, 'label': 'As strand-specific: ', 'help_text': 'If the sequencing protocol was paired-end strand-specific, generate plus and minus densities (default: False)', 'value': False}
]
out_parameters = [{'id': 'density_merged', 'type': 'track'},
{'id': 'density_fwd', 'type': 'track'},
{'id': 'density_rev', 'type': 'track'},
{'id': 'density_plus_merged', 'type': 'track'},
{'id': 'density_plus_fwd', 'type': 'track'},
{'id': 'density_plus_rev', 'type': 'track'},
{'id': 'density_minus_merged', 'type': 'track'},
{'id': 'density_minus_fwd', 'type': 'track'},
{'id': 'density_minus_rev', 'type': 'track'}]
class Bam2DensityForm(BaseForm):
    """Web form collecting the bam2density parameters (mirrors in_parameters)."""

    class BamMulti(twb.BsMultiple):
        # Group allowing several main/test BAM files to be uploaded.
        # NOTE(review): indentation was lost in this copy; 'sample' is assumed
        # to live inside BamMulti and the remaining fields at form level —
        # confirm against the original file.
        label='Test BAMs: '
        sample = twb.BsFileField(label=' ',
                                 help_text='Select main bam file(s)',
                                 validator=twb.BsFileFieldValidator(required=True))

    # Optional control BAM used for enrichment computation.
    control = twb.BsFileField(label='Control BAM: ',
                              help_text='Select control bam file to compute enrichment',
                              validator=twb.BsFileFieldValidator(required=False))
    format = twf.SingleSelectField(label='Output format: ',
                                   options=["sql", "bedGraph", "bigWig"],
                                   prompt_text=None,
                                   help_text='Format of the output file')
    normalization = twf.TextField(label='Normalization: ',
                                  validator=twc.IntValidator(),
                                  help_text='Normalization factor, default is total number of reads')
    # Negative values mean "do not merge strands".
    merge_strands = twf.TextField(label='Shift and merge strands: ',
                                  validator=twc.IntValidator(),
                                  value=-1,
                                  help_text='Shift value (in bp) if you want to merge strand-specific densities (will not merge if negative)')
    # Negative values mean "use the read length".
    read_extension = twf.TextField(label='Read extension: ',
                                   validator=twc.IntValidator(),
                                   value=-1,
                                   help_text='Read extension (in bp) to be applied when constructing densities (will use read length if negative)')
    single_end = twf.CheckBox(label='As single end: ',
                              value=False,
                              help_text='Considered a paired-end bam as single-end (default: False, namely whole-fragment densities instead of read densities)')
    no_nh_flag = twf.CheckBox(label='Do not use NH flag: ',
                              value=False,
                              help_text='Do not use NH (multiple mapping counts) as weights')
    stranded = twf.CheckBox(label='As strand-specific: ',
                            value=False,
                            help_text='If the sequencing protocol was paired-end strand-specific, generate plus and minus densities (default: False)')
    submit = twf.SubmitButton(id="submit", value='bam2density')
class Bam2DensityPlugin(BasePlugin):
    """From a BAM file, creates a track file of the read count/density along the whole genome,
    in the chosen format.
    Read counts are divided by 10^-7 times the normalization factor (which is total number of reads by default).
    Positive and negative strand densities are generated and optionally merged (averaged) if a
    shift value >=0 is given. The read extension is the number of basepairs a read will cover,
    starting from its most 5' position (e.g. with a read extension of 1, only the starting position of
    each alignment will be considered, default is read length).
    """
    info = {
        'title': 'Genome-wide reads density from BAM',
        'description': __doc__,
        'path': ['Files', 'Bam2density'],
        # 'output': Bam2DensityForm,
        'in': in_parameters,
        'out': out_parameters,
        'meta': meta,
    }

    @staticmethod
    def _int_param(kw, key, default=-1):
        """Return form parameter `key` as an int, or `default` if missing/invalid."""
        try:
            return int(kw.get(key))
        except (ValueError, TypeError):
            return default

    @staticmethod
    def _bool_param(kw, key):
        """Return checkbox parameter `key` as a bool (web forms may send strings)."""
        val = kw.get(key, False)
        if isinstance(val, basestring):
            val = (val.lower() in ['1', 'true', 't', 'on'])
        return val

    def __call__(self, **kw):
        """Run bam2density on the selected BAM file(s) and register output tracks.

        Returns the plugin's timing summary (self.display_time()).
        """
        b2wargs = []
        control = None
        # Main (test) BAM files; keep only those that actually exist.
        samples = kw.get('sample', [])
        if not isinstance(samples, list):
            samples = [samples]
        samples = {'_': [os.path.abspath(s) for s in samples if os.path.exists(s)]}
        # Optional control BAM used to compute enrichment.
        if kw.get('control'):
            control = kw['control']
            b2wargs = ["-c", str(control)]
            assert os.path.exists(str(control)), "Control file not found: '%s'." % control
            control = os.path.abspath(control)
        nreads = self._int_param(kw, 'normalization')
        bamfiles = [track(s, format='bam') for s in samples['_']]
        if nreads < 0:
            # No explicit normalization: let bam_to_density use total read counts.
            _nreads = [0] * len(samples['_'])
            if control is not None:
                b2wargs += ["-r"]
        else:
            _nreads = [nreads for s in samples['_']]
        merge_strands = self._int_param(kw, 'merge_strands')
        read_extension = self._int_param(kw, 'read_extension')
        single_end = self._bool_param(kw, 'single_end')
        no_nh = self._bool_param(kw, 'no_nh_flag')
        if no_nh:
            b2wargs += ["--no_nh"]
        output = {'_': [self.temporary_path(fname=b.name + '_density_') for b in bamfiles]}
        stranded = self._bool_param(kw, 'stranded')
        if stranded:
            # Strand-specific protocol: split each paired-end BAM into a
            # plus-strand and a minus-strand BAM before computing densities.
            output = {'_plus_': [], '_minus_': []}
            samples = {'_plus_': [], '_minus_': []}
            trout = {}
            for bam in bamfiles:
                for orient in output:
                    bamname = self.temporary_path(fname=bam.name + orient + ".bam")
                    trout[orient] = pysam.Samfile(bamname, "wb", template=bam.filehandle)
                    samples[orient].append(os.path.abspath(bamname))
                    outname = self.temporary_path(fname=bam.name + orient)
                    output[orient].append(os.path.abspath(outname))
                bam.open()
                for read in bam.filehandle:
                    # Only properly paired reads contribute.
                    if not (read.is_paired and read.is_proper_pair):
                        continue
                    if (read.is_read1 and read.is_reverse) or (read.is_read2 and read.mate_is_reverse):
                        trout['_plus_'].write(read)
                    elif (read.is_read2 and read.is_reverse) or (read.is_read1 and read.mate_is_reverse):
                        trout['_minus_'].write(read)
                # BUGFIX: the original used a bare generator expression
                # `(t.close() for t in trout.values())` which was never
                # iterated, so the split BAM files were never closed/flushed.
                for t in trout.values():
                    t.close()
        out_format = kw.get('output', 'sql')
        info = {'datatype': 'quantitative', 'read_extension': read_extension}
        if merge_strands >= 0:
            suffixes = ["merged"]
            info['shift'] = merge_strands
        else:
            suffixes = ["fwd", "rev"]
        chrmeta = bamfiles[0].chrmeta
        files = {}
        # Compute densities for every (orientation, sample) pair.
        with execution(None) as ex:
            files = dict((o, [bam_to_density(ex, s, output[o][n], nreads=_nreads[n],
                                             merge=merge_strands,
                                             read_extension=read_extension,
                                             sql=True, se=single_end, args=b2wargs)
                              for n, s in enumerate(sample)])
                         for o, sample in samples.items())
        for suf in suffixes:
            all_s_files = dict((o, [x for y in f for x in y if x.endswith(suf + ".sql")])
                               for o, f in files.items())
            for orient, sfiles in all_s_files.iteritems():
                if len(sfiles) > 1:
                    # Several samples: average their densities per chromosome.
                    x = self.temporary_path(fname="Density_average" + orient + suf + ".sql")
                    tsql = track(x, fields=['start', 'end', 'score'], chrmeta=chrmeta, info=info)
                    insql = []
                    for f in sfiles:
                        t = track(f, format='sql', fields=['start', 'end', 'score'],
                                  chrmeta=chrmeta, info=info)
                        t.save()
                        insql.append(t)
                    for c in tsql.chrmeta:
                        tsql.write(merge_scores([t.read(c) for t in insql]), chrom=c)
                else:
                    x = sfiles[0]
                    tsql = track(x, format='sql', fields=['start', 'end', 'score'],
                                 chrmeta=chrmeta, info=info)
                    tsql.save()
                if out_format in [None, "sql"]:
                    outname = x
                else:
                    outname = os.path.splitext(x)[0] + "." + out_format
                    convert(x, outname, mode="overwrite")
                self.new_file(outname, 'density' + orient + suf)
        return self.display_time()
|
bbcf/bsPlugins
|
bsPlugins/Bam2Density.py
|
Python
|
gpl-3.0
| 11,177
|
[
"pysam"
] |
dd99dfd20f8491a26bdf05d88ee7eab3a57d2f1e89bbc44f243697e7d9818313
|
import vtk
from vtk.util.colors import *
# Wireframe cube: extract the cube's edges and render them as tubes.
cubeSource = vtk.vtkCubeSource()
cubeSource.SetBounds(-5, 5, -5, 5, -5, 5)
ee = vtk.vtkExtractEdges()
ee.SetInput(cubeSource.GetOutput())
cubeTube = vtk.vtkTubeFilter()
cubeTube.SetRadius(0.1)
cubeTube.SetNumberOfSides(20)
cubeTube.UseDefaultNormalOn()
cubeTube.SetDefaultNormal(.5, .5, .5)
cubeTube.SetInput(ee.GetOutput())
cubeMapper = vtk.vtkPolyDataMapper()
cubeMapper.SetInput(cubeTube.GetOutput())
cubeActor = vtk.vtkActor()
cubeActor.SetMapper(cubeMapper)
cubeActor.GetProperty().SetDiffuseColor(lamp_black)
cubeActor.GetProperty().SetSpecular(0.4)
cubeActor.GetProperty().SetSpecularPower(10)
# Second actor renders the same cube as solid faces.
cubeMapper2 = vtk.vtkPolyDataMapper()
cubeMapper2.SetInput(cubeSource.GetOutput())
cubeActor2 = vtk.vtkActor()
cubeActor2.SetMapper(cubeMapper2)
cubeActor2.GetProperty().SetDiffuseColor(banana)
#cubeActor2.GetProperty().SetOpacity(0.9)
# Sphere marking the current point-widget position.
sphereSource = vtk.vtkSphereSource()
sphereSource.SetPhiResolution(32)
sphereSource.SetThetaResolution(32)
sphereSource.SetRadius(0.5)
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInput(sphereSource.GetOutput())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
sphereActor.GetProperty().SetDiffuseColor(tomato)
# Three half-transparent planes through the widget position, one per axis.
planeSize = 10
planeSources = [vtk.vtkPlaneSource() for dummy in range(3)]
for planeSource in planeSources:
    planeSource.SetOrigin(planeSize,planeSize,0)
# orthogonal to z axis
planeSources[0].SetPoint1(-planeSize, planeSize, 0)
planeSources[0].SetPoint2(planeSize, -planeSize, 0)
# orthogonal to y axis
planeSources[1].SetPoint1(-planeSize,planeSize,0)
planeSources[1].SetPoint2(planeSize,planeSize,planeSize*2)
# orthogonal to x axis
planeSources[2].SetPoint1(planeSize, planeSize, planeSize*2)
planeSources[2].SetPoint2(planeSize, -planeSize, 0)
planeMappers = [vtk.vtkPolyDataMapper() for dummy in planeSources]
for planeMapper,planeSource in zip(planeMappers, planeSources):
    planeMapper.SetInput(planeSource.GetOutput())
# Renderer and scene assembly.
aRenderer = vtk.vtkRenderer()
aRenderer.SetBackground(slate_grey)
aRenderer.AddActor(cubeActor)
aRenderer.AddActor(cubeActor2)
aRenderer.AddActor(sphereActor)
# Plane actors: translucent green planes through the widget position.
planeActors = [vtk.vtkActor() for dummy in planeMappers]
for planeActor,planeMapper in zip(planeActors,planeMappers):
    planeActor.SetMapper(planeMapper)
    #planeActor.GetProperty().SetDiffuseColor(banana)
    planeActor.GetProperty().SetOpacity(.2)
    planeActor.GetProperty().SetAmbientColor(0,1,0)
    planeActor.GetProperty().SetAmbient(1)
    aRenderer.AddActor(planeActor)
# Cut pipeline per plane: implicit plane -> cutter -> stripper -> tube,
# drawing the cube/plane intersection curves as black tubes.
planes = [vtk.vtkPlane() for dummy in planeSources]
cutters = [vtk.vtkCutter() for dummy in planes]
strippers = [vtk.vtkStripper() for dummy in cutters]
cutTubes = [vtk.vtkTubeFilter() for dummy in cutters]
cutMappers = [vtk.vtkPolyDataMapper() for dummy in cutters]
cutActors = [vtk.vtkActor() for dummy in cutters]
for plane,cutter,planeSource,stripper,cutTube,cutMapper,cutActor in zip(
        planes,cutters,planeSources,strippers,cutTubes,cutMappers,cutActors):
    cutter.SetCutFunction(plane)
    cutter.SetInput(cubeSource.GetOutput())
    stripper.SetInput(cutter.GetOutput())
    cutTube.SetRadius(0.1)
    cutTube.SetNumberOfSides(20)
    cutTube.UseDefaultNormalOn()
    cutTube.SetDefaultNormal(.5,.5,.5)
    cutTube.SetInput(stripper.GetOutput())
    cutMapper.SetInput(cutTube.GetOutput())
    cutActor.SetMapper(cutMapper)
    cutActor.GetProperty().SetDiffuseColor(lamp_black)
    cutActor.GetProperty().SetSpecular(0.4)
    cutActor.GetProperty().SetSpecularPower(10)
    aRenderer.AddActor(cutActor)
# Render window, interactor and the draggable point widget.
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(aRenderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
pw = vtk.vtkPointWidget()
# Allow the widget to move within twice the plane extent in each direction.
pw.PlaceWidget(-planeSize*2, planeSize*2, -planeSize*2, planeSize*2, -planeSize*2, planeSize*2)
pw.SetPosition(planeSize,planeSize,0)
pw.SetInteractor(iren)
pw.AllOff()
pw.On()
def observerPW(widget, eventName):
    """Point-widget callback: keep the sphere and cut planes on the widget."""
    # Move the marker sphere to the widget's current position.
    sphereActor.SetPosition(widget.GetPosition())
    # Re-center each plane on the widget and update the matching cutter's
    # implicit plane so the intersection tubes follow.
    for src, cut in zip(planeSources, cutters):
        src.SetCenter(widget.GetPosition())
        fn = cut.GetCutFunction()
        fn.SetNormal(src.GetNormal())
        fn.SetOrigin(src.GetOrigin())
# init: sync scene with the widget's starting position.
observerPW(pw, '')
# connect the callback, then start the interactive loop.
pw.AddObserver('InteractionEvent', observerPW)
renWin.Render()
iren.Start()
|
zhangfangyan/devide.vtkdevide
|
Examples/Rendering/Python/pbtf.py
|
Python
|
bsd-3-clause
| 4,366
|
[
"VTK"
] |
d0a014c2ecfa38929488956cffb25d0a906427b36726ebcb9e51a00e1f46efa6
|
import h5py
import numpy as np
import pytest
import pysisyphus
from pysisyphus.calculators.OverlapCalculator import OverlapCalculator
from pysisyphus.calculators.PySCF import PySCF
from pysisyphus.helpers import geom_loader
from pysisyphus.helpers_pure import describe
from pysisyphus.init_logging import init_logging
from pysisyphus.testing import using
@pytest.mark.parametrize(
    "cdds, fallback, mwfn, jmol",
    [
        (None, None, True, True),
        ("calc", None, False, True),
        ("render", "calc", True, False),
        ("render", None, False, False),
    ],
)
def test_cdds_fallback(cdds, fallback, mwfn, jmol, monkeypatch):
    """Requested 'cdds' mode must fall back when Mwfn/Jmol are unavailable.

    `mwfn`/`jmol` simulate whether the external tools are found; `fallback`
    is the mode the calculator is expected to end up with.
    """
    # Disable Mwfn/Jmol as requested
    def mock_get_cmd(calculator):
        return {
            "jmol": jmol,
            "mwfn": mwfn,
        }[calculator]

    monkeypatch.setattr(
        pysisyphus.calculators.OverlapCalculator, "get_cmd", mock_get_cmd
    )
    calc_kwargs = {
        "cdds": cdds,
    }
    calc = OverlapCalculator(**calc_kwargs)
    # The constructor resolves the effective cdds mode.
    assert calc.cdds == fallback
@pytest.fixture
def water():
    """Water geometry wired to a PySCF TDDFT calculator that tracks overlaps."""
    geom = geom_loader("lib:h2o.xyz")
    init_logging()
    calc = PySCF(
        xc="pbe0",
        method="tddft",
        basis="sto3g",
        nstates=2,
        root=1,
        # OverlapCalculator-specific options
        track=True,
        cdds="calc",
        ovlp_type="tden",
    )
    geom.set_calculator(calc)
    return geom
@using("pyscf")
def test_mwfn_crash_fallback(water, monkeypatch):
    """A crashing CDD cube calculation must disable 'cdds' instead of raising."""
    calc = water.calculator
    calc.cdds = "calc"

    # Mock method to ensure the CDD calculation always crashes.
    def mock_calc_cdd_cube(*args):
        raise Exception("Mocked Multiwfn crash!")

    monkeypatch.setattr(OverlapCalculator, "calc_cdd_cube", mock_calc_cdd_cube)

    # First evaluation triggers the (crashing) CDD calculation; the results
    # themselves are irrelevant, so don't keep them in named locals.
    _ = water.energy
    # Force recalculation
    water.clear()
    _ = water.energy
    # Check that CDD calculation was disabled, after calc_cdds_crashed.
    # BUGFIX: compare to None with `is` (PEP 8 / E711), not `==`.
    assert calc.cdds is None
@pytest.mark.parametrize(
    "h5_fn",
    [
        "cytosin_orca_overlap_data.h5",
        "cytosin_trip_orca_overlap_data.h5",
    ],
)
def test_tden_self_overlap(h5_fn, this_dir):
    """Transition-density overlap of a state set with itself is the identity."""
    # `this_dir` is a fixture resolving to this test module's directory.
    with h5py.File(this_dir / h5_fn, "r") as handle:
        mo_coeffs = handle["mo_coeffs"][:]
        ci_coeffs = handle["ci_coeffs"][:]
    calc = OverlapCalculator()

    def tden_self_overlap(mo_coeffs, ci_coeffs):
        ao_ovlp = calc.get_sao_from_mo_coeffs(mo_coeffs)
        ci_norm = np.linalg.norm(ci_coeffs, axis=(1, 2))
        # NOTE(review): this first mo_norm is never used (only the
        # recomputation below feeds the commented-out print) — candidate
        # for removal if get_mo_norms is side-effect free.
        mo_norm = calc.get_mo_norms(mo_coeffs, ao_ovlp)
        # Normalize CI coefficients and re-normalize the MOs.
        ci_coeffs = ci_coeffs / ci_norm[:, None, None]
        mo_coeffs = calc.renorm_mos(mo_coeffs, ao_ovlp)
        ci_norm = np.linalg.norm(ci_coeffs, axis=(1, 2))
        mo_norm = calc.get_mo_norms(mo_coeffs, ao_ovlp)
        # print(f"norm(CI): {ci_norm}")
        # print(f"norm(MOs): {describe(mo_norm)}")
        overlaps = calc.tden_overlaps(
            mo_coeffs, ci_coeffs, mo_coeffs, ci_coeffs, ao_ovlp
        )
        return overlaps

    # Every state set must overlap with itself as the identity matrix.
    for i, (moc, cic) in enumerate(zip(mo_coeffs, ci_coeffs)):
        ovlps = tden_self_overlap(moc, cic)
        I = np.eye(ovlps.shape[0])
        np.testing.assert_allclose(ovlps, I, atol=5e-5)
|
eljost/pysisyphus
|
tests/test_overlap_calculator/test_overlap_calculator.py
|
Python
|
gpl-3.0
| 3,247
|
[
"Jmol",
"PySCF"
] |
55b0ac84dce3986195ab6cfb740b0016ad9778b13738aadea2a3bc62442db689
|
# Copyright (C) 2015-2020 The Software Heritage developers
# See the AUTHORS file at the top-level directory of this distribution
# License: GNU Affero General Public License version 3, or any later version
# See top-level LICENSE file for more information
from collections import defaultdict
import functools
from functools import wraps
import os
import re
import textwrap
from typing import List, Optional

import docutils.nodes
import docutils.parsers.rst
import docutils.utils

from rest_framework.decorators import api_view

from swh.web.api.apiresponse import make_api_response
from swh.web.api.apiurls import APIUrls
from swh.web.common.utils import parse_rst
class _HTTPDomainDocVisitor(docutils.nodes.NodeVisitor):
    """
    docutils visitor for walking on a parsed docutils document containing sphinx
    httpdomain roles. Its purpose is to extract relevant info regarding swh
    api endpoints (for instance url arguments) from their docstring written
    using sphinx httpdomain; and produce the main description back into a ReST
    string.

    Each visit_* method returns the ReST rendering of its node; field-list
    nodes instead record structured data into self.data and return "".
    """

    # httpdomain roles we want to parse (based on sphinxcontrib.httpdomain 1.6)
    parameter_roles = ("param", "parameter", "arg", "argument")
    request_json_object_roles = ("reqjsonobj", "reqjson", "<jsonobj", "<json")
    request_json_array_roles = ("reqjsonarr", "<jsonarr")
    response_json_object_roles = ("resjsonobj", "resjson", ">jsonobj", ">json")
    response_json_array_roles = ("resjsonarr", ">jsonarr")
    query_parameter_roles = ("queryparameter", "queryparam", "qparam", "query")
    request_header_roles = ("<header", "reqheader", "requestheader")
    response_header_roles = (">header", "resheader", "responseheader")
    status_code_roles = ("statuscode", "status", "code")

    def __init__(self, document, data):
        super().__init__(document)
        # `data`: endpoint-description dict filled in place while visiting.
        self.data = data
        # De-duplication sets, one per documented category.
        self.args_set = set()
        self.params_set = set()
        self.inputs_set = set()
        self.returns_set = set()
        self.status_codes_set = set()
        self.reqheaders_set = set()
        self.resheaders_set = set()
        # Last json object entry seen, and last field-list field name.
        self.current_json_obj = None
        self.current_field_name = None

    def _default_visit(self, node: docutils.nodes.Element) -> str:
        """Simply visits a text node, drops its start and end tags, visits
        the children, and concatenates their results."""
        return "".join(map(self.dispatch_visit, node.children))

    def visit_emphasis(self, node: docutils.nodes.emphasis) -> str:
        return f"*{self._default_visit(node)}*"

    def visit_strong(self, node: docutils.nodes.emphasis) -> str:
        return f"**{self._default_visit(node)}**"

    def visit_reference(self, node: docutils.nodes.reference) -> str:
        # External references become inline links; internal ones named refs.
        text = self._default_visit(node)
        refuri = node.attributes.get("refuri")
        if refuri is not None:
            return f"`{text} <{refuri}>`__"
        else:
            return f"`{text}`_"

    def visit_target(self, node: docutils.nodes.reference) -> str:
        parts = ["\n"]
        parts.extend(
            f".. _{name}: {node.attributes['refuri']}"
            for name in node.attributes["names"]
        )
        return "\n".join(parts)

    def visit_literal(self, node: docutils.nodes.literal) -> str:
        return f"``{self._default_visit(node)}``"

    def visit_field_name(self, node: docutils.nodes.field_name) -> str:
        # Remember the field name; visit_field_body consumes it.
        self.current_field_name = node.astext()
        return ""

    def visit_field_body(self, node: docutils.nodes.field_body) -> str:
        text = self._default_visit(node).strip()
        assert text, str(node)
        # Field name format: "<role> [<type>] <name>" (split on spaces).
        field_data = self.current_field_name.split(" ")
        # Parameters
        if field_data[0] in self.parameter_roles:
            if field_data[2] not in self.args_set:
                self.data["args"].append(
                    {"name": field_data[2], "type": field_data[1], "doc": text}
                )
                self.args_set.add(field_data[2])
        # Query Parameters
        if field_data[0] in self.query_parameter_roles:
            if field_data[2] not in self.params_set:
                self.data["params"].append(
                    {"name": field_data[2], "type": field_data[1], "doc": text}
                )
                self.params_set.add(field_data[2])
        # Request data type
        if (
            field_data[0] in self.request_json_array_roles
            or field_data[0] in self.request_json_object_roles
        ):
            # array
            if field_data[0] in self.request_json_array_roles:
                self.data["input_type"] = "array"
            # object
            else:
                self.data["input_type"] = "object"
            # input object field
            if field_data[2] not in self.inputs_set:
                self.data["inputs"].append(
                    {"name": field_data[2], "type": field_data[1], "doc": text}
                )
                self.inputs_set.add(field_data[2])
                self.current_json_obj = self.data["inputs"][-1]
        # Response type
        if (
            field_data[0] in self.response_json_array_roles
            or field_data[0] in self.response_json_object_roles
        ):
            # array
            if field_data[0] in self.response_json_array_roles:
                self.data["return_type"] = "array"
            # object
            else:
                self.data["return_type"] = "object"
            # returned object field
            if field_data[2] not in self.returns_set:
                self.data["returns"].append(
                    {"name": field_data[2], "type": field_data[1], "doc": text}
                )
                self.returns_set.add(field_data[2])
                self.current_json_obj = self.data["returns"][-1]
        # Status Codes
        if field_data[0] in self.status_code_roles:
            if field_data[1] not in self.status_codes_set:
                self.data["status_codes"].append({"code": field_data[1], "doc": text})
                self.status_codes_set.add(field_data[1])
        # Request Headers
        if field_data[0] in self.request_header_roles:
            if field_data[1] not in self.reqheaders_set:
                self.data["reqheaders"].append({"name": field_data[1], "doc": text})
                self.reqheaders_set.add(field_data[1])
        # Response Headers
        if field_data[0] in self.response_header_roles:
            if field_data[1] not in self.resheaders_set:
                resheader = {"name": field_data[1], "doc": text}
                self.data["resheaders"].append(resheader)
                self.resheaders_set.add(field_data[1])
                # Binary responses are flagged via their Content-Type.
                if (
                    resheader["name"] == "Content-Type"
                    and resheader["doc"] == "application/octet-stream"
                ):
                    self.data["return_type"] = "octet stream"
        # Don't return anything in the description; these nodes only add text
        # to other fields
        return ""

    # We ignore these nodes and handle their subtrees directly in
    # visit_field_name and visit_field_body
    visit_field = visit_field_list = _default_visit

    def visit_paragraph(self, node: docutils.nodes.paragraph) -> str:
        """
        Visit relevant paragraphs to parse
        """
        # only parsed top level paragraphs
        text = self._default_visit(node)
        return "\n\n" + text

    def visit_literal_block(self, node: docutils.nodes.literal_block) -> str:
        """
        Visit literal blocks
        """
        text = node.astext()
        return f"\n\n::\n\n{textwrap.indent(text, '   ')}\n"

    def visit_bullet_list(self, node: docutils.nodes.bullet_list) -> str:
        parts = ["\n\n"]
        for child in node.traverse():
            # process list item
            if isinstance(child, docutils.nodes.paragraph):
                line_text = self.dispatch_visit(child)
                parts.append("\t* %s\n" % textwrap.indent(line_text, "\t ").strip())
        return "".join(parts)

    # visit_bullet_list collects and handles this with a more global view:
    visit_list_item = _default_visit

    def visit_warning(self, node: docutils.nodes.warning) -> str:
        text = self._default_visit(node)
        return "\n\n.. warning::\n%s\n" % textwrap.indent(text, "\t")

    def visit_Text(self, node: docutils.nodes.Text) -> str:
        """Leaf node"""
        return str(node).replace("\n", " ")  # Prettier in generated HTML

    def visit_problematic(self, node: docutils.nodes.problematic) -> str:
        # api urls cleanup to generate valid links afterwards
        text = self._default_visit(node)
        subs_made = 1
        while subs_made:
            (text, subs_made) = re.subn(r"(:http:.*)(\(\w+\))", r"\1", text)
        subs_made = 1
        while subs_made:
            (text, subs_made) = re.subn(r"(:http:.*)(\[.*\])", r"\1", text)
        text = re.sub(r"([^:])//", r"\1/", text)
        # transform references to api endpoints doc into valid rst links
        text = re.sub(":http:get:`([^,`]*)`", r"`\1 <\1doc/>`_", text)
        # transform references to some elements into bold text
        text = re.sub(":http:header:`(.*)`", r"**\1**", text)
        text = re.sub(":func:`(.*)`", r"**\1**", text)
        # extract example urls
        if ":swh_web_api:" in text:
            # Extract examples to their own section
            examples_str = re.sub(":swh_web_api:`(.+)`.*", r"/api/1/\1", text)
            self.data["examples"] += examples_str.split("\n")
        return text

    def visit_block_quote(self, node: docutils.nodes.block_quote) -> str:
        return self._default_visit(node)
        # NOTE(review): everything below is unreachable (dead code after the
        # return above) — looks like an abandoned attempt to render block
        # quotes as code blocks; confirm intent and remove.
        return (
            f".. code-block::\n"
            f"{textwrap.indent(self._default_visit(node), '    ')}\n"
        )

    def visit_title_reference(self, node: docutils.nodes.title_reference) -> str:
        text = self._default_visit(node)
        raise Exception(
            f"Unexpected title reference. "
            f"Possible cause: you used `{text}` instead of ``{text}``"
        )

    def visit_document(self, node: docutils.nodes.document) -> None:
        text = self._default_visit(node)
        # Strip examples; they are displayed separately
        text = re.split("\n\\*\\*Examples?:\\*\\*\n", text)[0]
        self.data["description"] = text.strip()

    def unknown_visit(self, node) -> str:
        raise NotImplementedError(
            f"Unknown node type: {node.__class__.__name__}. Value: {node}"
        )

    def unknown_departure(self, node):
        # Departures carry no information for this visitor.
        pass
def _parse_httpdomain_doc(doc, data):
    """Parse an endpoint docstring written with sphinx httpdomain markup.

    Fills `data` in place: url rules/methods from the httpdomain directive
    lines, everything else via _HTTPDomainDocVisitor.
    """
    doc_lines = doc.split("\n")
    doc_lines_filtered = []
    urls = defaultdict(list)
    # Methods implicitly supported by every endpoint.
    default_http_methods = ["HEAD", "OPTIONS"]
    # httpdomain is a sphinx extension that is unknown to docutils but
    # fortunately we can still parse its directives' content,
    # so remove lines with httpdomain directives before executing the
    # rst parser from docutils
    for doc_line in doc_lines:
        if ".. http" not in doc_line:
            doc_lines_filtered.append(doc_line)
        else:
            url = doc_line[doc_line.find("/") :]
            # emphasize url arguments for html rendering
            url = re.sub(r"\((\w+)\)", r" **\(\1\)** ", url)
            method = re.search(r"http:(\w+)::", doc_line).group(1)
            urls[url].append(method.upper())
    for url, methods in urls.items():
        data["urls"].append({"rule": url, "methods": methods + default_http_methods})
    # parse the rst docstring and do not print system messages about
    # unknown httpdomain roles
    document = parse_rst("\n".join(doc_lines_filtered), report_level=5)
    # remove the system_message nodes from the parsed document
    for node in document.traverse(docutils.nodes.system_message):
        node.parent.remove(node)
    # visit the document nodes to extract relevant endpoint info
    visitor = _HTTPDomainDocVisitor(document, data)
    document.walkabout(visitor)
class APIDocException(Exception):
    """Signal an error in the use of the APIDoc decorators."""
def api_doc(
    route: str, noargs: bool = False, tags: List[str] = [], api_version: str = "1",
):
    """Decorate an API endpoint to auto-generate its HTML documentation view.

    The documentation is built from the endpoint docstring, which must
    follow the sphinxcontrib-httpdomain format.

    Args:
        route: documentation page's route
        noargs: set to True if the route has no arguments, and its
            result should be displayed anytime its documentation
            is requested. Default to False
        tags: Further information on api endpoints. Two values are
            possibly expected:

                * hidden: remove the entry points from the listing
                * upcoming: display the entry point but it is not followable

        api_version: api version string
    """
    tag_set = set(tags)
    # @api_doc() decorator call
    def decorator(f):
        # hidden endpoints are documented but never listed in the index
        if "hidden" not in tag_set:
            description = get_doc_data(f, route, noargs)["description"]
            # the index entry keeps only the first sentence
            APIUrls.add_doc_route(
                route,
                re.split(r"\.\s", description)[0],
                noargs=noargs,
                api_version=api_version,
                tags=tag_set,
            )
        # dedicated view rendering the endpoint's HTML documentation
        @api_view(["GET", "HEAD"])
        @wraps(f)
        def doc_view(request):
            return make_api_response(request, None, get_doc_data(f, route, noargs))
        route_name = "%s-doc" % route[1:-1].replace("/", "-")
        urlpattern = f"^{api_version}{route}doc/$"
        APIUrls.add_url_pattern(
            urlpattern, doc_view, "api-%s-%s" % (api_version, route_name)
        )
        @wraps(f)
        def documented_view(request, **kwargs):
            doc_data = get_doc_data(f, route, noargs)
            try:
                return {"data": f(request, **kwargs), "doc_data": doc_data}
            except Exception as exc:
                # attach the doc data so error handlers can still render it
                exc.doc_data = doc_data
                raise exc
        return documented_view
    return decorator
@functools.lru_cache(maxsize=32)
def get_doc_data(f, route, noargs):
    """
    Build documentation data for the decorated api endpoint function.

    Results are memoized per (f, route, noargs) since docstrings do not
    change at runtime.

    Raises:
        APIDocException: if *f* has no docstring to document.
    """
    # schema of the documentation data consumed by the doc templates
    data = {
        "description": "",
        "response_data": None,
        "urls": [],
        "args": [],
        "params": [],
        "input_type": "",
        "inputs": [],
        "resheaders": [],
        "reqheaders": [],
        "return_type": "",
        "returns": [],
        "status_codes": [],
        "examples": [],
        "route": route,
        "noargs": noargs,
    }
    if not f.__doc__:
        raise APIDocException(
            "apidoc: expected a docstring" " for function %s" % (f.__name__,)
        )
    # use raw docstring as endpoint documentation if sphinx
    # httpdomain is not used
    if ".. http" not in f.__doc__:
        data["description"] = f.__doc__
    # else parse the sphinx httpdomain docstring with docutils
    # (except when building the swh-web documentation through autodoc
    # sphinx extension, not needed and raise errors with sphinx >= 1.7)
    elif "SWH_DOC_BUILD" not in os.environ:
        _parse_httpdomain_doc(f.__doc__, data)
    # process input/returned object info for nicer html display
    inputs_list = ""
    returns_list = ""
    for inp in data["inputs"]:
        # special case for array of non object type, for instance
        # :<jsonarr string -: an array of string
        if inp["name"] != "-":
            inputs_list += "\t* **%s (%s)**: %s\n" % (
                inp["name"],
                inp["type"],
                textwrap.indent(inp["doc"], "\t "),
            )
    for ret in data["returns"]:
        # special case for array of non object type, for instance
        # :>jsonarr string -: an array of string
        if ret["name"] != "-":
            returns_list += "\t* **%s (%s)**: %s\n" % (
                ret["name"],
                ret["type"],
                textwrap.indent(ret["doc"], "\t "),
            )
    data["inputs_list"] = inputs_list
    data["returns_list"] = returns_list
    return data
DOC_COMMON_HEADERS = """
:reqheader Accept: the requested response content type,
    either ``application/json`` (default) or ``application/yaml``
:resheader Content-Type: this depends on :http:header:`Accept`
    header of request"""
DOC_RESHEADER_LINK = """
:resheader Link: indicates that a subsequent result page is
    available and contains the url pointing to it
"""
DEFAULT_SUBSTITUTIONS = {
    "common_headers": DOC_COMMON_HEADERS,
    "resheader_link": DOC_RESHEADER_LINK,
}
def format_docstring(**substitutions):
    """Decorator interpolating ``{placeholder}`` markers in a function's
    docstring.

    Lookups fall back to DEFAULT_SUBSTITUTIONS (shared request/response
    header blurbs); explicit keyword arguments take precedence.
    """
    def decorator(f):
        replacements = dict(DEFAULT_SUBSTITUTIONS)
        replacements.update(substitutions)
        f.__doc__ = f.__doc__.format(**replacements)
        return f
    return decorator
|
SoftwareHeritage/swh-web-ui
|
swh/web/api/apidoc.py
|
Python
|
agpl-3.0
| 17,293
|
[
"VisIt"
] |
46cdce01677145b7a7f6bc1bad199cdf136502086a2044a619161ed1e1a609f6
|
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
import CogHQLoader
from toontown.toonbase import ToontownGlobals
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from direct.fsm import State
from toontown.coghq import BossbotHQExterior
from toontown.coghq import BossbotHQBossBattle
from toontown.coghq import BossbotOfficeExterior
from toontown.coghq import CountryClubInterior
from pandac.PandaModules import DecalEffect, TextEncoder
import random
aspectSF = 0.7227
class BossbotCogHQLoader(CogHQLoader.CogHQLoader):
    """Loader StateData for Bossbot HQ.

    Extends the generic CogHQLoader with a 'countryClubInterior' state
    and handles loading/unloading of the Bossbot HQ exterior, lobby and
    tunnel-sign geometry.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('BossbotCogHQLoader')
    def __init__(self, hood, parentFSMState, doneEvent):
        CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent)
        # Add the country-club interior state and make it reachable from
        # every state that can lead into an interior.
        self.fsm.addState(State.State('countryClubInterior', self.enterCountryClubInterior, self.exitCountryClubInterior, ['quietZone', 'cogHQExterior']))
        for stateName in ['start', 'cogHQExterior', 'quietZone']:
            state = self.fsm.getStateNamed(stateName)
            state.addTransition('countryClubInterior')
        # One of three entry themes is picked at random per loader instance.
        self.musicFile = random.choice(['phase_12/audio/bgm/Bossbot_Entry_v1.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v2.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v3.ogg'])
        self.cogHQExteriorModelPath = 'phase_12/models/bossbotHQ/CogGolfHub'
        # NOTE(review): reuses a Lawbot HQ model for the factory exterior --
        # confirm this is intentional.
        self.factoryExteriorModelPath = 'phase_11/models/lawbotHQ/LB_DA_Lobby'
        self.cogHQLobbyModelPath = 'phase_12/models/bossbotHQ/CogGolfCourtyard'
        self.geom = None
    def load(self, zoneId):
        CogHQLoader.CogHQLoader.load(self, zoneId)
        Toon.loadBossbotHQAnims()
    def unloadPlaceGeom(self):
        # Drop the currently loaded place model (if any) before the base
        # class performs its own cleanup.
        if self.geom:
            self.geom.removeNode()
            self.geom = None
        CogHQLoader.CogHQLoader.unloadPlaceGeom(self)
    def loadPlaceGeom(self, zoneId):
        """Load the geometry matching *zoneId*'s hub zone."""
        self.notify.info('loadPlaceGeom: %s' % zoneId)
        # Round down to the hub zone; zones are allocated in blocks of 100.
        zoneId = zoneId - zoneId % 100
        self.notify.debug('zoneId = %d ToontownGlobals.BossbotHQ=%d' % (zoneId, ToontownGlobals.BossbotHQ))
        if zoneId == ToontownGlobals.BossbotHQ:
            self.geom = loader.loadModel(self.cogHQExteriorModelPath)
            # Rename the golf-zone tunnel so the link-tunnel code can find it.
            gzLinkTunnel = self.geom.find('**/LinkTunnel1')
            gzLinkTunnel.setName('linktunnel_gz_17000_DNARoot')
            self.makeSigns()
            top = self.geom.find('**/TunnelEntrance')
            origin = top.find('**/tunnel_origin')
            origin.setH(-33.33)
            self.geom.flattenMedium()
        elif zoneId == ToontownGlobals.BossbotLobby:
            if base.config.GetBool('want-qa-regression', 0):
                self.notify.info('QA-REGRESSION: COGHQ: Visit BossbotLobby')
            self.notify.debug('cogHQLobbyModelPath = %s' % self.cogHQLobbyModelPath)
            self.geom = loader.loadModel(self.cogHQLobbyModelPath)
            self.geom.flattenMedium()
        else:
            self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId)
        CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId)
    def makeSigns(self):
        """Attach localized street-name text to the exterior's sign nodes."""
        def makeSign(topStr, signStr, textId):
            top = self.geom.find('**/' + topStr)
            sign = top.find('**/' + signStr)
            locator = top.find('**/sign_origin')
            signText = DirectGui.OnscreenText(text=TextEncoder.upper(TTLocalizer.GlobalStreetNames[textId][-1]), font=ToontownGlobals.getSuitFont(), scale=TTLocalizer.BCHQLsignText, fg=(0, 0, 0, 1), parent=sign)
            signText.setPosHpr(locator, 0, -0.1, -0.25, 0, 0, 0)
            signText.setDepthWrite(0)
        makeSign('Gate_2', 'Sign_6', 10700)
        makeSign('TunnelEntrance', 'Sign_2', 1000)
        makeSign('Gate_3', 'Sign_3', 10600)
        makeSign('Gate_4', 'Sign_4', 10500)
        makeSign('GateHouse', 'Sign_5', 10200)
    def unload(self):
        CogHQLoader.CogHQLoader.unload(self)
        # NOTE(review): load() loads the *Bossbot* HQ anims but this calls
        # unloadSellbotHQAnims -- looks like a copy/paste slip; confirm
        # whether Toon.unloadBossbotHQAnims exists and should be used.
        Toon.unloadSellbotHQAnims()
    def enterStageInterior(self, requestStatus):
        # NOTE(review): StageInterior is not imported in this module's
        # visible imports; this handler would raise NameError if reached.
        self.placeClass = StageInterior.StageInterior
        self.stageId = requestStatus['stageId']
        self.enterPlace(requestStatus)
    def exitStageInterior(self):
        self.exitPlace()
        self.placeClass = None
        return
    def getExteriorPlaceClass(self):
        self.notify.debug('getExteriorPlaceClass')
        return BossbotHQExterior.BossbotHQExterior
    def getBossPlaceClass(self):
        self.notify.debug('getBossPlaceClass')
        return BossbotHQBossBattle.BossbotHQBossBattle
    def enterFactoryExterior(self, requestStatus):
        self.placeClass = BossbotOfficeExterior.BossbotOfficeExterior
        self.enterPlace(requestStatus)
    def exitFactoryExterior(self):
        taskMgr.remove('titleText')
        self.hood.hideTitleText()
        self.exitPlace()
        self.placeClass = None
        return
    def enterCogHQBossBattle(self, requestStatus):
        # Cheesy (gag) effects are forbidden for the duration of the battle.
        self.notify.debug('BossbotCogHQLoader.enterCogHQBossBattle')
        CogHQLoader.CogHQLoader.enterCogHQBossBattle(self, requestStatus)
        base.cr.forbidCheesyEffects(1)
    def exitCogHQBossBattle(self):
        self.notify.debug('BossbotCogHQLoader.exitCogHQBossBattle')
        CogHQLoader.CogHQLoader.exitCogHQBossBattle(self)
        base.cr.forbidCheesyEffects(0)
    def enterCountryClubInterior(self, requestStatus):
        self.placeClass = CountryClubInterior.CountryClubInterior
        self.notify.info('enterCountryClubInterior, requestStatus=%s' % requestStatus)
        self.countryClubId = requestStatus['countryClubId']
        self.enterPlace(requestStatus)
    def exitCountryClubInterior(self):
        self.exitPlace()
        self.placeClass = None
        del self.countryClubId
        return
|
ToontownUprising/src
|
toontown/coghq/BossbotCogHQLoader.py
|
Python
|
mit
| 5,759
|
[
"VisIt"
] |
8c65d9d09ddc5c7601c633dbfaadbf8c784310b59f93764049aeade5f7ad43cc
|
import numpy as np
from .deriv import pderiv3D
__all__ = ['avg_eqlat', 'epflux_eddyterms', 'epflux_all', 'theta']
def avg_equivlat(in_field, pv_field, n_lon, n_lat):
    r"""
    Average a 2-D input field along equivalent latitude bands
    using a global 2-D field of potential vorticity.

    Equivalent latitude is defined as

    .. math::
        \phi_e = \mathrm{arcsin}[1 - A/(2 \pi R^2)]

    where A is the area enclosed by the equivalent latitude band
    and R is the radius of the Earth.

    This function uses the 'piece-wise constant' method
    where PV is assumed to be constant within each grid box.

    Parameters
    ----------
    in_field: array_like
        The data to be averaged along equivalent latitude bands.
        Must be a 2-D array of shape (n_lon, n_lat).
    pv_field: array_like
        The data along the isolines of which in_field is averaged
        (in atmospheric sciences usually the potential vorticity);
        exactly the same shape as in_field.
    n_lon: int
        Number of longitudes; values are assumed evenly spaced,
        i.e. d_lon = 2*pi/n_lon.
    n_lat: int
        Number of latitudes, assumed equally divided between 90S and 90N.

    FIXME: currently only integer n_lon/n_lat are supported; generalise
    to accept 1-D coordinate arrays.

    Returns
    -------
    lat: ndarray
        Equivalent latitude values (radians), ordered 90N -> 90S.
    infield_eq: ndarray
        Values of in_field averaged along equivalent latitude bands.
    """
    PI = np.pi
    # grid characteristics
    n_grid = int(n_lon) * int(n_lat)
    phi = PI / n_lat
    # band edges: n_lat+1 latitude circles from the north pole southwards
    phih = 0.5 * PI - phi * np.arange(n_lat + 1)
    # area (steradians) of each grid box; constant along a latitude circle
    area_field = np.zeros([n_lon, n_lat])
    for j in range(n_lat):
        area_field[:, j] = 2 * PI * (np.sin(phih[j]) - np.sin(phih[j + 1])) / n_lon
    # sort all grid boxes by decreasing PV: the running cumulative area
    # then maps directly onto equivalent latitude (highest PV -> pole)
    ord_ind = np.argsort(pv_field, axis=None)[::-1]
    infield_ordered = in_field.flatten()[ord_ind]
    area_ordered = area_field.flatten()[ord_ind]
    # total area of each target equivalent-latitude band
    area_band = np.sum(area_field, axis=0)
    infield_eq = np.zeros(n_lat)
    ll = 0
    area_now = 0.0
    infield_tot = 0.0
    # accumulate boxes until a band's area is filled, then emit the
    # area-weighted average; the overshoot is carried into the next band
    for nn in range(n_grid):
        area_now += area_ordered[nn]
        infield_tot += area_ordered[nn] * infield_ordered[nn]
        if (area_now >= area_band[ll] or (nn == n_grid - 1)):
            infield_tot -= (area_now - area_band[ll]) * infield_ordered[nn]
            infield_eq[ll] = infield_tot / area_band[ll]
            infield_tot = (area_now - area_band[ll]) * infield_ordered[nn]
            area_now -= area_band[ll]
            ll += 1
    # in_field is averaged along eq. latitude bands from 90N - 90S
    # (legacy from times when we were mostly interested in NH)
    lat = PI / 2 - np.arange(n_lat) * phi
    return (lat, infield_eq)
def epflux_all(U, V, W, T, longitude, latitude, press, boa=None):
    """
    Calculate the Eliassen-Palm flux and related variables.

    Basic equations are 3.53a and 3.53b of "Middle Atmospheric Dynamics"
    by Andrews, Holton and Leovy (1987).

    Parameters
    ----------
    U, V, W, T : arrays on pressure levels, 3D, one file at a time
    boa : optional
        If set, output the data as a boa object.

    Notes
    -----
    Not implemented yet. See epflux_eddyterms for the eddy terms
    (<U>, <V>, <T>, <V'T'>, <U'V'>).
    """
    pass
def epflux_eddyterms():
    """Return only the eddy terms of the EP-flux computation.

    Not implemented yet.
    """
    pass
def epflux_boa(netcdf=False):
    """
    Input boa variables U, V, T.
    Output a boa variable; save to a netCDF file if *netcdf* is True.

    Not implemented yet. (Fixed: PEP8 keyword-default spacing and the
    'Ouput' typo in the original docstring.)
    """
    pass
def theta():
    """Convert values on pressure levels to potential temperature (theta).

    Not implemented yet.
    """
    pass
|
yl238/boa
|
boa/science/dynamics.py
|
Python
|
mit
| 4,113
|
[
"NetCDF"
] |
dd6099430015e7674f58d306f62f102ec9cd15dc2146d51be1efec23b7ab7070
|
from __main__ import vtk, qt, ctk, slicer
import string
class LabelStatisticsLogic:
    """This Logic is copied from the Label Statistics Module -Steve Pieper (Isomics)"""
    # NOTE(review): the string below is a second, no-op string expression,
    # not part of the class docstring (only the first literal is).
    """Implement the logic to calculate label statistics.
    Nodes are passed in as arguments.
    Results are stored as 'statistics' instance variable.
    """
    def __init__(self, grayscaleNode, labelNode, fileName=None):
        # Python 2 code: relies on the builtin reduce() and xrange().
        # For each label value present in labelNode, computes voxel count,
        # volume and intensity statistics of grayscaleNode within that label.
        volumeName = grayscaleNode.GetName()
        # column order used when presenting/exporting the statistics
        self.keys = ("Volume", "Curve Type", "Voxel Count", "Volume mm^3", "Volume cc", "Minimum Intensity", "Maximum Intensity", "Mean Intensity", "Standard Deviation")
        # voxel volume = product of the three spacing components
        cubicMMPerVoxel = reduce(lambda x,y: x*y, labelNode.GetSpacing())
        ccPerCubicMM = 0.001
        # results dict: 'Labels' lists label values; per-stat entries are
        # keyed by the (label, statName) tuple
        self.labelStats = {}
        self.labelStats['Labels'] = []
        # first pass: find the range of label values present in the image
        stataccum = vtk.vtkImageAccumulate()
        if vtk.VTK_MAJOR_VERSION <= 5:
            # VTK 5 uses SetInput; newer VTK uses SetInputData/Connection
            stataccum.SetInput(labelNode.GetImageData())
        else:
            stataccum.SetInputData(labelNode.GetImageData())
        stataccum.Update()
        lo = int(stataccum.GetMin()[0])
        hi = int(stataccum.GetMax()[0])
        for i in xrange(lo,hi+1):
            # binary mask selecting exactly label value i
            thresholder = vtk.vtkImageThreshold()
            if vtk.VTK_MAJOR_VERSION <= 5:
                thresholder.SetInput(labelNode.GetImageData())
            else:
                thresholder.SetInputData(labelNode.GetImageData())
            thresholder.SetInValue(1)
            thresholder.SetOutValue(0)
            thresholder.ReplaceOutOn()
            thresholder.ThresholdBetween(i,i)
            thresholder.SetOutputScalarTypeToUnsignedChar()
            thresholder.Update()
            # stencil restricts the grayscale accumulation to the mask
            stencil = vtk.vtkImageToImageStencil()
            if vtk.VTK_MAJOR_VERSION <= 5:
                stencil.SetInput(thresholder.GetOutput())
            else:
                stencil.SetInputConnection(thresholder.GetOutputPort())
            stencil.ThresholdBetween(1, 1)
            stencil.Update()
            stat1 = vtk.vtkImageAccumulate()
            if vtk.VTK_MAJOR_VERSION <= 5:
                stat1.SetInput(grayscaleNode.GetImageData())
                stat1.SetStencil(stencil.GetOutput())
            else:
                stat1.SetInputConnection(grayscaleNode.GetImageDataConnection())
                stat1.SetStencilData(stencil.GetOutput())
            stat1.Update()
            # Map specific label values to curve-type names.
            # NOTE(review): 32/306/291 look like color-table indices from the
            # upstream segmentation -- confirm against the segmenting module.
            curveType = 'Curve Type'
            if i == 32:
                curveType = 'Washout Curve'
            elif i == 306:
                curveType = 'Persistent Curve'
            elif i == 291:
                curveType = 'Plateau Curve'
            elif i == 0:
                curveType = 'Unsegmented Region'
            if stat1.GetVoxelCount() > 0:
                # add an entry to the LabelStats list
                self.labelStats["Labels"].append(i)
                self.labelStats[i,"Volume"] = volumeName
                self.labelStats[i,"Curve Type"] = curveType
                self.labelStats[i,"Voxel Count"] = stat1.GetVoxelCount()
                self.labelStats[i,"Volume mm^3"] = self.labelStats[i,"Voxel Count"] * cubicMMPerVoxel
                self.labelStats[i,"Volume cc"] = self.labelStats[i,"Volume mm^3"] * ccPerCubicMM
                self.labelStats[i,"Minimum Intensity"] = stat1.GetMin()[0]
                self.labelStats[i,"Maximum Intensity"] = stat1.GetMax()[0]
                self.labelStats[i,"Mean Intensity"] = stat1.GetMean()[0]
                self.labelStats[i,"Standard Deviation"] = stat1.GetStandardDeviation()[0]
    def createStatsChart(self, labelNode, valueToPlot, ignoreZero=False):
        """Make a MRML chart of the current stats

        Plots *valueToPlot* per label as a categorical bar chart in the
        quantitative layout; label 0 can be skipped via *ignoreZero*.
        """
        # switch the layout so a chart view is visible, then grab the
        # first chart view node
        layoutNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLLayoutNode')
        layoutNodes.InitTraversal()
        layoutNode = layoutNodes.GetNextItemAsObject()
        layoutNode.SetViewArrangement(slicer.vtkMRMLLayoutNode.SlicerLayoutConventionalQuantitativeView)
        chartViewNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLChartViewNode')
        chartViewNodes.InitTraversal()
        chartViewNode = chartViewNodes.GetNextItemAsObject()
        # array rows are (label, value, 0) triples
        arrayNodeLabel = slicer.mrmlScene.AddNode(slicer.vtkMRMLDoubleArrayNode())
        array = arrayNodeLabel.GetArray()
        samples = len(self.labelStats["Labels"])
        tuples = samples
        if ignoreZero and self.labelStats["Labels"].__contains__(0):
            tuples -= 1
        array.SetNumberOfTuples(tuples)
        # NOTE(review): the local name 'tuple' shadows the builtin
        tuple = 0
        for i in xrange(samples):
            index = self.labelStats["Labels"][i]
            if not (ignoreZero and index == 0):
                array.SetComponent(tuple, 0, index)
                array.SetComponent(tuple, 1, self.labelStats[index,valueToPlot])
                array.SetComponent(tuple, 2, 0)
                tuple += 1
        chartNode = slicer.mrmlScene.AddNode(slicer.vtkMRMLChartNode())
        chartNode.AddArray(valueToPlot, arrayNodeLabel.GetID())
        chartViewNode.SetChartNodeID(chartNode.GetID())
        chartNode.SetProperty('default', 'title', 'OpenCAD Label Statistics')
        chartNode.SetProperty('default', 'xAxisLabel', 'OpenCAD Label')
        chartNode.SetProperty('default', 'yAxisLabel', valueToPlot)
        chartNode.SetProperty('default', 'type', 'Bar');
        chartNode.SetProperty('default', 'xAxisType', 'categorical')
        chartNode.SetProperty('default', 'showLegend', 'off')
        # series level properties
        if labelNode.GetDisplayNode() != None and labelNode.GetDisplayNode().GetColorNode() != None:
            # colour the bars with the label map's colour table when available
            chartNode.SetProperty(valueToPlot, 'lookupTable', labelNode.GetDisplayNode().GetColorNodeID());
|
vnarayan13/Slicer-OpenCAD
|
SegmentCAD/LabelStatisticsLogic/LabelStatisticsLogic.py
|
Python
|
mit
| 5,235
|
[
"VTK"
] |
8d1b1020bd59f39852eccc8120dbb4860514b3262e2d2261d23ce12ab3603c78
|
#!/usr/bin/python
# Handle unicode encoding
import collections
import csv
import errno
import getpass
import itertools
import locale
import os
import platform
import threading
import time
import shlex
import socket
import sys
import tempfile
import urllib2
import re
import fileinput
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
from re import compile, escape, sub
from subprocess import Popen, call, PIPE, STDOUT
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, 'wb')
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
try:
import json
HAS_JSON = True
except Exception:
HAS_JSON = False
# Step-outcome codes (names suggest failed/ok/skip/warn reporting --
# the consumers are outside this view; confirm before relying on values).
fsli_C_FAILED = 1
fsli_C_OK = 2
fsli_C_SKIP = 4
fsli_C_WARN = 3
# Installation states: presumably already-current / update / upgrade.
CURRENT = 0
UPDATE = 1
UPGRADE = 2
# Shell families, presumably used to pick which profile file to modify.
BOURNE_SHELLS = ('sh', 'bash', 'zsh', 'ksh', 'dash', )
C_SHELLS = ('csh', 'tcsh', )
class Version(object):
    """A dotted version number (major.minor.patch[.hotfix]) with full
    ordering support.

    Anything after a ':' in the input string is ignored; missing
    components default to 0. Raises ValueError for non-numeric parts.
    """
    def __init__(self, version_string):
        if ':' in version_string:
            version_string = version_string.split(':')[0]
        parts = version_string.split('.')
        if not all(p.isdigit() for p in parts):
            raise ValueError('Bad version string')
        numbers = [int(p) for p in parts]
        # pad to four components; extra components beyond four are ignored
        numbers += [0] * (4 - len(numbers))
        (self.major, self.minor, self.patch, self.hotfix) = numbers[:4]
    def _key(self):
        # comparison key: compare versions as plain 4-tuples
        return (self.major, self.minor, self.patch, self.hotfix)
    def __repr__(self):
        return "Version(%s,%s,%s,%s)" % self._key()
    def __str__(self):
        shown = [self.major, self.minor, self.patch]
        # the hotfix component is only rendered when non-zero
        if self.hotfix != 0:
            shown.append(self.hotfix)
        return ".".join(str(part) for part in shown)
    def __lt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() < other._key()
    def __gt__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() > other._key()
    def __le__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() <= other._key()
    def __ge__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() >= other._key()
    def __eq__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() == other._key()
    def __ne__(self, other):
        if not isinstance(other, Version):
            return NotImplemented
        return self._key() != other._key()
    def __cmp__(self, other):
        # Python 2 three-way comparison kept for compatibility
        if not isinstance(other, Version):
            return NotImplemented
        return (self._key() > other._key()) - (self._key() < other._key())
version = Version('3.1.0')
def memoize(f):
    """Return a wrapper around *f* that caches results per argument set.

    The cache dict is exposed as ``f.cache`` and keys include *f* itself,
    positional args (which must be hashable) and a frozenset of the
    keyword items. functools.wraps preserves *f*'s name/docstring on the
    wrapper (the original wrapper lost them).
    """
    from functools import wraps  # local import: module header is outside this block
    cache = f.cache = {}
    @wraps(f)
    def g(*args, **kwargs):
        key = (f, tuple(args), frozenset(kwargs.items()))
        if key not in cache:
            cache[key] = f(*args, **kwargs)
        return cache[key]
    return g
class InstallError(Exception):
    """Raised when an installation step cannot be completed."""
class shell_colours(object):
    '''ANSI escape sequences used to colour terminal output.'''
    default = '\033[0m'  # reset all attributes
    rfg_kbg = '\033[91m'  # bright red foreground
    gfg_kbg = '\033[92m'  # bright green foreground
    yfg_kbg = '\033[93m'  # bright yellow foreground
    mfg_kbg = '\033[95m'  # bright magenta foreground
    yfg_bbg = '\033[104;93m'  # bright yellow on bright blue background
    bfg_kbg = '\033[34m'  # blue foreground
    bold = '\033[1m'  # bold text
class MsgUser(object):
    '''Console reporting helper (Python 2 print statements).

    Holds process-wide debug/quiet flags (name-mangled class attributes)
    and prints colour-coded status lines: [OK], [FAILED], [Skipped],
    [Warning]. failed() always prints, even in quiet mode.
    '''
    __debug = False
    __quiet = False
    @classmethod
    def debugOn(cls):
        cls.__debug = True
    @classmethod
    def debugOff(cls):
        cls.__debug = False
    @classmethod
    def quietOn(cls):
        cls.__quiet = True
    @classmethod
    def quietOff(cls):
        cls.__quiet = False
    @classmethod
    def isquiet(cls):
        return cls.__quiet
    @classmethod
    def isdebug(cls):
        return cls.__debug
    @classmethod
    def debug(cls, message, newline=True):
        # debug output goes to stderr so it can be separated from results
        if cls.__debug:
            mess = str(message)
            if newline:
                mess += "\n"
            sys.stderr.write(mess)
    @classmethod
    def message(cls, msg):
        if cls.__quiet:
            return
        print msg
    @classmethod
    def question(cls, msg):
        # trailing comma: stay on the same line awaiting user input
        print msg,
    @classmethod
    def skipped(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.mfg_kbg, "[Skipped] ", shell_colours.default, msg))
    @classmethod
    def ok(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.gfg_kbg, "[OK] ", shell_colours.default, msg))
    @classmethod
    def failed(cls, msg):
        # failures are always shown, regardless of the quiet flag
        print "".join(
            (shell_colours.rfg_kbg, "[FAILED] ", shell_colours.default, msg))
    @classmethod
    def warning(cls, msg):
        if cls.__quiet:
            return
        print "".join(
            (shell_colours.bfg_kbg,
             shell_colours.bold,
             "[Warning]",
             shell_colours.default, " ", msg))
class Progress_bar(object):
    '''In-place textual progress indicator.

    Renders either a '#' bar (default), a "reading/max - p%" counter
    (numeric=True) or a bare percentage (percentage=True), redrawing the
    same line via carriage returns; the line is cleared at 100%.
    '''
    def __init__(self, x=0, y=0, mx=1, numeric=False, percentage=False):
        # x/y are stored but not used by update(); mx maps to 100%
        self.x = x
        self.y = y
        self.width = 50
        self.current = 0
        self.max = mx
        self.numeric = numeric
        self.percentage = percentage
    def update(self, reading):
        '''Redraw the indicator for *reading* (no-op in quiet mode).'''
        if MsgUser.isquiet():
            return
        percent = int(round(reading * 100.0 / self.max))
        cr = '\r'
        if not self.numeric and not self.percentage:
            bar = '#' * int(percent)
        elif self.numeric:
            # "\033[K" clears from the cursor to the end of the line
            bar = "/".join(
                (str(reading),
                str(self.max))) + ' - ' + str(percent) + "%\033[K"
        elif self.percentage:
            bar = "%s%%" % (percent)
        sys.stdout.write(cr)
        sys.stdout.write(bar)
        sys.stdout.flush()
        self.current = percent
        if percent == 100:
            # wipe the finished indicator from the line
            sys.stdout.write(cr)
            if not self.numeric and not self.percentage:
                sys.stdout.write(" " * int(percent))
                sys.stdout.write(cr)
                sys.stdout.flush()
            elif self.numeric:
                sys.stdout.write(" " * (len(str(self.max))*2 + 8))
                sys.stdout.write(cr)
                sys.stdout.flush()
            elif self.percentage:
                sys.stdout.write("100%")
                sys.stdout.write(cr)
                sys.stdout.flush()
def temp_file_name(mode='r', close=False):
    '''Create a temporary file via tempfile.mkstemp and return a tuple
    (file object, file name).

    The file is opened with *mode* (read-only unless specified). When
    *close* is True the returned object is already closed. Note the
    object comes from os.fdopen, so it lacks a usable file name
    attribute -- hence the name is returned alongside it.'''
    (fd, fname) = tempfile.mkstemp()
    handle = os.fdopen(fd, mode)
    if close:
        handle.close()
    return (handle, fname)
class RunCommandError(Exception):
    """Raised when an external command exits with a non-zero status."""
class Spinner(object):
    '''Animated console spinner shown on a background thread while a
    long-running command executes.'''
    # class-level defaults: cycling glyphs, run flag and frame delay
    spinner = itertools.cycle(('-', '\\', '|', '/', ))
    busy = False
    delay = 0.2
    def __init__(self, delay=None, quiet=False):
        # accept any float-convertible delay; keep the default otherwise
        if delay:
            try:
                self.delay = float(delay)
            except ValueError:
                pass
        self.quiet = quiet
    def spin_it(self):
        # draw one glyph, wait, erase it -- until stop() clears the flag
        # NOTE: self.spinner.next() is the Python 2-only iterator API
        while self.busy:
            sys.stdout.write(self.spinner.next())
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write('\b')
            sys.stdout.flush()
    def start(self):
        '''Begin spinning on a worker thread (no-op when quiet).'''
        if not self.quiet:
            self.busy = True
            threading.Thread(target=self.spin_it).start()
    def stop(self):
        '''Ask the spinner to finish; sleep one frame so it can clear.'''
        self.busy = False
        time.sleep(self.delay)
def run_cmd_dropstdout(command, as_root=False):
    '''Run *command* (a shell-style string, shlex-split into argv),
    discarding stdout; show a spinner while it runs.

    When *as_root* is set and we are not already root, 'sudo -S' is
    prepended and the cached sudo password is piped to stdin.

    Raises RunCommandError (with the command's stderr) on non-zero exit.
    '''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        # -S makes sudo read the password from stdin
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
    else:
        sudo_pwd = ''
    try:
        my_spinner = Spinner(quiet=MsgUser.isquiet())
        my_spinner.start()
        cmd = Popen(command_line, stdin=PIPE, stdout=None, stderr=PIPE)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        (_, error) = cmd.communicate()
    except Exception:
        raise
    finally:
        # always stop the spinner, even if the command blew up
        my_spinner.stop()
    if cmd.returncode:
        MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
        raise RunCommandError(error)
def run_cmd(command, as_root=False):
    '''Run *command* (a shell-style string, shlex-split into argv) and
    return its captured stdout; show a spinner while it runs.

    When *as_root* is set and we are not already root, 'sudo -S' is
    prepended and the cached sudo password is piped to stdin.

    Raises RunCommandError (with the command's stderr) on non-zero exit.
    '''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        # -S makes sudo read the password from stdin
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
    else:
        sudo_pwd = ''
    MsgUser.debug("Will call %s" % (command_line))
    try:
        my_spinner = Spinner(quiet=MsgUser.isquiet())
        my_spinner.start()
        cmd = Popen(command_line, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        (output, error) = cmd.communicate()
    except Exception:
        raise
    finally:
        # always stop the spinner, even if the command blew up
        my_spinner.stop()
    if cmd.returncode:
        MsgUser.debug("An error occured (%s, %s)" % (cmd.returncode, error))
        raise RunCommandError(error)
    MsgUser.debug("Command completed successfully (%s)" % (output))
    return output
def run_cmd_displayoutput(command, as_root=False):
    '''Run *command*, streaming its stdout/stderr to this process's own
    stdout/stderr.

    With *as_root* (and not already root) the command runs via 'sudo -S'
    with the cached password piped in; otherwise it runs directly.

    Raises RunCommandError on non-zero exit. NOTE: in that case the
    exception carries the integer return code, not an error string.
    '''
    command_line = shlex.split(command)
    if as_root and os.getuid() != 0:
        try:
            sudo_pwd = get_sudo_pwd()
        except SudoPasswordError:
            raise RunCommandError(
                "Unable to get valid administrator's password")
        command_line.insert(0, '-S')
        command_line.insert(0, 'sudo')
        MsgUser.debug("Will call %s" % (command_line))
        cmd = Popen(
            command_line,
            stdin=PIPE, stdout=sys.stdout, stderr=sys.stderr)
        if sudo_pwd:
            cmd.stdin.write(sudo_pwd + '\n')
            cmd.stdin.flush()
        cmd.communicate()
        return_code = cmd.returncode
    else:
        # no sudo needed: the simple blocking call suffices
        return_code = call(command_line)
    if return_code:
        MsgUser.debug("An error occured (%s)" % (return_code))
        raise RunCommandError(return_code)
    MsgUser.debug("Command completed successfully")
def check_sudo(sudo_pwd):
    """Probe sudo with *sudo_pwd*; return True if the password is accepted.

    Runs 'sudo -S true' with the password piped to stdin and checks the
    exit status, discarding all sudo output.
    """
    MsgUser.debug("Checking sudo password")
    probe = Popen(
        ['sudo', '-S', 'true'],
        stdin=PIPE,
        stdout=DEVNULL,
        stderr=DEVNULL
    )
    probe.stdin.write(sudo_pwd + '\n')
    probe.stdin.flush()
    probe.communicate()
    return probe.returncode == 0
class SudoPasswordError(Exception):
    """Raised when a valid sudo password could not be obtained."""
@memoize
def get_sudo_pwd():
    '''Prompt the user for the sudo password and return it.

    Memoized, so the user is asked at most once per run. Up to three
    attempts are allowed, each validated via check_sudo; raises
    SudoPasswordError after three failures.
    '''
    MsgUser.message("We require your password to continue...")
    attempts = 0
    valid = False
    while attempts < 3 and not valid:
        sudo_pwd = getpass.getpass('password: ')
        valid = check_sudo(sudo_pwd)
        if not valid:
            MsgUser.failed("Incorrect password")
            attempts += 1
    if not valid:
        raise SudoPasswordError()
    return sudo_pwd
class DeletionRefused(Exception):
    """Raised when safe_delete refuses to remove a protected system path."""
class SafeDeleteError(Exception):
    """Raised when safe_delete's underlying rm command fails."""
def safe_delete(fs_object, as_root=False):
    '''Delete file/folder via rm (becoming root if necessary), refusing
    outright to delete well-known system paths.

    Raises DeletionRefused for protected paths and SafeDeleteError when
    the rm command fails. (Python 2 except syntax; kept as-is.)'''
    # hard blocklist of paths that must never be removed
    banned_items = ['/', '/usr', '/usr/bin', '/usr/local', '/bin',
                    '/sbin', '/opt', '/Library', '/System', '/System/Library',
                    '/var', '/tmp', '/var/tmp', '/lib', '/lib64', '/Users',
                    '/home', '/Applications', '/private', '/etc', '/dev',
                    '/Network', '/net', '/proc']
    # directories need recursive deletion
    if os.path.isdir(fs_object):
        del_opts = "-rf"
    else:
        del_opts = '-f'
    if fs_object in banned_items:
        raise DeletionRefused('Will not delete %s!' % (fs_object))
    command_line = " ".join(('rm', del_opts, fs_object))
    try:
        result = run_cmd(command_line, as_root)
    except RunCommandError, e:
        raise SafeDeleteError(str(e))
    return result
class MoveError(Exception):
    """Raised when the mv command in move() fails."""
def move(source, target, as_root):
    '''Move *source* to *target* via the mv command (optionally as root),
    wrapping any command failure in MoveError.'''
    try:
        run_cmd_dropstdout(" ".join(('mv', source, target)), as_root)
    except RunCommandError, e:
        raise MoveError(str(e))
class IsDirectoryError(Exception):
    """Raised when a file operation is given a directory instead."""
class CopyFileError(Exception):
    """Raised when the cp command in copy_file() fails."""
def copy_file(fname, destination, as_root):
    '''Copy a file via 'cp -p' (preserving attributes), using sudo if
    necessary.

    Raises IsDirectoryError if *fname* is a directory and CopyFileError
    if the cp command fails. (Python 2 except syntax; kept as-is.)'''
    MsgUser.debug("Copying %s to %s (as root? %s)" % (
        fname, destination, as_root))
    if os.path.isdir(fname):
        raise IsDirectoryError('Source (%s) is a directory!' % (fname))
    if os.path.isdir(destination):
        # Ensure that copying into a folder we have a terminating slash
        destination = destination.rstrip('/') + "/"
    copy_opts = '-p'
    # quote both paths: run_cmd shlex-splits the command string, so
    # unquoted spaces in paths would split into separate arguments
    fname = '"%s"' % fname
    destination = '"%s"' % destination
    command_line = " ".join(('cp', copy_opts, fname, destination))
    try:
        result = run_cmd(command_line, as_root)
    except RunCommandError, e:
        raise CopyFileError(str(e))
    return result
def file_contains(fname, search_for):
    '''Equivalent of grep: True if any line of *fname* contains the
    literal string *search_for*, else False.'''
    regex = compile(escape(search_for))
    found = False
    MsgUser.debug("In file_contains.")
    MsgUser.debug("Looking for %s in %s." % (search_for, fname))
    # 'with' guarantees the handle is closed even if an error interrupts
    # the scan (the original leaked the handle on exceptions).
    with open(fname, 'r') as f:
        for l in f:
            if regex.search(l):
                found = True
                break
    return found
def file_contains_1stline(fname, search_for):
    '''Equivalent of grep: return the first line of *fname* containing
    the literal string *search_for*, or '' if no line matches.'''
    regex = compile(escape(search_for))
    found = ''
    MsgUser.debug("In file_contains_1stline.")
    MsgUser.debug("Looking for %s in %s." % (search_for, fname))
    # 'with' guarantees the handle is closed even if an error interrupts
    # the scan (the original leaked the handle on exceptions).
    with open(fname, 'r') as f:
        for l in f:
            if regex.search(l):
                found = l
                break
    return found
def line_string_replace(line, search_for, replace_with):
    """Replace every literal occurrence of *search_for* in *line* with
    *replace_with*.

    The original routed this through re.sub with BOTH arguments passed
    via re.escape; escaping the replacement template is unnecessary and
    relies on the regex engine collapsing unknown escapes, which is
    version-dependent behaviour. Plain str.replace expresses the intended
    literal substitution directly and safely.
    """
    return line.replace(search_for, replace_with)
def line_starts_replace(line, search_for, replace_with):
    """Return *replace_with* (newline-terminated) when *line* starts with
    *search_for*; otherwise return *line* unchanged."""
    if not line.startswith(search_for):
        return line
    return replace_with + '\n'
class MoveFileError(Exception):
    """Raised when move_file() cannot relocate a file."""
def move_file(from_file, to_file, requires_root=False):
    '''Move a file, using /bin/cp via sudo if requested.
    Will work around known bugs in python.

    Root path: copy via sudo then remove the source. Non-root path: use
    move(), with a fallback for an OS X/NFS quirk (see below). Raises
    MoveFileError on failure. (Python 2 except syntax; kept as-is.)'''
    if requires_root:
        try:
            run_cmd_dropstdout(" ".join(
                ("/bin/cp", from_file, to_file)), as_root=True)
        except RunCommandError, e:
            MsgUser.debug(e)
            raise MoveFileError("Failed to move %s (%s)" % (from_file, str(e)))
        os.remove(from_file)
    else:
        try:
            move(from_file, to_file, requires_root)
        except OSError, e:
            # Handle bug in some python versions on OS X writing to NFS home
            # folders, Python tries to preserve file flags but NFS can't do
            # this. It fails to catch this error and ends up leaving the file
            # in the original and new locations!
            # NOTE(review): errno 45 is EOPNOTSUPP on OS X -- confirm.
            if e.errno == 45:
                # Check if new file has been created:
                if os.path.isfile(to_file):
                    # Check if original exists
                    if os.path.isfile(from_file):
                        # Destroy original and continue
                        os.remove(from_file)
                else:
                    # copy failed entirely: retry with a plain cp, then
                    # remove the source ourselves
                    try:
                        run_cmd_dropstdout("/bin/cp %s %s" % (
                            from_file, to_file), as_root=False)
                    except RunCommandError, e:
                        MsgUser.debug(e)
                        raise MoveFileError("Failed to copy from %s (%s)" % (
                            from_file, str(e)))
                    os.remove(from_file)
            else:
                raise
        except Exception:
            raise
class EditFileError(Exception):
    """Raised when edit_file() cannot rewrite the target file."""
def edit_file(fname, edit_function, search_for, replace_with, requires_root):
    '''Search for a simple string in the file given and replace
    it with the new text.

    Streams *fname* line-by-line through *edit_function* (e.g.
    line_string_replace or line_starts_replace) into a temp file, then
    moves the temp file over the original. The temp file is removed if
    the final move fails. Raises EditFileError on failure.
    (Python 2 except syntax; kept as-is.)'''
    try:
        (tmpfile, tmpfname) = temp_file_name(mode='w')
        src = open(fname)
        for line in src:
            line = edit_function(line, search_for, replace_with)
            tmpfile.write(line)
        src.close()
        tmpfile.close()
        try:
            move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
            MsgUser.debug(e)
            os.remove(tmpfname)
            raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
    except IOError, e:
        MsgUser.debug(e.strerror)
        raise EditFileError("Failed to edit %s (%s)" % (fname, str(e)))
    MsgUser.debug("Modified %s (search %s; replace %s)." % (
        fname, search_for, replace_with))
class AddToFileError(Exception):
    '''Raised by add_to_file when appending to a file fails.'''
    pass
def add_to_file(fname, add_lines, requires_root):
    '''Add lines to end of a file

    fname -- file to append to (rewritten atomically via a temp copy)
    add_lines -- a string (split on newline) or a sequence of lines
    requires_root -- install the new copy with sudo
    Raises AddToFileError on failure.'''
    if isinstance(add_lines, basestring):
        add_lines = add_lines.split('\n')
    try:
        (tmpfile, tmpfname) = temp_file_name(mode='w')
        src = open(fname)
        # Copy the existing content first
        for line in src:
            tmpfile.write(line)
        src.close()
        # Separate old content from the appended lines
        tmpfile.write('\n')
        for line in add_lines:
            tmpfile.write(line)
            tmpfile.write('\n')
        tmpfile.close()
        try:
            move_file(tmpfname, fname, requires_root)
        except MoveFileError, e:
            os.remove(tmpfname)
            MsgUser.debug(e)
            raise AddToFileError("Failed to add to file %s (%s)" % (
                fname, str(e)))
    except IOError, e:
        MsgUser.debug(e.strerror + tmpfname + fname)
        raise AddToFileError("Failed to add to file %s" % (fname))
    MsgUser.debug("Modified %s (added %s)" % (fname, '\n'.join(add_lines)))
class CreateFileError(Exception):
    '''Raised by create_file when a new file cannot be created.'''
    pass
def create_file(fname, lines, requires_root):
'''Create a new file containing lines given'''
if isinstance(lines, basestring):
lines = lines.split('\n')
try:
(tmpfile, tmpfname) = temp_file_name(mode='w')
for line in lines:
tmpfile.write(line)
tmpfile.write('\n')
tmpfile.close()
try:
move_file(tmpfname, fname, requires_root)
except CreateFileError, e:
os.remove(tmpfname)
MsgUser.debug(e)
raise CreateFileError("Failed to edit %s (%s)" % (fname, str(e)))
except IOError, e:
MsgUser.debug(e.strerror)
raise CreateFileError("Failed to create %s" % (fname))
MsgUser.debug("Created %s (added %s)" % (fname, '\n'.join(lines)))
class UnsupportedOs(Exception):
    '''Raised when the running OS/architecture has no FSL build.'''
    pass
class Host(object):
    '''Work out which platform we are running on

    All attributes are class-level constants evaluated once at import
    time; callers use the class itself (e.g. Host.o_s) without
    instantiating it.'''
    o_s = platform.system().lower()  # e.g. 'darwin', 'linux'
    arch = platform.machine()  # e.g. 'x86_64'
    applever = ''  # macOS product version; stays empty elsewhere
    os_type = os.name
    supported = True
    if o_s == 'darwin':
        vendor = 'apple'
        version = Version(platform.release())  # Darwin kernel version
        (applever, _, _) = platform.mac_ver()
        glibc = ''
    elif o_s == 'linux':
        if hasattr(platform, 'linux_distribution'):
            # We have a modern python (>2.4)
            (vendor, version, _) = platform.linux_distribution(
                full_distribution_name=0)
            # check if vendor still empty from above call (useful for AWS linux 2 or other rare OSes)
            if not vendor:
                (vendor, version, _) = platform.linux_distribution(supported_dists=['system'])
        else:
            (vendor, version, _) = platform.dist()
        vendor = vendor.lower()
        version = Version(version)
        glibc = platform.libc_ver()[1]
    else:
        supported = False
    # Word size derived from the architecture string.
    # NOTE(review): any other arch leaves 'bits' undefined - confirm all
    # supported platforms report one of these three machine strings.
    if arch == 'x86_64':
        bits = '64'
    elif arch == 'i686':
        bits = '32'
    elif arch == 'Power Macintosh':
        bits = ''
def is_writeable(location):
    '''Check if we can write to the location given

    Attempts to create (and auto-delete) a temporary file in location.
    EACCES/EPERM mean "not writeable"; any other OSError is re-raised.'''
    writeable = True
    try:
        tfile = tempfile.NamedTemporaryFile(mode='w+b', dir=location)
        tfile.close()
    except OSError, e:
        if e.errno == errno.EACCES or e.errno == errno.EPERM:
            writeable = False
        else:
            raise
    return writeable
def is_writeable_as_root(location):
    '''Check if sudo can write to a given location

    Copies a small scratch file into location via sudo /bin/cp and then
    removes it again; returns True when both commands succeed.'''
    # This requires us to use sudo
    (f, fname) = temp_file_name(mode='w')
    f.write("FSL")
    f.close()
    result = False
    tmptarget = '/'.join((location, os.path.basename(fname)))
    MsgUser.debug(" ".join(('/bin/cp', fname, tmptarget)))
    try:
        run_cmd_dropstdout(" ".join(('/bin/cp',
                                     fname, tmptarget)), as_root=True)
        result = True
        os.remove(fname)
        # Remove the probe file we just copied in
        run_cmd_dropstdout(" ".join(('/bin/rm',
                                     '-f', tmptarget)), as_root=True)
    except RunCommandError, e:
        MsgUser.debug(e)
        os.remove(fname)
        result = False
    MsgUser.debug("Writeable as root? %s" % (result))
    return result
class ChecksumCalcError(Exception):
    '''Raised when a file checksum cannot be computed.'''
    pass
def sha256File(filename, bs=1048576):
    '''Returns the sha256 sum of the given file.

    filename -- file to digest
    bs -- read block size in bytes (default 1 MiB)
    Falls back to the sha256sum binary when hashlib is unavailable
    (python < 2.5); raises ChecksumCalcError if that also fails.'''
    MsgUser.message("Checking FSL package")
    try:
        import hashlib
        f = open(filename, 'rb')
        pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
        pb.position = 0
        fhash = hashlib.sha256()
        data = f.read(bs)
        # A short read marks the final block
        while len(data) == bs:
            fhash.update(data)
            data = f.read(bs)
            pb.position += len(data)
            pb.update(pb.position)
        fhash.update(data)
        f.close()
        return fhash.hexdigest()
    except ImportError:
        # No SHA256 support on python pre-2.5 so call the OS to do it.
        try:
            result = run_cmd(" ".join(('sha256sum', '-b', filename)))
            return parsesha256sumfile(result)
        except RunCommandError, e:
            MsgUser.debug("SHA256 calculation error %s" % (str(e)))
            raise ChecksumCalcError
def parsesha256sumfile(sha256string):
    '''Returns sha256 sum extracted from the output of sha256sum or shasum -a
    256 from OS X/Linux platforms

    The tools print "<hex digest> *<filename>"; keep only the digest.'''
    (digest, _) = sha256string.split("*")
    return digest.strip()
def md5File(filename, bs=1048576):
    '''Returns the MD5 sum of the given file.

    filename -- file to digest
    bs -- read block size in bytes (default 1 MiB)
    Uses hashlib when available, the legacy md5 module on python < 2.5.'''
    MsgUser.message("Checking FSL package")
    try:
        import hashlib
        fhash = hashlib.md5()
    except ImportError:
        import md5
        fhash = md5.new()
    f = open(filename, 'rb')
    pb = Progress_bar(mx=os.path.getsize(filename), percentage=True)
    pb.position = 0
    data = f.read(bs)
    # A short read marks the final block
    while len(data) == bs:
        fhash.update(data)
        data = f.read(bs)
        pb.position += len(data)
        pb.update(pb.position)
    fhash.update(data)
    f.close()
    return fhash.hexdigest()
def file_checksum(filename, chktype='sha256'):
    '''Return the checksum of filename.

    chktype -- 'sha256' (default) or 'md5'; any other value raises
    ChecksumCalcError.'''
    if chktype == 'sha256':
        return sha256File(filename)
    elif chktype == 'md5':
        return md5File(filename)
    raise ChecksumCalcError('Unrecognised checksum type')
class OpenUrlError(Exception):
    '''Raised by open_url when a URL cannot be opened.'''
    pass
def open_url(url, start=0, timeout=20):
    '''Open url for reading, optionally resuming at byte offset start.

    Returns the open urllib2 response object; raises OpenUrlError with a
    user-readable message on any failure.'''
    socket.setdefaulttimeout(timeout)
    MsgUser.debug("Attempting to download %s." % (url))
    try:
        req = urllib2.Request(url)
        if start != 0:
            # Resume a partial download with an HTTP Range request
            req.headers['Range'] = 'bytes=%s-' % (start)
        rf = urllib2.urlopen(req)
    except urllib2.HTTPError, e:
        MsgUser.debug("%s %s" % (url, e.msg))
        raise OpenUrlError("Cannot find file %s on server (%s). "
                           "Try again later." % (url, e.msg))
    except urllib2.URLError, e:
        if type(e.reason) != str:
            # NOTE(review): this local 'errno' shadows the errno module
            # inside this function
            errno = e.reason.args[0]
            if len(e.reason.args) > 1:
                message = e.reason.args[1]
                # give up on trying to identify both the errno and message
            else:
                message = e.reason.args
            if errno == 8:
                # Bad host name
                MsgUser.debug("%s %s" % (url,
                                         "Unable to find FSL download "
                                         "server in the DNS"))
            else:
                # Other error
                MsgUser.debug("%s %s" % (url, message))
        else:
            message = str(e.reason)
        raise OpenUrlError(
            "Cannot find %s (%s). Try again later." % (url, message))
    except socket.timeout, e:
        MsgUser.debug(e)
        raise OpenUrlError("Failed to contact FSL web site. Try again later.")
    return rf
class DownloadFileError(Exception):
    '''Raised by download_file when a download cannot be completed.'''
    pass
def download_file(url, localf, timeout=20):
    '''Get a file from the url given storing it in the local file specified

    Retries up to 5 times, resuming from the bytes already received.
    Raises DownloadFileError when the transfer never completes.'''
    try:
        rf = open_url(url, 0, timeout)
    except OpenUrlError, e:
        raise DownloadFileError(str(e))
    metadata = rf.info()
    rf_size = int(metadata.getheaders("Content-Length")[0])
    dl_size = 0
    block = 16384
    x = 0
    y = 0
    pb = Progress_bar(x, y, rf_size, numeric=True)
    for attempt in range(1, 6):
        # Attempt download 5 times before giving up
        pause = timeout
        try:
            try:
                # Append mode so a resumed attempt continues the same file
                lf = open(localf, 'ab')
            except Exception:
                raise DownloadFileError("Failed to create temporary file.")
            while True:
                buf = rf.read(block)
                if not buf:
                    break
                dl_size += len(buf)
                lf.write(buf)
                pb.update(dl_size)
            lf.close()
        except (IOError, socket.timeout), e:
            MsgUser.debug(e.strerror)
            MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
            pause = 0
        if dl_size != rf_size:
            time.sleep(pause)
            MsgUser.message("\nDownload failed re-trying (%s)..." % attempt)
            try:
                # Re-open the connection from where we got to
                rf = open_url(url, dl_size, timeout)
            except OpenUrlError, e:
                MsgUser.debug(e)
        else:
            break
    if dl_size != rf_size:
        raise DownloadFileError("Failed to download file.")
def build_url_with_protocol(protocol, base, parts):
    '''Join base and path parts into a URL using the given protocol,
    normalising stray slashes between segments.'''
    segments = ['%s://%s' % (protocol, base.strip('/'))]
    for part in parts:
        segments.append(part.strip('/'))
    return '/'.join(segments)
def build_url(parts):
    '''Join URL fragments with '/', stripping stray slashes from each.'''
    return '/'.join(fragment.strip('/') for fragment in parts)
class SiteNotResponding(Exception):
    '''Raised when a download site cannot be contacted.'''
    pass
def fastest_mirror(main_mirrors, mirrors_file, timeout=20):
    '''Find the fastest mirror for FSL downloads.

    main_mirrors -- base URLs that host the mirror-list file
    mirrors_file -- name of the mirror-list file on those servers
    Returns the mirror that answered a socket connect quickest; raises
    SiteNotResponding/ServerFailure when no site can be reached.'''
    MsgUser.debug("Calculating fastest mirror")
    socket.setdefaulttimeout(timeout)
    # Get the mirror list from the url
    fastestmirrors = {}
    mirrorlist = []
    for m in main_mirrors:
        MsgUser.debug("Trying %s" % (m))
        m_url = '/'.join((m.strip('/'), mirrors_file))
        MsgUser.debug("Attempting to open %s" % (m_url))
        try:
            response = urllib2.urlopen(url=m_url)
        except urllib2.HTTPError, e:
            MsgUser.debug("%s %s" % (m_url, e.msg))
            raise SiteNotResponding(e.msg)
        except urllib2.URLError, e:
            if isinstance(e.reason, socket.timeout):
                MsgUser.debug("Time out trying %s" % (m_url))
                raise SiteNotResponding(m)
            else:
                MsgUser.debug(str(e.reason))
                raise SiteNotResponding(str(e.reason))
        except socket.timeout, e:
            MsgUser.debug(e)
            raise SiteNotResponding(str(e))
        except Exception, e:
            MsgUser.debug("Unhandled exception %s" % (str(e)))
            raise
        else:
            # One line per mirror URL
            mirrorlist = response.read().strip().split('\n')
            MsgUser.debug("Received the following "
                          "mirror list %s" % (mirrorlist))
            continue
    if len(mirrorlist) == 0:
        raise ServerFailure("Cannot find FSL download servers")
    # Check timings from the urls specified
    if len(mirrorlist) > 1:
        for mirror in mirrorlist:
            MsgUser.debug("Trying %s" % (mirror))
            then = time.time()
            if mirror.startswith('http:'):
                serverport = 80
            elif mirror.startswith('https:'):
                serverport = 443
            else:
                raise ServerFailure("Unrecognised protocol")
            # NOTE(review): 'mirror' is a full URL (it starts with a
            # protocol prefix) yet socket.create_connection expects a
            # bare host name - confirm the mirror list holds plain hosts
            try:
                mysock = socket.create_connection((mirror, serverport),
                                                  timeout)
                pingtime = time.time() - then
                mysock.close()
                fastestmirrors[pingtime] = mirror
                MsgUser.debug("Mirror responded in %s seconds" % (pingtime))
            except socket.gaierror, e:
                MsgUser.debug("%s can't be resolved" % (e))
            except socket.timeout, e:
                MsgUser.debug(e)
        if len(fastestmirrors) == 0:
            raise ServerFailure('Failed to contact any FSL download sites.')
        download_url = fastestmirrors[min(fastestmirrors.keys())]
    else:
        download_url = mirrorlist[0]
    return download_url
# Concept:
# Web app creates the following files:
# fslmirrorlist.txt - contains a list of mirror urls
# fslreleases.json - contains the available maps for oses
# mapping to a download url
# {'installer' {
# 'filename': 'fslinstaller.py',
# 'version': '3.0.0',
# 'date': '02/03/2017',
# 'checksum_type', 'sha256',
# 'checksum'},
# 'linux' : {
# 'centos' : {
# 'x86_64': {
# '6': {
# '5.0.9': {
# 'filename': 'fsl-5.0.9-centos6_64.tar.gz',
# 'version': '5.0.9',
# 'date': '01/02/2017',
# 'checksum_type', 'sha256',
# 'checksum': 'abf645662bcf4453235',
# },
# },
# },
# },
# 'rhel' : {'alias': 'centos'}},
# 'apple' : {
# 'darwin' : {
# 'x86_64': {
# '11': {
# ....
# },
# }
@memoize
def get_web_manifest(download_url, timeout=20):
    '''Download the FSL manifest from download_url

    Returns the parsed manifest: JSON when the json module is available,
    otherwise the CSV fallback. Results are memoized per URL.'''
    socket.setdefaulttimeout(timeout)
    MsgUser.debug("Looking for manifest at %s." % (download_url))
    if HAS_JSON:
        MsgUser.debug("Downloading JSON file")
        return get_json(download_url + Settings.manifest_json)
    else:
        MsgUser.debug("Downloading CSV file")
        return get_csv_dict(download_url + Settings.manifest_csv)
class GetFslDirError(Exception):
    '''Raised by get_fsldir when no valid FSL folder can be determined.'''
    pass
@memoize
def get_fsldir(specified_dir=None, install=False):
'''Find the installed version of FSL using FSLDIR
or location of this script'''
def validate_fsldir(directory):
parent = os.path.dirname(directory)
if parent == directory:
raise GetFslDirError(
"%s appears to be the root folder" %
parent)
if not os.path.exists(parent):
raise GetFslDirError(
"%s doesn't exist" %
parent)
if not os.path.isdir(parent):
raise GetFslDirError(
"%s isn't a directory" %
parent)
if (os.path.exists(directory) and not
os.path.exists(os.path.join(
directory, 'etc', 'fslversion'
))):
raise GetFslDirError(
"%s exists and doesn't appear to be an installed FSL folder" %
directory)
if specified_dir:
if install is False:
if not check_fsl_install(specified_dir):
raise GetFslDirError(
"%s isn't an 'fsl' folder" %
specified_dir)
else:
validate_fsldir(specified_dir)
return specified_dir
try:
fsldir = os.environ['FSLDIR']
try:
validate_fsldir(fsldir)
except GetFslDirError:
# FSLDIR environment variable is incorrect!
MsgUser.warning('FSLDIR environment variable '
'does not point at FSL install, ignoring...')
MsgUser.debug('FSLDIR is set to %s - '
'this folder does not appear to exist' % (fsldir))
fsldir = None
else:
fsldir = fsldir.rstrip('/')
if MsgUser.isquiet():
return fsldir
except KeyError:
# Look to see if I'm in an FSL install
try:
my_parent = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
except NameError:
# Running in debugger - __file__ not set, assume it's cwd
my_parent = os.path.dirname(
os.path.dirname(os.getcwd()))
try:
validate_fsldir(my_parent)
fsldir = my_parent
except GetFslDirError:
fsldir = None
if not install:
MsgUser.debug("asking about %s" % (fsldir))
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'inst_loc', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.falied(str(e))
return fsldir
else:
if not MsgUser.isquiet():
valid_dir = False
while not valid_dir:
fsldir = Settings.inst_qus.ask_question(
'location', default=fsldir)
try:
validate_fsldir(fsldir)
valid_dir = True
except GetFslDirError, e:
MsgUser.failed(str(e))
MsgUser.message(
'''Hint - press Enter to select the default value '''
'''given in the square brackets.
If you are specifying a destination folder this needs to either be an existing
FSL install folder or a folder that doesn't already exist.''')
fsldir = None
else:
raise GetFslDirError(
"I can't locate FSL, try again using '-d <FSLDIR>' "
"to specify where to find the FSL install")
return fsldir
def archive_version(archive):
    '''Takes the path to a FSL install file
    and works out what version it is.

    Raises NotAFslVersion when the path isn't a file or the name's
    version field cannot be parsed.'''
    if not os.path.isfile(archive):
        raise NotAFslVersion("%s is not a file" % (archive))
    # file is of form: fsl-V.V.V-platform.extensions
    (_, vstring, _) = archive.strip().split('-', 2)
    try:
        return Version(vstring)
    except ValueError:
        raise NotAFslVersion(
            "%s doesn't look like "
            "a version number" % (vstring))
class NotAFslVersion(Exception):
    '''Raised when a string cannot be parsed as an FSL version.'''
    pass
class GetInstalledVersionError(Exception):
    '''Raised when the installed FSL version cannot be determined.'''
    pass
def get_installed_version(fsldir):
    '''Takes path to FSLDIR and finds installed version details

    Reads <fsldir>/etc/fslversion and returns it as a Version.
    Raises NotAFslVersion for a malformed version string and
    GetInstalledVersionError when the file is missing.'''
    MsgUser.debug("Looking for fsl in %s" % fsldir)
    v_file = os.path.join(fsldir, 'etc', 'fslversion')
    if os.path.exists(v_file):
        f = open(v_file)
        v_string = f.readline()
        f.close()
        try:
            version = Version(v_string.strip())
        except ValueError:
            raise NotAFslVersion(
                "%s not a valid "
                "version string" % (v_string.strip()))
    else:
        MsgUser.debug(
            "No version information found - "
            "is this actually an FSL dir?")
        raise GetInstalledVersionError(
            "Cannot find the version information - "
            "is this actually an FSL dir?")
    MsgUser.debug("Found version %s" % (version))
    return version
def which_shell():
    '''Return the basename of the user's login shell (e.g. 'bash').

    Returns an empty string when SHELL is not set, instead of crashing
    in os.path.basename(None).'''
    return os.path.basename(os.getenv("SHELL") or '')
class SelfUpdateError(Exception):
    '''Raised when the installer cannot update itself.'''
    pass
def self_update(server_url):
    '''Check for and apply an update to myself

    Downloads a newer fslinstaller when the server offers one, verifies
    its checksum, then re-execs it with our original arguments.
    Raises SelfUpdateError on any failure.'''
    # See if there is a newer version available
    if 'fslinstaller' in sys.argv[0]:
        try:
            installer = get_installer(server_url)
        except GetInstallerError, e:
            MsgUser.debug("Failed to get installer version %s." % (str(e)))
            raise SelfUpdateError('Failed to get installer version. '
                                  'Please try again later.')
        MsgUser.debug("Server has version " + installer['version'])
        if Version(installer['version']) <= version:
            MsgUser.debug("Installer is up-to-date.")
            return
        # There is a new version available - download it
        MsgUser.message("There is a newer version (%s) of the installer "
                        "(you have %s) updating..." % (
                            installer['version'], version))
        (_, tmpfname) = temp_file_name(mode='w', close=True)
        downloaded = False
        while downloaded is False:
            try:
                file_url = '/'.join(
                    (Settings.mirror.rstrip('/'), installer['filename']))
                download_file(
                    url=file_url,
                    localf=tmpfname)
                if (
                        file_checksum(tmpfname, installer['checksum_type']) !=
                        installer['checksum']):
                    raise SelfUpdateError(
                        "Found update to installer but download "
                        "was corrupt. Please try again later.")
            except DownloadFileError, e:
                # Fall back to the main site once before giving up
                if Settings.mirror != Settings.main_mirror:
                    MsgUser.warning(
                        "Download from mirror failed, re-trying from "
                        "main FSL download site")
                    Settings.mirror = Settings.main_mirror
                else:
                    MsgUser.debug("Failed to update installer %s." % (str(e)))
                    raise SelfUpdateError(
                        'Found update to installer but unable to '
                        'download the new version. Please try again.')
            else:
                downloaded = True
        # Now run the new installer
        # EXEC new script with the options we were given
        os.chmod(tmpfname, 0755)
        c_args = [sys.executable, tmpfname, ]
        c_args.extend(sys.argv[1:])
        MsgUser.debug(
            "Calling %s %s" % (sys.executable, c_args))
        os.execv(sys.executable, c_args)
    else:
        # We are now running the newly downloaded installer
        MsgUser.ok('Installer updated to latest version %s' % (str(version)))
        MsgUser.ok("Installer self update successful.")
class ServerFailure(Exception):
    '''Raised when the download server cannot be used.'''
    pass
class BadVersion(Exception):
    '''Raised when a version string is unusable.'''
    pass
class GetInstallerError(Exception):
    '''Raised when installer details cannot be fetched from the server.'''
    pass
def get_installer(server_url):
    '''Return the 'installer' section of the FSL web manifest.'''
    MsgUser.debug("Checking %s for "
                  "installer information" % (server_url))
    return get_web_manifest(server_url)['installer']
@memoize
def get_releases(server_url):
    '''Return a hash with all information about available
    versions for this OS

    Resolves vendor aliases (e.g. rhel -> centos) and walks backwards
    through OS major versions until a build is found.
    Raises UnsupportedOs when nothing matches this host.'''
    computer = Host
    MsgUser.debug("Getting web manifest")
    manifest = get_web_manifest(server_url)
    try:
        os_definition = manifest[computer.o_s][computer.vendor]
    except KeyError:
        raise UnsupportedOs("%s %s not supported by this installer" % (
            computer.o_s, computer.vendor
        ))
    t_version = computer.version.major
    alias_t = 'alias'
    if alias_t in os_definition.keys():
        # Redirect to the parent OS entry this vendor is an alias of
        if str(t_version) in os_definition[alias_t]:
            os_parent = os_definition[alias_t][
                str(t_version)]['parent']
            t_version = os_definition[alias_t][
                str(t_version)]['version']
            os_definition = manifest[computer.o_s][os_parent]
    if computer.arch not in os_definition.keys():
        raise UnsupportedOs("%s %s not supported" % (
            computer.vendor,
            computer.arch
        ))
    os_def = os_definition[computer.arch]
    # Try progressively older OS major versions until one has a build
    while t_version > 0:
        MsgUser.debug("Trying version %s" % (t_version))
        if str(t_version) not in os_def.keys():
            MsgUser.debug("...not found")
            t_version -= 1
        else:
            break
    if t_version == 0:
        raise UnsupportedOs("%s %s not supported" % (
            computer.vendor,
            computer.version.major
        ))
    elif t_version != computer.version.major:
        MsgUser.warning(
            "%s %s not officially supported "
            "- trying to locate support for an earlier "
            "version - this may not work" % (
                computer.vendor, computer.version.major))
    return os_definition[computer.arch][str(t_version)]
class ExtraDownloadError(Exception):
    '''Raised when a requested extra (sources/feeds) is unavailable.'''
    pass
@memoize
def get_extra(server_url, extra_type):
    '''Return a hash with all information about available
    versions of source code

    extra_type -- manifest section name, e.g. 'sources' or 'feeds';
    raises ExtraDownloadError for an unknown section.'''
    MsgUser.debug("Getting web manifest")
    manifest = get_web_manifest(server_url)
    try:
        return manifest[extra_type]
    except KeyError:
        raise ExtraDownloadError("Unrecognised extra %s" % (extra_type))
class ImproperlyConfigured(Exception):
    '''Raised when the installer configuration is inconsistent.'''
    pass
def list_releases(url):
    '''Print the FSL versions available for this OS, newest first.'''
    releases = get_releases(url)
    MsgUser.message("Available FSL versions for this OS:")
    MsgUser.debug(releases)
    rels = []
    for ver, info in releases.items():
        if 'date' in info:
            rels.append((ver, info['date']))
        else:
            # Entries without a date are externally-provided packages
            rels.append((ver, "Third-party package"))
    for ver, rdate in sorted(rels, reverse=True):
        MsgUser.message("%s\t(%s)" % (ver, rdate))
def list_builds(url):
    '''Lists all available FSL builds.

    Prints one line per FSL version listing the platforms it is built
    for, collected from the centos and macOS manifest sections.'''
    manifest = dict(get_web_manifest(url))
    MsgUser.message("All available FSL builds:")
    centos = manifest['linux']['centos']['x86_64']
    macos = manifest['darwin']['apple']['x86_64']
    def get_platform(s):
        # Pull the platform suffix out of 'fsl-<version>-<platform>.tar.gz'
        match = re.match(r'^fsl-(.+)-(.+).tar.gz$', s)
        plat = match.group(2)
        return plat
    fslversions = collections.defaultdict(set)
    for builds in itertools.chain(centos.values(), macos.values()):
        for fslversion, info in builds.items():
            fslversions[fslversion].add(get_platform(info['filename']))
    for fslversion, plats in fslversions.items():
        MsgUser.message('%s - %s' % (fslversion, ', '.join(plats)))
def latest_release(url):
    '''Return the release details for the newest available version.'''
    releases = get_releases(url)
    MsgUser.debug("Got version information: %s" % (releases))
    versions = [Version(key) for key in releases.keys()]
    MsgUser.debug("Versions: %s" % (versions))
    ordered = sorted(versions)
    return releases[str(ordered[-1])]
class InstallInstallerError(Exception):
    '''Raised when this script cannot be copied into the install.'''
    pass
def install_installer(fsldir):
    '''Install this script into $FSLDIR/etc

    Copies the running script into the install so future runs can find
    it; falls back to sudo when the folder isn't user-writeable.
    Raises InstallInstallerError when even root cannot write there.'''
    targetfolder = os.path.join(fsldir, 'etc')
    as_root = False
    installer = os.path.abspath(__file__)
    MsgUser.debug(
        "Copying fslinstaller (%s) to %s" % (
            installer,
            targetfolder))
    if not is_writeable(targetfolder):
        if not is_writeable_as_root(targetfolder):
            raise InstallInstallerError("Cannot write to folder as root user.")
        else:
            as_root = True
    copy_file(
        installer, os.path.join(targetfolder, "fslinstaller.py"),
        as_root)
class InstallQuestions(object):
    '''Registry of interactive installer questions.

    Questions are registered with add_question and asked (with
    validation and default handling) via ask_question. Setting
    'defaults' to True answers every question with its default
    without prompting.'''
    def __init__(self):
        self.questions = {}   # key -> question text
        self.validators = {}  # key -> validation callable
        self.type = {}        # key -> answer type ('bool', 'path', ...)
        self.default = {}     # key -> default answer
        # BUG FIX: 'answers' was never initialised, so the defaults
        # short-cut in ask_question raised AttributeError.
        self.answers = {}     # key -> answer recorded in defaults mode
        self.defaults = False

    def add_question(self, key, question, default, qtype, validation_f):
        '''Register a question under key.'''
        self.questions[key] = question
        self.default[key] = default
        self.type[key] = qtype
        self.validators[key] = validation_f

    def ask_question(self, key, default=None):
        '''Ask the question registered under key until the validator
        accepts an answer; returns the parsed answer.'''
        no_answer = True
        validator = self.validators[key]

        def parse_answer(q_type, answer):
            # 'bool' questions are yes/no; everything else passes through
            if q_type == 'bool':
                if answer.lower() == 'yes':
                    return True
                else:
                    return False
            else:
                return answer
        if not default:
            default = self.default[key]
        if self.defaults:
            # Non-interactive mode: accept the default immediately
            MsgUser.debug(self.questions[key])
            MsgUser.debug("Automatically using the default %s" % (default))
            # BUG FIX: 'answer' was previously left unbound on this path,
            # making the final 'return answer' raise NameError.
            answer = parse_answer(self.type[key], default)
            self.answers[key] = answer
            no_answer = False
        while no_answer:
            MsgUser.question(
                "%s? %s:" % (
                    self.questions[key],
                    '[%s]' % (default)))
            your_answer = raw_input()
            MsgUser.debug("Your answer was %s" % (your_answer))
            if your_answer == '':
                MsgUser.debug("You want the default")
                your_answer = default
            if validator(your_answer):
                answer = parse_answer(self.type[key], your_answer)
                no_answer = False
        MsgUser.debug("Returning the answer %s" % (answer))
        return answer
def yes_no(answer):
    '''Validator accepting only "yes" or "no" (case-insensitive).'''
    normalised = answer.lower()
    if normalised == 'yes' or normalised == 'no':
        return True
    MsgUser.message("Please enter yes or no.")
    return False
def check_install_location(folder):
    '''Don't allow relative paths

    Validator rejecting '.', '..' and paths anchored at './', '../'
    or '~'.'''
    MsgUser.debug("Checking %s is an absolute path" % (folder))
    is_relative = (folder == '.' or
                   folder == '..' or
                   folder.startswith('./') or
                   folder.startswith('../') or
                   folder.startswith('~'))
    if is_relative:
        MsgUser.message("Please enter an absolute path.")
        return False
    return True
def external_validate(what_to_check):
    '''We will validate elsewhere - always accept the answer here.'''
    return True
def check_fsl_install(fsldir):
    '''Check if this folder contains FSL install

    True when fsldir is a directory containing etc/fslversion.'''
    MsgUser.debug("Checking %s is an FSL install" % (fsldir))
    version_marker = os.path.join(fsldir, 'etc', 'fslversion')
    return os.path.isdir(fsldir) and os.path.exists(version_marker)
def fsl_downloadname(suffix, version):
    '''Compose the FSL download file name: fsl-<version>-<suffix>.'''
    return 'fsl-{0}-{1}'.format(version, suffix)
class Settings(object):
    '''Installer-wide configuration: server locations, manifest file
    names and the interactive questions used during install. All
    attributes are class-level and evaluated at import time; 'mirror'
    is mutated at runtime when a mirror download fails.'''
    version = version
    title = "--- FSL Installer - Version %s ---" % (version)
    main_server = 'fsl.fmrib.ox.ac.uk'
    mirrors = [build_url_with_protocol('https',
                                       main_server, ('fsldownloads',
                                                     '')), ]
    mirrors_file = 'fslmirrorlist.txt'
    manifest_json = 'manifest.json'
    manifest_csv = 'manifest.csv'
    main_mirror = mirrors[0]
    mirror = main_mirror
    applications = ['bin/fslview.app', 'bin/assistant.app']
    # X11 requirements checked on macOS
    x11 = {'bad_versions': [],
           'download_url': "http://xquartz.macosforge.org/landing/",
           'apps': ['XQuartz.app', 'X11.app', ],
           'location': "/Applications/Utilities"}
    default_location = '/usr/local/fsl'
    post_inst_dir = "etc/fslconf"
    # Interactive questions asked during install
    inst_qus = InstallQuestions()
    inst_qus.add_question('version_match',
                          "The requested version matches the installed "
                          "version - do you wish to re-install FSL",
                          'no', 'bool', yes_no)
    inst_qus.add_question('location',
                          "Where would you like the FSL install to be "
                          "(including the FSL folder name)",
                          default_location, 'path', check_install_location)
    inst_qus.add_question('del_old',
                          "FSL exists in the current location, "
                          "would you like to keep a backup of the old "
                          "version (N.B. You will not be able to use the old "
                          "version)",
                          'no', 'bool', yes_no)
    inst_qus.add_question('create',
                          "Install location doesn't exist, should I create it",
                          'yes', 'bool', yes_no)
    inst_qus.add_question('inst_loc',
                          "Where is the FSL folder (e.g. /usr/local/fsl)",
                          default_location, 'path', check_fsl_install)
    inst_qus.add_question('skipmd5',
                          "I was unable to download the checksum of "
                          "the install file so cannot confirm it is correct. "
                          "Would you like to install anyway",
                          'no', 'bool', yes_no)
    inst_qus.add_question('overwrite',
                          "There is already a local copy of the file, would "
                          "you like to overwrite it",
                          "yes", 'bool', yes_no)
    inst_qus.add_question('upgrade',
                          "Would you like to install upgrade",
                          "yes", 'bool', yes_no)
    inst_qus.add_question('update',
                          "Would you like to install update",
                          "yes", 'bool', yes_no)
def get_json(web_url):
    '''Fetch and parse a JSON document from web_url.

    Raises ServerFailure when the URL cannot be opened.'''
    MsgUser.debug("Opening "+web_url)
    try:
        url = open_url(web_url)
        return json.load(url)
    except OpenUrlError, e:
        raise ServerFailure(str(e))
# [ linux, centos, x86_64, 6, filename, 'fname',
# version, 'version', date, 'date', checksum_type, 'checksum_type',
# checksum, 'checksum', supported, 'true/false', notes, 'notes',
# instructions, 'instructions']
# [ linux, redhat, alias, centos, supported, True/false, version, 'version' ]
# [ 'installer', filename, 'fname', version, 'version', date, 'date',
# checksum_type, 'checksum_type', checksum, 'checksum', supported,
# 'true/false', notes, 'notes', instructions, 'instructions']
# [ feeds, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
# [ sources, filename, 'fname', version, 'version',
# date, 'date', checksum_type, 'checksum_type', checksum, 'checksum',
# supported, 'true/false', notes, 'notes', instructions, 'instructions']
class AutoDict(dict):
    '''Automatically create a nested dict

    Missing keys are vivified as empty AutoDicts, so arbitrarily deep
    assignments like d[a][b][c] = v work without setup. freeze()
    converts the whole tree back to plain dicts.'''
    def __getitem__(self, item):
        if not dict.__contains__(self, item):
            dict.__setitem__(self, item, type(self)())
        return dict.__getitem__(self, item)

    def freeze(self):
        '''Returns a dict representation of an AutoDict'''
        plain = {}
        for key, value in self.items():
            if type(value) == type(self):
                plain[key] = value.freeze()
            else:
                plain[key] = value
        return plain
def get_csv_dict(web_url):
    '''Fetch the CSV manifest from web_url and build a nested dict.

    Fallback for pythons without the json module; mirrors the JSON
    manifest layout (see the comment block above for the row formats).
    Raises ServerFailure when the URL cannot be opened.'''
    MsgUser.debug("Opening "+web_url)
    try:
        url = open_url(web_url)
        manifest_reader = csv.reader(
            url, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        a_dict = AutoDict()
        for line in manifest_reader:
            MsgUser.debug(line)
            if line[0] == 'feeds':
                # Remaining fields are alternating key, value pairs
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            elif line[0] == 'sources':
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            elif line[0] == 'installer':
                items = iter(line[1:])
                base_dict = dict(zip(items, items))
                a_dict[line[0]] = base_dict
            else:
                # Install package or alias
                if line[2] == 'alias':
                    items = iter(line[4:])
                    base_dict = dict(zip(items, items))
                    a_dict[
                        str(line[0])][
                        str(line[1])][
                        str(line[2])][
                        str(line[3])] = base_dict
                else:
                    items = iter(line[5:])
                    base_dict = dict(zip(items, items))
                    MsgUser.debug(
                        ",".join(
                            (line[0], line[1], line[2], line[3], line[4])))
                    a_dict[
                        str(line[0])][
                        str(line[1])][
                        str(line[2])][
                        str(line[3])][
                        str(line[4])] = base_dict
    except OpenUrlError, e:
        raise ServerFailure(str(e))
    MsgUser.debug(a_dict)
    return a_dict.freeze()
class InvalidVersion(Exception):
    '''Raised when a requested version is not valid.'''
    pass
def get_web_version_and_details(
        server_url=Settings.mirror,
        request_version=None):
    '''Resolve which version to download and its manifest details.

    request_version -- explicit version string, or None for the latest.
    Returns (Version, details dict); raises DownloadError when the
    version is unknown or this platform is unsupported.'''
    if request_version is None:
        details = latest_release(server_url)
        try:
            version = Version(details['version'])
        except KeyError:
            # No version entry - the manifest may point users elsewhere
            try:
                redirect = details['redirect']
                raise DownloadError(
                    "Installer not supported on this platform."
                    "Please visit %s for download instructions" % redirect)
            except KeyError:
                MsgUser.debug(
                    "Can't find version or redirect - %s" % details)
                raise DownloadError(
                    "Unsupported OS"
                )
    else:
        MsgUser.debug("Requested version %s" % request_version)
        releases = get_releases(server_url)
        try:
            version = Version(request_version)
        except ValueError:
            raise DownloadError(
                "%s doesn't look like a version" % request_version)
        if request_version not in releases.keys():
            raise DownloadError(
                "%s isn't an available version" % request_version)
        details = releases[request_version]
    return (version, details)
def download_release(
        server_url=Settings.mirror, to_temp=False,
        request_version=None, skip_verify=False,
        keep=False, source_code=False, feeds=False):
    '''Download an FSL release (or its sources/FEEDS) and verify it.

    server_url -- base download URL (defaults to the current mirror)
    to_temp -- download to a temporary file rather than the manifest name
    request_version -- version to fetch, or None for the latest
    skip_verify -- skip the checksum comparison
    keep -- accepted for interface compatibility; unused here
    source_code/feeds -- fetch the 'sources'/'feeds' extra instead
    Returns (local_filename, version, details); raises DownloadError.'''
    (version, details) = get_web_version_and_details(
        server_url, request_version)
    if request_version is None:
        request_version = str(version)
    if source_code or feeds:
        if source_code:
            extra_type = 'sources'
            MsgUser.message("Downloading source code")
        else:
            extra_type = 'feeds'
            MsgUser.message("Downloading FEEDS")
        try:
            releases = get_extra(server_url, extra_type)
        except ExtraDownloadError, e:
            raise DownloadError(
                "Unable to find details for %s" % (extra_type)
            )
        # Extras always go to a named local file
        to_temp = False
        try:
            details = releases[request_version]
        except KeyError:
            raise DownloadError(
                "%s %s isn't available" % (request_version, extra_type)
            )
    MsgUser.debug(details)
    if to_temp:
        try:
            (_, local_filename) = temp_file_name(close=True)
        except Exception, e:
            MsgUser.debug("Error getting temporary file name %s" % (str(e)))
            raise DownloadError("Unable to begin download")
    else:
        local_filename = details['filename']
        if os.path.exists(local_filename):
            if os.path.isfile(local_filename):
                MsgUser.message("%s exists" % (local_filename))
                overwrite = Settings.inst_qus.ask_question('overwrite')
                if overwrite:
                    MsgUser.warning(
                        "Erasing existing file %s" % local_filename)
                    try:
                        os.remove(local_filename)
                    except Exception:
                        raise DownloadError(
                            "Unabled to remove local file %s - remove"
                            " it and try again" % local_filename)
                else:
                    raise DownloadError("Aborting download")
            else:
                raise DownloadError(
                    "There is a directory named %s "
                    "- cannot overwrite" % local_filename)
    MsgUser.debug(
        "Downloading to file %s "
        "(this may take some time)." % (local_filename))
    MsgUser.message(
        "Downloading...")
    downloaded = False
    while downloaded is False:
        try:
            file_url = '/'.join(
                (Settings.mirror.rstrip('/'), details['filename']))
            download_file(
                url=file_url,
                localf=local_filename)
            if (not skip_verify and
                    (details['checksum'] !=
                     file_checksum(local_filename, details['checksum_type']))):
                raise DownloadError('Downloaded file fails checksum')
            MsgUser.ok("File downloaded")
        except DownloadFileError, e:
            MsgUser.debug(str(e))
            # Fall back to the main site once before failing outright
            if Settings.mirror != Settings.main_mirror:
                MsgUser.warning(
                    "Download from mirror failed, re-trying from "
                    "main FSL download site")
                Settings.mirror = Settings.main_mirror
            else:
                raise DownloadError(str(e))
        else:
            downloaded = True
    return (local_filename, version, details)
class DownloadError(Exception):
    '''Raised when a release/extras download fails.'''
    pass
def shell_config(shell, fsldir, skip_root=False):
    '''Build the FSL environment text for the given shell.

    shell -- a Bourne-family shell, a C-family shell, or 'matlab'
    fsldir -- FSL folder substituted into the setup templates
    skip_root -- wrap the setup so the root user skips it
    Returns (env_lines, match, replace): env_lines is the block to
    append to a profile; match/replace locate and rewrite an existing
    FSLDIR line. Raises ValueError for an unrecognised shell.'''
    MsgUser.debug("Building environment for %s" % (shell))
    env_lines = ''
    if shell in BOURNE_SHELLS:
        if skip_root:
            env_lines += '''if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh and dash doesn't setup the EUID environment var
EUID=`id -u`
fi
fi
if [ "$EUID" != "0" ]; then
'''
        env_lines += '''
# FSL Setup
FSLDIR=%s
PATH=${FSLDIR}/bin:${PATH}
export FSLDIR PATH
. ${FSLDIR}/etc/fslconf/fsl.sh
'''
        if skip_root:
            env_lines += '''fi'''
        match = "FSLDIR="
        replace = "FSLDIR=%s"
    elif shell in C_SHELLS:
        if skip_root:
            env_lines += '''if ( $uid != 0 ) then
'''
        env_lines += '''
# FSL Setup
setenv FSLDIR %s
setenv PATH ${FSLDIR}/bin:${PATH}
source ${FSLDIR}/etc/fslconf/fsl.csh
'''
        if skip_root:
            env_lines += '''
endif'''
        match = "setenv FSLDIR"
        replace = "setenv FSLDIR %s"
    elif shell == 'matlab':
        env_lines = '''
%% FSL Setup
setenv( 'FSLDIR', '%s' );
setenv('FSLOUTPUTTYPE', 'NIFTI_GZ');
fsldir = getenv('FSLDIR');
fsldirmpath = sprintf('%%s/etc/matlab',fsldir);
path(path, fsldirmpath);
clear fsldir fsldirmpath;
'''
        match = "setenv( 'FSLDIR',"
        replace = "setenv( 'FSLDIR', '%s' );"
    else:
        raise ValueError("Unknown shell type %s" % shell)
    return (env_lines % (fsldir), match, replace % (fsldir))
def get_profile(shell):
    '''Return the startup file to edit for the given shell.

    bash and tcsh fall back to .profile / .cshrc when their preferred
    file is absent; raises ValueError for an unsupported shell.'''
    home = os.path.expanduser("~")

    def _preferred_or_fallback(preferred, fallback):
        # Use the preferred file unless it is missing and the fallback exists
        if not os.path.isfile(preferred) and os.path.isfile(fallback):
            return fallback
        return preferred
    dotprofile = os.path.join(home, '.profile')
    if shell == 'bash':
        return _preferred_or_fallback(
            os.path.join(home, '.bash_profile'), dotprofile)
    if shell == 'zsh':
        # ZSH will never source .profile
        return os.path.join(home, '.zprofile')
    if shell == 'sh':
        return dotprofile
    cshprofile = os.path.join(home, '.cshrc')
    if shell == 'csh':
        return cshprofile
    if shell == 'tcsh':
        return _preferred_or_fallback(
            os.path.join(home, '.tcshrc'), cshprofile)
    raise ValueError("Unsupported shell")
class FixFslDirError(Exception):
    """Raised when an existing FSLDIR entry in a profile cannot be edited."""
    pass
def fix_fsldir(shell, fsldir):
(_, match, replace) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug(
"Editing %s, replacing line beginning:%s with %s." %
(profile, match, replace))
try:
edit_file(profile, line_starts_replace, match, replace, False)
except EditFileError, e:
raise FixFslDirError(str(e))
class AddFslDirError(Exception):
    """Raised when FSL settings cannot be appended to a shell profile."""
    pass
def add_fsldir(shell, fsldir):
(env_lines, _, _) = shell_config(shell, fsldir)
profile = get_profile(shell)
MsgUser.debug("Adding %s to %s" % (env_lines, profile))
try:
add_to_file(profile, env_lines, False)
except AddToFileError, e:
raise AddFslDirError(str(e))
class ConfigureMatlabError(Exception):
    """Raised when the MATLAB startup.m file cannot be created or edited."""
    pass
class ConfigureMatlabWarn(Exception):
    """Non-fatal: MATLAB does not appear to be installed; setup skipped."""
    pass
def configure_matlab(fsldir, m_startup='', c_file=True):
'''Setup your startup.m file to enable FSL MATLAB functions to work'''
(mlines, match, replace) = shell_config('matlab', fsldir)
if m_startup == '':
m_startup = os.path.join(
os.path.expanduser('~'), 'Documents', 'MATLAB', 'startup.m')
if os.path.exists(m_startup):
# Check if already configured
MsgUser.debug("Looking for %s in %s" % (match, m_startup))
if file_contains(m_startup, match):
try:
MsgUser.debug('Updating MATLAB startup file.')
edit_file(
m_startup, line_starts_replace,
match, replace, False)
except EditFileError, e:
raise ConfigureMatlabError(str(e))
else:
MsgUser.debug('Adding FSL settings to MATLAB.')
try:
add_to_file(m_startup, mlines, False)
except AddToFileError, e:
raise ConfigureMatlabError(str(e))
elif c_file:
# No startup.m file found. Create one
try:
MsgUser.debug('No MATLAB startup.m file found, creating one.')
if not os.path.isdir(os.path.dirname(m_startup)):
MsgUser.debug('No MATLAB startup.m file found, creating one.')
os.mkdir(os.path.dirname(m_startup))
create_file(m_startup, mlines, False)
except (OSError, CreateFileError), e:
MsgUser.debug(
'Unable to create ~/Documents/MATLAB/ folder or startup.m file,'
' cannot configure (%).' % (str(e)))
raise ConfigureMatlabError(
"Unable to create your ~/Documents/MATLAB/ folder or startup.m, "
"so cannot configure MATLAB for FSL.")
else:
MsgUser.debug('MATLAB may not be installed, doing nothing.')
raise ConfigureMatlabWarn("I can't tell if you have MATLAB installed.")
class SetupEnvironmentError(Exception):
    """Raised when shell-profile configuration for FSL fails."""
    pass
class SetupEnvironmentSkip(Exception):
    """Non-fatal: some profiles were already configured and were skipped."""
    pass
def setup_system_environment(fsldir):
    '''Add a system-wide profile setting up FSL for all users.
    Only supported on Redhat/Centos (uses /etc/profile.d).

    Writes/repairs /etc/profile.d/fsl.sh and fsl.csh, using sudo when not
    running as root. Collects per-file failures and raises them together
    as SetupEnvironmentError; raises SetupEnvironmentSkip listing files
    that needed no change.
    '''
    profile_d = '/etc/profile.d'
    profile_files = ['fsl.sh', 'fsl.csh']
    exceptions = []
    skips = []
    # Non-root users need sudo to touch /etc/profile.d
    if os.getuid() != 0:
        sudo = True
    else:
        sudo = False
    if os.path.isdir(profile_d):
        for profile in profile_files:
            # 'fsl.sh' -> shell name 'sh', 'fsl.csh' -> 'csh'
            pf = profile.split('.')[1]
            (lines, match, replace) = shell_config(pf, fsldir)
            this_profile = os.path.join(profile_d, profile)
            if os.path.exists(this_profile):
                # Already has a profile file
                # Does it contain an exact match for current FSLDIR?
                # NOTE(review): semantics depend on file_contains_1stline
                # (defined elsewhere); presumably it returns the matching
                # first line, or '' when absent — confirm that the
                # fix-vs-skip branches below are the right way round.
                match = file_contains_1stline(this_profile, replace)
                if match != '':
                    # If there is an fsl.(c)sh then just fix
                    # the entry for FSLDIR
                    MsgUser.debug(
                        "Fixing %s for FSLDIR location." % (this_profile))
                    try:
                        edit_file(
                            this_profile, line_starts_replace,
                            match, replace, sudo)
                    except EditFileError, e:
                        exceptions.append(str(e))
                else:
                    # No need to do anything
                    MsgUser.debug(
                        "%s already configured - skipping." %
                        (this_profile))
                    skips.append(profile)
            else:
                # Create the file
                try:
                    create_file(this_profile, lines, sudo)
                except CreateFileError, e:
                    exceptions.append(str(e))
    else:
        raise SetupEnvironmentError(
            "No system-wide configuration folder found - Skipped")
    if exceptions:
        raise SetupEnvironmentError(".".join(exceptions))
    if skips:
        raise SetupEnvironmentSkip(".".join(skips))
def setup_environment(fsldir=None, system=False, with_matlab=False):
    '''Setup the user's environment so that their
    terminal finds the FSL tools etc.

    Parameters:
        fsldir      -- FSL install path; discovered via get_fsldir() when None.
        system      -- unused here (system-wide setup is handled by
                       setup_system_environment).
        with_matlab -- also configure the user's MATLAB startup.m.

    Raises SetupEnvironmentError on any failure.
    '''
    # Check for presence of profile file:
    if fsldir is None:
        fsldir = get_fsldir()
    user_shell = which_shell()
    MsgUser.debug("User's shell is %s" % (user_shell))
    try:
        (profile_lines, _, _) = shell_config(user_shell, fsldir)
        profile = get_profile(user_shell)
    except ValueError, e:
        # shell_config/get_profile raise ValueError for unsupported shells
        raise SetupEnvironmentError(str(e))
    cfile = False
    if not os.path.isfile(profile):
        MsgUser.debug("User is missing a shell setup file.")
        cfile = True
    if cfile:
        MsgUser.debug("Creating file %s" % (profile))
        try:
            create_file(profile, profile_lines, False)
        except CreateFileError, e:
            raise SetupEnvironmentError(
                "Unable to create profile %s" % (profile))
    else:
        # Check if user already has FSLDIR set
        MsgUser.message("Setting up FSL software...")
        try:
            if file_contains(profile, "FSLDIR"):
                MsgUser.debug("Updating FSLDIR entry.")
                fix_fsldir(user_shell, fsldir)
            else:
                MsgUser.debug("Adding FSLDIR entry.")
                add_fsldir(user_shell, fsldir)
        except (AddFslDirError, FixFslDirError), e:
            raise SetupEnvironmentError(
                "Unable to update your profile %s"
                " with FSL settings" % (profile))
    if with_matlab:
        MsgUser.debug("Setting up MATLAB")
        try:
            configure_matlab(fsldir)
        except ConfigureMatlabError, e:
            MsgUser.debug(str(e))
            raise SetupEnvironmentError(str(e))
        except ConfigureMatlabWarn, e:
            # MATLAB probably absent - report as skipped, not failed
            MsgUser.skipped(str(e))
class PostInstallError(Exception):
    """Raised when post-installation tasks fail."""
    pass
class InstallArchiveError(Exception):
    """Raised when unpacking/installing an FSL archive fails."""
    pass
class UnknownArchiveType(Exception):
    """Raised when an archive is not a recognised gzip/bzip2/zip file."""
    pass
def archive_type(archive):
'''Determine file type based on extension and check
that file looks like this file type'''
archive_types = {
'gzip': ('tar', '-z'),
'bzip2': ('tar', '-j'),
'zip': ('zip', ''), }
try:
file_type = run_cmd("file %s" % (archive))
except RunCommandError, e:
raise UnknownArchiveType(str(e))
file_type = file_type.lower()
for f_type in ('gzip', 'bzip2', 'zip', ):
if f_type in file_type:
return archive_types[f_type]
raise UnknownArchiveType(archive)
def asl_gui_604_patch(fsldir, as_root=False):
    '''
    fsl 6.0.4 shipped with a broken fsleyes preview in asl_gui.
    This function applies the simple patch to any new installation
    that downloads FSL 6.0.4 using the fslinstaller.
    1. parse fsl version
    2. if version == 6.0.4 apply asl_gui patch, else do nothing and return
    to test this patch with an existing fsl 6.0.4:
    1. make a minimal $FSLDIR folder structure
        - cd ~
        - mkdir fsl_test
        - cd fsl_test
        - mkdir fsl
        - cp -r $FSLDIR/etc fsl/
        - cp -r $FSLDIR/python fsl/
        - mkdir fsl/bin
    2. tar it up
        - tar -czf fsl-6.0.4-centos7_64.tar.gz fsl
        - rm -r fsl # remove the fsl folder after tar-ing
    3. run a test python install from the tar file
        - be sure to use python 2.X (e.g. 2.7 works fine)
        - python fslinstaller.py -f ~/fsl_test/fsl-6.0.4-centos7_64.tar.gz -d ~/fsl_test/fsl -p -M -D
    '''
    asl_file = os.path.join(fsldir, 'python', 'oxford_asl', 'gui', 'preview_fsleyes.py') #$FSLDIR/python/oxford_asl/gui/preview_fsleyes.py
    vfile = os.path.join(fsldir, 'etc', 'fslversion')
    vstring = ''
    with open(vfile, 'r') as f:
        vstring = f.readline()
    v = vstring.split(':')[0] # e.g. 6.0.4:wkj2w3jh
    if v == '6.0.4':
        MsgUser.message("Patching asl_gui for fsl 6.0.4")
        # Patch a private temp copy so only cp needs elevated rights.
        tfile = os.path.join(tempfile.mkdtemp(), "preview_fsleyes.py")
        # backup asl_file
        run_cmd_displayoutput('cp {} {}.bkup'.format(asl_file, asl_file), as_root=as_root)
        # copy asl_file to tempfile
        run_cmd_displayoutput('cp {} {}'.format(asl_file, tfile), as_root=as_root)
        # ensure script can open temp file
        run_cmd_displayoutput('chmod 775 {}'.format(tfile), as_root=as_root)
        # fileinput with inplace=True redirects stdout into the file being
        # read, so print() below writes each (possibly rewritten) line back.
        for line in fileinput.input(files=tfile, inplace=True):
            line = re.sub('parent=parent, ready=ready', 'ready=ready, raiseErrors=True', line.rstrip())
            print(line)
        # Install the patched copy over the original, then clean up.
        run_cmd_displayoutput('cp {} {}'.format(tfile, asl_file), as_root=as_root)
        os.remove(tfile)
def post_install(
        fsldir, settings, script="post_install.sh", quiet=False,
        app_links=False, x11=False):
    """Run post-installation tasks on a freshly installed FSL tree.

    Installs a copy of this installer, applies the 6.0.4 asl_gui patch,
    optionally verifies X11, runs the release's post_install script if
    present, and (macOS) creates /Applications symlinks.

    Raises PostInstallError when the target is unwritable, the script is
    not executable, or the script fails.
    """
    MsgUser.message("Performing post install tasks")
    if is_writeable(fsldir):
        as_root = False
    elif is_writeable_as_root(fsldir):
        as_root = True
    else:
        raise PostInstallError(
            "Unable to write to target folder (%s)" % (fsldir))
    install_installer(fsldir)
    # apply asl_gui patch if fsl 6.0.4
    asl_gui_604_patch(fsldir, as_root=as_root)
    script_path = os.path.join(fsldir, Settings.post_inst_dir, script)
    if x11:
        try:
            check_X11(settings.x11)
        except CheckX11Warning, e:
            # X11 problems are advisory only - installation continues
            MsgUser.warning(str(e))
        else:
            MsgUser.ok("X11 (required for GUIs) found")
    if os.path.exists(script_path):
        MsgUser.debug("Found post-install script %s" % (script_path))
        if not os.access(script_path, os.X_OK):
            raise PostInstallError(
                "Unable to run post install script %s" % (script_path)
            )
        script_opts = '-f "%s"' % (fsldir)
        if quiet:
            script_opts += " -q"
        command_line = " ".join((script_path, script_opts))
        try:
            run_cmd_displayoutput(command_line, as_root=as_root)
        except RunCommandError, e:
            raise PostInstallError(
                "Error running post installation script (error %s)"
                " - check the install log" % (str(e))
            )
        # Work around for mistake in 5.0.10 post setup script
        mal = os.path.join(
            fsldir, Settings.post_inst_dir,
            'make_applications_links.sh')
        # If the script should have made the app links but doesn't call
        # the helper, keep app_links set so we do it ourselves below.
        if (os.path.exists(mal) and
                not file_contains(script_path, "make_applications_links.sh")):
            MsgUser.debug(
                "Work around necessary for missing app link creation")
        else:
            app_links = False
    if app_links:
        try:
            make_applications_links(fsldir, settings.applications)
        except MakeApplicationLinksError, e:
            for message in e.app_messages.values():
                MsgUser.warning(message)
        else:
            MsgUser.ok("/Applications links created/updated")
    MsgUser.ok("Post installation setup complete")
def install_archive(archive, fsldir=None):
    """Unpack an FSL archive and move it into place at *fsldir*.

    Unpacks into a temporary sibling folder, optionally archives or
    deletes any existing installation (the user is asked), then moves the
    new tree into place. Cleans up the temporary folder on both success
    and failure paths.

    Returns the final fsldir. Raises InstallError/InstallArchiveError on
    failure.
    """
    def clean_up_temp():
        # Best-effort removal of the unpack folder; failure only logged.
        try:
            safe_delete(tempfolder, as_root)
        except SafeDeleteError, sd_e:
            MsgUser.debug(
                "Unable to clean up temporary folder! "
                "%s" % (str(sd_e)))
    if not os.path.isfile(archive):
        raise InstallError("%s isn't a file" % (archive))
    if not fsldir:
        try:
            fsldir = get_fsldir(specified_dir=fsldir, install=True)
        except GetFslDirError, e:
            raise InstallError(str(e))
    MsgUser.debug("Requested install of %s as %s" % (archive, fsldir))
    if os.path.exists(fsldir):
        # move old one out of way
        MsgUser.debug("FSL version already installed")
        keep_old = Settings.inst_qus.ask_question('del_old')
    else:
        keep_old = False
    install_d = os.path.dirname(fsldir)
    MsgUser.debug("Checking %s is writeable." % (install_d))
    if is_writeable(install_d):
        as_root = False
    elif is_writeable_as_root(install_d):
        as_root = True
    else:
        raise InstallArchiveError(
            "Unable to write to target folder (%s), "
            "even as a super user." % (install_d))
    MsgUser.debug("Does %s require root for deletion? %s" % (
        install_d, as_root))
    try:
        unarchive, ua_option = archive_type(archive)
    except UnknownArchiveType, e:
        raise InstallArchiveError(str(e))
    # Generate a temporary name - eg fsl-<mypid>-date
    tempname = '-'.join(('fsl', str(os.getpid()), str(time.time())))
    tempfolder = os.path.join(install_d, tempname)
    try:
        run_cmd_dropstdout("mkdir %s" % (tempfolder), as_root=as_root)
    except RunCommandError, e:
        raise InstallArchiveError(
            "Unable to create folder to install into.")
    MsgUser.debug(
        "Unpacking %s into folder %s." % (archive, tempfolder))
    try:
        if unarchive == 'tar':
            unpack_cmd = 'tar -C %s -x %s -o -f %s' % (
                tempfolder, ua_option, archive)
        elif unarchive == 'zip':
            MsgUser.debug(
                "Calling unzip %s %s" % (ua_option, archive)
            )
            unpack_cmd = 'unzip %s %s' % (ua_option, archive)
        try:
            run_cmd_dropstdout(unpack_cmd, as_root=as_root)
        except RunCommandError, e:
            raise InstallArchiveError("Unable to unpack FSL.")
        # Archives unpack to a top-level 'fsl' folder
        new_fsl = os.path.join(tempfolder, 'fsl')
        if os.path.exists(fsldir):
            # move old one out of way
            try:
                old_version = get_installed_version(fsldir)
            except (NotAFslVersion, GetInstalledVersionError), e:
                if keep_old:
                    # Unidentifiable install - archive it as fsl-0.0.0
                    old_version = Version('0.0.0')
                    MsgUser.warning(
                        "The contents of %s doesn't look like an "
                        "FSL installation! - "
                        "moving to fsl-0.0.0" % (fsldir))
            # NOTE(review): if get_installed_version fails and keep_old is
            # False, old_version is unbound here - confirm this path is
            # unreachable in practice.
            old_fsl = '-'.join((fsldir, str(old_version)))
            if os.path.exists(old_fsl):
                MsgUser.debug(
                    "Looks like there is another copy of the "
                    "old version of FSL - deleting...")
                try:
                    safe_delete(old_fsl, as_root)
                except SafeDeleteError, e:
                    raise InstallError(
                        ";".join((
                            "Install location already has a "
                            "%s - I've tried to delete it but"
                            " failed" % (old_fsl), str(e))))
            if keep_old:
                try:
                    MsgUser.debug(
                        "Moving %s to %s" % (fsldir, old_fsl))
                    move(fsldir, old_fsl, as_root)
                    MsgUser.message(
                        '''You can find your archived version of FSL in %s.
If you wish to restore it, remove %s and rename %s to %s''' % (
                            old_fsl, fsldir, old_fsl, fsldir))
                except MoveError, mv_e:
                    # failed to move the old version
                    MsgUser.debug(
                        "Failed to move old version "
                        "- %s" % (str(mv_e)))
                    raise InstallError(
                        "Failed to backup old version (%s)" % (str(mv_e)))
            else:
                MsgUser.debug("Removing existing FSL install")
                try:
                    safe_delete(fsldir, as_root)
                    MsgUser.debug("Deleted %s." % (fsldir))
                except SafeDeleteError, e:
                    raise InstallError(
                        "Failed to delete %s - %s." % (fsldir, str(e)))
        else:
            old_fsl = ''
        try:
            MsgUser.debug("Moving %s to %s" % (new_fsl, fsldir))
            move(new_fsl, fsldir, as_root)
        except MoveError, e:
            # Unable to move new install into place
            MsgUser.debug(
                "Move failed - %s." % (str(e)))
            raise InstallError(
                'Failed to move new version into place.')
    except InstallError, e:
        # Any failure above: remove the unpack folder, then re-raise
        clean_up_temp()
        raise InstallArchiveError(str(e))
    clean_up_temp()
    MsgUser.debug("Install complete")
    MsgUser.ok("FSL software installed.")
    return fsldir
def check_for_updates(url, fsldir, requested_v=None):
# Start an update
MsgUser.message("Looking for new version.")
try:
this_version = get_installed_version(fsldir)
except GetInstalledVersionError, e:
# We can't find an installed version of FSL!
raise InstallError(str(e))
else:
MsgUser.debug("You have version %s" % (this_version))
if not requested_v:
version = Version(latest_release(url)['version'])
else:
try:
version = Version(requested_v)
except NotAFslVersion:
raise InstallError(
"%s doesn't look like a version" % requested_v)
if version > this_version:
# Update Available
if version.major > this_version.major:
# We don't support patching between major
# versions so download a fresh copy
return (UPGRADE, version)
else:
return (UPDATE, version)
else:
return (CURRENT, None)
class MakeApplicationLinksError(Exception):
    """Raised when one or more /Applications symlinks could not be made.

    The first positional argument (when supplied) is stored as
    app_messages; callers pass a dict of app -> failure message.
    """
    def __init__(self, *args):
        super(MakeApplicationLinksError, self).__init__(*args)
        try:
            self.app_messages = args[0]
        except IndexError:
            # NOTE(review): callers iterate e.app_messages.values(); this
            # no-args fallback is a list, which has no .values() - confirm
            # nothing raises this error without arguments.
            self.app_messages = []
def make_applications_links(fsldir, apps):
'''Create symlinks in /Applications'''
MsgUser.message("Creating Application links...")
results = {}
for app in apps:
app_location = os.path.join('/Applications', os.path.basename(app))
app_target = os.path.join(fsldir, app)
create_link = True
MsgUser.debug("Looking for existing link %s" % (app_location))
if os.path.lexists(app_location):
MsgUser.debug(
"Is a link: %s; realpath: %s" % (
os.path.islink(app_location),
os.path.realpath(app_location)))
if os.path.islink(app_location):
MsgUser.debug("A link already exists.")
if os.path.realpath(app_location) != app_target:
MsgUser.debug(
"Deleting old (incorrect) link %s" % (app_location))
try:
run_cmd_dropstdout("rm " + app_location, as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to remove broken"
" link to %s (%s)." % (app_target, str(e)))
results[app] = 'Unable to remove broken link to %s' % (
app_target)
create_link = False
else:
MsgUser.debug("Link is correct, skipping.")
create_link = False
else:
MsgUser.debug(
"%s doesn't look like a symlink, "
"so let's not delete it." % (app_location))
results[app] = (
"%s is not a link so hasn't been updated to point at the "
"new FSL install.") % (app_location)
create_link = False
if create_link:
MsgUser.debug('Create a link for %s' % (app))
if os.path.exists(app_target):
try:
run_cmd_dropstdout(
"ln -s %s %s" % (app_target, app_location),
as_root=True)
except RunCommandError, e:
MsgUser.debug(
"Unable to create link to %s (%s)." % (
app_target, str(e)))
results[app] = (
'Unable to create link to %s.') % (app_target)
else:
MsgUser.debug(
'Unable to find application'
' %s to link to.') % (app_target)
if results:
raise MakeApplicationLinksError(results)
class CheckX11Warning(Exception):
    """Non-fatal: X11/XQuartz is missing, unidentifiable or a bad version."""
    pass
def check_X11(x11):
    '''Function to find X11 install on Mac OS X and confirm it is compatible.
    Advise user to download Xquartz if necessary.

    *x11* is a dict with keys 'apps', 'location', 'bad_versions' and
    'download_url'. Raises CheckX11Warning (advisory) when X11 is absent,
    its version cannot be read, or the version is known-bad.
    '''
    MsgUser.message(
        "Checking for X11 windowing system (required for FSL GUIs).")
    xbin = ''
    for x in x11['apps']:
        # Remember the last candidate app found in the X11 location
        if os.path.exists(os.path.join(x11['location'], x)):
            xbin = x
    if xbin != '':
        # Find out what version is installed via Spotlight metadata
        x_v_cmd = [
            '/usr/bin/mdls', '-name',
            'kMDItemVersion', os.path.join(x11['location'], xbin)]
        try:
            cmd = Popen(x_v_cmd, stdout=PIPE, stderr=STDOUT)
            (vstring, _) = cmd.communicate()
        except Exception, e:
            raise CheckX11Warning(
                "Unable to check X11 version (%s)" % (str(e)))
        if cmd.returncode:
            MsgUser.debug("Error finding the version of X11 (%s)" % (vstring))
            # App found, but can't tell version, warn the user
            raise CheckX11Warning(
                "X11 (required for FSL GUIs) is installed but I"
                " can't tell what the version is.")
        else:
            # Returns:
            # kMDItemVersion = "2.3.6"\n
            (_, _, version) = vstring.strip().split()
            if version.startswith('"'):
                # Strip the surrounding quotes from the mdls output
                version = version[1:-1]
            if version in x11['bad_versions']:
                raise CheckX11Warning(
                    "X11 (required for FSL GUIs) is a version that"
                    " is known to cause problems. We suggest you"
                    " upgrade to the latest XQuartz release from "
                    "%s" % (x11['download_url']))
            else:
                MsgUser.debug(
                    "X11 found and is not a bad version"
                    " (%s: %s)." % (xbin, version))
    else:
        # No X11 found, warn the user
        raise CheckX11Warning(
            "The FSL GUIs require the X11 window system which I can't"
            " find in the usual places. You can download a copy from %s"
            " - you will need to install this before the GUIs will"
            " function" % (x11['download_url']))
def do_install(options, settings):
MsgUser.message(
shell_colours.bold + settings.title + shell_colours.default)
if options.test_installer:
settings.main_mirror = options.test_installer
this_computer = Host
if not this_computer.supported:
MsgUser.debug("Unsupported host %s %s %s" % (
this_computer.o_s,
this_computer.arch,
this_computer.os_type))
raise InstallError(
"Unsupported host - you could try building from source")
if this_computer.o_s == "linux":
system_environment = True
with_matlab = False
application_links = False
x11 = False
elif this_computer.o_s == "darwin":
system_environment = False
with_matlab = True
application_links = True
x11 = True
else:
MsgUser.debug("Unrecognised OS %s" % (this_computer.o_s))
raise InstallError("Unrecognised OS")
my_uid = os.getuid()
def configure_environment(fsldir, env_all=False, skip=False, matlab=False):
if skip:
return
if env_all:
if system_environment:
# Setup the system-wise environment
try:
setup_system_environment(fsldir)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(
"Failed to configure system-wide profiles "
"with FSL settings: %s" % (str(e)))
except SetupEnvironmentSkip, e:
MsgUser.skipped(
"Some shells already configured: %s" % (str(e)))
else:
MsgUser.debug("System-wide profiles setup.")
MsgUser.ok("System-wide FSL configuration complete.")
else:
MsgUser.skipped(
"System-wide profiles not supported on this OS")
elif my_uid != 0:
# Setup the environment for the current user
try:
setup_environment(fsldir, with_matlab=matlab)
except SetupEnvironmentError, e:
MsgUser.debug(str(e))
MsgUser.failed(str(e))
else:
MsgUser.ok(
"User profile updated with FSL settings, you will need "
"to log out and back in to use the FSL tools.")
if my_uid != 0:
if options.quiet:
settings.inst_qus.defaults = True
print '''
We may need administrator rights, but you have specified fully automated
mode - you may still be asked for an admin password if required.'''
print '''
To install fully automatedly, either ensure this is running as the root
user (use sudo) or that you can write to the folder you wish to install
FSL in.'''
elif (not options.download and
not options.list_versions and
not options.list_builds and
not options.get_source and
not options.get_feeds):
MsgUser.warning(
'''Some operations of the installer require administative rights,
for example installing into the default folder of /usr/local.
If your account is an 'Administrator' (you have 'sudo' rights)
then you will be prompted for your administrator password
when necessary.''')
if not options.d_dir and options.quiet:
raise InstallError(
"Quiet mode requires you to specify the install location"
" (e.g. /usr/local)")
if not options.quiet and not (options.list_versions or options.list_builds):
MsgUser.message(
"When asked a question, the default answer is given in square "
"brackets.\nHit the Enter key to accept this default answer.")
if options.env_only and my_uid != 0:
configure_environment(
get_fsldir(specified_dir=options.d_dir),
options.env_all)
return
if options.archive:
if not options.skipchecksum:
if not options.checksum:
raise InstallError(
"No checksum provided and checking not disabled")
else:
checksummer = globals()[options.checksum_type + 'File']
if options.checksum != checksummer(options.archive):
raise InstallError("FSL archive doesn't match checksum")
else:
MsgUser.ok("FSL Package looks good")
arc_version = archive_version(options.archive)
MsgUser.message(
"Installing FSL software version %s..." % (arc_version))
fsldir = install_archive(
archive=options.archive, fsldir=options.d_dir)
try:
post_install(fsldir=fsldir, settings=settings, quiet=options.quiet)
except PostInstallError, e:
raise InstallError(str(e))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
return
# All the following options require the Internet...
try:
settings.mirror = fastest_mirror(
settings.mirrors, settings.mirrors_file)
except SiteNotResponding, e:
# We can't find the FSL site - possibly the internet is down
raise InstallError(e)
try:
self_update(settings.mirror)
except SelfUpdateError, e:
MsgUser.debug("Self update error: %s" % (str(e)))
MsgUser.warning("Error checking for updates to installer - continuing")
if options.list_versions:
# Download a list of available downloads from the webserver
list_releases(settings.mirror)
return
if options.list_builds:
# List all available builds
list_builds(settings.mirror)
return
if options.download:
MsgUser.debug("Attempting to download latest release")
try:
download_release(request_version=options.requestversion,
skip_verify=options.skipchecksum)
except DownloadFileError, e:
raise("Unable to download release %s" % (str(e)))
return
if options.update:
fsldir = get_fsldir()
status, new_v = check_for_updates(settings.mirror, fsldir=fsldir)
if status == UPDATE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('update'):
return
elif status == UPGRADE:
MsgUser.ok("Version %s available." % new_v)
if not settings.inst_qus.ask_question('upgrade'):
return
else:
MsgUser.ok("FSL is up-to-date.")
return
if options.get_source:
MsgUser.debug("Attempting to download source")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
source_code=True)
except DownloadFileError, e:
raise("Unable to download source code %s" % (str(e)))
return
if options.get_feeds:
MsgUser.debug("Attempting to download FEEDS")
try:
download_release(
request_version=options.requestversion,
skip_verify=options.skipchecksum,
feeds=True)
except DownloadFileError, e:
raise("Unable to download FEEDS %s" % (str(e)))
return
try:
(version, details) = get_web_version_and_details(
request_version=options.requestversion
)
if 'redirect' in details:
MsgUser.message("Please download FSL using the instructions here:")
MsgUser.message("%s" % (details['redirect']))
return
fsldir = get_fsldir(specified_dir=options.d_dir, install=True)
reinstall = True
if os.path.exists(fsldir):
inst_version = get_installed_version(fsldir)
if inst_version == version:
reinstall = Settings.inst_qus.ask_question('version_match')
if reinstall:
(fname, version, details) = download_release(
to_temp=True,
request_version=options.requestversion,
skip_verify=options.skipchecksum)
if not details['supported']:
MsgUser.debug(
"This OS is not officially supported -"
" you may experience issues"
)
MsgUser.debug(
"Installing %s from %s (details: %s)" % (
fname, version, details))
MsgUser.message(
"Installing FSL software version %s..." % (version))
install_archive(
archive=fname, fsldir=fsldir)
try:
safe_delete(fname)
except SafeDeleteError, e:
MsgUser.debug(
"Unable to delete downloaded package %s ; %s" % (
fname, str(e)))
if details['notes']:
MsgUser.message(details['notes'])
try:
post_install(
fsldir=fsldir, settings=settings,
quiet=options.quiet, x11=x11,
app_links=application_links)
except PostInstallError, e:
raise InstallError(str(e))
except DownloadError, e:
MsgUser.debug("Unable to download FSL %s" % (str(e)))
raise InstallError("Unable to download FSL")
except InstallArchiveError, e:
MsgUser.debug("Unable to unpack FSL ; %s" % (str(e)))
raise InstallError("Unable to unpack FSL - %s" % (str(e)))
configure_environment(
fsldir=fsldir, env_all=options.env_all,
skip=options.skip_env, matlab=with_matlab)
if details['notes']:
MsgUser.message(details['notes'])
def parse_options(args):
    """Parse the installer's command-line arguments.

    *args* is the argument list (usually sys.argv[1:]). Returns the
    (options, args) pair from OptionParser.parse_args. The version shown
    by -v/--version comes from the module-level `version` string.
    """
    usage = "usage: %prog [options]"
    ver = "%%prog %s" % (version)
    parser = OptionParser(usage=usage, version=ver)
    parser.add_option("-d", "--dest", dest="d_dir",
                      help="Install into folder given by DESTDIR - "
                      "e.g. /usr/local/fsl",
                      metavar="DESTDIR", action="store",
                      type="string")
    parser.add_option("-e", dest="env_only",
                      help="Only setup/update your environment",
                      action="store_true")
    parser.add_option("-E", dest="env_all",
                      help="Setup/update the environment for ALL users",
                      action="store_true")
    parser.add_option("-v", help="Print version number and exit",
                      action="version")
    parser.add_option("-c", "--checkupdate", dest='update',
                      help="Check for FSL updates -"
                      " needs an internet connection",
                      action="store_true")
    # Hidden option (no help shown): download-only mode
    parser.add_option("-o", "--downloadonly", dest="download",
                      help=SUPPRESS_HELP,
                      action="store_true")
    advanced_group = OptionGroup(
        parser, "Advanced Install Options",
        "These are advanced install options")
    advanced_group.add_option(
        "-l", "--listversions", dest="list_versions",
        help="List available versions of FSL",
        action="store_true")
    advanced_group.add_option(
        "-b", "--listbuilds", dest="list_builds",
        help="List available FSL builds",
        action="store_true")
    advanced_group.add_option(
        "-B", "--fslbuild", dest="requestbuild",
        help="Download the specific FSLBUILD of FSL",
        metavar="FSLBUILD", action="store",
        type="string")
    advanced_group.add_option(
        "-V", "--fslversion", dest="requestversion",
        help="Download the specific version FSLVERSION of FSL",
        metavar="FSLVERSION", action="store",
        type="string")
    advanced_group.add_option(
        "-s", "--source", dest="get_source",
        help="Download source code for FSL",
        action="store_true")
    advanced_group.add_option(
        "-F", "--feeds", dest="get_feeds",
        help="Download FEEDS",
        action="store_true")
    advanced_group.add_option(
        "-q", "--quiet", dest='quiet',
        help="Silence all messages - useful if scripting install",
        action="store_true")
    advanced_group.add_option(
        "-p", dest="skip_env",
        help="Don't setup the environment",
        action="store_true")
    parser.add_option_group(advanced_group)
    debug_group = OptionGroup(
        parser, "Debugging Options",
        "These are for use if you have a problem running this installer.")
    debug_group.add_option(
        "-f", "--file", dest="archive",
        help="Install a pre-downloaded copy of the FSL archive",
        metavar="ARCHIVEFILE", action="store",
        type="string")
    debug_group.add_option(
        "-C", "--checksum", dest="checksum",
        help="Supply the expected checksum for the pre-downloaded FSL archive",
        metavar="CHECKSUM", action="store",
        type="string")
    debug_group.add_option(
        "-T", "--checksum-type", dest="checksum_type",
        default="sha256",
        help="Specify the type of checksum",
        action="store",
        type="string")
    debug_group.add_option(
        "-M", "--nochecksum", dest="skipchecksum",
        help="Don't check the pre-downloaded FSL archive",
        action="store_true")
    debug_group.add_option(
        "-D", dest="verbose",
        help="Switch on debug messages",
        action="store_true")
    # Hidden options used for installer self-testing
    debug_group.add_option(
        "-G", dest="test_installer",
        help=SUPPRESS_HELP,
        action="store",
        type="string")
    debug_group.add_option(
        "-w", dest="test_csv",
        help=SUPPRESS_HELP,
        action="store_true"
    )
    parser.add_option_group(debug_group)
    return parser.parse_args(args)
def override_host(requestbuild):
    '''Overrides attributes of the Host class in the event that the user
    has requested a specific FSL build.

    Known builds map to fixed (o_s, arch, vendor, version, glibc)
    tuples; with no requested build, Apple Silicon hosts are pointed at
    the x86_64 build, which runs fine under Rosetta emulation.
    '''
    known_builds = {
        'centos7_64': ('linux', 'x86_64', 'centos', '7.8.2003', '2.2.5'),
        'centos6_64': ('linux', 'x86_64', 'centos', '6.10', '2.2.5'),
        'macOS_64': ('darwin', 'x86_64', 'apple', '19.6.0', ''),
    }
    if requestbuild in known_builds:
        (o_s, arch, vendor, os_version, glibc) = known_builds[requestbuild]
        Host.o_s = o_s
        Host.arch = arch
        Host.vendor = vendor
        Host.version = Version(os_version)
        Host.glibc = glibc
        Host.supported = True
        Host.bits = '64'
    elif (requestbuild is None and
            Host.o_s == 'darwin' and
            Host.arch == 'arm64'):
        # Download x86 version if running on Apple
        # M1, as it runs just fine under emulation
        Host.arch = 'x86_64'
if __name__ == '__main__':
    # Script entry point: parse arguments, apply debug/quiet modes,
    # optionally override the detected host, then run the installer and
    # translate known failures into a non-zero exit status.
    (options, args) = parse_options(sys.argv[1:])
    if options.verbose:
        MsgUser.debugOn()
        print options
    if options.quiet:
        MsgUser.quietOn()
    if options.test_csv:
        # Test hook: force the CSV manifest code path instead of JSON
        HAS_JSON = False
    override_host(options.requestbuild)
    installer_settings = Settings()
    try:
        do_install(options, installer_settings)
    except BadVersion, e:
        MsgUser.debug(str(e))
        MsgUser.failed("Unable to find requested version!")
        sys.exit(1)
    except (InstallError, GetFslDirError, GetInstalledVersionError), e:
        MsgUser.failed(str(e))
        sys.exit(1)
    except UnsupportedOs, e:
        MsgUser.failed(str(e))
        sys.exit(1)
    except KeyboardInterrupt, e:
        # Ctrl-C: print a newline so the failure message starts cleanly
        MsgUser.message('')
        MsgUser.failed("Install aborted.")
        sys.exit(1)
|
haanme/FinnBrain
|
dockerfiles/fsl_anaconda3/fslinstaller.py
|
Python
|
mit
| 102,918
|
[
"VisIt"
] |
95daa0b2d269829db98e365b5357b83f5eccd6946321c69f4d69962503c3e24f
|
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2015 Gerome Fournier <jef(at)foutaise.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""module for creating simple ASCII tables
Example:
table = Texttable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = Texttable()
table.set_deco(Texttable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["Texttable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'LGPL'
__version__ = '0.8.8'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
"""
import sys
import string
import unicodedata
# textwrap lived in different places across very old Python releases.
try:
    if sys.version >= '2.3':
        import textwrap
    elif sys.version >= '2.2':
        from optparse import textwrap
    else:
        from optik import textwrap
except ImportError:
    sys.stderr.write("Can't import textwrap module!\n")
    raise
# NOTE(review): sys.version is compared as a *string*; this happens to work
# for '3.x' >= '2.7' but is fragile -- sys.version_info would be safer.
if sys.version >= '2.7':
    from functools import reduce
# Python 2/3 compatibility aliases for the two string types.
if sys.version >= '3.0':
    unicode_type = str
    bytes_type = bytes
else:
    unicode_type = unicode
    bytes_type = str
def obj2unicode(obj):
    """Convert *obj* to a unicode string.

    Unicode strings pass through untouched.  Byte strings are decoded as
    UTF-8; undecodable bytes are replaced after a warning on stderr.
    Anything else is stringified through unicode_type().
    """
    if isinstance(obj, unicode_type):
        return obj
    if not isinstance(obj, bytes_type):
        return unicode_type(obj)
    try:
        return unicode_type(obj, 'utf-8')
    except UnicodeDecodeError as strerror:
        sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
        return unicode_type(obj, 'utf-8', 'replace')
def len(iterable):
    """Length of *iterable*, counting wide East-Asian characters as 2.

    Deliberately shadows the builtin so the module's column-width
    computations line up for non-ASCII text; non-string arguments fall
    back to their own __len__().
    """
    if not (isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type)):
        return iterable.__len__()
    unicode_data = obj2unicode(iterable)
    if not hasattr(unicodedata, 'east_asian_width'):
        return unicode_data.__len__()
    wide = unicodedata.east_asian_width
    # 'W' (wide) and 'F' (fullwidth) characters occupy two columns.
    return sum([2 if wide(ch) in 'WF' else 1 for ch in unicode_data])
class ArraySizeError(Exception):
    """Raised when a supplied row/array does not match the table's size."""

    def __init__(self, msg):
        # Remember the message for __str__; the extra empty second
        # argument is preserved from the original API (it ends up in
        # self.args).
        self.msg = msg
        super(ArraySizeError, self).__init__(msg, '')

    def __str__(self):
        return self.msg
class Texttable:
    """Build and render a simple ASCII table.

    Feed data in with header()/add_row()/add_rows(), tune the look with
    the set_* methods, then call draw() to get the table as one string.
    """
    # Decoration bit flags; OR them together and pass the mask to set_deco().
    BORDER = 1
    HEADER = 1 << 1
    HLINES = 1 << 2
    VLINES = 1 << 3
    def __init__(self, max_width=80):
        """Constructor

        - max_width is an integer, specifying the maximum width of the table
        - if set to 0, size is unlimited, therefore cells won't be wrapped
        """
        if max_width <= 0:
            max_width = False
        self._max_width = max_width
        # Number of digits rendered for the 'f' and 'e' column datatypes.
        self._precision = 3
        # Every decoration is enabled by default.
        self._deco = Texttable.VLINES | Texttable.HLINES | Texttable.BORDER | \
            Texttable.HEADER
        self.set_chars(['-', '|', '+', '='])
        self.reset()
    def reset(self):
        """Reset the instance

        - reset rows and header
        - the cached horizontal-line string is invalidated too
        """
        self._hline_string = None
        self._row_size = None
        self._header = []
        self._rows = []
    def set_chars(self, array):
        """Set the characters used to draw lines between rows and columns

        - the array should contain 4 fields:
            [horizontal, vertical, corner, header]
        - default is set to:
            ['-', '|', '+', '=']
        """
        if len(array) != 4:
            raise ArraySizeError("array should contain 4 characters")
        # Keep only the first character of each (stringified) item.
        array = [ x[:1] for x in [ str(s) for s in array ] ]
        (self._char_horiz, self._char_vert,
         self._char_corner, self._char_header) = array
    def set_deco(self, deco):
        """Set the table decoration

        - 'deco' can be a combination of:
            Texttable.BORDER: Border around the table
            Texttable.HEADER: Horizontal line below the header
            Texttable.HLINES: Horizontal lines between rows
            Texttable.VLINES: Vertical lines between columns
          All of them are enabled by default
        - example:
            Texttable.BORDER | Texttable.HEADER
        """
        self._deco = deco
    def set_cols_align(self, array):
        """Set the desired columns alignment

        - the elements of the array should be either "l", "c" or "r":
            * "l": column flushed left
            * "c": column centered
            * "r": column flushed right
        """
        self._check_row_size(array)
        self._align = array
    def set_cols_valign(self, array):
        """Set the desired columns vertical alignment

        - the elements of the array should be either "t", "m" or "b":
            * "t": column aligned on the top of the cell
            * "m": column aligned on the middle of the cell
            * "b": column aligned on the bottom of the cell
        """
        self._check_row_size(array)
        self._valign = array
    def set_cols_dtype(self, array):
        """Set the desired columns datatype for the cols.

        - the elements of the array should be either "a", "t", "f", "e" or "i":
            * "a": automatic (try to use the most appropriate datatype)
            * "t": treat as text
            * "f": treat as float in decimal format
            * "e": treat as float in exponential format
            * "i": treat as int
        - by default, automatic datatyping is used for each column
        """
        self._check_row_size(array)
        self._dtype = array
    def set_cols_width(self, array):
        """Set the desired columns width

        - the elements of the array should be integers, specifying the
          width of each column. For example:
            [10, 20, 5]
        """
        self._check_row_size(array)
        try:
            array = list(map(int, array))
            # reduce(min, array) is the minimum of the list; widths must be > 0.
            if reduce(min, array) <= 0:
                raise ValueError
        except ValueError:
            sys.stderr.write("Wrong argument in column width specification\n")
            raise
        self._width = array
    def set_precision(self, width):
        """Set the desired precision for float/exponential formats

        - width must be an integer >= 0
        - default value is set to 3
        """
        if not type(width) is int or width < 0:
            raise ValueError('width must be an integer greater then 0')
        self._precision = width
    def header(self, array):
        """Specify the header of the table
        """
        self._check_row_size(array)
        self._header = list(map(obj2unicode, array))
    def add_row(self, array):
        """Add a row in the rows stack

        - cells can contain newlines and tabs
        """
        self._check_row_size(array)
        if not hasattr(self, "_dtype"):
            # No datatypes configured yet: default every column to automatic.
            self._dtype = ["a"] * self._row_size
        cells = []
        for i, x in enumerate(array):
            cells.append(self._str(i, x))
        self._rows.append(cells)
    def add_rows(self, rows, header=True):
        """Add several rows in the rows stack

        - The 'rows' argument can be either an iterator returning arrays,
          or a bi-dimensional array
        - 'header' specifies if the first row should be used as the header
          of the table
        """
        # NB: don't use 'iter' on bi-dimensional arrays, to keep the code
        # usable with python 2.1.
        # NOTE(review): 'next' is the Python 2 iterator method; Python 3
        # iterators expose __next__ and therefore take the subscript
        # branch below -- confirm this is intended for py3 iterators.
        if header:
            if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
                self.header(rows.next())
            else:
                self.header(rows[0])
                rows = rows[1:]
        for row in rows:
            self.add_row(row)
    def draw(self):
        """Draw the table

        - the table is returned as a whole string
        - returns None when there is neither a header nor any rows
        """
        if not self._header and not self._rows:
            return
        self._compute_cols_width()
        self._check_align()
        out = ""
        if self._has_border():
            out += self._hline()
        if self._header:
            out += self._draw_line(self._header, isheader=True)
            if self._has_header():
                out += self._hline_header()
        length = 0
        for row in self._rows:
            length += 1
            out += self._draw_line(row)
            if self._has_hlines() and length < len(self._rows):
                out += self._hline()
        if self._has_border():
            out += self._hline()
        # Strip the final newline appended by the last line drawn.
        return out[:-1]
    def _str(self, i, x):
        """Handles string formatting of cell data

            i - index of the cell datatype in self._dtype
            x - cell data to format
        """
        try:
            f = float(x)
        except:
            # Not numeric at all: render as plain text.
            return obj2unicode(x)
        n = self._precision
        dtype = self._dtype[i]
        if dtype == 'i':
            return str(int(round(f)))
        elif dtype == 'f':
            return '%.*f' % (n, f)
        elif dtype == 'e':
            return '%.*e' % (n, f)
        elif dtype == 't':
            return obj2unicode(x)
        else:
            # Automatic: whole numbers print as ints, everything else as
            # fixed point, switching to exponent notation for huge values.
            if f - round(f) == 0:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return str(int(round(f)))
            else:
                if abs(f) > 1e8:
                    return '%.*e' % (n, f)
                else:
                    return '%.*f' % (n, f)
    def _check_row_size(self, array):
        """Check that the specified array fits the previous rows size
        """
        if not self._row_size:
            self._row_size = len(array)
        elif self._row_size != len(array):
            raise ArraySizeError("array should contain %d elements" \
                % self._row_size)
    def _has_vlines(self):
        """Return a boolean, if vlines are required or not
        """
        return self._deco & Texttable.VLINES > 0
    def _has_hlines(self):
        """Return a boolean, if hlines are required or not
        """
        return self._deco & Texttable.HLINES > 0
    def _has_border(self):
        """Return a boolean, if border is required or not
        """
        return self._deco & Texttable.BORDER > 0
    def _has_header(self):
        """Return a boolean, if header line is required or not
        """
        return self._deco & Texttable.HEADER > 0
    def _hline_header(self):
        """Print header's horizontal line
        """
        return self._build_hline(True)
    def _hline(self):
        """Print an horizontal line
        """
        # Plain row separators never change, so build once and cache.
        if not self._hline_string:
            self._hline_string = self._build_hline()
        return self._hline_string
    def _build_hline(self, is_header=False):
        """Return a string used to separated rows or separate header from
        rows
        """
        horiz = self._char_horiz
        if (is_header):
            horiz = self._char_header
        # compute cell separator; the [a, b][bool] idiom picks the corner
        # character only when vertical lines are enabled
        s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
            horiz)
        # build the line
        l = s.join([horiz * n for n in self._width])
        # add border if needed
        if self._has_border():
            l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
                self._char_corner)
        else:
            l += "\n"
        return l
    def _len_cell(self, cell):
        """Return the width of the cell

        Special characters are taken into account to return the width of the
        cell, such like newlines and tabs
        """
        cell_lines = cell.split('\n')
        maxi = 0
        for line in cell_lines:
            length = 0
            parts = line.split('\t')
            for part, i in zip(parts, list(range(1, len(parts) + 1))):
                length = length + len(part)
                if i < len(parts):
                    # Advance to the next multiple of 8 for each tab stop.
                    length = (length//8 + 1) * 8
            maxi = max(maxi, length)
        return maxi
    def _compute_cols_width(self):
        """Return an array with the width of each column

        If a specific width has been specified, exit. If the total of the
        columns width exceed the table desired width, another width will be
        computed to fit, and cells will be wrapped.
        """
        if hasattr(self, "_width"):
            # Explicit widths were given through set_cols_width(): keep them.
            return
        maxi = []
        if self._header:
            maxi = [ self._len_cell(x) for x in self._header ]
        for row in self._rows:
            for cell,i in zip(row, list(range(len(row)))):
                try:
                    maxi[i] = max(maxi[i], self._len_cell(cell))
                except (TypeError, IndexError):
                    maxi.append(self._len_cell(cell))
        items = len(maxi)
        length = sum(maxi)
        # "items * 3 + 1" accounts for the separator/border characters around
        # each column; shrink all columns proportionally when too wide.
        if self._max_width and length + items * 3 + 1 > self._max_width:
            maxi = [
                int(round(self._max_width / (length + items * 3 + 1) * n))
                for n in maxi
            ]
        self._width = maxi
    def _check_align(self):
        """Check if alignment has been specified, set default one if not
        """
        if not hasattr(self, "_align"):
            self._align = ["l"] * self._row_size
        if not hasattr(self, "_valign"):
            self._valign = ["t"] * self._row_size
    def _draw_line(self, line, isheader=False):
        """Draw a line

        Loop over a single cell length, over all the cells
        """
        line = self._splitit(line, isheader)
        space = " "
        out = ""
        for i in range(len(line[0])):
            if self._has_border():
                out += "%s " % self._char_vert
            length = 0
            for cell, width, align in zip(line, self._width, self._align):
                length += 1
                cell_line = cell[i]
                fill = width - len(cell_line)
                if isheader:
                    # Header cells are always centered.
                    align = "c"
                if align == "r":
                    out += "%s " % (fill * space + cell_line)
                elif align == "c":
                    out += "%s " % (int(fill/2) * space + cell_line \
                        + int(fill/2 + fill%2) * space)
                else:
                    out += "%s " % (cell_line + fill * space)
                if length < len(line):
                    # [a, b][bool] idiom: separator is the vertical char only
                    # when vertical lines are enabled, a space otherwise.
                    out += "%s " % [space, self._char_vert][self._has_vlines()]
            out += "%s\n" % ['', self._char_vert][self._has_border()]
        return out
    def _splitit(self, line, isheader):
        """Split each element of line to fit the column width

        Each element is turned into a list, result of the wrapping of the
        string to the desired width
        """
        line_wrapped = []
        for cell, width in zip(line, self._width):
            array = []
            for c in cell.split('\n'):
                if c.strip() == "":
                    # Preserve deliberately empty lines inside a cell.
                    array.append("")
                else:
                    array.extend(textwrap.wrap(c, width))
            line_wrapped.append(array)
        max_cell_lines = reduce(max, list(map(len, line_wrapped)))
        for cell, valign in zip(line_wrapped, self._valign):
            if isheader:
                valign = "t"
            if valign == "m":
                # Middle: pad both sides, the odd extra line goes below.
                missing = max_cell_lines - len(cell)
                cell[:0] = [""] * int(missing / 2)
                cell.extend([""] * int(missing / 2 + missing % 2))
            elif valign == "b":
                cell[:0] = [""] * (max_cell_lines - len(cell))
            else:
                cell.extend([""] * (max_cell_lines - len(cell)))
        return line_wrapped
if __name__ == '__main__':
    # Demo / self-test: renders the two example tables shown in the
    # module docstring.
    table = Texttable()
    table.set_cols_align(["l", "r", "c"])
    table.set_cols_valign(["t", "m", "b"])
    table.add_rows([["Name", "Age", "Nickname"],
                    ["Mr\nXavier\nHuon", 32, "Xav'"],
                    ["Mr\nBaptiste\nClement", 1, "Baby"],
                    ["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
    print(table.draw() + "\n")
    # Second table: header rule only, with explicit per-column datatypes.
    table = Texttable()
    table.set_deco(Texttable.HEADER)
    table.set_cols_dtype(['t',  # text
                          'f',  # float (decimal)
                          'e',  # float (exponent)
                          'i',  # integer
                          'a']) # automatic
    table.set_cols_align(["l", "r", "r", "r", "l"])
    table.add_rows([["text", "float", "exp", "int", "auto"],
                    ["abcd", "67", 654, 89, 128.001],
                    ["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
                    ["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
                    ["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
    print(table.draw())
|
amisrs/one-eighty
|
angular_flask/lib/python2.7/site-packages/texttable.py
|
Python
|
mit
| 19,664
|
[
"Brian"
] |
0aefac99829d767cb17bc984f0abeb131239dbda012f910a492d8976b3313dd0
|
"""
Title: Vector-Quantized Variational Autoencoders
Author: [Sayak Paul](https://twitter.com/RisingSayak)
Date created: 2021/07/21
Last modified: 2021/07/21
Description: Training a VQ-VAE for image reconstruction and codebook sampling for generation.
"""
"""
In this example, we will develop a Vector Quantized Variational Autoencoder (VQ-VAE).
VQ-VAE was proposed in
[Neural Discrete Representation Learning](https://arxiv.org/abs/1711.00937)
by van den Oord et al. In traditional VAEs, the latent space is continuous and is sampled
from a Gaussian distribution. It is generally harder to learn such a continuous
distribution via gradient descent. VQ-VAEs, on the other hand,
operate on a discrete latent space, making the optimization problem simpler. It does so
by maintaining a discrete *codebook*. The codebook is developed by
discretizing the distance between continuous embeddings and the encoded
outputs. These discrete code words are then fed to the decoder, which is trained
to generate reconstructed samples.
For a detailed overview of VQ-VAEs, please refer to the original paper and
[this video explanation](https://www.youtube.com/watch?v=VZFVUrYcig0).
If you need a refresher on VAEs, you can refer to
[this book chapter](https://livebook.manning.com/book/deep-learning-with-python-second-edition/chapter-12/).
VQ-VAEs are one of the main recipes behind [DALL-E](https://openai.com/blog/dall-e/)
and the idea of a codebook is used in [VQ-GANs](https://arxiv.org/abs/2012.09841).
This example uses references from the
[official VQ-VAE tutorial](https://github.com/deepmind/sonnet/blob/master/sonnet/examples/vqvae_example.ipynb)
from DeepMind. To run this example, you will need TensorFlow 2.5 or higher, as well as
TensorFlow Probability, which can be installed using the command below.
"""
"""shell
pip install -q tensorflow-probability
"""
"""
## Imports
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_probability as tfp
import tensorflow as tf
"""
## `VectorQuantizer` layer
Here, we will implement a custom layer to encapsulate the vector
quantizer logic, which is the central component of VQ-VAEs.
Consider an output from the encoder, with shape `(batch_size, height, width, num_channels)`.
The vector quantizer will first
flatten this output, only keeping the `num_channels` dimension intact. So, the shape would
become `(batch_size * height * width, num_channels)`. The rationale behind this is to
treat the total number of channels as the space for the latent embeddings.
An embedding table is then initialized to learn a codebook. We measure the L2-normalized
distance between the flattened encoder outputs and code words of this codebook. We take the
code that yields the minimum distance, and we apply one-hot encoding to achieve quantization.
This way, the code yielding the minimum distance to the corresponding encoder output is
mapped as one and the remaining codes are mapped as zeros.
Since the quantization process is not differentiable, we apply a
[straight-through estimator](https://www.hassanaskary.com/python/pytorch/deep%20learning/2020/09/19/intuitive-explanation-of-straight-through-estimators.html)
in between the decoder and the encoder, so that the decoder gradients are directly propagated
to the encoder. As the encoder and decoder share the same channel space, the hope is that the
decoder gradients will still be meaningful to the encoder.
"""
class VectorQuantizer(layers.Layer):
    """Vector-quantization layer: snaps encoder outputs to the nearest
    entry of a learned codebook and adds the VQ losses via add_loss().
    """
    def __init__(self, num_embeddings, embedding_dim, beta=0.25, **kwargs):
        super().__init__(**kwargs)
        self.embedding_dim = embedding_dim
        self.num_embeddings = num_embeddings
        self.beta = (
            beta  # This parameter is best kept between [0.25, 2] as per the paper.
        )
        # Initialize the embeddings which we will quantize.
        w_init = tf.random_uniform_initializer()
        self.embeddings = tf.Variable(
            initial_value=w_init(
                shape=(self.embedding_dim, self.num_embeddings), dtype="float32"
            ),
            trainable=True,
            name="embeddings_vqvae",
        )
    def call(self, x):
        """Quantize `x`; the output has the same shape as the input."""
        # Calculate the input shape of the inputs and
        # then flatten the inputs keeping `embedding_dim` intact.
        input_shape = tf.shape(x)
        flattened = tf.reshape(x, [-1, self.embedding_dim])
        # Quantization: one-hot of the nearest code index, then a matmul
        # with the codebook reassembles the selected embedding vectors.
        encoding_indices = self.get_code_indices(flattened)
        encodings = tf.one_hot(encoding_indices, self.num_embeddings)
        quantized = tf.matmul(encodings, self.embeddings, transpose_b=True)
        quantized = tf.reshape(quantized, input_shape)
        # Calculate vector quantization loss and add that to the layer. You can learn more
        # about adding losses to different layers here:
        # https://keras.io/guides/making_new_layers_and_models_via_subclassing/. Check
        # the original paper to get a handle on the formulation of the loss function.
        commitment_loss = self.beta * tf.reduce_mean(
            (tf.stop_gradient(quantized) - x) ** 2
        )
        codebook_loss = tf.reduce_mean((quantized - tf.stop_gradient(x)) ** 2)
        self.add_loss(commitment_loss + codebook_loss)
        # Straight-through estimator: gradients flow to x as if the
        # quantization step were the identity.
        quantized = x + tf.stop_gradient(quantized - x)
        return quantized
    def get_code_indices(self, flattened_inputs):
        """Return the index of the nearest code word for each input row."""
        # Calculate L2-normalized distance between the inputs and the codes.
        similarity = tf.matmul(flattened_inputs, self.embeddings)
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed batched.
        distances = (
            tf.reduce_sum(flattened_inputs ** 2, axis=1, keepdims=True)
            + tf.reduce_sum(self.embeddings ** 2, axis=0)
            - 2 * similarity
        )
        # Derive the indices for minimum distances.
        encoding_indices = tf.argmin(distances, axis=1)
        return encoding_indices
"""
**A note on straight-through estimation**:
This line of code does the straight-through estimation part: `quantized = x +
tf.stop_gradient(quantized - x)`. During backpropagation, `(quantized - x)` won't be
included in the computation graph and the gradients obtained for `quantized`
will be copied for `inputs`. Thanks to [this video](https://youtu.be/VZFVUrYcig0?t=1393)
for helping me understand this technique.
"""
"""
## Encoder and decoder
We will now implement the encoder and the decoder for the VQ-VAE. We will keep them small so
that their capacity is a good fit for the MNIST dataset, which we will use to demonstrate
the results. The definitions of the encoder and decoder come from
[this example](https://keras.io/examples/generative/vae).
"""
def get_encoder(latent_dim=16):
    """Build the convolutional encoder.

    Two stride-2 convolutions downsample the 28x28x1 input (28 -> 14 -> 7)
    before a 1x1 convolution projects to `latent_dim` channels.
    """
    images = keras.Input(shape=(28, 28, 1))
    hidden = layers.Conv2D(32, 3, activation="relu", strides=2, padding="same")(images)
    hidden = layers.Conv2D(64, 3, activation="relu", strides=2, padding="same")(hidden)
    feature_map = layers.Conv2D(latent_dim, 1, padding="same")(hidden)
    return keras.Model(images, feature_map, name="encoder")
def get_decoder(latent_dim=16):
    """Build the transposed-convolution decoder mirroring get_encoder().

    Fix: the input shape is derived from an encoder built with the *same*
    `latent_dim`.  The original called get_encoder() with its default, so
    the decoder's input silently stayed at 16 channels and broke for any
    latent_dim != 16.
    """
    latent_inputs = keras.Input(shape=get_encoder(latent_dim).output.shape[1:])
    x = layers.Conv2DTranspose(64, 3, activation="relu", strides=2, padding="same")(
        latent_inputs
    )
    x = layers.Conv2DTranspose(32, 3, activation="relu", strides=2, padding="same")(x)
    decoder_outputs = layers.Conv2DTranspose(1, 3, padding="same")(x)
    return keras.Model(latent_inputs, decoder_outputs, name="decoder")
"""
## Standalone VQ-VAE model
"""
def get_vqvae(latent_dim=16, num_embeddings=64):
    """Assemble the full VQ-VAE: encoder -> vector quantizer -> decoder."""
    quantizer = VectorQuantizer(num_embeddings, latent_dim, name="vector_quantizer")
    encoder = get_encoder(latent_dim)
    decoder = get_decoder(latent_dim)
    images = keras.Input(shape=(28, 28, 1))
    latents = encoder(images)
    reconstructed = decoder(quantizer(latents))
    return keras.Model(images, reconstructed, name="vq_vae")


get_vqvae().summary()
"""
Note that the output channels of the encoder should match the `latent_dim` for the vector
quantizer.
"""
"""
## Wrapping up the training loop inside `VQVAETrainer`
"""
class VQVAETrainer(keras.models.Model):
    """keras.Model wrapper that trains the VQ-VAE with a custom train_step.

    The reconstruction loss is scaled by the variance of the training data;
    the quantizer's losses are picked up from `self.vqvae.losses`.
    """
    def __init__(self, train_variance, latent_dim=32, num_embeddings=128, **kwargs):
        super(VQVAETrainer, self).__init__(**kwargs)
        # Variance of the training pixels, used to normalize the MSE below.
        self.train_variance = train_variance
        self.latent_dim = latent_dim
        self.num_embeddings = num_embeddings
        self.vqvae = get_vqvae(self.latent_dim, self.num_embeddings)
        # Running-mean trackers so fit() reports per-epoch averages.
        self.total_loss_tracker = keras.metrics.Mean(name="total_loss")
        self.reconstruction_loss_tracker = keras.metrics.Mean(
            name="reconstruction_loss"
        )
        self.vq_loss_tracker = keras.metrics.Mean(name="vq_loss")
    @property
    def metrics(self):
        # Listing the trackers here lets Keras reset them at epoch boundaries.
        return [
            self.total_loss_tracker,
            self.reconstruction_loss_tracker,
            self.vq_loss_tracker,
        ]
    def train_step(self, x):
        """Run one optimization step on a batch of images `x`."""
        with tf.GradientTape() as tape:
            # Outputs from the VQ-VAE.
            reconstructions = self.vqvae(x)
            # Calculate the losses.
            reconstruction_loss = (
                tf.reduce_mean((x - reconstructions) ** 2) / self.train_variance
            )
            # self.vqvae.losses holds the commitment + codebook losses added
            # by the VectorQuantizer during the forward pass above.
            total_loss = reconstruction_loss + sum(self.vqvae.losses)
        # Backpropagation.
        grads = tape.gradient(total_loss, self.vqvae.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.vqvae.trainable_variables))
        # Loss tracking.
        self.total_loss_tracker.update_state(total_loss)
        self.reconstruction_loss_tracker.update_state(reconstruction_loss)
        self.vq_loss_tracker.update_state(sum(self.vqvae.losses))
        # Log results.
        return {
            "loss": self.total_loss_tracker.result(),
            "reconstruction_loss": self.reconstruction_loss_tracker.result(),
            "vqvae_loss": self.vq_loss_tracker.result(),
        }
"""
## Load and preprocess the MNIST dataset
"""
# Load MNIST, add a trailing channel axis, and shift pixels to [-0.5, 0.5].
(x_train, _), (x_test, _) = keras.datasets.mnist.load_data()
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
x_train_scaled = (x_train / 255.0) - 0.5
x_test_scaled = (x_test / 255.0) - 0.5
# Variance of the scaled (unshifted) training pixels; used by VQVAETrainer
# to normalize the reconstruction loss.
data_variance = np.var(x_train / 255.0)
"""
## Train the VQ-VAE model
"""
# Train the VQ-VAE; only an optimizer is passed since the custom train_step
# computes its own losses.
vqvae_trainer = VQVAETrainer(data_variance, latent_dim=16, num_embeddings=128)
vqvae_trainer.compile(optimizer=keras.optimizers.Adam())
vqvae_trainer.fit(x_train_scaled, epochs=30, batch_size=128)
"""
## Reconstruction results on the test set
"""
def show_subplot(original, reconstructed):
    """Plot an original image and its reconstruction side by side."""
    panels = [(original, "Original"), (reconstructed, "Reconstructed")]
    for position, (image, caption) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        plt.imshow(image.squeeze() + 0.5)
        plt.title(caption)
        plt.axis("off")
    plt.show()
# Show 10 random test images next to their reconstructions.
trained_vqvae_model = vqvae_trainer.vqvae
idx = np.random.choice(len(x_test_scaled), 10)
test_images = x_test_scaled[idx]
reconstructions_test = trained_vqvae_model.predict(test_images)
for test_image, reconstructed_image in zip(test_images, reconstructions_test):
    show_subplot(test_image, reconstructed_image)
"""
These results look decent. You are encouraged to play with different hyperparameters
(especially the number of embeddings and the dimensions of the embeddings) and observe how
they affect the results.
"""
"""
## Visualizing the discrete codes
"""
# Map each test image to its grid of discrete codebook indices and plot it.
encoder = vqvae_trainer.vqvae.get_layer("encoder")
quantizer = vqvae_trainer.vqvae.get_layer("vector_quantizer")
encoded_outputs = encoder.predict(test_images)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
# Restore the spatial layout (encoder output shape minus the channel axis).
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
for i in range(len(test_images)):
    plt.subplot(1, 2, 1)
    plt.imshow(test_images[i].squeeze() + 0.5)
    plt.title("Original")
    plt.axis("off")
    plt.subplot(1, 2, 2)
    plt.imshow(codebook_indices[i])
    plt.title("Code")
    plt.axis("off")
    plt.show()
"""
The figure above shows that the discrete codes have been able to capture some
regularities from the dataset. Now, you might wonder, ***how do we use these codes to
generate new samples?*** Specifically, how do we sample from this codebook to create
novel examples? Since these codes are discrete and we imposed a categorical distribution
on them, we cannot use them yet to generate anything meaningful. These codes were not
updated during the training process as well. So, they need to be adjusted further so that
we can use them for the subsequent image generation task. The authors use a PixelCNN to
train these codes so that they can be used as powerful priors to generate novel examples.
PixelCNN was proposed in
[Conditional Image Generation with PixelCNN Decoders](https://arxiv.org/abs/1606.05328)
by van den Oord et al. We will borrow code from
[this example](https://keras.io/examples/generative/pixelcnn/)
to develop a PixelCNN. It's an auto-regressive generative model where the current outputs
are conditioned on the prior ones. In other words, a PixelCNN generates an image on a
pixel-by-pixel basis.
"""
"""
## PixelCNN hyperparameters
"""
# Depth of the PixelCNN prior network.
num_residual_blocks = 2
num_pixelcnn_layers = 2
# The PixelCNN operates on the spatial grid of code indices, so the channel
# axis of the encoder output is dropped.
pixelcnn_input_shape = encoded_outputs.shape[1:-1]
print(f"Input shape of the PixelCNN: {pixelcnn_input_shape}")
"""
Don't worry about the input shape. It'll become clear in the following sections.
"""
"""
## PixelCNN model
Majority of this comes from
[this example](https://keras.io/examples/generative/pixelcnn/).
"""
# The first layer is the PixelCNN layer. This layer simply
# builds on the 2D convolutional layer, but includes masking.
class PixelConvLayer(layers.Layer):
    """Conv2D wrapper whose kernel is masked to hide 'future' pixels.

    mask_type "A" also hides the centre position (used for the first
    layer); "B" keeps it (used in all subsequent layers).
    """
    def __init__(self, mask_type, **kwargs):
        super(PixelConvLayer, self).__init__()
        self.mask_type = mask_type
        self.conv = layers.Conv2D(**kwargs)
    def build(self, input_shape):
        # Build the conv2d layer to initialize kernel variables
        self.conv.build(input_shape)
        # Use the initialized kernel to create the mask
        kernel_shape = self.conv.kernel.get_shape()
        self.mask = np.zeros(shape=kernel_shape)
        # Expose every row above the centre ...
        self.mask[: kernel_shape[0] // 2, ...] = 1.0
        # ... and the left half of the centre row.
        self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0
        if self.mask_type == "B":
            # Type "B" may also see the centre position itself.
            self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0
    def call(self, inputs):
        # Re-apply the mask on every call so masked weights stay zeroed
        # even after optimizer updates.
        self.conv.kernel.assign(self.conv.kernel * self.mask)
        return self.conv(inputs)
# Next, we build our residual block layer.
# This is just a normal residual block, but based on the PixelConvLayer.
class ResidualBlock(keras.layers.Layer):
    """Residual block whose middle convolution is a masked PixelConvLayer
    (mask type "B"), sandwiched between two 1x1 convolutions."""

    def __init__(self, filters, **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)
        self.conv1 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )
        self.pixel_conv = PixelConvLayer(
            mask_type="B",
            filters=filters // 2,
            kernel_size=3,
            activation="relu",
            padding="same",
        )
        self.conv2 = keras.layers.Conv2D(
            filters=filters, kernel_size=1, activation="relu"
        )

    def call(self, inputs):
        # Squeeze -> masked conv -> expand, then add the skip connection.
        residual = self.conv2(self.pixel_conv(self.conv1(inputs)))
        return keras.layers.add([inputs, residual])
# Functional PixelCNN over the grid of discrete code indices.
pixelcnn_inputs = keras.Input(shape=pixelcnn_input_shape, dtype=tf.int32)
# One-hot encode the indices so convolutions see a categorical channel axis.
ohe = tf.one_hot(pixelcnn_inputs, vqvae_trainer.num_embeddings)
# First layer uses mask type "A": the centre position itself is hidden.
x = PixelConvLayer(
    mask_type="A", filters=128, kernel_size=7, activation="relu", padding="same"
)(ohe)
for _ in range(num_residual_blocks):
    x = ResidualBlock(filters=128)(x)
for _ in range(num_pixelcnn_layers):
    x = PixelConvLayer(
        mask_type="B",
        filters=128,
        kernel_size=1,
        strides=1,
        activation="relu",
        padding="valid",
    )(x)
# Per-position logits over the codebook entries.
out = keras.layers.Conv2D(
    filters=vqvae_trainer.num_embeddings, kernel_size=1, strides=1, padding="valid"
)(x)
pixel_cnn = keras.Model(pixelcnn_inputs, out, name="pixel_cnn")
pixel_cnn.summary()
"""
## Prepare data to train the PixelCNN
We will train the PixelCNN to learn a categorical distribution of the discrete codes.
First, we will generate code indices using the encoder and vector quantizer we just
trained. Our training objective will be to minimize the crossentropy loss between these
indices and the PixelCNN outputs. Here, the number of categories is equal to the number
of embeddings present in our codebook (128 in our case). The PixelCNN model is
trained to learn a distribution (as opposed to minimizing the L1/L2 loss), which is where
it gets its generative capabilities from.
"""
# Generate the codebook indices: the PixelCNN is trained on the code grids
# of the whole training set.
encoded_outputs = encoder.predict(x_train_scaled)
flat_enc_outputs = encoded_outputs.reshape(-1, encoded_outputs.shape[-1])
codebook_indices = quantizer.get_code_indices(flat_enc_outputs)
codebook_indices = codebook_indices.numpy().reshape(encoded_outputs.shape[:-1])
print(f"Shape of the training data for PixelCNN: {codebook_indices.shape}")
"""
## PixelCNN training
"""
# Autoregressive training: inputs and targets are the same index grids; the
# kernel masking inside PixelConvLayer prevents a position from seeing itself.
pixel_cnn.compile(
    optimizer=keras.optimizers.Adam(3e-4),
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
pixel_cnn.fit(
    x=codebook_indices,
    y=codebook_indices,
    batch_size=128,
    epochs=30,
    validation_split=0.1,
)
"""
We can improve these scores with more training and hyperparameter tuning.
"""
"""
## Codebook sampling
Now that our PixelCNN is trained, we can sample distinct codes from its outputs and pass
them to our decoder to generate novel images.
"""
# Create a mini sampler model: for every grid position it draws one code
# index from the categorical distribution defined by the PixelCNN logits.
inputs = layers.Input(shape=pixel_cnn.input_shape[1:])
x = pixel_cnn(inputs, training=False)
dist = tfp.distributions.Categorical(logits=x)
sampled = dist.sample()
sampler = keras.Model(inputs, sampled)
"""
We now construct a prior to generate images. Here, we will generate 10 images.
"""
# Create an empty array of priors.
batch = 10
priors = np.zeros(shape=(batch,) + (pixel_cnn.input_shape)[1:])
batch, rows, cols = priors.shape
# Iterate over the priors because generation has to be done sequentially pixel by pixel.
for row in range(rows):
    for col in range(cols):
        # Feed the whole array; the sampler returns a *sampled* code index
        # (not a probability -- dist.sample() runs inside `sampler`) for
        # every position.
        probs = sampler.predict(priors)
        # Keep only the sample for the current position; positions generated
        # earlier are already frozen in `priors`.
        priors[:, row, col] = probs[:, row, col]
print(f"Prior shape: {priors.shape}")
"""
We can now use our decoder to generate the images.
"""
# Perform an embedding lookup: one-hot the sampled indices, then matmul with
# the codebook to recover the corresponding embedding vectors.
pretrained_embeddings = quantizer.embeddings
priors_ohe = tf.one_hot(priors.astype("int32"), vqvae_trainer.num_embeddings).numpy()
quantized = tf.matmul(
    priors_ohe.astype("float32"), pretrained_embeddings, transpose_b=True
)
# Reshape to the spatial layout the decoder expects.
quantized = tf.reshape(quantized, (-1, *(encoded_outputs.shape[1:])))
# Generate novel images.
decoder = vqvae_trainer.vqvae.get_layer("decoder")
generated_samples = decoder.predict(quantized)
for i in range(batch):
    plt.subplot(1, 2, 1)
    plt.imshow(priors[i])
    plt.title("Code")
    plt.axis("off")
    plt.subplot(1, 2, 2)
    plt.imshow(generated_samples[i].squeeze() + 0.5)
    plt.title("Generated Sample")
    plt.axis("off")
    plt.show()
"""
We can enhance the quality of these generated samples by tweaking the PixelCNN.
"""
"""
## Additional notes
* After the VQ-VAE paper was initially released, the authors developed an exponential
moving averaging scheme to update the embeddings inside the quantizer. If you're
interested you can check out
[this snippet](https://github.com/deepmind/sonnet/blob/master/sonnet/python/modules/nets/vqvae.py#L124).
* To further enhance the quality of the generated samples,
[VQ-VAE-2](https://arxiv.org/abs/1906.00446) was proposed that follows a cascaded
approach to learn the codebook and to generate the images.
"""
|
keras-team/keras-io
|
examples/generative/vq_vae.py
|
Python
|
apache-2.0
| 20,195
|
[
"Gaussian"
] |
a6cff547fab29d2786729ff1c2bd1ccf17d742c2dd31b43de6141460f90492ca
|
# -*- coding: utf-8 -*-
# Author: Aziz Köksal
class NodeVisitor:
    """Dispatches nodes to per-class visit methods.

    Each node class advertises the name of its handler through a
    ``visit_name`` class attribute; nodes without a matching handler fall
    back to default_visit(), which simply recurses into the children.
    """

    def visit(self, node):
        """ Calls a visit method for this node. """
        handler = getattr(self, node.__class__.visit_name, self.default_visit)
        return handler(node)

    def default_visit(self, node):
        """ Calls visit() on the subnodes of this node. """
        for child in node:
            self.visit(child)
|
SiegeLord/dil
|
scripts/dil/visitor.py
|
Python
|
gpl-3.0
| 367
|
[
"VisIt"
] |
c62ff03bbb79d55fe5838889cbfc6053b485d352917927c9fa21c44bc99082ad
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ruggero Marchei <ruggero.marchei@daemonzone.net>
# Copyright: (c) 2015, Brian Coca <bcoca@ansible.com>
# Copyright: (c) 2016-2017, Konstantin Shalygin <k0ste@k0ste.ru>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: find
author: Brian Coca (based on Ruggero Marchei's Tidy)
version_added: "2.0"
short_description: Return a list of files based on specific criteria
description:
- Return a list of files based on specific criteria. Multiple criteria are AND'd together.
- For Windows targets, use the M(win_find) module instead.
options:
age:
description:
- Select files whose age is equal to or greater than the specified time.
Use a negative age to find files equal to or less than the specified time.
You can choose seconds, minutes, hours, days, or weeks by specifying the
first letter of any of those words (e.g., "1w").
patterns:
default: '*'
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
excludes:
description:
- One or more (shell or regex) patterns, which type is controlled by C(use_regex) option.
- One or more patterns that, when matched, exclude a file from the returned list. Multiple
patterns can be specified using a list.
aliases: ['exclude']
version_added: "2.5"
contains:
description:
- One or more regex patterns which should be matched against the file content.
paths:
required: true
aliases: [ name, path ]
description:
- List of paths of directories to search. All paths must be fully qualified.
file_type:
description:
- Type of file to select.
- The 'link' and 'any' choices were added in version 2.3.
choices: [ any, directory, file, link ]
default: file
recurse:
description:
- If target is a directory, recursively descend into the directory looking for files.
type: bool
default: 'no'
size:
description:
- Select files whose size is equal to or greater than the specified size.
Use a negative size to find files equal to or less than the specified size.
Unqualified values are in bytes, but b, k, m, g, and t can be appended to specify
bytes, kilobytes, megabytes, gigabytes, and terabytes, respectively.
Size is not evaluated for directories.
age_stamp:
default: mtime
choices: [ atime, ctime, mtime ]
description:
- Choose the file property against which we compare age.
hidden:
description:
- Set this to true to include hidden files, otherwise they'll be ignored.
type: bool
default: 'no'
follow:
description:
- Set this to true to follow symlinks in path for systems with python 2.6+.
type: bool
default: 'no'
get_checksum:
description:
- Set this to true to retrieve a file's sha1 checksum.
type: bool
default: 'no'
use_regex:
description:
- If false the patterns are file globs (shell) if true they are python regexes.
type: bool
default: 'no'
depth:
description:
- Set the maximum number of levels to descend into. Setting recurse
to false will override this value, which is effectively depth 1.
Default is unlimited depth.
version_added: "2.6"
notes:
- For Windows targets, use the M(win_find) module instead.
'''
EXAMPLES = r'''
- name: Recursively find /tmp files older than 2 days
find:
paths: /tmp
age: 2d
recurse: yes
- name: Recursively find /tmp files older than 4 weeks and equal or greater than 1 megabyte
find:
paths: /tmp
age: 4w
size: 1m
recurse: yes
- name: Recursively find /var/tmp files with last access time greater than 3600 seconds
find:
paths: /var/tmp
age: 3600
age_stamp: atime
recurse: yes
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
find:
paths: /var/log
patterns: '*.old,*.log.gz'
size: 10m
# Note that YAML double quotes require escaping backslashes but yaml single quotes do not.
- name: Find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
find:
paths: /var/log
patterns: "^.*?\\.(?:old|log\\.gz)$"
size: 10m
use_regex: yes
- name: Find /var/log all directories, exclude nginx and mysql
find:
paths: /var/log
recurse: no
file_type: directory
excludes: 'nginx,mysql'
'''
RETURN = r'''
files:
description: all matches found with the specified criteria (see stat module for full output of each dictionary)
returned: success
type: list
sample: [
{ path: "/var/tmp/test1",
mode: "0644",
"...": "...",
checksum: 16fac7be61a6e4591a33ef4b729c5c3302307523
},
{ path: "/var/tmp/test2",
"...": "..."
},
]
matched:
description: number of matches
returned: success
type: string
sample: 14
examined:
description: number of filesystem objects looked at
returned: success
type: string
sample: 34
'''
import fnmatch
import grp
import os
import pwd
import re
import stat
import sys
import time
from ansible.module_utils.basic import AnsibleModule
def pfilter(f, patterns=None, excludes=None, use_regex=False):
    '''filter using glob patterns'''
    # No criteria at all: everything passes.
    if patterns is None and excludes is None:
        return True

    # Pick the matcher once; regexes are anchored at the start of the
    # name (re.match), globs are full fnmatch semantics.
    if use_regex:
        def matched(name, pats):
            return any(re.compile(p).match(name) for p in pats)
    else:
        def matched(name, pats):
            return any(fnmatch.fnmatch(name, p) for p in pats)

    if patterns and excludes is None:
        # Include-only mode: pass when any pattern matches.
        if matched(f, patterns):
            return True
    elif patterns and excludes:
        # Include/exclude mode: must match a pattern AND no exclude.
        if matched(f, patterns):
            if matched(f, excludes):
                return False
            return True

    # Fell through every branch (no pattern matched, or patterns empty).
    return False
def agefilter(st, now, age, timestamp):
    '''filter files older than age'''
    # age is None means the age criterion is disabled.
    if age is None:
        return True
    # Compare against the chosen stat field (st_atime/st_mtime/st_ctime).
    elapsed = now - getattr(st, "st_%s" % timestamp)
    if age >= 0:
        # Positive age: at least this old.
        return elapsed >= abs(age)
    # Negative age: at most this old.
    return elapsed <= abs(age)
def sizefilter(st, size):
    '''filter files greater than size'''
    # size is None means the size criterion is disabled.
    if size is None:
        return True
    if size >= 0:
        # Positive size: at least this big.
        return st.st_size >= abs(size)
    # Negative size: at most this big.
    return st.st_size <= abs(size)
def contentfilter(fsname, pattern):
    '''filter files which contain the given expression

    Returns True when *pattern* is None (criterion disabled) or when any
    line of the file matches the regex; False otherwise, including when
    the file cannot be read or the pattern fails to compile (best-effort,
    matching the original behaviour).
    '''
    if pattern is None:
        return True

    try:
        prog = re.compile(pattern)
        # ``with`` guarantees the handle is closed even if reading raises;
        # the previous implementation leaked the handle on error.
        with open(fsname) as f:
            for line in f:
                # NOTE: match() anchors at the start of each line.
                if prog.match(line):
                    return True
    except Exception:
        # Best effort: unreadable/binary files and bad patterns simply do
        # not match.  Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        pass

    return False
def statinfo(st):
    '''Translate an ``os.(l)stat`` result into the flat dict of file
    attributes returned for each match: octal mode, file-type flags,
    ownership (numeric and resolved names), size, inode/device info,
    timestamps and per-bit permission flags.
    '''
    pw_name = ""
    gr_name = ""

    try:  # user data
        pw_name = pwd.getpwuid(st.st_uid).pw_name
    except Exception:
        # No passwd entry for this uid (e.g. files owned by deleted
        # users): leave the name empty.  Narrowed from a bare ``except:``
        # so SystemExit/KeyboardInterrupt are not swallowed.
        pass

    try:  # group data
        gr_name = grp.getgrgid(st.st_gid).gr_name
    except Exception:
        # No group entry for this gid: leave the name empty.
        pass

    return {
        'mode': "%04o" % stat.S_IMODE(st.st_mode),
        'isdir': stat.S_ISDIR(st.st_mode),
        'ischr': stat.S_ISCHR(st.st_mode),
        'isblk': stat.S_ISBLK(st.st_mode),
        'isreg': stat.S_ISREG(st.st_mode),
        'isfifo': stat.S_ISFIFO(st.st_mode),
        'islnk': stat.S_ISLNK(st.st_mode),
        'issock': stat.S_ISSOCK(st.st_mode),
        'uid': st.st_uid,
        'gid': st.st_gid,
        'size': st.st_size,
        'inode': st.st_ino,
        'dev': st.st_dev,
        'nlink': st.st_nlink,
        'atime': st.st_atime,
        'mtime': st.st_mtime,
        'ctime': st.st_ctime,
        'gr_name': gr_name,
        'pw_name': pw_name,
        'wusr': bool(st.st_mode & stat.S_IWUSR),
        'rusr': bool(st.st_mode & stat.S_IRUSR),
        'xusr': bool(st.st_mode & stat.S_IXUSR),
        'wgrp': bool(st.st_mode & stat.S_IWGRP),
        'rgrp': bool(st.st_mode & stat.S_IRGRP),
        'xgrp': bool(st.st_mode & stat.S_IXGRP),
        'woth': bool(st.st_mode & stat.S_IWOTH),
        'roth': bool(st.st_mode & stat.S_IROTH),
        'xoth': bool(st.st_mode & stat.S_IXOTH),
        'isuid': bool(st.st_mode & stat.S_ISUID),
        'isgid': bool(st.st_mode & stat.S_ISGID),
    }
def main():
    """Entry point of the ``find`` module: parse options, walk each requested
    path and exit with the list of filesystem objects matching every
    criterion (pattern/exclude, age, size, content, type, hidden)."""
    module = AnsibleModule(
        argument_spec=dict(
            paths=dict(type='list', required=True, aliases=['name', 'path']),
            patterns=dict(type='list', default=['*'], aliases=['pattern']),
            excludes=dict(type='list', aliases=['exclude']),
            contains=dict(type='str'),
            file_type=dict(type='str', default="file", choices=['any', 'directory', 'file', 'link']),
            age=dict(type='str'),
            age_stamp=dict(type='str', default="mtime", choices=['atime', 'mtime', 'ctime']),
            size=dict(type='str'),
            recurse=dict(type='bool', default='no'),
            hidden=dict(type='bool', default='no'),
            follow=dict(type='bool', default='no'),
            get_checksum=dict(type='bool', default='no'),
            use_regex=dict(type='bool', default='no'),
            depth=dict(type='int', default=None),
        ),
        # Read-only module: safe to run in check mode.
        supports_check_mode=True,
    )

    params = module.params

    # Accumulates one stat-info dict per matching filesystem object.
    filelist = []

    if params['age'] is None:
        age = None
    else:
        # convert age to seconds:
        # optional unit suffix s/m/h/d/w; a missing/unknown suffix falls
        # back to seconds via dict.get(..., 1).
        m = re.match(r"^(-?\d+)(s|m|h|d|w)?$", params['age'].lower())
        seconds_per_unit = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}
        if m:
            age = int(m.group(1)) * seconds_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(age=params['age'], msg="failed to process age")

    if params['size'] is None:
        size = None
    else:
        # convert size to bytes:
        # optional unit suffix b/k/m/g/t; default is bytes.
        m = re.match(r"^(-?\d+)(b|k|m|g|t)?$", params['size'].lower())
        bytes_per_unit = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3, "t": 1024**4}
        if m:
            size = int(m.group(1)) * bytes_per_unit.get(m.group(2), 1)
        else:
            module.fail_json(size=params['size'], msg="failed to process size")

    now = time.time()
    msg = ''
    looked = 0  # total number of filesystem objects examined
    for npath in params['paths']:
        # Expand ~ and environment variables before walking.
        npath = os.path.expanduser(os.path.expandvars(npath))
        if os.path.isdir(npath):
            ''' ignore followlinks for python version < 2.6 '''
            for root, dirs, files in (sys.version_info < (2, 6, 0) and os.walk(npath)) or os.walk(npath, followlinks=params['follow']):
                if params['depth']:
                    # Depth of the current directory relative to npath,
                    # counted by path separators.
                    depth = root.replace(npath.rstrip(os.path.sep), '').count(os.path.sep)
                    if files or dirs:
                        depth += 1
                    if depth > params['depth']:
                        # Prune the walk: emptying dirs in place stops
                        # os.walk from descending further.
                        del(dirs[:])
                        continue
                looked = looked + len(files) + len(dirs)
                for fsobj in (files + dirs):
                    fsname = os.path.normpath(os.path.join(root, fsobj))

                    # Skip dotfiles unless 'hidden' was requested.
                    if os.path.basename(fsname).startswith('.') and not params['hidden']:
                        continue

                    try:
                        # lstat: do not follow symlinks when classifying.
                        st = os.lstat(fsname)
                    except:
                        msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                        continue

                    r = {'path': fsname}
                    if params['file_type'] == 'any':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)
                    elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                        # Regular files additionally honour size/contains
                        # and may carry a sha1 checksum.
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and \
                           agefilter(st, now, age, params['age_stamp']) and \
                           sizefilter(st, size) and contentfilter(fsname, params['contains']):
                            r.update(statinfo(st))
                            if params['get_checksum']:
                                r['checksum'] = module.sha1(fsname)
                            filelist.append(r)
                    elif stat.S_ISLNK(st.st_mode) and params['file_type'] == 'link':
                        if pfilter(fsobj, params['patterns'], params['excludes'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
                            r.update(statinfo(st))
                            filelist.append(r)

                if not params['recurse']:
                    # Only the top level was requested: stop after the
                    # first os.walk iteration.
                    break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
if __name__ == '__main__':
main()
|
hogarthj/ansible
|
lib/ansible/modules/files/find.py
|
Python
|
gpl-3.0
| 14,854
|
[
"Brian"
] |
637f707d77e5488ee729e135a21a5e767bf09e3600bc92bc95a18c72f4a1601c
|
import sys
import hashlib
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
# Regression fixtures for MT19937 state jumping: for each seed and number
# of steps drawn, the md5 of the state key and the state position are
# recorded both before ("initial") and after ("jumped") the jump.
JUMP_TEST_DATA = [
    {
        "seed": 0,
        "steps": 10,
        "initial": {"key_md5": "64eaf265d2203179fb5ffb73380cd589", "pos": 9},
        "jumped": {"key_md5": "8cb7b061136efceef5217a9ce2cc9a5a", "pos": 598},
    },
    {
        "seed":384908324,
        "steps":312,
        "initial": {"key_md5": "e99708a47b82ff51a2c7b0625b81afb5", "pos": 311},
        "jumped": {"key_md5": "2ecdbfc47a895b253e6e19ccb2e74b90", "pos": 276},
    },
    {
        "seed": [839438204, 980239840, 859048019, 821],
        "steps": 511,
        "initial": {"key_md5": "9fcd6280df9199785e17e93162ce283c", "pos": 510},
        "jumped": {"key_md5": "433b85229f2ed853cde06cd872818305", "pos": 475},
    },
]
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    # Module-scoped fixture: each consuming test runs twice, once with
    # endpoint=True (inclusive upper bound) and once with endpoint=False.
    return request.param
class TestSeed:
    """Seeding and seed-validation behaviour of MT19937/Generator."""

    def test_scalar(self):
        # Fixed scalar seeds (0 and the max 32-bit value) give known draws.
        s = Generator(MT19937(0))
        assert_equal(s.integers(1000), 479)
        s = Generator(MT19937(4294967295))
        assert_equal(s.integers(1000), 324)

    def test_array(self):
        # Sequence seeds: range, ndarray and list forms with the same
        # values produce the same stream.
        s = Generator(MT19937(range(10)))
        assert_equal(s.integers(1000), 465)
        s = Generator(MT19937(np.arange(10)))
        assert_equal(s.integers(1000), 465)
        s = Generator(MT19937([0]))
        assert_equal(s.integers(1000), 479)
        s = Generator(MT19937([4294967295]))
        assert_equal(s.integers(1000), 324)

    def test_seedsequence(self):
        # MT19937 accepts a SeedSequence directly.
        s = MT19937(SeedSequence(0))
        assert_equal(s.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        # seed must be an unsigned integer
        assert_raises(TypeError, MT19937, [-0.5])
        assert_raises(ValueError, MT19937, [-1])
        assert_raises(ValueError, MT19937, [1, -2, 4294967296])

    def test_noninstantized_bitgen(self):
        # Generator requires a bit-generator *instance*, not the class.
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
    """Corner cases of ``Generator.binomial``."""

    def test_n_zero(self):
        # Tests the corner case of n == 0 for the binomial distribution.
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype='int')
        for p in [0, .5, 1]:
            assert_(random.binomial(0, p) == 0)
            assert_array_equal(random.binomial(zeros, p), zeros)

    def test_p_is_nan(self):
        # Issue #4571.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Argument validation and output-shape semantics of ``multinomial``
    (plus a couple of historical ``integers`` interval checks)."""

    def test_basic(self):
        # Smoke test: a plain call must not raise.
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # Zero-probability categories are allowed.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # integers() honours a fully negative half-open interval.
        assert_(-5 <= random.integers(-5, -1) < -1)
        x = random.integers(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))

    def test_size(self):
        # gh-3173
        p = [0.5, 0.5]
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))

        # Non-integer size is rejected.
        assert_raises(TypeError, random.multinomial, 1, p,
                      float(1))

    def test_invalid_prob(self):
        # Probabilities must lie in [0, 1].
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])

    def test_invalid_n(self):
        # n must be non-negative, scalar or array.
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
        assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])

    def test_p_non_contiguous(self):
        # A strided pvals view must draw identically to a contiguous copy
        # when the generator state is identical.
        p = np.arange(15.)
        p /= np.sum(p[1::3])
        pvals = p[1::3]
        random = Generator(MT19937(1432985819))
        non_contig = random.multinomial(100, pvals=pvals)
        random = Generator(MT19937(1432985819))
        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)

    def test_multidimensional_pvals(self):
        # pvals must be 1-dimensional.
        assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
        assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
        assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
        assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
    """Validation, edge cases and seeded repeatability of
    ``multivariate_hypergeometric`` for both sampling methods."""

    def setup(self):
        # Shared seed for the repeatability tests below.
        self.seed = 8675309

    def test_argument_validation(self):
        # Error cases...

        # `colors` must be a 1-d sequence
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      10, 4)

        # Negative nsample
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], -1)

        # Negative color
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [-1, 2, 3], 2)

        # nsample exceeds sum(colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], 10)

        # nsample exceeds sum(colors) (edge case of empty colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [], 1)

        # Validation errors associated with very large values in colors.
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [999999999, 101], 5, 1, 'marginals')

        int64_info = np.iinfo(np.int64)
        max_int64 = int64_info.max
        max_int64_index = max_int64 // int64_info.dtype.itemsize
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [max_int64_index - 100, 101], 5, 1, 'count')

    @pytest.mark.parametrize('method', ['count', 'marginals'])
    def test_edge_cases(self, method):
        # Set the seed, but in fact, all the results in this test are
        # deterministic, so we don't really need this.
        random = Generator(MT19937(self.seed))

        # Degenerate inputs: empty colors, zero nsample, single-color
        # populations and exhaustive draws all have forced outcomes.
        x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([], 0, method=method)
        assert_array_equal(x, [])

        x = random.multivariate_hypergeometric([], 0, size=1, method=method)
        assert_array_equal(x, np.empty((1, 0), dtype=np.int64))

        x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
        assert_array_equal(x, [3, 0, 0])

        colors = [1, 1, 0, 1, 1]
        x = random.multivariate_hypergeometric(colors, sum(colors),
                                               method=method)
        assert_array_equal(x, colors)

        x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
                                               method=method)
        assert_array_equal(x, [[3, 4, 5]]*3)

    # Cases for nsample:
    #     nsample < 10
    #     10 <= nsample < colors.sum()/2
    #     colors.sum()/2 < nsample < colors.sum() - 10
    #     colors.sum() - 10 < nsample < colors.sum()
    @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    @pytest.mark.parametrize('size', [5, (2, 3), 150000])
    def test_typical_cases(self, nsample, method, size):
        random = Generator(MT19937(self.seed))

        colors = np.array([10, 5, 20, 25])
        sample = random.multivariate_hypergeometric(colors, nsample, size,
                                                    method=method)
        if isinstance(size, int):
            expected_shape = (size,) + colors.shape
        else:
            expected_shape = size + colors.shape
        assert_equal(sample.shape, expected_shape)
        # Each draw is bounded by the population and sums to nsample.
        assert_((sample >= 0).all())
        assert_((sample <= colors).all())
        assert_array_equal(sample.sum(axis=-1),
                           np.full(size, fill_value=nsample, dtype=int))
        if isinstance(size, int) and size >= 100000:
            # This sample is large enough to compare its mean to
            # the expected values.
            assert_allclose(sample.mean(axis=0),
                            nsample * colors / colors.sum(),
                            rtol=1e-3, atol=0.005)

    def test_repeatability1(self):
        # Seeded draws with method='count' must match recorded values.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
                                                    method='count')
        expected = np.array([[2, 1, 2],
                             [2, 1, 2],
                             [1, 1, 3],
                             [2, 0, 3],
                             [2, 1, 2]])
        assert_array_equal(sample, expected)

    def test_repeatability2(self):
        # Seeded draws with method='marginals' must match recorded values.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)

    def test_repeatability3(self):
        # Same as above with a smaller nsample.
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Saving and restoring bit-generator state round-trips the stream."""

    def setup(self):
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        # Snapshot of the freshly seeded state, restored in the tests.
        self.state = self.bit_generator.state
        self.legacy_state = (self.state['bit_generator'],
                             self.state['state']['key'],
                             self.state['state']['pos'])

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.

        self.rg.standard_normal()
        state = self.bit_generator.state
        old = self.rg.standard_normal(size=3)
        self.bit_generator.state = state
        new = self.rg.standard_normal(size=3)
        assert_(np.all(old == new))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
@pytest.mark.parametrize(
'bound, expected',
[(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
3769704066, 1170797179, 4108474671])),
(2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
3769704067, 1170797180, 4108474672])),
(2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
1831631863, 1215661561, 3869512430]))]
)
def test_repeatability_32bit_boundary(self, bound, expected):
for size in [None, len(expected)]:
random = Generator(MT19937(1234))
x = random.integers(bound, size=size)
assert_equal(x, expected if size is not None else expected[0])
def test_repeatability_32bit_boundary_broadcasting(self):
desired = np.array([[[1622936284, 3620788691, 1659384060],
[1417365545, 760222891, 1909653332],
[3788118662, 660249498, 4092002593]],
[[3625610153, 2979601262, 3844162757],
[ 685800658, 120261497, 2694012896],
[1207779440, 1586594375, 3854335050]],
[[3004074748, 2310761796, 3012642217],
[2067714190, 2786677879, 1363865881],
[ 791663441, 1867303284, 2169727960]],
[[1939603804, 1250951100, 298950036],
[1040128489, 3791912209, 3317053765],
[3155528714, 61360675, 2305155588]],
[[ 817688762, 1335621943, 3288952434],
[1770890872, 1102951817, 1957607470],
[3099996017, 798043451, 48334215]]])
for size in [None, (5, 3, 3)]:
random = Generator(MT19937(12345))
x = random.integers([[-1], [0], [1]],
[2**32 - 1, 2**32, 2**32 + 1],
size=size)
assert_array_equal(x, desired if size is not None else desired[0])
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=object)
high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
    def test_respect_dtype_singleton(self, endpoint):
        """Scalar draws honour the requested dtype (gh-7203) and return
        native Python types for Python-level dtypes (gh-7284)."""
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)

        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert not hasattr(sample, 'dtype')
            assert_equal(type(sample), dt)
    def test_respect_dtype_array(self, endpoint):
        """Array-valued bounds also honour the requested dtype (gh-7203)."""
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
            sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
                                dtype=dt)
            assert_equal(sample.dtype, dt)
    def test_zero_size(self, endpoint):
        """Zero-sized requests succeed for every dtype, even when the
        bounds themselves would be invalid for a non-empty draw (gh-7203)."""
        # See gh-7203
        for dt in self.itype:
            sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
            assert sample.shape == (3, 0, 4)
            assert sample.dtype == dt
            # low > high is tolerated because no values are drawn.
            assert self.rfunc(0, -10, 0, endpoint=endpoint,
                              dtype=dt).shape == (0,)
            assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
                         (3, 0, 4))
            assert_equal(random.integers(0, -10, size=0).shape, (0,))
            assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
    # chi2max is the maximum acceptable chi-squared value.
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),  # p-value ~4.6e-25
                              (5000000, 7, np.uint8, 150.0),  # p-value ~7.7e-30
                              (10000000, 2500, np.int16, 3300.0),  # p-value ~3.0e-25
                              (50000000, 5000, np.uint16, 6500.0),  # p-value ~3.5e-25
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        """Chi-squared uniformity check for small integer dtypes.

        Regression test for gh-14774: small dtypes must still produce
        uniformly distributed values over [0, high).
        """
        # Regression test for gh-14774.
        samples = random.integers(high, size=sample_size, dtype=dtype)

        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max
class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
    def setup(self):
        # Fixed seed so every test below draws a reproducible stream.
        self.seed = 1234567890
    def test_integers(self):
        """integers() reproduces the expected stream for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)
    def test_integers_masked(self):
        """Masked rejection sampling for uint32 reproduces a known stream."""
        # Test masked rejection sampling algorithm to generate array of
        # uint32 in an interval.
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)
    def test_integers_closed(self):
        """endpoint=True includes the upper bound; stream matches the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
        assert_array_equal(actual, desired)
    def test_integers_max_int(self):
        """Drawing exactly C-long max with endpoint=True must not overflow."""
        # Tests whether integers with closed=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
                                 endpoint=True)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)
    def test_random(self):
        """random() reproduces known float64 values; the scalar draw equals
        the first element of the array draw from a freshly-seeded stream."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)

        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
    def test_random_float(self):
        """random() values also match at float32-level (7-digit) precision."""
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992 , 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)
    def test_random_float_scalar(self):
        """Scalar float32 draw matches the known first value of the stream."""
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
    def test_choice_uniform_replace(self):
        """Uniform choice with replacement reproduces a known draw."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_replace(self):
        """Weighted choice with replacement reproduces a known draw."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_uniform_noreplace(self):
        """Uniform choice without replacement; shuffle=False preserves
        the (here sorted) order of the selected items."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_nonuniform_noreplace(self):
        """Weighted choice without replacement reproduces a known draw."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
    def test_choice_noninteger(self):
        """choice works on non-numeric (string) populations."""
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_default_axis(self):
        """2-D population: rows are selected along the default axis 0."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)
    def test_choice_multidimensional_custom_axis(self):
        """axis=1 selects columns instead of rows from a 2-D population."""
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
    def test_choice_return_shape(self):
        """choice returns scalars without size, 0-d arrays for size=(),
        the requested shape otherwise, and handles zero-size draws."""
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(random.choice(2, replace=True)))
        assert_(np.isscalar(random.choice(2, replace=False)))
        assert_(np.isscalar(random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(random.choice([1, 2], replace=True)))
        assert_(random.choice([None], replace=True) is None)
        # Object elements are returned by identity, not copied.
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, replace=True) is a)

        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(random.choice(2, s, replace=True)))
        assert_(not np.isscalar(random.choice(2, s, replace=False)))
        assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
        assert_(random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(random.choice(arr, s, replace=True).item() is a)

        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(random.choice(6, s, replace=True).shape, s)
        assert_equal(random.choice(6, s, replace=False).shape, s)
        assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)

        # Check zero-size
        assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
        assert_equal(random.integers(0, -10, size=0).shape, (0,))
        assert_equal(random.integers(10, 10, size=0).shape, (0,))
        assert_equal(random.choice(0, size=0).shape, (0,))
        assert_equal(random.choice([], size=(0,)).shape, (0,))
        assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                     (3, 0, 4))
        # Non-empty draws from an empty population still raise.
        assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
    def test_choice_p_non_contiguous(self):
        """A strided (non-contiguous) p must give the same draw as its
        contiguous copy from the same seed."""
        p = np.ones(10) / 5
        p[1::2] = 3.0
        random = Generator(MT19937(self.seed))
        non_contig = random.choice(5, 3, p=p[::2])
        random = Generator(MT19937(self.seed))
        contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
        assert_array_equal(non_contig, contig)
    def test_choice_return_type(self):
        """choice of an int population returns int64 on every code path
        (with/without p, with/without replacement) — gh-9867."""
        # gh 9867
        p = np.ones(4) / 4.
        actual = random.choice(4, 2)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, replace=False)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p)
        assert actual.dtype == np.int64
        actual = random.choice(4, 2, p=p, replace=False)
        assert actual.dtype == np.int64
    def test_choice_large_sample(self):
        """Large no-replacement draw matches a stored MD5 of the stream;
        byteswap keeps the hash platform-independent on big-endian."""
        choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
        random = Generator(MT19937(self.seed))
        actual = random.choice(10000, 5000, replace=False)
        if sys.byteorder != 'little':
            actual = actual.byteswap()
        res = hashlib.md5(actual.view(np.int8)).hexdigest()
        assert_(choice_hash == res)
    def test_bytes(self):
        """bytes() reproduces a known byte string for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.bytes(10)
        desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
        assert_equal(actual, desired)
    def test_shuffle(self):
        """shuffle permutes identically across container/dtype variants:
        lists, typed arrays, object arrays, 2-column arrays, recarrays."""
        # Test lists, arrays (of various dtypes), and multidimensional
        # versions of both, c-contiguous or not:
        for conv in [lambda x: np.array([]),
                     lambda x: x,
                     lambda x: np.asarray(x).astype(np.int8),
                     lambda x: np.asarray(x).astype(np.float32),
                     lambda x: np.asarray(x).astype(np.complex64),
                     lambda x: np.asarray(x).astype(object),
                     lambda x: [(i, i) for i in x],
                     lambda x: np.asarray([[i, i] for i in x]),
                     lambda x: np.vstack([x, x]).T,
                     # gh-11442
                     lambda x: (np.asarray([(i, i) for i in x],
                                           [("a", int), ("b", int)])
                                .view(np.recarray)),
                     # gh-4270
                     lambda x: np.asarray([(i, i) for i in x],
                                          [("a", object, (1,)),
                                           ("b", np.int32, (1,))])]:
            random = Generator(MT19937(self.seed))
            alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
            random.shuffle(alist)
            actual = alist
            desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
            assert_array_equal(actual, desired)
    def test_shuffle_custom_axis(self):
        """shuffle along axis=1 permutes columns; axis=-1 is equivalent."""
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=1)
        desired = np.array([[ 0,  3,  1,  2],
                            [ 4,  7,  5,  6],
                            [ 8, 11,  9, 10],
                            [12, 15, 13, 14]])
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = np.arange(16).reshape((4, 4))
        random.shuffle(actual, axis=-1)
        assert_array_equal(actual, desired)
    def test_shuffle_axis_nonsquare(self):
        """shuffle(x, axis=1) equals shuffle of the transpose along axis 0."""
        y1 = np.arange(20).reshape(2, 10)
        y2 = y1.copy()
        random = Generator(MT19937(self.seed))
        random.shuffle(y1, axis=1)
        random = Generator(MT19937(self.seed))
        random.shuffle(y2.T)
        assert_array_equal(y1, y2)
    def test_shuffle_masked(self):
        """Shuffling masked arrays preserves the multiset of unmasked
        values (gh-3263)."""
        # gh-3263
        a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
        b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
        a_orig = a.copy()
        b_orig = b.copy()
        for i in range(50):
            random.shuffle(a)
            assert_equal(
                sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
            random.shuffle(b)
            assert_equal(
                sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
    def test_permutation(self):
        """permutation reproduces known output for lists, 2-d arrays and
        integer inputs; non-array-like inputs raise AxisError."""
        random = Generator(MT19937(self.seed))
        alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
        actual = random.permutation(alist)
        desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
        actual = random.permutation(arr_2d)
        assert_array_equal(actual, np.atleast_2d(desired).T)

        bad_x_str = "abcd"
        assert_raises(np.AxisError, random.permutation, bad_x_str)

        bad_x_float = 1.2
        assert_raises(np.AxisError, random.permutation, bad_x_float)

        random = Generator(MT19937(self.seed))
        integer_val = 10
        desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]

        actual = random.permutation(integer_val)
        assert_array_equal(actual, desired)
    def test_permutation_custom_axis(self):
        """permutation along axis=1 permutes columns; axis=-1 agrees."""
        a = np.arange(16).reshape((4, 4))
        desired = np.array([[ 0,  3,  1,  2],
                            [ 4,  7,  5,  6],
                            [ 8, 11,  9, 10],
                            [12, 15, 13, 14]])
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=1)
        assert_array_equal(actual, desired)
        random = Generator(MT19937(self.seed))
        actual = random.permutation(a, axis=-1)
        assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
    def test_beta(self):
        """beta(.1, .9) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.beta(.1, .9, size=(3, 2))
        desired = np.array(
            [[1.083029353267698e-10, 2.449965303168024e-11],
             [2.397085162969853e-02, 3.590779671820755e-08],
             [2.830254190078299e-04, 1.744709918330393e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_binomial(self):
        """binomial reproduces known values; non-integer n is accepted
        (truncated) and the scalar draw matches the first array value."""
        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456, size=(3, 2))
        desired = np.array([[42, 41],
                            [42, 48],
                            [44, 50]])
        assert_array_equal(actual, desired)

        random = Generator(MT19937(self.seed))
        actual = random.binomial(100.123, .456)
        desired = 42
        assert_array_equal(actual, desired)
    def test_chisquare(self):
        """chisquare(50) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.chisquare(50, size=(3, 2))
        desired = np.array([[32.9850547060149, 39.0219480493301],
                            [56.2006134779419, 57.3474165711485],
                            [55.4243733880198, 55.4209797925213]])
        assert_array_almost_equal(actual, desired, decimal=13)
    def test_dirichlet(self):
        """dirichlet reproduces known values; negative alpha raises; the
        no-size draw matches the first vector of the sized draw."""
        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha, size=(3, 2))
        desired = np.array([[[0.5439892869558927,  0.45601071304410745],
                             [0.5588917345860708,  0.4411082654139292 ]],
                            [[0.5632074165063435,  0.43679258349365657],
                             [0.54862581112627,    0.45137418887373015]],
                            [[0.49961831357047226, 0.5003816864295278 ],
                             [0.52374806183482,    0.47625193816517997]]])
        assert_array_almost_equal(actual, desired, decimal=15)
        bad_alpha = np.array([5.4e-01, -1.0e-16])
        assert_raises(ValueError, random.dirichlet, bad_alpha)

        random = Generator(MT19937(self.seed))
        alpha = np.array([51.72840233779265162, 39.74494232180943953])
        actual = random.dirichlet(alpha)
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
# gh-15876
assert_raises(ValueError, random.dirichlet, [[5, 1]])
assert_raises(ValueError, random.dirichlet, [[5], [1]])
assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))
    def test_dirichlet_alpha_non_contiguous(self):
        """A strided alpha must sample identically to a contiguous copy."""
        a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
        alpha = a[::2]
        random = Generator(MT19937(self.seed))
        non_contig = random.dirichlet(alpha, size=(3, 2))
        random = Generator(MT19937(self.seed))
        contig = random.dirichlet(np.ascontiguousarray(alpha),
                                  size=(3, 2))
        assert_array_almost_equal(non_contig, contig)
    def test_dirichlet_small_alpha(self):
        """Tiny alpha values produce degenerate one-hot-like samples
        without numerical failure."""
        eps = 1.0e-9  # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
        alpha = eps * np.array([1., 1.0e-3])
        random = Generator(MT19937(self.seed))
        actual = random.dirichlet(alpha, size=(3, 2))
        expected = np.array([
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]],
            [[1., 0.],
             [1., 0.]]
        ])
        assert_array_almost_equal(actual, expected, decimal=15)
    @pytest.mark.slow
    def test_dirichlet_moderately_small_alpha(self):
        """Sample mean matches the analytic mean for small alpha, which
        exercises the stick-breaking code path (alpha.max() < 0.1)."""
        # Use alpha.max() < 0.1 to trigger stick breaking code path
        alpha = np.array([0.02, 0.04, 0.03])
        exact_mean = alpha / alpha.sum()
        random = Generator(MT19937(self.seed))
        sample = random.dirichlet(alpha, size=20000000)
        sample_mean = sample.mean(axis=0)
        assert_allclose(sample_mean, exact_mean, rtol=1e-3)
    def test_exponential(self):
        """exponential(1.1234) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.exponential(1.1234, size=(3, 2))
        desired = np.array([[0.098845481066258, 1.560752510746964],
                            [0.075730916041636, 1.769098974710777],
                            [1.488602544592235, 2.49684815275751 ]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
    def test_f(self):
        """f(12, 77) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.f(12, 77, size=(3, 2))
        desired = np.array([[0.461720027077085, 1.100441958872451],
                            [1.100337455217484, 0.91421736740018 ],
                            [0.500811891303113, 0.826802454552058]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_gamma(self):
        """gamma(5, 3) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.gamma(5, 3, size=(3, 2))
        desired = np.array([[ 5.03850858902096,  7.9228656732049 ],
                            [18.73983605132985, 19.57961681699238],
                            [18.17897755150825, 18.17653912505234]])
        assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
    def test_geometric(self):
        """geometric(.123456789) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.geometric(.123456789, size=(3, 2))
        desired = np.array([[ 1, 10],
                            [ 1, 12],
                            [ 9, 10]])
        assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
    def test_gumbel(self):
        """gumbel(loc, scale) reproduces known values for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[ 4.688397515056245, -0.289514845417841],
                            [ 4.981176042584683, -0.633224272589149],
                            [-0.055915275687488, -0.333962478257953]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
    def test_hypergeometric(self):
        """hypergeometric reproduces known values; degenerate populations
        (nbad=0 or ngood=0) give deterministic results."""
        random = Generator(MT19937(self.seed))
        actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
        desired = np.array([[ 9, 9],
                            [ 9, 9],
                            [10, 9]])
        assert_array_equal(actual, desired)

        # Test nbad = 0
        actual = random.hypergeometric(5, 0, 3, size=4)
        desired = np.array([3, 3, 3, 3])
        assert_array_equal(actual, desired)

        actual = random.hypergeometric(15, 0, 12, size=4)
        desired = np.array([12, 12, 12, 12])
        assert_array_equal(actual, desired)

        # Test ngood = 0
        actual = random.hypergeometric(0, 5, 3, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)

        actual = random.hypergeometric(0, 15, 12, size=4)
        desired = np.array([0, 0, 0, 0])
        assert_array_equal(actual, desired)
    def test_laplace(self):
        """laplace(loc, scale) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.156353949272393, 1.195863024830054],
                            [-3.435458081645966, 1.656882398925444],
                            [ 0.924824032467446, 1.251116432209336]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
    def test_logistic(self):
        """logistic(loc, scale) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-4.338584631510999, 1.890171436749954],
                            [-4.64547787337966 , 2.514545562919217],
                            [ 1.495389489198666, 1.967827627577474]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_lognormal(self):
        """lognormal(mean, sigma) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
        desired = np.array([[ 0.0268252166335, 13.9534486483053],
                            [ 0.1204014788936,  2.2422077497792],
                            [ 4.2484199496128, 12.0093343977523]])
        assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
    def test_logseries(self):
        """logseries(p=.923456789) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.logseries(p=.923456789, size=(3, 2))
        desired = np.array([[14, 17],
                            [3, 18],
                            [5, 1]])
        assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
    def test_multinomial(self):
        """multinomial(20, uniform 6-way) reproduces known counts."""
        random = Generator(MT19937(self.seed))
        actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
        desired = np.array([[[1, 5, 1, 6, 4, 3],
                             [4, 2, 6, 2, 4, 2]],
                            [[5, 3, 2, 6, 3, 1],
                             [4, 4, 0, 2, 3, 7]],
                            [[6, 3, 1, 5, 3, 2],
                             [5, 5, 3, 1, 2, 4]]])
        assert_array_equal(actual, desired)
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal(self, method):
        """Full contract of multivariate_normal for each decomposition:
        reproducible values, covariance validation (warn/raise/ignore),
        degenerate singular covariance, float32 input, and bad shapes."""
        random = Generator(MT19937(self.seed))
        mean = (.123456789, 10)
        cov = [[1, 0], [0, 1]]
        size = (3, 2)
        actual = random.multivariate_normal(mean, cov, size, method=method)
        desired = np.array([[[-1.747478062846581,  11.25613495182354  ],
                             [-0.9967333370066214, 10.342002097029821 ]],
                            [[ 0.7850019631242964, 11.181113712443013 ],
                             [ 0.8901349653255224,  8.873825399642492 ]],
                            [[ 0.7130260107430003,  9.551628690083056 ],
                             [ 0.7127098726541128, 11.991709234143173 ]]])
        assert_array_almost_equal(actual, desired, decimal=15)

        # Check for default size, was raising deprecation warning
        actual = random.multivariate_normal(mean, cov, method=method)
        desired = np.array([0.233278563284287, 9.424140804347195])
        assert_array_almost_equal(actual, desired, decimal=15)
        # Check that non symmetric covariance input raises exception when
        # check_valid='raises' if using default svd method.
        mean = [0, 0]
        cov = [[1, 2], [1, 2]]
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')

        # Check that non positive-semidefinite covariance warns with
        # RuntimeWarning
        cov = [[1, 2], [2, 1]]
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
        assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                     method='eigh')
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')

        # and that it doesn't warn with RuntimeWarning check_valid='ignore'
        assert_no_warnings(random.multivariate_normal, mean, cov,
                           check_valid='ignore')

        # and that it raises with RuntimeWarning check_valid='raises'
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise')
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='raise', method='eigh')

        # check degenerate samples from singular covariance matrix
        cov = [[1, 1], [1, 1]]
        if method in ('svd', 'eigh'):
            # svd/eigh tolerate singular covariance and emit perfectly
            # correlated components; cholesky cannot factor it.
            samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                                 method=method)
            assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                      decimal=6)
        else:
            assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                          method='cholesky')

        # A well-conditioned float32 covariance must not warn.
        cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
        with suppress_warnings() as sup:
            random.multivariate_normal(mean, cov, method=method)
            w = sup.record(RuntimeWarning)
            assert len(w) == 0

        mu = np.zeros(2)
        cov = np.eye(2)
        # Invalid check_valid value, wrong mean rank, non-square or
        # mismatched covariance shapes all raise.
        assert_raises(ValueError, random.multivariate_normal, mean, cov,
                      check_valid='other')
        assert_raises(ValueError, random.multivariate_normal,
                      np.zeros((2, 1, 1)), cov)
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.empty((3, 2)))
        assert_raises(ValueError, random.multivariate_normal,
                      mu, np.eye(3))
    @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
    def test_multivariate_normal_basic_stats(self, method):
        """Loose sanity check: sample mean and covariance are close to the
        requested parameters for every decomposition method."""
        random = Generator(MT19937(self.seed))
        n_s = 1000
        mean = np.array([1, 2])
        cov = np.array([[2, 1], [1, 2]])
        s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
        s_center = s - mean
        cov_emp = (s_center.T @ s_center) / (n_s - 1)
        # these are pretty loose and are only designed to detect major errors
        assert np.all(np.abs(s_center.mean(-2)) < 0.1)
        assert np.all(np.abs(cov_emp - cov) < 0.2)
    def test_negative_binomial(self):
        """negative_binomial(100, .12345) reproduces known values."""
        random = Generator(MT19937(self.seed))
        actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
        desired = np.array([[543, 727],
                            [775, 760],
                            [600, 674]])
        assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_negative_binomial_p0_exception(self):
# Verify that p=0 raises an exception.
with assert_raises(ValueError):
x = random.negative_binomial(1, 0)
    def test_noncentral_chisquare(self):
        """noncentral_chisquare reproduces known values, including df < 1
        and the nonc=0 (central) special case."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
        desired = np.array([[ 1.70561552362133, 15.97378184942111],
                            [13.71483425173724, 20.17859633310629],
                            [11.3615477156643 ,  3.67891108738029]])
        assert_array_almost_equal(actual, desired, decimal=14)

        actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
        desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                            [1.14554372041263e+00, 1.38187755933435e-03],
                            [1.90659181905387e+00, 1.21772577941822e+00]])
        assert_array_almost_equal(actual, desired, decimal=14)

        random = Generator(MT19937(self.seed))
        actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
        desired = np.array([[0.82947954590419, 1.80139670767078],
                            [6.58720057417794, 7.00491463609814],
                            [6.31101879073157, 6.30982307753005]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f(self):
        """noncentral_f(5, 2, 1) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                     size=(3, 2))
        desired = np.array([[0.060310671139  , 0.23866058175939],
                            [0.86860246709073, 0.2668510459738 ],
                            [0.23375780078364, 1.88922102885943]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_noncentral_f_nan(self):
        """NaN noncentrality propagates to a NaN sample (no exception)."""
        random = Generator(MT19937(self.seed))
        actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
        assert np.isnan(actual)
    def test_normal(self):
        """normal(loc, scale) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
        desired = np.array([[-3.618412914693162, 2.635726692647081],
                            [-2.116923463013243, 0.807460983059643],
                            [ 1.446547137248593, 2.485684213886024]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
    def test_pareto(self):
        """pareto(.123456789) reproduces known values; compared in nulps
        because of a platform-specific rounding quirk (see below)."""
        random = Generator(MT19937(self.seed))
        actual = random.pareto(a=.123456789, size=(3, 2))
        desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                            [7.2640150889064703e-01, 3.4650454783825594e+05],
                            [4.5852344481994740e+04, 6.5851383009539105e+07]])
        # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
        # matrix differs by 24 nulps. Discussion:
        # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
        # Consensus is that this is probably some gcc quirk that affects
        # rounding but not in any important way, so we just use a looser
        # tolerance on this test:
        np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
    def test_poisson(self):
        """poisson with tiny lam yields all zeros for the fixed seed."""
        random = Generator(MT19937(self.seed))
        actual = random.poisson(lam=.123456789, size=(3, 2))
        desired = np.array([[0, 0],
                            [0, 0],
                            [0, 0]])
        assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
    def test_power(self):
        """power(.123456789) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.power(a=.123456789, size=(3, 2))
        desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                            [2.482442984543471e-10, 1.527108843266079e-01],
                            [8.188283434244285e-02, 3.950547209346948e-01]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_rayleigh(self):
        """rayleigh(scale=10) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.rayleigh(scale=10, size=(3, 2))
        desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
                            [ 4.19850651287094, 17.08718809823704],
                            [14.7907457708776 , 15.85545333419775]])
        assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
    def test_standard_cauchy(self):
        """standard_cauchy reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_cauchy(size=(3, 2))
        desired = np.array([[-1.489437778266206, -3.275389641569784],
                            [ 0.560102864910406, -0.680780916282552],
                            [-1.314912905226277,  0.295852965660225]])
        assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential(self):
        """standard_exponential via the inverse-CDF method reproduces
        known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_exponential(size=(3, 2), method='inv')
        desired = np.array([[0.102031839440643, 1.229350298474972],
                            [0.088137284693098, 1.459859985522667],
                            [1.093830802293668, 1.256977002164613]])
        assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_expoential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
    def test_standard_gamma(self):
        """standard_gamma(3) reproduces known values for the seed."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62970724056362, 1.22379851271008],
                            [3.899412530884  , 4.12479964250139],
                            [3.74994102464584, 3.74929307690815]])
        assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gammma_scalar_float(self):
        """Scalar float32 standard_gamma draw matches a known value."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(3, dtype=np.float32)
        desired = 2.9242148399353027
        assert_array_almost_equal(actual, desired, decimal=6)
    def test_standard_gamma_float(self):
        """standard_gamma values also match at 5-digit precision."""
        random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2))
        desired = np.array([[0.62971, 1.2238 ],
                            [3.89941, 4.1248 ],
                            [3.74994, 3.74929]])
        assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gammma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
    # Tests that exceptions are correctly propagated in distributions
    # when called with objects that throw exceptions when converted to
    # scalars.
    #
    # Regression test for gh: 8865

    # A float-like whose scalar conversion always fails; the distribution
    # must surface the TypeError rather than swallow it.
    class ThrowingFloat(np.ndarray):
        def __float__(self):
            raise TypeError

    throwing_float = np.array(1.0).view(ThrowingFloat)
    assert_raises(TypeError, random.uniform, throwing_float,
                  throwing_float)

    # Same check for parameters that are converted to integers.
    class ThrowingInteger(np.ndarray):
        def __int__(self):
            raise TypeError

    throwing_int = np.array(1).view(ThrowingInteger)
    assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread:
    # make sure each state produces the same sequence even in threads
    def setup(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run ``function(generator, out)`` once per seed, both threaded
        and serially, and check the two runs produce identical arrays.

        :arg function: callable taking ``(Generator, out_array)`` that
            fills ``out_array`` in place.
        :arg sz: per-seed output shape.
        """
        from threading import Thread
        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        threads = [Thread(target=function, args=(Generator(MT19937(s)), o))
                   for s, o in zip(self.seeds, out1)]
        # Idiom fix: the original used list comprehensions purely for their
        # side effects ([x.start() for x in t]); plain loops are clearer and
        # do not build throwaway lists.
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # the same sequence, generated serially
        for s, o in zip(self.seeds, out2):
            function(Generator(MT19937(s)), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
@pytest.mark.parametrize("config", JUMP_TEST_DATA)
def test_jumped(config):
    # Each config contains the initial seed, a number of raw steps,
    # the md5 hashes of the initial and the final states' keys and
    # the position of the initial and the final state.
    # These were produced using the original C implementation.
    seed = config["seed"]
    steps = config["steps"]

    mt19937 = MT19937(seed)
    # Burn step: advance the raw stream to the recorded position.
    mt19937.random_raw(steps)
    key = mt19937.state["state"]["key"]
    if sys.byteorder == 'big':
        # Normalise the key bytes so the md5 reference values (computed on
        # little-endian hardware) also match on big-endian machines.
        key = key.byteswap()
    md5 = hashlib.md5(key)
    assert mt19937.state["state"]["pos"] == config["initial"]["pos"]
    assert md5.hexdigest() == config["initial"]["key_md5"]

    # Compare the post-jump state against the C-implementation reference.
    jumped = mt19937.jumped()
    key = jumped.state["state"]["key"]
    if sys.byteorder == 'big':
        key = key.byteswap()
    md5 = hashlib.md5(key)
    assert jumped.state["state"]["pos"] == config["jumped"]["pos"]
    assert md5.hexdigest() == config["jumped"]["key_md5"]
|
abalkin/numpy
|
numpy/random/tests/test_generator_mt19937.py
|
Python
|
bsd-3-clause
| 101,044
|
[
"Gaussian"
] |
0df91a110e1c67beaf94b75dcd1bf58b2e535301262aa319dbd239225d2cae31
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import os.path
import re
import sys
import warnings
from collections import defaultdict
try:
from setuptools import setup, find_packages
from setuptools.command.build_py import build_py as BuildPy
from setuptools.command.install_lib import install_lib as InstallLib
from setuptools.command.install_scripts import install_scripts as InstallScripts
except ImportError:
print("Ansible now needs setuptools in order to build. Install it using"
" your package manager (usually python-setuptools) or via pip (pip"
" install setuptools).", file=sys.stderr)
sys.exit(1)
# `distutils` must be imported after `setuptools` or it will cause explosions
# with `setuptools >=48.0.0, <49.1`.
# Refs:
# * https://github.com/ansible/ansible/issues/70456
# * https://github.com/pypa/setuptools/issues/2230
# * https://github.com/pypa/setuptools/commit/bd110264
from distutils.command.build_scripts import build_scripts as BuildScripts
from distutils.command.sdist import sdist as SDist
def find_package_info(*file_paths):
    """Read ``__version__`` and ``__author__`` from a Python source file.

    ``*file_paths`` are path components joined with ``os.path.join``.
    Returns a ``(version, author)`` tuple; raises ``RuntimeError`` when the
    file cannot be read or either dunder assignment is missing.
    """
    try:
        with open(os.path.join(*file_paths), 'r') as f:
            contents = f.read()
    except Exception:
        raise RuntimeError("Unable to find package info.")

    def _dunder(name):
        # Assignments must have the form:  __name__ = 'value'
        # (single or double quotes), at the start of a line.
        return re.search(r"^__%s__ = ['\"]([^'\"]*)['\"]" % name,
                         contents, re.M)

    version_match = _dunder('version')
    author_match = _dunder('author')
    if version_match and author_match:
        return version_match.group(1), author_match.group(1)

    raise RuntimeError("Unable to find package info.")
def _validate_install_ansible_core():
    """Validate that we can install ansible-core. This checks if
    ansible<=2.9 or ansible-base>=2.10 are installed.
    """
    # Skip common commands we can ignore
    # Do NOT add bdist_wheel here, we don't ship wheels
    # and bdist_wheel is the only place we can prevent pip
    # from installing, as pip creates a wheel, and installs the wheel
    # and we have no influence over installation within a wheel
    if set(('sdist', 'egg_info')).intersection(sys.argv):
        return

    # Explicit escape hatch: any value other than '' or '0' skips the check.
    if os.getenv('ANSIBLE_SKIP_CONFLICT_CHECK', '') not in ('', '0'):
        return

    # Save these for later restoring things to pre invocation
    sys_modules = sys.modules.copy()
    sys_modules_keys = set(sys_modules)

    # Make sure `lib` isn't in `sys.path` that could confuse this
    sys_path = sys.path[:]
    abspath = os.path.abspath
    sys.path[:] = [p for p in sys.path if abspath(p) != abspath('lib')]

    try:
        # Probe for a pre-existing installation by importing its version.
        from ansible.release import __version__
    except ImportError:
        # No pre-existing ansible installation: nothing to conflict with.
        pass
    else:
        # Only the (major, minor) part matters for the conflict decision.
        version_tuple = tuple(int(v) for v in __version__.split('.')[:2])
        if version_tuple >= (2, 11):
            # Already ansible-core (2.11+): installing over it is fine.
            return
        elif version_tuple == (2, 10):
            ansible_name = 'ansible-base'
        else:
            ansible_name = 'ansible'

        stars = '*' * 76
        raise RuntimeError(
            '''

    %s

    Cannot install ansible-core with a pre-existing %s==%s
    installation.

    Installing ansible-core with ansible-2.9 or older, or ansible-base-2.10
    currently installed with pip is known to cause problems. Please uninstall
    %s and install the new version:

        pip uninstall %s
        pip install ansible-core

    If you want to skip the conflict checks and manually resolve any issues
    afterwards, set the ANSIBLE_SKIP_CONFLICT_CHECK environment variable:

        ANSIBLE_SKIP_CONFLICT_CHECK=1 pip install ansible-core

    %s
    ''' % (stars, ansible_name, __version__, ansible_name, ansible_name, stars))
    finally:
        # Restore sys.path and undo whatever the probe import added to (or
        # replaced in) sys.modules.
        sys.path[:] = sys_path
        for key in sys_modules_keys.symmetric_difference(sys.modules):
            sys.modules.pop(key, None)
        sys.modules.update(sys_modules)
_validate_install_ansible_core()
SYMLINK_CACHE = 'SYMLINK_CACHE.json'
def _find_symlinks(topdir, extension=''):
    """Find symlinks that should be maintained

    Maintained symlinks exist in the bin dir or are modules which have
    aliases.  Our heuristic is that they are a link in a certain path which
    point to a file in the same directory.

    .. warn::

        We want the symlinks in :file:`bin/` that link into :file:`lib/ansible/*` (currently,
        :command:`ansible`, :command:`ansible-test`, and :command:`ansible-connection`) to become
        real files on install.  Updates to the heuristic here *must not* add them to the symlink
        cache.
    """
    # Maps a link target to the list of link paths (relative to topdir)
    # pointing at it.
    symlinks = defaultdict(list)
    for base_path, dirs, files in os.walk(topdir):
        for filename in files:
            filepath = os.path.join(base_path, filename)
            if os.path.islink(filepath) and filename.endswith(extension):
                target = os.readlink(filepath)
                if target.startswith('/'):
                    # We do not support absolute symlinks at all
                    continue

                if os.path.dirname(target) == '':
                    # Link target lives in the link's own directory: key by
                    # the target's basename and store the topdir-relative
                    # link path.
                    link = filepath[len(topdir):]
                    if link.startswith('/'):
                        link = link[1:]
                    symlinks[os.path.basename(target)].append(link)
                else:
                    # Count how many directory levels from the topdir we are
                    levels_deep = os.path.dirname(filepath).count('/')

                    # Count the number of directory levels higher we walk up
                    # the tree in target
                    target_depth = 0
                    for path_component in target.split('/'):
                        if path_component == '..':
                            target_depth += 1
                            # If we walk past the topdir, then don't store
                            if target_depth >= levels_deep:
                                break
                        else:
                            target_depth -= 1
                    else:
                        # for/else: only reached when the loop did not
                        # break, i.e. the target stayed within the tree, so
                        # store the symlink.
                        link = filepath[len(topdir):]
                        if link.startswith('/'):
                            link = link[1:]

                        symlinks[target].append(link)

    return symlinks
def _cache_symlinks(symlink_data):
    """Persist the symlink mapping to SYMLINK_CACHE as JSON."""
    serialized = json.dumps(symlink_data)
    with open(SYMLINK_CACHE, 'w') as cache_file:
        cache_file.write(serialized)
def _maintain_symlinks(symlink_type, base_path):
    """Switch a real file into a symlink.

    :arg symlink_type: which cached set to recreate, ``'script'`` or
        ``'library'``.
    :arg base_path: directory under which the links are (re)created.
    :raises RuntimeError: when the cache is absent and the expected symlinks
        cannot be discovered from the source tree.
    """
    try:
        # Try the cache first because going from git checkout to sdist is the
        # only time we know that we're going to cache correctly
        with open(SYMLINK_CACHE, 'r') as f:
            symlink_data = json.load(f)
    except (IOError, OSError) as e:
        # IOError on py2, OSError on py3. Both have errno
        if e.errno == 2:
            # SYMLINKS_CACHE doesn't exist. Fallback to trying to create the
            # cache now. Will work if we're running directly from a git
            # checkout or from an sdist created earlier.
            library_symlinks = _find_symlinks('lib', '.py')
            library_symlinks.update(_find_symlinks('test/lib'))
            symlink_data = {'script': _find_symlinks('bin'),
                            'library': library_symlinks,
                            }

            # Sanity check that something we know should be a symlink was
            # found. We'll take that to mean that the current directory
            # structure properly reflects symlinks in the git repo
            if 'ansible-playbook' in symlink_data['script']['ansible']:
                _cache_symlinks(symlink_data)
            else:
                raise RuntimeError(
                    "Pregenerated symlink list was not present and expected "
                    "symlinks in ./bin were missing or broken. "
                    "Perhaps this isn't a git checkout?"
                )
        else:
            raise
    symlinks = symlink_data[symlink_type]

    for source in symlinks:
        for dest in symlinks[source]:
            dest_path = os.path.join(base_path, dest)
            if not os.path.islink(dest_path):
                try:
                    os.unlink(dest_path)
                except OSError as e:
                    # BUGFIX: previously *every* unlink failure (e.g. EACCES)
                    # was silently swallowed, letting os.symlink fail later
                    # with a confusing error.  Only "file does not exist"
                    # (errno 2) is benign here.
                    if e.errno != 2:
                        raise
                os.symlink(source, dest_path)
class BuildPyCommand(BuildPy):
    """build_py that also recreates cached 'library' symlinks in the build dir."""

    def run(self):
        BuildPy.run(self)
        # Copying the tree turned symlinks into real files; restore them.
        _maintain_symlinks('library', self.build_lib)
class BuildScriptsCommand(BuildScripts):
    """build_scripts that also recreates cached 'script' symlinks."""

    def run(self):
        BuildScripts.run(self)
        # Copying the tree turned symlinks into real files; restore them.
        _maintain_symlinks('script', self.build_dir)
class InstallLibCommand(InstallLib):
    """install_lib that also recreates cached 'library' symlinks on install."""

    def run(self):
        InstallLib.run(self)
        # Restore library symlinks inside the installed package tree.
        _maintain_symlinks('library', self.install_dir)
class InstallScriptsCommand(InstallScripts):
    """install_scripts that also recreates cached 'script' symlinks on install."""

    def run(self):
        InstallScripts.run(self)
        # Restore script symlinks inside the installed scripts directory.
        _maintain_symlinks('script', self.install_dir)
class SDistCommand(SDist):
    """sdist that records the git checkout's symlink layout before packing.

    sdist is the only command run from the git checkout, so it is the one
    chance to capture the symlink layout into SYMLINK_CACHE for the
    build/install commands to replay.
    """

    def run(self):
        # have to generate the cache of symlinks for release as sdist is the
        # only command that has access to symlinks from the git repo
        library_symlinks = _find_symlinks('lib', '.py')
        library_symlinks.update(_find_symlinks('test/lib'))
        symlinks = {'script': _find_symlinks('bin'),
                    'library': library_symlinks,
                    }
        _cache_symlinks(symlinks)

        SDist.run(self)

        # Print warnings at the end because no one will see warnings before all the normal status
        # output
        if os.environ.get('_ANSIBLE_SDIST_FROM_MAKEFILE', False) != '1':
            warnings.warn('When setup.py sdist is run from outside of the Makefile,'
                          ' the generated tarball may be incomplete. Use `make snapshot`'
                          ' to create a tarball from an arbitrary checkout or use'
                          ' `cd packaging/release && make release version=[..]` for official builds.',
                          RuntimeWarning)
def read_file(file_name):
    """Return the entire contents of *file_name* as a single string."""
    with open(file_name, 'r') as handle:
        contents = handle.read()
    return contents
def read_requirements(file_name):
    """Read a pip requirements file and return its lines as a list.

    :arg file_name: path to the requirements file.
    :raises RuntimeError: if the file is empty, which indicates an
        incomplete source tree.
    """
    reqs = read_file(file_name).splitlines()
    if not reqs:
        raise RuntimeError(
            # BUGFIX: the two adjacent literals previously concatenated into
            # "...fileThat indicates..." with no separator.
            "Unable to read requirements from the %s file. "
            "That indicates this copy of the source code is incomplete."
            % file_name
        )
    return reqs
# Distribution name of the legacy crypto backend.
PYCRYPTO_DIST = 'pycrypto'


def get_crypto_req():
    """Return the crypto requirement chosen via ANSIBLE_CRYPTO_BACKEND.

    pycrypto or cryptography.  We choose a default but allow the user to
    override it.  This translates into pip install of the sdist deciding
    what package to install and also the runtime dependencies that
    pkg_resources knows about.  Returns ``None`` when nothing is customized.
    """
    backend = os.environ.get('ANSIBLE_CRYPTO_BACKEND', '').strip()
    if not backend:
        # No override requested; caller keeps the default requirements.
        return None
    if backend == PYCRYPTO_DIST:
        # Attempt to set version requirements
        backend = '%s >= 2.6' % PYCRYPTO_DIST
    return backend
def substitute_crypto_to_req(req):
    """Replace crypto requirements if the user customized the backend.

    When ANSIBLE_CRYPTO_BACKEND selects a backend, every pycrypto/cryptography
    entry is dropped from *req* and the chosen backend is appended; otherwise
    *req* is returned unchanged.
    """
    backend = get_crypto_req()
    if backend is None:
        return req
    crypto_libs = (PYCRYPTO_DIST, 'cryptography')
    non_crypto = [
        r for r in req
        if not any(r.lower().startswith(lib) for lib in crypto_libs)
    ]
    return non_crypto + [backend]
def get_dynamic_setup_params():
    """Return the setup() arguments that must be computed at build time."""
    # Long description comes straight from the README.
    long_description = read_file('README.rst')
    # Requirements may be rewritten by the crypto-backend override.
    install_requires = substitute_crypto_to_req(
        read_requirements('requirements.txt'))
    return {
        'long_description': long_description,
        'install_requires': install_requires,
    }
# Directory containing this setup.py; metadata paths are resolved from it.
here = os.path.abspath(os.path.dirname(__file__))
# find_package_info is defined earlier in this file; presumably it extracts
# the version and author from lib/ansible/release.py without importing it —
# TODO confirm against the definition above.
__version__, __author__ = find_package_info(here, 'lib', 'ansible', 'release.py')
# Build-time-constant arguments for setup(); dynamic values (README text,
# requirements) are merged in by get_dynamic_setup_params().
static_setup_params = dict(
    # Use the distutils SDist so that symlinks are not expanded
    # Use a custom Build for the same reason
    cmdclass={
        'build_py': BuildPyCommand,
        'build_scripts': BuildScriptsCommand,
        'install_lib': InstallLibCommand,
        'install_scripts': InstallScriptsCommand,
        'sdist': SDistCommand,
    },
    name='ansible-core',
    version=__version__,
    description='Radically simple IT automation',
    author=__author__,
    author_email='info@ansible.com',
    url='https://ansible.com/',
    project_urls={
        'Bug Tracker': 'https://github.com/ansible/ansible/issues',
        'CI: Shippable': 'https://app.shippable.com/github/ansible/ansible',
        'Code of Conduct': 'https://docs.ansible.com/ansible/latest/community/code_of_conduct.html',
        'Documentation': 'https://docs.ansible.com/ansible/',
        'Mailing lists': 'https://docs.ansible.com/ansible/latest/community/communication.html#mailing-list-information',
        'Source Code': 'https://github.com/ansible/ansible',
    },
    license='GPLv3+',
    # Ansible will also make use of a system copy of python-six and
    # python-selectors2 if installed but use a Bundled copy if it's not.
    python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
    # Both the main library (under lib/) and ansible_test (under test/lib/)
    # ship in the distribution.
    package_dir={'': 'lib',
                 'ansible_test': 'test/lib/ansible_test'},
    packages=find_packages('lib') + find_packages('test/lib'),
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Natural Language :: English',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: System :: Installation/Setup',
        'Topic :: System :: Systems Administration',
        'Topic :: Utilities',
    ],
    scripts=[
        'bin/ansible',
        'bin/ansible-playbook',
        'bin/ansible-pull',
        'bin/ansible-doc',
        'bin/ansible-galaxy',
        'bin/ansible-console',
        'bin/ansible-connection',
        'bin/ansible-vault',
        'bin/ansible-config',
        'bin/ansible-inventory',
        'bin/ansible-test',
    ],
    data_files=[],
    # Installing as zip files would break due to references to __file__
    zip_safe=False
)
def main():
    """Run the setuptools installation with merged static/dynamic params."""
    # Dynamic params (README text, requirements) extend the static ones.
    setup_params = dict(static_setup_params)
    setup_params.update(get_dynamic_setup_params())
    # Older distutils releases do not understand these options; silence the
    # resulting "Unknown distribution option" warnings so users aren't alarmed.
    unknown_option_pattern = (
        r"Unknown distribution option: '(project_urls|python_requires)'"
    )
    warnings.filterwarnings(
        'ignore',
        message=unknown_option_pattern,
        category=UserWarning,
        module='distutils.dist',
    )
    setup(**setup_params)
    warnings.resetwarnings()
# Script entry point: only install when executed directly.
if __name__ == '__main__':
    main()
|
s-hertel/ansible
|
setup.py
|
Python
|
gpl-3.0
| 15,629
|
[
"Galaxy"
] |
53fc649075c13ba80295f1e4cd129b484934a3c6a15d055200eda15f73a5431d
|
#!/usr/bin/python
# Image querying script written by Tamara Berg,
# and extended heavily by James Hays
# Modified a little bit by Haozhi Qi
# 9/26/2007 added dynamic time slices to query more efficiently.
# 8/18/2008 added new fields and set maximum time slice.
# 8/19/2008 this is a much simpler function which gets ALL geotagged photos of
# sufficient accuracy. No queries, no negative constraints.
# divides up the query results into multiple files
# 1/5/2009
# now uses date_taken instead of date_upload to get more diverse blocks of images
# 1/13/2009 - uses the original im2gps keywords, not as negative constraints though
import sys
import socket
import time
import argparse
from flickrapi2 import FlickrAPI
socket.setdefaulttimeout(30)
# 30 second time out on sockets before they throw
# an exception. I've been having trouble with urllib.urlopen hanging in the
# flickr API. This will show up as exceptions.IOError.
# The time out needs to be pretty long, it seems, because the flickr servers can be slow
# to respond to our big searches.
"""
Modify this section to reflect your data and specific search
1. APIKey and Secret, this is got from flicker official website
"""
flickrAPIKey = "b653e65cf5ffd83d7584e5c860627ae8" # API key
flickrSecret = "2df09d4260333f44" # shared "secret"
desired_photos = 250
def parse_args():
    """Parse command-line options for the crawler."""
    parser = argparse.ArgumentParser(description='Flicker crawler')
    parser.add_argument(
        '--query', dest='query_list', type=str, required=True,
        default='demo.txt', help='list to be queried')
    return parser.parse_args()
def get_queries(query_list):
    """Read query strings from *query_list*, one per line.

    Lines starting with '#' and lines of fewer than three characters are
    skipped (matching the original filter).  Returns (queries, num_queries).
    """
    pos_queries = []
    with open(query_list, 'r') as query_file:
        # aggregate all of the positive queries together.
        for line in query_file:
            if line[0] != '#' and len(line) > 2:
                # BUGFIX: the old code sliced off the last character
                # unconditionally (line[0:len(line)-1]), which corrupted a
                # final line that lacks a trailing newline.  Strip only the
                # newline instead.
                pos_queries.append(line.rstrip('\n'))
    return pos_queries, len(pos_queries)
def search_from_current(query_string):
    """Widen an upload-date window backwards from now until the Flickr
    search for *query_string* reports at least ``desired_photos`` images.

    Returns (threshold_time, current_time, total_images, rsp), where
    total_images is the total reported by the API (as returned by the API)
    and rsp is the last search response.
    """
    # number of seconds to skip per query
    # time_skip = 62899200 #two years
    # time_skip = 604800 #one week
    # time_skip = 172800 #two days
    # time_skip = 86400 #one day
    # time_skip = 3600 #one hour
    # time_skip = 2257 #for resuming previous query
    time_skip = 604800
    current_time = int(time.time())
    threshold_time = current_time - time_skip
    while True:
        rsp = flicker_api.photos_search(api_key=flickrAPIKey,
                                        ispublic="1",
                                        media="photos",
                                        per_page="250",
                                        page="1",
                                        text=query_string,
                                        min_upload_date=str(threshold_time),
                                        max_upload_date=str(current_time))
        # we want to catch these failures somehow and keep going.
        time.sleep(1)
        flicker_api.testFailure(rsp)
        total_images = rsp.photos[0]['total']
        print('num_imgs: ' + total_images + '\n')
        # BUGFIX: 'total' is returned as a *string*.  The old comparison
        # (str < int) is always False in Python 2, so the window never
        # widened.  Compare numerically.
        if int(total_images) < desired_photos:
            threshold_time -= time_skip
        else:
            break
    return threshold_time, current_time, total_images, rsp
def write_output_list(photo_desc, out_file):
    """Append one photo's metadata record to out_file.

    photo_desc is a Flickr API photo element accessed by key.  Free-text
    fields are encoded to ASCII with replacement because the API returns
    unicode and the list file is written as plain text (Python 2 semantics).
    """
    out_file.write('photo: ' + photo_desc['id'] + ' ' + photo_desc['secret'] + ' ' + photo_desc['server'] + '\n')
    out_file.write('owner: ' + photo_desc['owner'] + '\n')
    out_file.write('title: ' + photo_desc['title'].encode("ascii", "replace") + '\n')
    out_file.write('originalsecret: ' + photo_desc['originalsecret'] + '\n')
    out_file.write('originalformat: ' + photo_desc['originalformat'] + '\n')
    out_file.write('o_height: ' + photo_desc['o_height'] + '\n')
    out_file.write('o_width: ' + photo_desc['o_width'] + '\n')
    out_file.write('datetaken: ' + photo_desc['datetaken'].encode("ascii","replace") + '\n')
    out_file.write('dateupload: ' + photo_desc['dateupload'].encode("ascii","replace") + '\n')
    out_file.write('tags: ' + photo_desc['tags'].encode("ascii","replace") + '\n')
    out_file.write('license: ' + photo_desc['license'].encode("ascii","replace") + '\n')
    out_file.write('latitude: ' + photo_desc['latitude'].encode("ascii","replace") + '\n')
    out_file.write('longitude: ' + photo_desc['longitude'].encode("ascii","replace") + '\n')
    out_file.write('accuracy: ' + photo_desc['accuracy'].encode("ascii","replace") + '\n')
    out_file.write('views: ' + photo_desc['views'] + '\n')
    # Blank line separates records in the output list.
    out_file.write('\n')
def image_retrieval(query_string):
    """Crawl Flickr for *query_string* and write results to ./lists/<query>.txt.

    Finds a date window with enough photos via search_from_current(), then
    pages through the results (interestingness-descending), writing one
    metadata record per photo via write_output_list().
    NOTE(review): assumes the ./lists/ directory already exists — open()
    does not create it; verify before running.
    """
    out_file = open('./lists/' + query_string + '.txt', 'w')
    print 'query_string is ' + query_string + '\n'
    total_images_queried = 0
    [min_time, max_time, total_images, rsp] = search_from_current(query_string)
    s = 'min_time: ' + str(min_time) + ' max_time: ' + str(max_time) + '\n'
    print s
    out_file.write(s + '\n')
    if getattr(rsp, 'photos', None):
        s = 'num_imgs: ' + total_images
        print s
        out_file.write(s + '\n')
        current_image_num = 1
        num = int(rsp.photos[0]['pages'])
        s = 'total pages: ' + str(num)
        print s
        out_file.write(s + '\n')
        # only visit 16 pages max, to try and avoid the dreaded duplicate bug
        # 16 pages = 4000 images, should be duplicate safe. Most interesting pictures will be taken.
        num_visit_pages = min(16, num)
        s = 'visiting only ' + str(num_visit_pages) + ' pages ( up to ' + str(num_visit_pages * 250) + ' images)'
        print s
        out_file.write(s + '\n')
        total_images_queried = total_images_queried + min((num_visit_pages * 250), int(total_images))
        page_num = 1
        while page_num <= num_visit_pages:
            # for page_num in range(1, num_visit_pages + 1):
            print ' page number ' + str(page_num)
            try:
                rsp = flicker_api.photos_search(
                    api_key=flickrAPIKey,
                    ispublic="1",
                    media="photos",
                    per_page="250",
                    page=str(page_num),
                    sort="interestingness-desc",
                    text=query_string,
                    min_upload_date=str(min_time),
                    max_upload_date=str(max_time))
                time.sleep(1)
                flicker_api.testFailure(rsp)
            except KeyboardInterrupt:
                print('Keyboard exception while querying for images, exiting\n')
                raise
            else:
                # and print them
                if getattr(rsp, 'photos', None):
                    if getattr(rsp.photos[0], 'photo', None):
                        for b in rsp.photos[0].photo:
                            if b is not None:
                                write_output_list(b, out_file)
                                out_file.write('interestingness: ' + str(current_image_num) + ' out of '
                                               + str(total_images) + '\n')
                                current_image_num += 1
                page_num += 1  # this is in the else exception block. It won't increment for a failure.
    out_file.write('Total images queried: ' + str(total_images_queried) + '\n')
    out_file.close()
# Script entry point: read the query list, then crawl each query in turn.
if __name__ == '__main__':
    args = parse_args()
    pos_queries, num_queries = get_queries(args.query_list)
    print 'positive queries: '
    print pos_queries
    print 'num_queries = ' + str(num_queries)
    # Global API handle used by search_from_current() / image_retrieval().
    flicker_api = FlickrAPI(flickrAPIKey, flickrSecret)
    for current_tag in range(0, num_queries):
        image_retrieval(pos_queries[current_tag])
|
Oh233/Flicrawler
|
query_imgs/image_crawler.py
|
Python
|
mit
| 7,857
|
[
"VisIt"
] |
f87994b3d19c9394c9679478ba2bb015c8ceee9682600d119b30397554f32001
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.