text stringlengths 12 1.05M | repo_name stringlengths 5 86 | path stringlengths 4 191 | language stringclasses 1 value | license stringclasses 15 values | size int32 12 1.05M | keyword listlengths 1 23 | text_hash stringlengths 64 64 |
|---|---|---|---|---|---|---|---|
# python retrieve_test.py $subdir/MACS2243-09/W-C-RC/SCIENCE/coadd_MACS2243-09/coadd.fits $subdir/MACS2243-09/PHOTOMETRY/panstarrs.cat
# python retrieve_test.py $subdir/MACS0911+17/W-C-RC/SCIENCE/coadd_MACS0911+17/coadd.fits $subdir/MACS0911+17/PHOTOMETRY/panstarrsstar.cat
from math import *
import os, sys, anydbm, time, string, commands, re
import astropy
import pickle
import math
def convert_to_pogson(magnitude,filter):
    ''' converts from luptitudes (asinh/"Lupton" magnitudes) to magnitudes in
    the traditional (i.e. pogson) sense.

    magnitude : luptitude in the given band
    filter    : one of 'u','g','r','i','z' (SDSS softening parameters b)

    Returns the Pogson magnitude, or -999 when the implied flux is
    non-positive (or the sinh/log evaluation over/underflows).
    Raises KeyError for an unknown filter name, as before.
    '''
    # SDSS asinh-magnitude softening parameters b per band
    b_values = {'u':1.4E-10,'g':0.9E-10,'r':1.2E-10,'i':1.8E-10,'z':7.4E-10}
    b = b_values[filter]
    try:
        # invert m = -2.5/ln(10) * (asinh(f/2b) + ln(b))  =>  f = 2b*sinh(...)
        flux_ratio = sinh(-1.*log(10)*magnitude/2.5 - log(b))*2*b
    except (ValueError, OverflowError):
        # fix: was a bare except that hid every error; only math-domain and
        # overflow failures are expected here
        flux_ratio = -99
    if flux_ratio > 0:
        pogson_magnitude = -2.5 * log10(flux_ratio)
    else:
        pogson_magnitude = -999
    return pogson_magnitude
def inspect_flags(flags1,flags2):
    ''' make sure we have a clean sample of panstarrs stars

    flags1, flags2 : per-band flag words (5 entries each, one per ugriz band),
                     holding SDSS-style bitmask flags word 1 and word 2.

    Decodes the named flag bits for each band and prints them.  Note: as in
    the original, the result is always 1 -- no object is actually rejected;
    callers relying on that contract are unaffected.
    '''
    # {flag name: [flag word number (1 or 2), bit position]}
    flcodes = {'PEAKCENTER':[1,5],'NOTCHECKED':[1,19],'DEBLEND_NOPEAK':[2,14],'PSF_FLUX_INTERP':[2,15],'BAD_COUNTS_ERROR':[2,8],'INTERP_CENTER':[2,12],'CR':[1,12],'BINNED1':[1,28],'BRIGHT':[1,1],'SATURATED':[1,18],'EDGE':[1,2],'BLENDED':[1,3],'NODEBLEND':[1,6],'NOPROFILE':[1,7]}
    good = 1
    for i in range(5):
        bits = {}
        for fl in flcodes.keys():
            flag_num = flcodes[fl][0]
            # fix: bit position is the SECOND entry (original read [0] twice)
            bit_num = flcodes[fl][1]
            flags = [flags1[i],flags2[i]]
            # fix: flag_num is 1-based (word 1 / word 2); original indexed
            # flags[2] out of range.  Also dropped a stray line that
            # referenced undefined names and made the function unrunnable.
            bits[fl] = flags[flag_num - 1] & 2**int(bit_num)
        print(bits)
        good = 1
    return good
def run(img,outcat,type,limits=None,illum_cat='test.cat'):
    # Build an LDAC-format standard-star catalog (outcat) covering the sky
    # area of coadd image `img`, starting from a CSV PANSTARRS reference
    # catalog (`illum_cat`).  Steps:
    #   1. determine RA/Dec limits from `limits` or from the image WCS
    #      (via the external `dfits`/`fitsort` tools),
    #   2. clip the reference catalog to those limits with astropy,
    #   3. convert g/r/i/z to B/V/R/I via Lupton (2005) relations,
    #   4. write an ASCII table and convert it with the external
    #      `asctoldac` tool.
    # Returns (coverage_ok, outcat) where coverage_ok is True when more
    # than 10 objects survive the clip, or (False, None) if the input
    # catalog signals "No" data.
    # NOTE(review): Python 2 code (print statements, `commands`, `string`
    # module); `type` and `filter`-style builtin shadowing is pre-existing.
    #example: outcat='/gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/PHOTOMETRY/panstarrsstar.cat'
    startdir=os.getcwd()
    outcatdir=os.path.dirname(outcat)
    # all scratch files below are created in the output catalog's directory
    os.chdir(outcatdir)
    print img, outcat, type
    # select SDSS-style magnitude column prefix; NOTE(review): if `type` is
    # neither 'star' nor 'galaxy', mag_type is never bound and the code
    # raises NameError later -- confirm callers only pass these two values
    if type == 'star': mag_type = 'psf'
    if type == 'galaxy': mag_type = 'petro'
    print img
    # remove leftovers from any previous run
    if os.path.isfile("outim_panstarrs"):
        os.system("rm outim_panstarrs")
    if os.path.isfile(outcat):
        os.system('rm ' + outcat)
    if os.path.isfile("panstarrs_out"):
        os.system('rm panstarrs_out')
    if limits is not None:
        # caller supplied explicit sky limits
        ramin = limits['ramin']
        ramax = limits['ramax']
        decmin = limits['decmin']
        decmax = limits['decmax']
    else:
        # derive limits from the image WCS header; probe whether the header
        # uses a CD matrix (CD2_1 present) or CDELT keywords
        command = 'dfits ' + img + ' | fitsort -d CD2_1'
        print command
        print commands.getoutput(command)
        if string.find(commands.getoutput(command),'KEY') == -1:
            imcom = "dfits " + img + " | fitsort CRPIX1 CRPIX2 CRVAL1 CRVAL2 CD2_1 CD1_2 CD2_2 CD1_1 > ./outim_panstarrs"
        else:
            imcom = "dfits " + img + " | fitsort CRPIX1 CRPIX2 CRVAL1 CRVAL2 CDELT1 CDELT2 > ./outim_panstarrs"
        print imcom
        os.system(imcom)
        print open('outim_panstarrs','r').readlines()
        # second line of fitsort output holds the whitespace-separated values
        com = re.split('\s+',open("outim_panstarrs",'r').readlines()[1][:-1] )
        print com
        crpix1 = float(com[1])
        crpix2 = float(com[2])
        crval1 = float(com[3])
        crval2 = float(com[4])
        if string.find(commands.getoutput(command),'KEY') == -1:
            # CD-matrix case: prefer the off-diagonal terms when non-zero
            cdelt1A = float(com[5])
            cdelt2A = float(com[6])
            cdelt1B = float(com[7])
            cdelt2B = float(com[8])
            if float(cdelt1A) != 0:
                cdelt1 = cdelt1A
                cdelt2 = cdelt2A
            else:
                cdelt1 = cdelt1B
                cdelt2 = cdelt2B
        else:
            cdelt1 = float(com[5])
            cdelt2 = float(com[6])
        print crpix1, crval1, cdelt1
        # half-width of 9000 pixels around the reference pixel value
        #ramin = crval1 - crpix1*cdelt1
        ramin = crval1 - 9000*abs(cdelt1)
        print ramin
        ramax = crval1 + 9000*abs(cdelt1)
        if ramax < ramin:
            top = ramin
            ramin = ramax
            ramax = top
        decmin = crval2 - 9000*abs(cdelt2)
        decmax = crval2 + 9000*abs(cdelt2)
    #adam-SHNT# this is my substitute for the sdss-version of clipping in ra/dec, check to make sure it works
    # clip the reference catalog to the limits; boolean masks are combined
    # by elementwise multiplication (logical AND)
    illum_fo=astropy.io.ascii.read(illum_cat,'csv')
    ra=illum_fo['ra']
    dec=illum_fo['dec']
    ra_lims=(ra<ramax)*(ra>ramin)
    dec_lims=(dec<decmax)*(dec>decmin)
    pos_lims=ra_lims*dec_lims
    illum_fo2=illum_fo[pos_lims]
    illum_fo2.write(illum_cat+'2',format='ascii.csv',overwrite=True)
    #illum_fo.close()
    panstarrs_fo=open(illum_cat+'2','r')
    lines = panstarrs_fo.readlines()
    #adam-sdss-version# query = "select clean, ra,dec,raErr,decErr," + mag_type + "Mag_u," + mag_type + "Mag_g," + mag_type + "Mag_r," + mag_type + "Mag_i," + mag_type + "Mag_z," + mag_type + "MagErr_u," + mag_type + "MagErr_g," + mag_type + "MagErr_r," + mag_type + "MagErr_i," + mag_type + "MagErr_z, flags from " + type + " where ra between " + str(ramin)[:8] + " and " + str(ramax)[:8] + " and dec between " + str(decmin)[:8] + " and " +str(decmax)[:8] + " AND clean=1 and " + flags
    # keep a pickled copy of the raw clipped lines for debugging
    uu = open('store_panstarrs','w')
    pickle.dump(lines,uu)
    columns = lines[0][:-1].split(',')
    data = []
    #print columns
    #print lines
    # the (SDSS-era) service signalled an empty result with a line
    # starting with 'No'
    if lines[0][0:2] == 'No':
        return False, None
    # parse each CSV row into a {column name: string value} dict,
    # skipping rows containing 'font' (HTML error-page residue)
    for line in range(1,len(lines[1:])+1):
        #print lines[line]
        dt0 = {}
        for j in range(len(lines[line][:-1].split(','))):
            dt0[columns[j]] = lines[line][:-1].split(',')[j]
        if string.find(lines[line][:-1],'font') == -1:
            data.append(dt0)
        #if string.find(lines[line][:-1],'font') != -1:
        #print lines[line][:-1]
    print ' len(data)=',len(data)
    # NOTE(review): raises IndexError here when data is empty -- confirm
    # upstream guarantees a non-empty clipped catalog
    print ' len(data[0])=',len(data[0])
    outwrite = open('panstarrs_out','w')
    # output column order; 2-element entries are [catalog key, output name]
    keys = ['SeqNr',['dec','Dec'],['ra','Ra'],'raErr','decErr','umag','gmag','rmag','imag','Bmag','Vmag','Rmag','Imag','zmag','uerr','gerr','rerr','ierr','Berr','Verr','Rerr','Ierr','zerr','umg','gmr','rmi','imz','BmV','VmR','RmI','Imz','umgerr','gmrerr','rmierr','imzerr','BmVerr','VmRerr','RmIerr','Imzerr','A_WCS','B_WCS','THETAWCS','Flag','Clean',['ra','ALPHA_J2000'],['dec','DELTA_J2000']]
    #keys = ['SeqNr',['dec','Dec'],['ra','Ra'],'raErr','decErr','umag','gmag','rmag','imag','Bmag','Vmag','Rmag','Imag','zmag','uerr','gerr','rerr','ierr','Berr','Verr','Rerr','Ierr','zerr','umg','gmr','rmi','imz','BmV','VmR','RmI','Imz','umgerr','gmrerr','rmierr','imzerr','BmVerr','VmRerr','RmIerr','Imzerr','flags_u','flags_g','flags_r','flags_i','flags_z','A_WCS','B_WCS','THETAWCS','Flag','Clean',['ra','ALPHA_J2000'],['dec','DELTA_J2000']]
    #adam-SHNT# this is the point where I'll have to get the panstarrs catalog to start from!
    #adam-SHNT# on this cat: /nfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/PHOTOMETRY/astrefcat-stars_only.txt
    #adam-ask-anja# are these corrections for sdss applicable for panstarrs?
    #answer: this doesn't really matter that much
    # SDSS AB offsets (unused below since the pogson conversion is skipped)
    ab_correction = {'u':-0.036,'g':0.012,'r':0.010,'i':0.028,'z':0.040}
    seqnr = 1
    for els in range(len(data)):
        clean = data[els]['clean']
        #pogson converts from luptitudes to magnitudes, and is not needed for PANSTARRS
        #u = convert_to_pogson(float(data[els][mag_type + 'Mag_u']),'u') + ab_correction['u']
        #g = convert_to_pogson(float(data[els][mag_type + 'Mag_g']),'g') + ab_correction['g']
        #r = convert_to_pogson(float(data[els][mag_type + 'Mag_r']),'r') + ab_correction['r']
        #i = convert_to_pogson(float(data[els][mag_type + 'Mag_i']),'i') + ab_correction['i']
        #z = convert_to_pogson(float(data[els][mag_type + 'Mag_z']),'z') + ab_correction['z']
        u = float(data[els][mag_type + 'Mag_u'])
        g = float(data[els][mag_type + 'Mag_g'])
        r = float(data[els][mag_type + 'Mag_r'])
        i = float(data[els][mag_type + 'Mag_i'])
        z = float(data[els][mag_type + 'Mag_z'])
        uerr = float(data[els][mag_type + 'MagErr_u']) ; gerr = float(data[els][mag_type + 'MagErr_g']) ; rerr = float(data[els][mag_type + 'MagErr_r']) ; ierr = float(data[els][mag_type + 'MagErr_i']) ; zerr = float(data[els][mag_type + 'MagErr_z'])
        #adam-old: I have to change this, since there is no u band for PANSTARRS
        # data[els]['Bmag'] = u - 0.8116*(u - g) + 0.1313# sigma = 0.0095
        # data[els]['Berr'] = math.sqrt( (uerr*0.19)**2. + (0.8119*gerr)**2.)
        #adam-new# using Lupton 2005 conversion
        # negative/sentinel magnitudes propagate as -999; errors are still
        # computed from the raw (possibly sentinel) per-band errors
        #B = g + 0.3130*(g - r) + 0.2271# sigma = 0.0107
        if g>0 and r>0:
            data[els]['Bmag'] = g + 0.3130*(g - r) + 0.2271 # sigma = 0.0107
        else:
            data[els]['Bmag'] = -999.0
        data[els]['Berr'] = math.sqrt( (gerr*1.3130)**2 + (rerr*0.3130)**2 + 0.0107**2)
        #V = g - 0.2906*(u - g) + 0.0885# sigma = 0.0129
        if g>0 and r>0:
            data[els]['Vmag'] = g - 0.5784*(g - r) - 0.0038# sigma = 0.0054
        else:
            data[els]['Vmag'] = -999.0
        data[els]['Verr'] = math.sqrt( (gerr*0.42)**2. + (0.57*rerr)**2.)
        #R = r - 0.1837*(g - r) - 0.0971# sigma = 0.0106
        if i>0 and r>0:
            data[els]['Rmag'] = r - 0.2936*(r - i) - 0.1439# sigma = 0.0072
        else:
            data[els]['Rmag'] = -999.0
        data[els]['Rerr'] = math.sqrt( (rerr*0.71)**2. + (0.29*ierr)**2.)
        if i>0 and r>0:
            data[els]['Imag'] = r - 1.2444*(r - i) - 0.3820# sigma = 0.0078
        else:
            data[els]['Imag'] = -999.0
        data[els]['Ierr'] = math.sqrt( (rerr*0.24)**2. + (1.244*ierr)**2.)
        #I = i - 0.3780*(i - z) -0.3974# sigma = 0.0063
        data[els]['umag'] = u ; data[els]['gmag'] = g ; data[els]['rmag'] = r ; data[els]['imag'] = i ; data[els]['zmag'] = z
        # colors; -999 marks any color involving a sentinel magnitude
        data[els]['umg'] = -999.0
        if data[els]['rmag']>0 and data[els]['gmag']>0:
            data[els]['gmr'] = data[els]['gmag'] - data[els]['rmag']
        else:
            data[els]['gmr'] = -999.0
        if data[els]['rmag']>0 and data[els]['imag']>0:
            data[els]['rmi'] = data[els]['rmag'] - data[els]['imag']
        else:
            data[els]['rmi'] = -999.0
        if data[els]['zmag']>0 and data[els]['imag']>0:
            data[els]['imz'] = data[els]['imag'] - data[els]['zmag']
        else:
            data[els]['imz'] = -999.0
        data[els]['uerr'] = uerr ; data[els]['gerr'] = gerr ; data[els]['rerr'] = rerr ; data[els]['ierr'] = ierr ; data[els]['zerr'] = zerr
        # color errors: quadrature sum of band errors
        data[els]['umgerr'] = math.sqrt(data[els]['uerr']**2. + data[els]['gerr']**2.)
        data[els]['gmrerr'] = math.sqrt(data[els]['gerr']**2. + data[els]['rerr']**2.)
        data[els]['rmierr'] = math.sqrt(data[els]['rerr']**2. + data[els]['ierr']**2.)
        data[els]['imzerr'] = math.sqrt(data[els]['ierr']**2. + data[els]['zerr']**2.)
        if data[els]['Bmag']>0 and data[els]['Vmag'] > 0:
            data[els]['BmV'] = data[els]['Bmag'] - data[els]['Vmag']
        else:
            data[els]['BmV'] = -999.0
        if data[els]['Rmag']>0 and data[els]['Vmag'] > 0:
            data[els]['VmR'] = data[els]['Vmag'] - data[els]['Rmag']
        else:
            data[els]['VmR'] = -999.0
        if data[els]['Rmag']>0 and data[els]['Imag'] > 0:
            data[els]['RmI'] = data[els]['Rmag'] - data[els]['Imag']
        else:
            data[els]['RmI'] = -999.0
        if data[els]['zmag']>0 and data[els]['Imag'] > 0:
            data[els]['Imz'] = data[els]['Imag'] - data[els]['zmag']
        else:
            data[els]['Imz'] = -999.0
        data[els]['BmVerr'] = math.sqrt(data[els]['Berr']**2. + data[els]['Verr']**2.)
        data[els]['VmRerr'] = math.sqrt(data[els]['Verr']**2. + data[els]['Rerr']**2.)
        data[els]['RmIerr'] = math.sqrt(data[els]['Rerr']**2. + data[els]['Ierr']**2.)
        data[els]['Imzerr'] = math.sqrt(data[els]['Ierr']**2. + data[els]['zerr']**2.)
        #error = (float(data[els]['rowcErr_r'])**2. + float(data[els]['colcErr_r'])**2.)**0.5*0.4/3600.
        #if error < 0.0004: error=0.0004
        # fixed positional-uncertainty ellipse (degrees)
        data[els]['A_WCS'] = 0.0004 #error #data[els]['Err'] #'0.0004'
        data[els]['B_WCS'] = 0.0004 #error #data[els]['decErr'] #'0.0004'
        data[els]['THETAWCS'] = '0'
        data[els]['Clean'] = str(clean)
        data[els]['Flag'] = '0' #str(clean)
        # NOTE(review): incremented before assignment, so SeqNr starts at 2
        seqnr += 1
        data[els]['SeqNr'] = seqnr
        # format one whitespace-separated output row in `keys` order
        lineh = ""
        for key in keys:
            if len(key) == 2:
                key_dict = key[0]
                key = key[1]
            else:
                key_dict = key
            if (key == 'SeqNr' or key_dict=='ra' or key_dict=='dec' or key[0:3] == 'Fla'):
                num = '%(s)s' % {'s' : str(data[els][key_dict])}
            else:
                num = '%(num).4f' % {'num' : float(data[els][key_dict])}
            num = '%s' % num
            # NOTE(review): no-op -- str.strip() returns a new string that
            # is discarded here
            num.strip()
            lineh = lineh + num + " "
        outwrite.write(lineh + "\n")
    outwrite.close()
    #lineh= "lc -C -B "
    #for key in data[els].keys():
    # lineh = lineh + " -N '1 1 " + str(key) + "' "
    #lineh = lineh + " < outwrite > outf.cat"
    #print lineh
    #os.system(lineh)
    # write the asctoldac column-description config matching `keys`
    asc = open('asctoldac_panstarrs.conf','w')
    asc.write('VERBOSE = DEBUG\n')
    for column in keys:
        if len(column) == 2:
            name = column[1]
        else: name = column
        # NOTE(review): `type` here overwrites the function parameter of the
        # same name; harmless since the parameter is no longer read
        if column == 'objID' or column[0:3] == 'fla':
            type = 'STRING'
            htype = 'STRING'
            depth = '128'
        elif column == 'Flag':
            type = 'SHORT'
            htype = 'INT'
            depth = '1'
        elif column == 'SeqNr':
            type = 'LONG'
            htype = 'INT'
            depth = '1'
        elif len(column) ==2: #column == 'Ra' or column == 'Dec':
            type = 'DOUBLE'
            htype = 'FLOAT'
            depth = '1'
        else:
            type = 'FLOAT'
            htype = 'FLOAT'
            depth = '1'
        asc.write('#\nCOL_NAME = ' + name + '\nCOL_TTYPE= ' + type + '\nCOL_HTYPE= ' + htype + '\nCOL_COMM= ""\nCOL_UNIT= ""\nCOL_DEPTH= ' + depth + '\n')
    asc.close()
    # convert the ASCII table to LDAC with the external asctoldac tool
    command = "asctoldac -i panstarrs_out -c asctoldac_panstarrs.conf -t STDTAB -o " + outcat
    ooo=os.system(command)
    print command
    if ooo!=0: raise Exception('asctoldac command failed!')
    os.chdir(startdir)
    # coverage flag: require more than 10 matched reference objects
    if len(data) > 10:
        cov = True
    else: cov = False
    return cov, outcat
if __name__ == '__main__':
    # usage: retrieve_test.py <coadd image> <output LDAC catalog>
    img = sys.argv[1]
    outcat = sys.argv[2]
    # always build a stellar catalog ('psf' magnitudes) from the CLI
    mag_type = 'star'
    run(img, outcat, mag_type,None,illum_cat='test.cat')
| deapplegate/wtgpipeline | retrieve_test_PANSTARRS.py | Python | mit | 14,889 | [
"Galaxy"
] | 7cb11dd5af04f940fec590ae6c54d756d1b0e046009b6352e87ade6a739bc481 |
# -*- coding: utf-8 -*-
import os
# MouseClick.py - To demonstrate Tkinter key clicks
import Tkinter
from kdtree import *
class KDTreeApp:
    """Tkinter app that builds and draws a KD tree from mouse clicks.

    Starts with a "Click To Add Points" label; clicking it swaps in the
    canvas, and each subsequent canvas click inserts a point and redraws
    the partitioning lines.  Note: __init__ blocks in the Tk mainloop.
    """
    def __init__(self):
        """App for creating KD tree dynamically"""
        self.tree = KDTree()
        # fix: ensure the attribute exists before paint()/prepare() ever
        # test it, instead of relying on paint() to create it
        self.label = None
        self.master = Tkinter.Tk()
        self.w = Tkinter.Frame(self.master, width=410, height=410)
        self.canvas = Tkinter.Canvas(self.w, width=400, height=400)
        self.paint()
        self.canvas.bind("<Button-1>", self.click)
        self.w.pack()
        self.w.mainloop()
    def toCartesian(self, y):
        """Convert Tk y-coordinate into Cartesian equivalent (y grows up)."""
        return self.w.winfo_height() - y
    def toTk(self, y):
        """Convert Cartesian coordinate into Tk-equivalent.

        maxValue/minValue sentinels (from kdtree) map to the top/bottom of
        the widget.
        """
        if y == maxValue:
            return 0
        tk_y = self.w.winfo_height()
        if y != minValue:
            tk_y -= y
        return tk_y
    def click(self, event):
        """Add the clicked point to the KD tree and repaint."""
        p = (event.x, self.toCartesian(event.y))
        self.tree.add(p)
        self.paint()
    def drawPartition(self, r, p, orient):
        """Draw the splitting line through point p across region r, then the
        point itself as a small red square."""
        if orient == VERTICAL:
            self.canvas.create_line(p[X_], self.toTk(
                r.y_min), p[X_], self.toTk(r.y_max))
        else:
            # clamp infinite horizontal extents to the widget edges
            xlow = r.x_min
            if r.x_min == minValue:
                xlow = 0
            xhigh = r.x_max
            if r.x_max == maxValue:
                xhigh = self.w.winfo_width()
            self.canvas.create_line(xlow, self.toTk(
                p[Y_]), xhigh, self.toTk(p[Y_]))
        self.canvas.create_rectangle(
            p[X_] - 4, self.toTk(p[Y_]) - 4, p[X_] + 4, self.toTk(p[Y_]) + 4, fill='Red')
    def visit(self, n):
        """Recursively draw node n and its subtrees (pre-order)."""
        # fix: identity comparison for None instead of ==
        if n is None:
            return
        self.drawPartition(n.region, n.point, n.orient)
        self.visit(n.below)
        self.visit(n.above)
    def prepare(self, event):
        """Replace the intro label with the drawing canvas."""
        if self.label:
            self.label.destroy()
            self.label = None
        self.canvas.pack()
    def paint(self):
        """Draw the whole tree, or show the intro label when it is empty."""
        if self.tree.root:
            self.visit(self.tree.root)
        else:
            self.label = Tkinter.Label(
                self.w, width=100, height=40, text="Click To Add Points")
            self.label.bind("<Button-1>", self.prepare)
            self.label.pack()
if __name__ == "__main__":
    # blocks inside the Tk mainloop until the window is closed
    KDTreeApp()
    # Windows-only "Press any key" prompt; harmless error message elsewhere
    os.system("pause")
| NicovincX2/Python-3.5 | Algorithmique/Structure de données/Arbre (structure de données)/Arbre kd/app.py | Python | gpl-3.0 | 2,428 | [
"VisIt"
] | 18bf3eec6bc03614e4d23887a11cd696f2b072b7997aef8a042d3370effa4b5e |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RA4(RPackage):
    """Automated Affymetrix Array Analysis Umbrella Package."""

    homepage = "https://www.bioconductor.org/packages/a4/"
    url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/a4_1.24.0.tar.gz"
    # Bioconductor keeps only the current release at `url`; older tarballs
    # are discovered by scraping the package homepage
    list_url = homepage

    # second argument is the MD5 checksum of the source tarball
    version('1.24.0', 'dfa17ec5b1914300360ff11f43955fdd')

    # the a4 umbrella package re-exports the a4* family of R packages
    depends_on('r-a4base', type=('build', 'run'))
    depends_on('r-a4preproc', type=('build', 'run'))
    depends_on('r-a4classif', type=('build', 'run'))
    depends_on('r-a4core', type=('build', 'run'))
    depends_on('r-a4reporting', type=('build', 'run'))
| wscullin/spack | var/spack/repos/builtin/packages/r-a4/package.py | Python | lgpl-2.1 | 1,842 | [
"Bioconductor"
] | 8a0fff2e1cb70d9972f374e90cb9186bf4d7872a3e98593af864850847e1346d |
# mako/codegen.py
# Copyright (C) 2006-2013 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""provides functionality for rendering a parsetree constructing into module
source code."""
import time
import re
from mako.pygen import PythonPrinter
from mako import util, ast, parsetree, filters, exceptions
from mako import compat
# version stamp written into generated modules as _magic_number;
# presumably compared on load so stale compiled templates are rebuilt --
# confirm against the template-loading code
MAGIC_NUMBER = 9

# names which are hardwired into the
# template and are not accessed via the
# context itself
RESERVED_NAMES = set(['context', 'loop', 'UNDEFINED'])
def compile(node,
            uri,
            filename=None,
            default_filters=None,
            buffer_filters=None,
            imports=None,
            future_imports=None,
            source_encoding=None,
            generate_magic_comment=True,
            disable_unicode=False,
            strict_undefined=False,
            enable_loop=True,
            reserved_names=frozenset()):
    """Generate module source code given a parsetree node,
    uri, and optional source filename"""

    # On Py2K, make "source_encoding" a bytestring so that embedding it in
    # the generated source does not coerce the result to unicode when
    # running in "disable_unicode" mode.
    if not compat.py3k and isinstance(source_encoding, compat.text_type):
        source_encoding = source_encoding.encode(source_encoding)

    # bundle every option into a single context object for the generator
    ctx = _CompileContext(uri,
                          filename,
                          default_filters,
                          buffer_filters,
                          imports,
                          future_imports,
                          source_encoding,
                          generate_magic_comment,
                          disable_unicode,
                          strict_undefined,
                          enable_loop,
                          reserved_names)

    out = util.FastEncodingBuffer()
    _GenerateRenderMethod(PythonPrinter(out), ctx, node)
    return out.getvalue()
class _CompileContext(object):
def __init__(self,
uri,
filename,
default_filters,
buffer_filters,
imports,
future_imports,
source_encoding,
generate_magic_comment,
disable_unicode,
strict_undefined,
enable_loop,
reserved_names):
self.uri = uri
self.filename = filename
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.future_imports = future_imports
self.source_encoding = source_encoding
self.generate_magic_comment = generate_magic_comment
self.disable_unicode = disable_unicode
self.strict_undefined = strict_undefined
self.enable_loop = enable_loop
self.reserved_names = reserved_names
class _GenerateRenderMethod(object):
"""A template visitor object which generates the
full module source for a template.
"""
    def __init__(self, printer, compiler, node):
        """Generate the render callable for *node*, recursing into any
        top-level defs it declares.

        `node` is either the template root (emits ``render_body`` plus the
        module prelude) or a DefTag/BlockTag (emits ``render_<name>``).
        """
        self.printer = printer
        self.last_source_line = -1
        self.compiler = compiler
        self.node = node
        self.identifier_stack = [None]
        self.in_def = isinstance(node, (parsetree.DefTag, parsetree.BlockTag))

        if self.in_def:
            # a <%def> / <%block>: render_<funcname> with the tag's own args
            name = "render_%s" % node.funcname
            args = node.get_argument_expressions()
            filtered = len(node.filter_args.args) > 0
            buffered = eval(node.attributes.get('buffered', 'False'))
            cached = eval(node.attributes.get('cached', 'False'))
            defs = None
            pagetag = None
            if node.is_block and not node.is_anonymous:
                args += ['**pageargs']
        else:
            # template root: emit module prelude first, then render_body,
            # honoring any <%page> tag's argument signature
            defs = self.write_toplevel()
            pagetag = self.compiler.pagetag
            name = "render_body"
            if pagetag is not None:
                args = pagetag.body_decl.get_argument_expressions()
                if not pagetag.body_decl.kwargs:
                    args += ['**pageargs']
                cached = eval(pagetag.attributes.get('cached', 'False'))
                self.compiler.enable_loop = self.compiler.enable_loop or eval(
                    pagetag.attributes.get(
                        'enable_loop', 'False')
                )
            else:
                args = ['**pageargs']
                cached = False
            buffered = filtered = False

        # every render callable receives the Context as its first argument
        if args is None:
            args = ['context']
        else:
            args = [a for a in ['context'] + args]

        self.write_render_callable(
            pagetag or node,
            name, args,
            buffered, filtered, cached)

        # recurse: each top-level def gets its own generator pass
        if defs is not None:
            for node in defs:
                _GenerateRenderMethod(printer, compiler, node)
@property
def identifiers(self):
return self.identifier_stack[-1]
    def write_toplevel(self):
        """Traverse a template structure for module-level directives and
        generate the start of module-level code.

        Returns the list of top-level def nodes so the caller can generate
        a render callable for each.
        """
        inherit = []
        namespaces = {}
        module_code = []

        self.compiler.pagetag = None

        # collect <%inherit>, <%namespace>, <%page> and module-level <%! %>
        # nodes in one pass over the template's children
        class FindTopLevel(object):
            def visitInheritTag(s, node):
                inherit.append(node)

            def visitNamespaceTag(s, node):
                namespaces[node.name] = node

            def visitPageTag(s, node):
                self.compiler.pagetag = node

            def visitCode(s, node):
                if node.ismodule:
                    module_code.append(node)

        f = FindTopLevel()
        for n in self.node.nodes:
            n.accept_visitor(f)

        self.compiler.namespaces = namespaces

        module_ident = set()
        for n in module_code:
            module_ident = module_ident.union(n.declared_identifiers())

        module_identifiers = _Identifiers(self.compiler)
        module_identifiers.declared = module_ident

        # module-level names, python code
        if self.compiler.generate_magic_comment and \
                self.compiler.source_encoding:
            self.printer.writeline("# -*- encoding:%s -*-" %
                                   self.compiler.source_encoding)

        if self.compiler.future_imports:
            self.printer.writeline("from __future__ import %s" %
                                   (", ".join(self.compiler.future_imports),))
        self.printer.writeline("from mako import runtime, filters, cache")
        self.printer.writeline("UNDEFINED = runtime.UNDEFINED")
        self.printer.writeline("__M_dict_builtin = dict")
        self.printer.writeline("__M_locals_builtin = locals")
        self.printer.writeline("_magic_number = %r" % MAGIC_NUMBER)
        self.printer.writeline("_modified_time = %r" % time.time())
        self.printer.writeline("_enable_loop = %r" % self.compiler.enable_loop)
        self.printer.writeline(
            "_template_filename = %r" % self.compiler.filename)
        self.printer.writeline("_template_uri = %r" % self.compiler.uri)
        self.printer.writeline(
            "_source_encoding = %r" % self.compiler.source_encoding)
        if self.compiler.imports:
            # caller-supplied import lines are both emitted and parsed, so
            # the names they declare count as module-level identifiers
            buf = ''
            for imp in self.compiler.imports:
                buf += imp + "\n"
                self.printer.writeline(imp)
            impcode = ast.PythonCode(
                buf,
                source='', lineno=0,
                pos=0,
                filename='template defined imports')
        else:
            impcode = None

        main_identifiers = module_identifiers.branch(self.node)
        module_identifiers.topleveldefs = \
            module_identifiers.topleveldefs.\
            union(main_identifiers.topleveldefs)
        module_identifiers.declared.add("UNDEFINED")
        if impcode:
            module_identifiers.declared.update(impcode.declared_identifiers)

        self.compiler.identifiers = module_identifiers
        self.printer.writeline("_exports = %r" %
                               [n.name for n in
                                main_identifiers.topleveldefs.values()]
                               )
        self.printer.write("\n\n")

        if len(module_code):
            self.write_module_code(module_code)

        # inheritance implies namespace generation even when an explicit
        # <%namespace> is absent
        if len(inherit):
            self.write_namespaces(namespaces)
            self.write_inherit(inherit[-1])
        elif len(namespaces):
            self.write_namespaces(namespaces)

        return list(main_identifiers.topleveldefs.values())
    def write_render_callable(self, node, name, args, buffered, filtered,
                              cached):
        """write a top-level render callable.

        this could be the main render() method or that of a top-level def.

        Pushes a new identifier scope for the duration of the body and
        closes the callable via write_def_finish.
        """

        if self.in_def:
            decorator = node.decorator
            if decorator:
                self.printer.writeline(
                    "@runtime._decorate_toplevel(%s)" % decorator)

        self.printer.writelines(
            "def %s(%s):" % (name, ','.join(args)),
            # push new frame, assign current frame to __M_caller
            "__M_caller = context.caller_stack._push_frame()",
            "try:"
        )
        if buffered or filtered or cached:
            self.printer.writeline("context._push_buffer()")

        self.identifier_stack.append(
            self.compiler.identifiers.branch(self.node))
        if (not self.in_def or self.node.is_block) and '**pageargs' in args:
            self.identifier_stack[-1].argument_declared.add('pageargs')

        # capture declared arguments into __M_locals so enclosed defs can
        # see them via context._locals()
        if not self.in_def and (
                len(self.identifiers.locally_assigned) > 0 or
                len(self.identifiers.argument_declared) > 0
        ):
            self.printer.writeline("__M_locals = __M_dict_builtin(%s)" %
                                   ','.join([
                                       "%s=%s" % (x, x) for x in
                                       self.identifiers.argument_declared
                                   ]))

        self.write_variable_declares(self.identifiers, toplevel=True)

        for n in self.node.nodes:
            n.accept_visitor(self)

        self.write_def_finish(self.node, buffered, filtered, cached)
        self.printer.writeline(None)
        self.printer.write("\n\n")

        if cached:
            self.write_cache_decorator(
                node, name,
                args, buffered,
                self.identifiers, toplevel=True)
def write_module_code(self, module_code):
"""write module-level template code, i.e. that which
is enclosed in <%! %> tags in the template."""
for n in module_code:
self.write_source_comment(n)
self.printer.write_indented_block(n.text)
def write_inherit(self, node):
"""write the module-level inheritance-determination callable."""
self.printer.writelines(
"def _mako_inherit(template, context):",
"_mako_generate_namespaces(context)",
"return runtime._inherit_from(context, %s, _template_uri)" %
(node.parsed_attributes['file']),
None
)
    def write_namespaces(self, namespaces):
        """write the module-level namespace-generating callable.

        Emits ``_mako_get_namespace`` (cached lookup) plus
        ``_mako_generate_namespaces`` which instantiates one runtime
        namespace object per <%namespace> tag.
        """
        self.printer.writelines(
            "def _mako_get_namespace(context, name):",
            "try:",
            "return context.namespaces[(__name__, name)]",
            "except KeyError:",
            "_mako_generate_namespaces(context)",
            "return context.namespaces[(__name__, name)]",
            None, None
        )
        self.printer.writeline("def _mako_generate_namespaces(context):")

        for node in namespaces.values():
            if 'import' in node.attributes:
                self.compiler.has_ns_imports = True
            self.write_source_comment(node)
            if len(node.nodes):
                # the tag has a body: its defs/blocks become the namespace's
                # callables, built by a local make_namespace() factory
                self.printer.writeline("def make_namespace():")
                export = []
                identifiers = self.compiler.identifiers.branch(node)
                self.in_def = True

                class NSDefVisitor(object):
                    def visitDefTag(s, node):
                        s.visitDefOrBase(node)

                    def visitBlockTag(s, node):
                        s.visitDefOrBase(node)

                    def visitDefOrBase(s, node):
                        if node.is_anonymous:
                            raise exceptions.CompileException(
                                "Can't put anonymous blocks inside "
                                "<%namespace>",
                                **node.exception_kwargs
                            )
                        self.write_inline_def(node, identifiers, nested=False)
                        export.append(node.funcname)

                vis = NSDefVisitor()
                for n in node.nodes:
                    n.accept_visitor(vis)
                self.printer.writeline("return [%s]" % (','.join(export)))
                self.printer.writeline(None)
                self.in_def = False
                callable_name = "make_namespace()"
            else:
                callable_name = "None"

            # namespace flavor depends on which attribute the tag carries:
            # file= -> TemplateNamespace, module= -> ModuleNamespace,
            # neither -> plain Namespace
            if 'file' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.TemplateNamespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " templateuri=%s, callables=%s, "
                    " calling_uri=_template_uri)" %
                    (
                        node.name,
                        node.parsed_attributes.get('file', 'None'),
                        callable_name,
                    )
                )
            elif 'module' in node.parsed_attributes:
                self.printer.writeline(
                    "ns = runtime.ModuleNamespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri,"
                    " module=%s)" %
                    (
                        node.name,
                        callable_name,
                        node.parsed_attributes.get('module', 'None')
                    )
                )
            else:
                self.printer.writeline(
                    "ns = runtime.Namespace(%r,"
                    " context._clean_inheritance_tokens(),"
                    " callables=%s, calling_uri=_template_uri)" %
                    (
                        node.name,
                        callable_name,
                    )
                )
            if eval(node.attributes.get('inheritable', "False")):
                self.printer.writeline("context['self'].%s = ns" % (node.name))

            self.printer.writeline(
                "context.namespaces[(__name__, %s)] = ns" % repr(node.name))
            self.printer.write("\n")
        if not len(namespaces):
            self.printer.writeline("pass")
        self.printer.writeline(None)
    def write_variable_declares(self, identifiers, toplevel=False, limit=None):
        """write variable declarations at the top of a function.

        the variable declarations are in the form of callable
        definitions for defs and/or name lookup within the
        function's context argument. the names declared are based
        on the names that are referenced in the function body,
        which don't otherwise have any explicit assignment
        operation. names that are assigned within the body are
        assumed to be locally-scoped variables and are not
        separately declared.

        for def callable definitions, if the def is a top-level
        callable then a 'stub' callable is generated which wraps
        the current Context into a closure. if the def is not
        top-level, it is fully rendered as a local closure.
        """

        # collection of all defs available to us in this scope
        comp_idents = dict([(c.funcname, c) for c in identifiers.defs])
        to_write = set()

        # write "context.get()" for all variables we are going to
        # need that arent in the namespace yet
        to_write = to_write.union(identifiers.undeclared)

        # write closure functions for closures that we define
        # right here
        to_write = to_write.union(
            [c.funcname for c in identifiers.closuredefs.values()])

        # remove identifiers that are declared in the argument
        # signature of the callable
        to_write = to_write.difference(identifiers.argument_declared)

        # remove identifiers that we are going to assign to.
        # in this way we mimic Python's behavior,
        # i.e. assignment to a variable within a block
        # means that variable is now a "locally declared" var,
        # which cannot be referenced beforehand.
        to_write = to_write.difference(identifiers.locally_declared)

        # "loop" is handled specially: it gets a LoopStack rather than a
        # context lookup, and only when the loop feature is enabled
        if self.compiler.enable_loop:
            has_loop = "loop" in to_write
            to_write.discard("loop")
        else:
            has_loop = False

        # if a limiting set was sent, constraint to those items in that list
        # (this is used for the caching decorator)
        if limit is not None:
            to_write = to_write.intersection(limit)

        if toplevel and getattr(self.compiler, 'has_ns_imports', False):
            self.printer.writeline("_import_ns = {}")
            self.compiler.has_imports = True
            for ident, ns in self.compiler.namespaces.items():
                if 'import' in ns.attributes:
                    self.printer.writeline(
                        "_mako_get_namespace(context, %r)."\
                        "_populate(_import_ns, %r)" %
                        (
                            ident,
                            re.split(r'\s*,\s*', ns.attributes['import'])
                        ))

        if has_loop:
            self.printer.writeline(
                'loop = __M_loop = runtime.LoopStack()'
            )

        for ident in to_write:
            if ident in comp_idents:
                # the name is a def in this scope: named blocks become
                # stubs, anonymous blocks and nested defs become closures
                comp = comp_idents[ident]
                if comp.is_block:
                    if not comp.is_anonymous:
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)
                else:
                    if comp.is_root():
                        self.write_def_decl(comp, identifiers)
                    else:
                        self.write_inline_def(comp, identifiers, nested=True)

            elif ident in self.compiler.namespaces:
                self.printer.writeline(
                    "%s = _mako_get_namespace(context, %r)" %
                    (ident, ident)
                )
            else:
                # plain name: fetch from namespace imports and/or context;
                # strict_undefined turns a missing name into NameError
                if getattr(self.compiler, 'has_ns_imports', False):
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "%s = _import_ns.get(%r, UNDEFINED)" %
                            (ident, ident),
                            "if %s is UNDEFINED:" % ident,
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None, None
                        )
                    else:
                        self.printer.writeline(
                            "%s = _import_ns.get(%r, context.get(%r, UNDEFINED))" %
                            (ident, ident, ident))
                else:
                    if self.compiler.strict_undefined:
                        self.printer.writelines(
                            "try:",
                            "%s = context[%r]" % (ident, ident),
                            "except KeyError:",
                            "raise NameError(\"'%s' is not defined\")" %
                            ident,
                            None
                        )
                    else:
                        self.printer.writeline(
                            "%s = context.get(%r, UNDEFINED)" % (ident, ident)
                        )

        self.printer.writeline("__M_writer = context.writer()")
def write_source_comment(self, node):
"""write a source comment containing the line number of the
corresponding template line."""
if self.last_source_line != node.lineno:
self.printer.writeline("# SOURCE LINE %d" % node.lineno)
self.last_source_line = node.lineno
def write_def_decl(self, node, identifiers):
"""write a locally-available callable referencing a top-level def"""
funcname = node.funcname
namedecls = node.get_argument_expressions()
nameargs = node.get_argument_expressions(include_defaults=False)
if not self.in_def and (
len(self.identifiers.locally_assigned) > 0 or
len(self.identifiers.argument_declared) > 0):
nameargs.insert(0, 'context._locals(__M_locals)')
else:
nameargs.insert(0, 'context')
self.printer.writeline("def %s(%s):" % (funcname, ",".join(namedecls)))
self.printer.writeline(
"return render_%s(%s)" % (funcname, ",".join(nameargs)))
self.printer.writeline(None)
def write_inline_def(self, node, identifiers, nested):
    """write a locally-available def callable inside an enclosing def.

    :param node: def (or block) parse node to render.
    :param identifiers: enclosing identifier scope; a child branch is
        created for the def body.
    :param nested: passed through to ``identifiers.branch()``.
    """
    namedecls = node.get_argument_expressions()

    decorator = node.decorator
    if decorator:
        self.printer.writeline(
            "@runtime._decorate_inline(context, %s)" % decorator)
    self.printer.writeline(
        "def %s(%s):" % (node.funcname, ",".join(namedecls)))
    filtered = len(node.filter_args.args) > 0
    # NOTE(review): attribute values come from template source and are
    # evaluated with eval() — Mako treats template text as trusted input.
    buffered = eval(node.attributes.get('buffered', 'False'))
    cached = eval(node.attributes.get('cached', 'False'))
    self.printer.writelines(
        # push new frame, assign current frame to __M_caller
        "__M_caller = context.caller_stack._push_frame()",
        "try:"
    )
    if buffered or filtered or cached:
        # captured output is collected in a buffer so it can be
        # filtered / cached / returned by write_def_finish()
        self.printer.writelines(
            "context._push_buffer()",
        )

    identifiers = identifiers.branch(node, nested=nested)

    self.write_variable_declares(identifiers)

    self.identifier_stack.append(identifiers)
    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()

    self.write_def_finish(node, buffered, filtered, cached)
    self.printer.writeline(None)
    if cached:
        self.write_cache_decorator(node, node.funcname,
                                   namedecls, False, identifiers,
                                   inline=True, toplevel=False)
def write_def_finish(self, node, buffered, filtered, cached,
                     callstack=True):
    """write the end section of a rendering function, either outermost or
    inline.

    this takes into account if the rendering function was filtered,
    buffered, etc. and closes the corresponding try: block if any, and
    writes code to retrieve captured content, apply filters, send proper
    return value."""
    # simple case: output was written directly, nothing captured
    if not buffered and not cached and not filtered:
        self.printer.writeline("return ''")
        if callstack:
            self.printer.writelines(
                "finally:",
                "context.caller_stack._pop_frame()",
                None
            )

    if buffered or filtered or cached:
        if buffered or cached:
            # in a caching scenario, don't try to get a writer
            # from the context after popping; assume the caching
            # implementation might be using a context with no
            # extra buffers
            self.printer.writelines(
                "finally:",
                "__M_buf = context._pop_buffer()"
            )
        else:
            self.printer.writelines(
                "finally:",
                "__M_buf, __M_writer = context._pop_buffer_and_writer()"
            )

        if callstack:
            self.printer.writeline("context.caller_stack._pop_frame()")

        # build up the expression that yields the captured content,
        # applying def-level filters then buffer filters
        s = "__M_buf.getvalue()"
        if filtered:
            s = self.create_filter_callable(node.filter_args.args, s,
                                            False)
        self.printer.writeline(None)
        if buffered and not cached:
            s = self.create_filter_callable(self.compiler.buffer_filters,
                                            s, False)
        if buffered or cached:
            # buffered/cached defs return their content as a string
            self.printer.writeline("return %s" % s)
        else:
            # filtered-only defs write the content out immediately
            self.printer.writelines(
                "__M_writer(%s)" % s,
                "return ''"
            )
def write_cache_decorator(self, node_or_pagetag, name,
                          args, buffered, identifiers,
                          inline=False, toplevel=False):
    """write a post-function decorator to replace a rendering
    callable with a cached version of itself."""
    # keep the undecorated function available as __M_<name>
    self.printer.writeline("__M_%s = %s" % (name, name))
    cachekey = node_or_pagetag.parsed_attributes.get('cache_key',
                                                    repr(name))

    cache_args = {}
    # collect cache_* attributes from the <%page> tag first, then let
    # the tag's own attributes override them
    if self.compiler.pagetag is not None:
        cache_args.update(
            (
                pa[6:],
                self.compiler.pagetag.parsed_attributes[pa]
            )
            for pa in self.compiler.pagetag.parsed_attributes
            if pa.startswith('cache_') and pa != 'cache_key'
        )
    cache_args.update(
        (
            pa[6:],
            node_or_pagetag.parsed_attributes[pa]
        ) for pa in node_or_pagetag.parsed_attributes
        if pa.startswith('cache_') and pa != 'cache_key'
    )
    # NOTE(review): eval() on a template-supplied attribute — Mako
    # treats template source as trusted input.
    if 'timeout' in cache_args:
        cache_args['timeout'] = int(eval(cache_args['timeout']))

    self.printer.writeline("def %s(%s):" % (name, ','.join(args)))

    # form "arg1, arg2, arg3=arg3, arg4=arg4", etc.
    pass_args = [
        '=' in a and "%s=%s" % ((a.split('=')[0],)*2) or a
        for a in args
    ]

    self.write_variable_declares(
        identifiers,
        toplevel=toplevel,
        limit=node_or_pagetag.undeclared_identifiers()
    )
    if buffered:
        s = "context.get('local')."\
            "cache._ctx_get_or_create("\
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r)" % \
            (cachekey, name, ','.join(pass_args),
             ''.join(["%s=%s, " % (k, v)
                      for k, v in cache_args.items()]),
             name
             )
        # apply buffer_filters
        s = self.create_filter_callable(self.compiler.buffer_filters, s,
                                        False)
        self.printer.writelines("return " + s, None)
    else:
        self.printer.writelines(
            "__M_writer(context.get('local')."
            "cache._ctx_get_or_create("
            "%s, lambda:__M_%s(%s), context, %s__M_defname=%r))" %
            (cachekey, name, ','.join(pass_args),
             ''.join(["%s=%s, " % (k, v)
                      for k, v in cache_args.items()]),
             name,
             ),
            "return ''",
            None
        )
def create_filter_callable(self, args, target, is_expression):
    """write a filter-applying expression based on the filters
    present in the given filter names, adjusting for the global
    'default' filter aliases as needed.

    :param args: list of filter names/expressions to apply.
    :param target: string expression the filters wrap.
    :param is_expression: True when filtering a ``${...}`` expression,
        in which case page-level and default filters are prepended.
    :return: the fully wrapped string expression.
    """
    def locate_encode(name):
        # translate the shorthand aliases (u/h/x/trim/entity...) to
        # their fully-qualified filter function names
        if re.match(r'decode\..+', name):
            return "filters." + name
        elif self.compiler.disable_unicode:
            return filters.NON_UNICODE_ESCAPES.get(name, name)
        else:
            return filters.DEFAULT_ESCAPES.get(name, name)

    # the 'n' pseudo-filter disables the implicit page/default filters
    if 'n' not in args:
        if is_expression:
            if self.compiler.pagetag:
                args = self.compiler.pagetag.filter_args.args + args
            if self.compiler.default_filters:
                args = self.compiler.default_filters + args
    for e in args:
        # if filter given as a function, get just the identifier portion
        if e == 'n':
            continue
        m = re.match(r'(.+?)(\(.*\))', e)
        if m:
            ident, fargs = m.group(1, 2)
            f = locate_encode(ident)
            e = f + fargs
        else:
            # (fixed) removed dead local assignment "x = e" that was
            # never read
            e = locate_encode(e)
        assert e is not None
        target = "%s(%s)" % (e, target)
    return target
def visitExpression(self, node):
    """Render a ``${...}`` expression, wrapping it in any escape,
    page-level, or default filters before writing."""
    self.write_source_comment(node)
    needs_filters = (
        len(node.escapes) or
        (self.compiler.pagetag is not None and
         len(self.compiler.pagetag.filter_args.args)) or
        len(self.compiler.default_filters)
    )
    if needs_filters:
        wrapped = self.create_filter_callable(
            node.escapes_code.args, "%s" % node.text, True)
        self.printer.writeline("__M_writer(%s)" % wrapped)
    else:
        self.printer.writeline("__M_writer(%s)" % node.text)
def visitControlLine(self, node):
    """Render a ``%`` control line (if/for/while/try...) or its
    closing end-line."""
    if node.isend:
        # dedent; a for-loop that used the loop context also needs
        # its finally: exit emitted here
        self.printer.writeline(None)
        if node.has_loop_context:
            self.printer.writeline('finally:')
            self.printer.writeline("loop = __M_loop._exit()")
            self.printer.writeline(None)
    else:
        self.write_source_comment(node)
        if self.compiler.enable_loop and node.keyword == 'for':
            text = mangle_mako_loop(node, self.printer)
        else:
            text = node.text
        self.printer.writeline(text)
        children = node.get_children()
        # this covers the three situations where we want to insert a pass:
        #    1) a ternary control line with no children,
        #    2) a primary control line with nothing but its own ternary
        #          and end control lines, and
        #    3) any control line with no content other than comments
        if not children or (
                compat.all(isinstance(c, (parsetree.Comment,
                                          parsetree.ControlLine))
                           for c in children) and
                compat.all((node.is_ternary(c.keyword) or c.isend)
                           for c in children
                           if isinstance(c, parsetree.ControlLine))):
            self.printer.writeline("pass")
def visitText(self, node):
    """Render literal template text as a write of its repr()."""
    self.write_source_comment(node)
    text_literal = repr(node.content)
    self.printer.writeline("__M_writer(%s)" % text_literal)
def visitTextTag(self, node):
    """Render a ``<%text>`` tag; child content renders verbatim,
    optionally captured and run through the tag's filters."""
    has_filters = len(node.filter_args.args) > 0
    if has_filters:
        # capture child output so filters can be applied afterwards
        self.printer.writelines(
            "__M_writer = context._push_writer()",
            "try:",
        )
    for child in node.nodes:
        child.accept_visitor(self)
    if has_filters:
        filter_expr = self.create_filter_callable(
            node.filter_args.args,
            "__M_buf.getvalue()",
            False)
        self.printer.writelines(
            "finally:",
            "__M_buf, __M_writer = context._pop_buffer_and_writer()",
            "__M_writer(%s)" % filter_expr,
            None
        )
def visitCode(self, node):
    """Render a ``<% ... %>`` python block (module-level blocks are
    emitted elsewhere)."""
    if not node.ismodule:
        self.write_source_comment(node)
        self.printer.write_indented_block(node.text)

        if not self.in_def and len(self.identifiers.locally_assigned) > 0:
            # if we are the "template" def, fudge locally
            # declared/modified variables into the "__M_locals" dictionary,
            # which is used for def calls within the same template,
            # to simulate "enclosing scope"
            self.printer.writeline(
                '__M_locals_builtin_stored = __M_locals_builtin()')
            self.printer.writeline(
                '__M_locals.update(__M_dict_builtin([(__M_key,'
                ' __M_locals_builtin_stored[__M_key]) for __M_key in'
                ' [%s] if __M_key in __M_locals_builtin_stored]))' %
                ','.join([repr(x) for x in node.declared_identifiers()]))
def visitIncludeTag(self, node):
    """Render a ``<%include>`` tag as a runtime._include_file() call."""
    self.write_source_comment(node)
    file_expr = node.parsed_attributes['file']
    include_args = node.attributes.get('args')
    if include_args:
        self.printer.writeline(
            "runtime._include_file(context, %s, _template_uri, %s)" %
            (file_expr, include_args))
    else:
        self.printer.writeline(
            "runtime._include_file(context, %s, _template_uri)" %
            (file_expr,))
def visitNamespaceTag(self, node):
    # namespaces are rendered up front in write_namespaces();
    # nothing to emit at the point of the tag itself
    pass

def visitDefTag(self, node):
    # defs are rendered via write_def_decl()/write_inline_def();
    # the tag itself produces no inline output
    pass
def visitBlockTag(self, node):
    """Render the in-place invocation of a ``<%block>``."""
    if node.is_anonymous:
        # anonymous blocks simply render where they appear
        self.printer.writeline("%s()" % node.funcname)
        return
    # named block: invoke only when no parent template in the
    # inheritance chain overrides it
    call_args = node.get_argument_expressions(include_defaults=False)
    call_args = call_args + ['**pageargs']
    self.printer.writeline("if 'parent' not in context._data or "
                           "not hasattr(context._data['parent'], '%s'):"
                           % node.funcname)
    self.printer.writeline(
        "context['self'].%s(%s)" % (node.funcname, ",".join(call_args)))
    self.printer.writeline("\n")
def visitCallNamespaceTag(self, node):
    # TODO: we can put namespace-specific checks here, such
    # as ensure the given namespace will be imported,
    # pre-import the namespace, etc.
    # a <%namespacename:defname> call renders identically to <%call>
    self.visitCallTag(node)
def visitCallTag(self, node):
    """Render a ``<%call>`` tag: the tag body plus any nested defs are
    compiled into a one-off 'caller' namespace which is pushed onto the
    caller stack for the duration of the called def."""
    self.printer.writeline("def ccall(caller):")
    export = ['body']
    callable_identifiers = self.identifiers.branch(node, nested=True)
    body_identifiers = callable_identifiers.branch(node, nested=False)
    # we want the 'caller' passed to ccall to be used
    # for the body() function, but for other non-body()
    # <%def>s within <%call> we want the current caller
    # off the call stack (if any)
    body_identifiers.add_declared('caller')

    self.identifier_stack.append(body_identifiers)

    class DefVisitor(object):
        # local visitor: renders nested defs/blocks and collects their
        # names for export from ccall()

        def visitDefTag(s, node):
            s.visitDefOrBase(node)

        def visitBlockTag(s, node):
            s.visitDefOrBase(node)

        def visitDefOrBase(s, node):
            self.write_inline_def(node, callable_identifiers, nested=False)
            if not node.is_anonymous:
                export.append(node.funcname)
            # remove defs that are within the <%call> from the
            # "closuredefs" defined in the body, so they dont render twice
            if node.funcname in body_identifiers.closuredefs:
                del body_identifiers.closuredefs[node.funcname]

    vis = DefVisitor()
    for n in node.nodes:
        n.accept_visitor(vis)
    self.identifier_stack.pop()

    bodyargs = node.body_decl.get_argument_expressions()
    self.printer.writeline("def body(%s):" % ','.join(bodyargs))

    # TODO: figure out best way to specify
    # buffering/nonbuffering (at call time would be better)
    buffered = False
    if buffered:
        self.printer.writelines(
            "context._push_buffer()",
            "try:"
        )
    self.write_variable_declares(body_identifiers)
    self.identifier_stack.append(body_identifiers)

    for n in node.nodes:
        n.accept_visitor(self)
    self.identifier_stack.pop()

    self.write_def_finish(node, buffered, False, False, callstack=False)
    self.printer.writelines(
        None,
        "return [%s]" % (','.join(export)),
        None
    )

    self.printer.writelines(
        # push on caller for nested call
        "context.caller_stack.nextcaller = "
        "runtime.Namespace('caller', context, "
        "callables=ccall(__M_caller))",
        "try:")
    self.write_source_comment(node)
    self.printer.writelines(
        "__M_writer(%s)" % self.create_filter_callable(
            [], node.expression, True),
        "finally:",
        "context.caller_stack.nextcaller = None",
        None
    )
class _Identifiers(object):

    """tracks the status of identifier names as template code is rendered.

    Instances form a tree mirroring the template's def/block nesting;
    each level records which names are already declared, which are
    referenced before declaration, and which defs are visible.
    """

    def __init__(self, compiler, node=None, parent=None, nested=False):
        if parent is not None:
            # if we are the branch created in write_namespaces(),
            # we don't share any context from the main body().
            if isinstance(node, parsetree.NamespaceTag):
                self.declared = set()
                self.topleveldefs = util.SetLikeDict()
            else:
                # things that have already been declared
                # in an enclosing namespace (i.e. names we can just use)
                self.declared = set(parent.declared).\
                    union([c.name for c in parent.closuredefs.values()]).\
                    union(parent.locally_declared).\
                    union(parent.argument_declared)

                # if these identifiers correspond to a "nested"
                # scope, it means whatever the parent identifiers
                # had as undeclared will have been declared by that parent,
                # and therefore we have them in our scope.
                if nested:
                    self.declared = self.declared.union(parent.undeclared)

                # top level defs that are available
                self.topleveldefs = util.SetLikeDict(**parent.topleveldefs)
        else:
            self.declared = set()
            self.topleveldefs = util.SetLikeDict()

        self.compiler = compiler

        # things within this level that are referenced before they
        # are declared (e.g. assigned to)
        self.undeclared = set()

        # things that are declared locally.  some of these things
        # could be in the "undeclared" list as well if they are
        # referenced before declared
        self.locally_declared = set()

        # assignments made in explicit python blocks.
        # these will be propagated to
        # the context of local def calls.
        self.locally_assigned = set()

        # things that are declared in the argument
        # signature of the def callable
        self.argument_declared = set()

        # closure defs that are defined in this level
        self.closuredefs = util.SetLikeDict()

        self.node = node

        # populate the sets above by visiting the node subtree
        if node is not None:
            node.accept_visitor(self)

        illegal_names = self.compiler.reserved_names.intersection(
            self.locally_declared)
        if illegal_names:
            raise exceptions.NameConflictError(
                "Reserved words declared in template: %s" %
                ", ".join(illegal_names))

    def branch(self, node, **kwargs):
        """create a new Identifiers for a new Node, with
        this Identifiers as the parent."""

        return _Identifiers(self.compiler, node, self, **kwargs)

    @property
    def defs(self):
        # all defs visible at this level, top-level and closure alike
        return set(self.topleveldefs.union(self.closuredefs).values())

    def __repr__(self):
        return "Identifiers(declared=%r, locally_declared=%r, "\
            "undeclared=%r, topleveldefs=%r, closuredefs=%r, "\
            "argumentdeclared=%r)" %\
            (
                list(self.declared),
                list(self.locally_declared),
                list(self.undeclared),
                [c.name for c in self.topleveldefs.values()],
                [c.name for c in self.closuredefs.values()],
                self.argument_declared)

    def check_declared(self, node):
        """update the state of this Identifiers with the undeclared
        and declared identifiers of the given node."""

        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)
        for ident in node.declared_identifiers():
            self.locally_declared.add(ident)

    def add_declared(self, ident):
        # explicitly mark a name as declared (e.g. 'caller' in <%call>)
        self.declared.add(ident)
        if ident in self.undeclared:
            self.undeclared.remove(ident)

    def visitExpression(self, node):
        self.check_declared(node)

    def visitControlLine(self, node):
        self.check_declared(node)

    def visitCode(self, node):
        if not node.ismodule:
            self.check_declared(node)
            self.locally_assigned = self.locally_assigned.union(
                node.declared_identifiers())

    def visitNamespaceTag(self, node):
        # only traverse into the sub-elements of a
        # <%namespace> tag if we are the branch created in
        # write_namespaces()
        if self.node is node:
            for n in node.nodes:
                n.accept_visitor(self)

    def _check_name_exists(self, collection, node):
        # register the def/block in `collection`, raising on duplicate
        # names when either party is a named block
        existing = collection.get(node.funcname)
        collection[node.funcname] = node
        if existing is not None and \
                existing is not node and \
                (node.is_block or existing.is_block):
            raise exceptions.CompileException(
                "%%def or %%block named '%s' already "
                "exists in this template." %
                node.funcname, **node.exception_kwargs)

    def visitDefTag(self, node):
        if node.is_root() and not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)

        for ident in node.undeclared_identifiers():
            if ident != 'context' and\
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

        # visit defs only one level deep
        if node is self.node:
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)

            for n in node.nodes:
                n.accept_visitor(self)

    def visitBlockTag(self, node):
        if node is not self.node and \
                not node.is_anonymous:

            if isinstance(self.node, parsetree.DefTag):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of def '%s'"
                    % (node.name, self.node.name), **node.exception_kwargs)
            elif isinstance(self.node,
                            (parsetree.CallTag, parsetree.CallNamespaceTag)):
                raise exceptions.CompileException(
                    "Named block '%s' not allowed inside of <%%call> tag"
                    % (node.name, ), **node.exception_kwargs)

        for ident in node.undeclared_identifiers():
            if ident != 'context' and \
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

        if not node.is_anonymous:
            self._check_name_exists(self.topleveldefs, node)
            self.undeclared.add(node.funcname)
        elif node is not self.node:
            self._check_name_exists(self.closuredefs, node)
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        for n in node.nodes:
            n.accept_visitor(self)

    def visitTextTag(self, node):
        for ident in node.undeclared_identifiers():
            if ident != 'context' and \
                    ident not in self.declared.union(self.locally_declared):
                self.undeclared.add(ident)

    def visitIncludeTag(self, node):
        self.check_declared(node)

    def visitPageTag(self, node):
        for ident in node.declared_identifiers():
            self.argument_declared.add(ident)
        self.check_declared(node)

    def visitCallNamespaceTag(self, node):
        self.visitCallTag(node)

    def visitCallTag(self, node):
        # visit <%call> bodies only one level deep, same as defs
        if node is self.node:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(
                            self.locally_declared):
                    self.undeclared.add(ident)
            for ident in node.declared_identifiers():
                self.argument_declared.add(ident)
            for n in node.nodes:
                n.accept_visitor(self)
        else:
            for ident in node.undeclared_identifiers():
                if ident != 'context' and\
                        ident not in self.declared.union(
                            self.locally_declared):
                    self.undeclared.add(ident)
# matches the target list and iterable of a template "% for" line, e.g.
# "for x, y in seq:" -> group(1) == "x, y", group(2) == "seq";
# used by mangle_mako_loop() to rewrite the loop around __M_loop
_FOR_LOOP = re.compile(
    r'^for\s+((?:\(?)\s*[A-Za-z_][A-Za-z_0-9]*'
    r'(?:\s*,\s*(?:[A-Za-z_][A-Za-z0-9_]*),??)*\s*(?:\)?))\s+in\s+(.*):'
)
def mangle_mako_loop(node, printer):
    """converts a for loop into a context manager wrapped around a for loop
    when access to the `loop` variable has been detected in the for loop body
    """
    detector = LoopVariable()
    node.accept_visitor(detector)
    if not detector.detected:
        return node.text

    # flag the closing control line so visitControlLine emits the
    # matching finally:/__M_loop._exit() on dedent
    node.nodes[-1].has_loop_context = True
    match = _FOR_LOOP.match(node.text)
    if not match:
        raise SyntaxError("Couldn't apply loop context: %s" % node.text)
    printer.writelines(
        'loop = __M_loop._enter(%s)' % match.group(2),
        'try:'
        # 'with __M_loop(%s) as loop:' % match.group(2)
    )
    return 'for %s in loop:' % match.group(1)
class LoopVariable(object):
    """A node visitor which looks for the name 'loop' within undeclared
    identifiers."""

    def __init__(self):
        # flips to True once any visited node references 'loop'
        self.detected = False

    def _scan(self, node):
        if 'loop' in node.undeclared_identifiers():
            self.detected = True
            return
        for child in node.get_children():
            child.accept_visitor(self)

    def visitControlLine(self, node):
        self._scan(node)

    def visitCode(self, node):
        self._scan(node)

    def visitExpression(self, node):
        self._scan(node)
| akuchling/book-diary-tools | books.d/mako/codegen.py | Python | bsd-3-clause | 49,252 | [
"VisIt"
] | 8993bb8dff66669a69e8f643599e7b3cb22b113bd1de0052b28a33b565f185fb |
# sql/compiler.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
# identifiers that must always be quoted when rendered
RESERVED_WORDS = {
    'all', 'analyse', 'analyze', 'and', 'any', 'array',
    'as', 'asc', 'asymmetric', 'authorization', 'between',
    'binary', 'both', 'case', 'cast', 'check', 'collate',
    'column', 'constraint', 'create', 'cross', 'current_date',
    'current_role', 'current_time', 'current_timestamp',
    'current_user', 'default', 'deferrable', 'desc',
    'distinct', 'do', 'else', 'end', 'except', 'false',
    'for', 'foreign', 'freeze', 'from', 'full', 'grant',
    'group', 'having', 'ilike', 'in', 'initially', 'inner',
    'intersect', 'into', 'is', 'isnull', 'join', 'leading',
    'left', 'like', 'limit', 'localtime', 'localtimestamp',
    'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
    'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
    'placing', 'primary', 'references', 'right', 'select',
    'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
    'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
    'using', 'verbose', 'when', 'where',
}

# an identifier consisting solely of these characters needs no quoting
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
# identifiers may not *start* with a digit or '$'
ILLEGAL_INITIAL_CHARACTERS = {str(digit) for digit in range(0, 10)} | {'$'}

# locates ":name" bind parameter markers (\x5c is a backslash escape)
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
# locates backslash-escaped ":name" sequences to be emitted literally
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
# templates used to render a bind parameter marker, keyed by DBAPI
# paramstyle.  "%(name)s" is substituted with the parameter name later;
# "[_POSITION]" is replaced with a 1-based index by
# SQLCompiler._apply_numbered_params() for the 'numeric' style.
BIND_TEMPLATES = {
    'pyformat': "%%(%(name)s)s",
    'qmark': "?",
    'format': "%%s",
    'numeric': ":[_POSITION]",
    'named': ":%(name)s"
}
# mapping of operator functions (sqlalchemy.sql.operators) to their SQL
# text; dialect compilers may override individual entries
OPERATORS = {
    # binary
    operators.and_: ' AND ',
    operators.or_: ' OR ',
    operators.add: ' + ',
    operators.mul: ' * ',
    operators.sub: ' - ',
    operators.div: ' / ',
    operators.mod: ' % ',
    operators.truediv: ' / ',
    operators.neg: '-',
    operators.lt: ' < ',
    operators.le: ' <= ',
    operators.ne: ' != ',
    operators.gt: ' > ',
    operators.ge: ' >= ',
    operators.eq: ' = ',
    operators.concat_op: ' || ',
    operators.match_op: ' MATCH ',
    operators.notmatch_op: ' NOT MATCH ',
    operators.in_op: ' IN ',
    operators.notin_op: ' NOT IN ',
    operators.comma_op: ', ',
    operators.from_: ' FROM ',
    operators.as_: ' AS ',
    operators.is_: ' IS ',
    operators.isnot: ' IS NOT ',
    operators.collate: ' COLLATE ',

    # unary
    operators.exists: 'EXISTS ',
    operators.distinct_op: 'DISTINCT ',
    operators.inv: 'NOT ',

    # modifiers
    operators.desc_op: ' DESC',
    operators.asc_op: ' ASC',
    operators.nullsfirst_op: ' NULLS FIRST',
    operators.nullslast_op: ' NULLS LAST',
}
# rendering templates for "generic" SQL functions; "%(expr)s" receives
# the rendered argument list (entries without it render as bare keywords)
FUNCTIONS = {
    functions.coalesce: 'coalesce%(expr)s',
    functions.current_date: 'CURRENT_DATE',
    functions.current_time: 'CURRENT_TIME',
    functions.current_timestamp: 'CURRENT_TIMESTAMP',
    functions.current_user: 'CURRENT_USER',
    functions.localtime: 'LOCALTIME',
    functions.localtimestamp: 'LOCALTIMESTAMP',
    functions.random: 'random%(expr)s',
    functions.sysdate: 'sysdate',
    functions.session_user: 'SESSION_USER',
    functions.user: 'USER'
}
# fields accepted by EXTRACT(); every supported field maps straight
# through to its own SQL name (dialects override entries as needed)
EXTRACT_MAP = dict(
    (field, field)
    for field in (
        'month', 'day', 'year', 'second', 'hour', 'doy',
        'minute', 'quarter', 'dow', 'week', 'epoch',
        'milliseconds', 'microseconds',
        'timezone_hour', 'timezone_minute',
    )
)
# SQL keywords used to join the component SELECTs of a CompoundSelect
COMPOUND_KEYWORDS = {
    selectable.CompoundSelect.UNION: 'UNION',
    selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
    selectable.CompoundSelect.EXCEPT: 'EXCEPT',
    selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
    selectable.CompoundSelect.INTERSECT: 'INTERSECT',
    selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):

    """Represent a compiled SQL or DDL expression.

    The ``__str__`` method of the ``Compiled`` object should produce
    the actual text of the statement.  ``Compiled`` objects are
    specific to their underlying database dialect, and also may
    or may not be specific to the columns referenced within a
    particular set of bind parameters.  In no case should the
    ``Compiled`` object be dependent on the actual values of those
    bind parameters, even though it may reference those values as
    defaults.
    """

    # cached ResultMetaData, populated lazily by subclasses
    _cached_metadata = None

    def __init__(self, dialect, statement, bind=None,
                 compile_kwargs=util.immutabledict()):
        """Construct a new ``Compiled`` object.

        :param dialect: ``Dialect`` to compile against.

        :param statement: ``ClauseElement`` to be compiled.

        :param bind: Optional Engine or Connection to compile this
          statement against.

        :param compile_kwargs: additional kwargs that will be
         passed to the initial call to :meth:`.Compiled.process`.

         .. versionadded:: 0.8

        """
        self.dialect = dialect
        self.bind = bind
        if statement is not None:
            self.statement = statement
            self.can_execute = statement.supports_execution
            # compilation happens eagerly, here in the constructor
            self.string = self.process(self.statement, **compile_kwargs)

    @util.deprecated("0.7", ":class:`.Compiled` objects now compile "
                     "within the constructor.")
    def compile(self):
        """Produce the internal string representation of this element.
        """
        pass

    def _execute_on_connection(self, connection, multiparams, params):
        return connection._execute_compiled(self, multiparams, params)

    @property
    def sql_compiler(self):
        """Return a Compiled that is capable of processing SQL expressions.

        If this compiler is one, it would likely just return 'self'.

        """
        raise NotImplementedError()

    def process(self, obj, **kwargs):
        # dispatch to the visit_XXX method matching the element type
        return obj._compiler_dispatch(self, **kwargs)

    def __str__(self):
        """Return the string text of the generated SQL or DDL."""

        return self.string or ''

    def construct_params(self, params=None):
        """Return the bind params for this compiled object.

        :param params: a dict of string/object pairs whose values will
                       override bind values compiled in to the
                       statement.
        """
        raise NotImplementedError()

    @property
    def params(self):
        """Return the bind params for this compiled object."""
        return self.construct_params()

    def execute(self, *multiparams, **params):
        """Execute this compiled object."""

        e = self.bind
        if e is None:
            raise exc.UnboundExecutionError(
                "This Compiled object is not bound to any Engine "
                "or Connection.")
        return e._execute_compiled(self, multiparams, params)

    def scalar(self, *multiparams, **params):
        """Execute this compiled object and return the result's
        scalar value."""

        return self.execute(*multiparams, **params).scalar()
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
    """Produces DDL specification for TypeEngine objects."""

    # raw string: '\w' is an invalid escape sequence in a plain string
    # literal (DeprecationWarning since Python 3.6, SyntaxWarning in
    # 3.12+); the resulting value is unchanged
    ensure_kwarg = r'visit_\w+'

    def __init__(self, dialect):
        self.dialect = dialect

    def process(self, type_, **kw):
        # dispatch to the visit_XXX method for the type
        return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):

    """lightweight label object which acts as an expression.Label."""

    __visit_name__ = 'label'
    # '_alt_names' added: it is assigned in __init__ but was missing
    # from __slots__, so it previously landed in the instance __dict__
    # inherited from the base class, defeating the purpose of slots
    __slots__ = 'element', 'name', '_alt_names'

    def __init__(self, col, name, alt_names=()):
        self.element = col
        self.name = name
        self._alt_names = (col,) + alt_names

    @property
    def proxy_set(self):
        # delegate to the wrapped element, as expression.Label does
        return self.element.proxy_set

    @property
    def type(self):
        return self.element.type
class SQLCompiler(Compiled):

    """Default implementation of Compiled.

    Compiles ClauseElements into SQL strings.   Uses a similar visit
    paradigm as visitors.ClauseVisitor but implements its own traversal.
    """

    # module-level maps, overridable per-dialect
    extract_map = EXTRACT_MAP

    compound_keywords = COMPOUND_KEYWORDS

    isdelete = isinsert = isupdate = False
    """class-level defaults which can be set at the instance
    level to define if this Compiled instance represents
    INSERT/UPDATE/DELETE
    """

    returning = None
    """holds the "returning" collection of columns if
    the statement is CRUD and defines returning columns
    either implicitly or explicitly
    """

    returning_precedes_values = False
    """set to True classwide to generate RETURNING
    clauses before the VALUES or WHERE clause (i.e. MSSQL)
    """

    render_table_with_column_in_update_from = False
    """set to True classwide to indicate the SET clause
    in a multi-table UPDATE statement should qualify
    columns with the table name (i.e. MySQL only)
    """

    # NOTE(review): the text below says a subclass "can set this flag to
    # False", while the default here is already False — presumably the
    # intent is that subclasses set it to True; confirm against dialects.
    ansi_bind_rules = False
    """SQL 92 doesn't allow bind parameters to be used
    in the columns clause of a SELECT, nor does it allow
    ambiguous expressions like "? = ?".  A compiler
    subclass can set this flag to False if the target
    driver/DB enforces this
    """
def __init__(self, dialect, statement, column_keys=None,
             inline=False, **kwargs):
    """Construct a new ``DefaultCompiler`` object.

    dialect
      Dialect to be used

    statement
      ClauseElement to be compiled

    column_keys
      a list of column names to be compiled into an INSERT or UPDATE
      statement.

    inline
      whether INSERT/UPDATE defaults should be compiled inline
      (no pre-execute); also picked up from the statement itself.
    """
    self.column_keys = column_keys

    # compile INSERT/UPDATE defaults/sequences inlined (no pre-
    # execute)
    self.inline = inline or getattr(statement, 'inline', False)

    # a dictionary of bind parameter keys to BindParameter
    # instances.
    self.binds = {}

    # a dictionary of BindParameter instances to "compiled" names
    # that are actually present in the generated SQL
    self.bind_names = util.column_dict()

    # stack which keeps track of nested SELECT statements
    self.stack = []

    # relates label names in the final SQL to a tuple of local
    # column/label name, ColumnElement object (if any) and
    # TypeEngine. ResultProxy uses this for type processing and
    # column targeting
    self._result_columns = []

    # if False, means we can't be sure the list of entries
    # in _result_columns is actually the rendered order.  This
    # gets flipped when we use TextAsFrom, for example.
    self._ordered_columns = True

    # true if the paramstyle is positional
    self.positional = dialect.positional
    if self.positional:
        self.positiontup = []
    self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]

    self.ctes = None

    # an IdentifierPreparer that formats the quoting of identifiers
    self.preparer = dialect.identifier_preparer
    self.label_length = dialect.label_length \
        or dialect.max_identifier_length

    # a map which tracks "anonymous" identifiers that are created on
    # the fly here
    self.anon_map = util.PopulateDict(self._process_anon)

    # a map which tracks "truncated" names based on
    # dialect.label_length or dialect.max_identifier_length
    self.truncated_names = {}

    # NOTE: Compiled.__init__ triggers self.process() (i.e. the full
    # compilation), so all state above must already be initialized
    Compiled.__init__(self, dialect, statement, **kwargs)

    if self.positional and dialect.paramstyle == 'numeric':
        self._apply_numbered_params()
@util.memoized_instancemethod
def _init_cte_state(self):
    """Initialize collections related to CTEs only if
    a CTE is located, to save on the overhead of
    these collections otherwise.

    The memoizing decorator ensures this runs at most once per
    compiler instance.
    """
    # collect CTEs to tack on top of a SELECT
    self.ctes = util.OrderedDict()
    self.ctes_by_name = {}
    self.ctes_recursive = False
    if self.positional:
        # positional bind values contributed by each CTE
        self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
    """special API to support the use case of 'nested result sets'"""
    # swap in fresh result-column state for the duration of the block;
    # the original state is restored on exit
    result_columns, ordered_columns = (
        self._result_columns, self._ordered_columns)
    self._result_columns, self._ordered_columns = [], False

    try:
        if self.stack:
            # flag the enclosing SELECT so it builds a result map
            # for the nested statement
            entry = self.stack[-1]
            entry['need_result_map_for_nested'] = True
        else:
            entry = None
        yield self._result_columns, self._ordered_columns
    finally:
        if entry:
            entry.pop('need_result_map_for_nested')
        self._result_columns, self._ordered_columns = (
            result_columns, ordered_columns)
def _apply_numbered_params(self):
    """Replace each ``[_POSITION]`` placeholder in the rendered SQL
    with a successive 1-based parameter number ('numeric' paramstyle)."""
    position_counter = itertools.count(1)

    def _next_position(match):
        return str(util.next(position_counter))

    self.string = re.sub(r'\[_POSITION\]', _next_position, self.string)
@util.memoized_property
def _bind_processors(self):
    """Map each compiled bind name to its type's bind processor,
    omitting parameters whose type has no processor."""
    processors = {}
    for bindparam in self.bind_names:
        processor = bindparam.type._cached_bind_processor(self.dialect)
        if processor is not None:
            processors[self.bind_names[bindparam]] = processor
    return processors
def is_subquery(self):
    """Return True if compilation is currently within an enclosed
    SELECT (more than one entry on the select stack)."""
    depth = len(self.stack)
    return depth > 1
@property
def sql_compiler(self):
    # this compiler is itself a SQL-expression compiler
    return self
def construct_params(self, params=None, _group_number=None, _check=True):
    """return a dictionary of bind parameter keys and values.

    :param params: optional dict of runtime values; entries here
        override values compiled into the statement, matched first by
        the parameter's original key, then by its compiled name.
    :param _group_number: executemany() group index, used only to
        enrich the error message for missing required parameters.
    :param _check: when True, raise
        :class:`.exc.InvalidRequestError` for any ``required`` bind
        parameter that has no supplied value.
    """
    # (refactored) the previous implementation duplicated this loop in
    # two nearly-identical branches for params/no-params; merged into a
    # single loop with identical semantics and error messages.
    pd = {}
    for bindparam in self.bind_names:
        name = self.bind_names[bindparam]
        if params and bindparam.key in params:
            pd[name] = params[bindparam.key]
        elif params and name in params:
            pd[name] = params[name]
        elif _check and bindparam.required:
            if _group_number:
                raise exc.InvalidRequestError(
                    "A value is required for bind parameter %r, "
                    "in parameter group %d" %
                    (bindparam.key, _group_number))
            else:
                raise exc.InvalidRequestError(
                    "A value is required for bind parameter %r"
                    % bindparam.key)
        elif bindparam.callable:
            pd[name] = bindparam.effective_value
        else:
            pd[name] = bindparam.value
    return pd
@property
def params(self):
    """Return the bind param dictionary embedded into this
    compiled object, for those values that are present."""
    # _check=False: missing "required" parameters do not raise here
    return self.construct_params(_check=False)
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
    def visit_label_reference(
            self, element, within_columns_clause=False, **kwargs):
        # When the dialect supports ORDER BY <label>, try to render the
        # reference as the bare label name rather than re-rendering the
        # full labeled expression.
        if self.stack and self.dialect.supports_simple_order_by_label:
            selectable = self.stack[-1]['selectable']
            with_cols, only_froms = selectable._label_resolve_dict
            if within_columns_clause:
                resolve_dict = only_froms
            else:
                resolve_dict = with_cols
            # this can be None in the case that a _label_reference()
            # were subject to a replacement operation, in which case
            # the replacement of the Label element may have changed
            # to something else like a ColumnClause expression.
            order_by_elem = element.element._order_by_label_element
            if order_by_elem is not None and order_by_elem.name in \
                    resolve_dict:
                kwargs['render_label_as_label'] = \
                    element.element._order_by_label_element
        return self.process(
            element.element, within_columns_clause=within_columns_clause,
            **kwargs)
    def visit_textual_label_reference(
            self, element, within_columns_clause=False, **kwargs):
        if not self.stack:
            # compiling the element outside of the context of a SELECT
            return self.process(
                element._text_clause
            )
        # resolve the string name against the labels / froms of the
        # enclosing selectable
        selectable = self.stack[-1]['selectable']
        with_cols, only_froms = selectable._label_resolve_dict
        try:
            if within_columns_clause:
                col = only_froms[element.element]
            else:
                col = with_cols[element.element]
        except KeyError:
            # treat it like text()
            util.warn_limited(
                "Can't resolve label reference %r; converting to text()",
                util.ellipses_string(element.element))
            return self.process(
                element._text_clause
            )
        else:
            kwargs['render_label_as_label'] = col
            return self.process(
                col, within_columns_clause=within_columns_clause, **kwargs)
    def visit_label(self, label,
                    add_to_result_map=None,
                    within_label_clause=False,
                    within_columns_clause=False,
                    render_label_as_label=None,
                    **kw):
        # only render labels within the columns clause
        # or ORDER BY clause of a select. dialect-specific compilers
        # can modify this behavior.
        render_label_with_as = (within_columns_clause and not
                                within_label_clause)
        render_label_only = render_label_as_label is label
        if render_label_only or render_label_with_as:
            # possibly shorten the label name to fit the dialect's limit
            if isinstance(label.name, elements._truncated_label):
                labelname = self._truncated_identifier("colident", label.name)
            else:
                labelname = label.name
        if render_label_with_as:
            if add_to_result_map is not None:
                add_to_result_map(
                    labelname,
                    label.name,
                    (label, labelname, ) + label._alt_names,
                    label.type
                )
            # "<expression> AS <labelname>"
            return label.element._compiler_dispatch(
                self, within_columns_clause=True,
                within_label_clause=True, **kw) + \
                OPERATORS[operators.as_] + \
                self.preparer.format_label(label, labelname)
        elif render_label_only:
            return self.preparer.format_label(label, labelname)
        else:
            return label.element._compiler_dispatch(
                self, within_columns_clause=False, **kw)
    def visit_column(self, column, add_to_result_map=None,
                     include_table=True, **kwargs):
        # render a column reference, optionally table- and schema-qualified
        name = orig_name = column.name
        if name is None:
            raise exc.CompileError("Cannot compile Column object until "
                                   "its 'name' is assigned.")
        is_literal = column.is_literal
        if not is_literal and isinstance(name, elements._truncated_label):
            name = self._truncated_identifier("colident", name)
        if add_to_result_map is not None:
            add_to_result_map(
                name,
                orig_name,
                (column, name, column.key),
                column.type
            )
        if is_literal:
            # literal_column() text is emitted as-is apart from escaping
            name = self.escape_literal_column(name)
        else:
            name = self.preparer.quote(name)
        table = column.table
        if table is None or not include_table or not table.named_with_column:
            return name
        else:
            if table.schema:
                schema_prefix = self.preparer.quote_schema(table.schema) + '.'
            else:
                schema_prefix = ''
            tablename = table.name
            if isinstance(tablename, elements._truncated_label):
                tablename = self._truncated_identifier("alias", tablename)
            return schema_prefix + \
                self.preparer.quote(tablename) + \
                "." + name
def escape_literal_column(self, text):
"""provide escaping for the literal_column() construct."""
# TODO: some dialects might need different behavior here
return text.replace('%', '%%')
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
return text
    def visit_textclause(self, textclause, **kw):
        # render raw text(), substituting ":name" tokens with bound
        # parameters; names with an explicit bindparams() entry render
        # through the full bindparam path
        def do_bindparam(m):
            name = m.group(1)
            if name in textclause._bindparams:
                return self.process(textclause._bindparams[name], **kw)
            else:
                return self.bindparam_string(name, **kw)
        # un-escape any \:params
        return BIND_PARAMS_ESC.sub(
            lambda m: m.group(1),
            BIND_PARAMS.sub(
                do_bindparam,
                self.post_process_text(textclause.text))
        )
    def visit_text_as_from(self, taf,
                           compound_index=None,
                           asfrom=False,
                           parens=True, **kw):
        # render a text().columns() construct, which is usable in a
        # FROM context
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        # populate the result map when we are the outermost statement, the
        # first element of a compound, or a nested select that asked for it
        populate_result_map = toplevel or \
            (
                compound_index == 0 and entry.get(
                    'need_result_map_for_compound', False)
            ) or entry.get('need_result_map_for_nested', False)
        if populate_result_map:
            # textual columns carry no reliable ordering information
            self._ordered_columns = False
            for c in taf.column_args:
                self.process(c, within_columns_clause=True,
                             add_to_result_map=self._add_to_result_map)
        text = self.process(taf.element, **kw)
        if asfrom and parens:
            text = "(%s)" % text
        return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
over.func._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
    def visit_function(self, func, add_to_result_map=None, **kwargs):
        # render a SQL function call; a compiler method named
        # visit_<name>_func takes precedence, otherwise known function
        # classes render via the FUNCTIONS template table
        if add_to_result_map is not None:
            add_to_result_map(
                func.name, func.name, (), func.type
            )
        disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
        if disp:
            return disp(func, **kwargs)
        else:
            name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
            return ".".join(list(func.packagenames) + [name]) % \
                {'expr': self.function_argspec(func, **kwargs)}
    def visit_next_value_func(self, next_value, **kw):
        # sequence "next value" expression delegates to visit_sequence
        return self.visit_sequence(next_value.sequence)
    def visit_sequence(self, sequence):
        # overridden by dialects that support sequences
        raise NotImplementedError(
            "Dialect '%s' does not support sequence increments." %
            self.dialect.name
        )
    def function_argspec(self, func, **kwargs):
        # render the parenthesized argument list of a function call
        return func.clause_expr._compiler_dispatch(self, **kwargs)
    def visit_compound_select(self, cs, asfrom=False,
                              parens=True, compound_index=0, **kwargs):
        # render a UNION / INTERSECT / EXCEPT of multiple selects
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        # only the first element of the compound populates the result map
        need_result_map = toplevel or \
            (compound_index == 0
             and entry.get('need_result_map_for_compound', False))
        self.stack.append(
            {
                'correlate_froms': entry['correlate_froms'],
                'asfrom_froms': entry['asfrom_froms'],
                'selectable': cs,
                'need_result_map_for_compound': need_result_map
            })
        # NOTE(review): .get() yields None for an unrecognized compound
        # keyword, which would fail in the join below - presumably keywords
        # are validated upstream; confirm
        keyword = self.compound_keywords.get(cs.keyword)
        text = (" " + keyword + " ").join(
            (c._compiler_dispatch(self,
                                  asfrom=asfrom, parens=False,
                                  compound_index=i, **kwargs)
             for i, c in enumerate(cs.selects))
        )
        group_by = cs._group_by_clause._compiler_dispatch(
            self, asfrom=asfrom, **kwargs)
        if group_by:
            text += " GROUP BY " + group_by
        text += self.order_by_clause(cs, **kwargs)
        # LIMIT/OFFSET rendered only when at least one is present
        text += (cs._limit_clause is not None
                 or cs._offset_clause is not None) and \
            self.limit_clause(cs, **kwargs) or ""
        if self.ctes and toplevel:
            text = self._render_cte_clause() + text
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
    def visit_unary(self, unary, **kw):
        # dispatch a unary expression to an operator- or modifier-specific
        # visit method when one exists, else render generically via the
        # OPERATORS table
        if unary.operator:
            if unary.modifier:
                raise exc.CompileError(
                    "Unary expression does not support operator "
                    "and modifier simultaneously")
            disp = getattr(self, "visit_%s_unary_operator" %
                           unary.operator.__name__, None)
            if disp:
                return disp(unary, unary.operator, **kw)
            else:
                return self._generate_generic_unary_operator(
                    unary, OPERATORS[unary.operator], **kw)
        elif unary.modifier:
            disp = getattr(self, "visit_%s_unary_modifier" %
                           unary.modifier.__name__, None)
            if disp:
                return disp(unary, unary.modifier, **kw)
            else:
                return self._generate_generic_unary_modifier(
                    unary, OPERATORS[unary.modifier], **kw)
        else:
            raise exc.CompileError(
                "Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
    def visit_binary(self, binary, override_operator=None, **kw):
        # don't allow "? = ?" to render
        if self.ansi_bind_rules and \
                isinstance(binary.left, elements.BindParameter) and \
                isinstance(binary.right, elements.BindParameter):
            kw['literal_binds'] = True
        operator_ = override_operator or binary.operator
        # an operator-specific visit_<opname>_binary method takes
        # precedence over the generic OPERATORS-table rendering
        disp = getattr(self, "visit_%s_binary" % operator_.__name__, None)
        if disp:
            return disp(binary, operator_, **kw)
        else:
            try:
                opstring = OPERATORS[operator_]
            except KeyError:
                raise exc.UnsupportedCompilationError(self, operator_)
            else:
                return self._generate_generic_binary(binary, opstring, **kw)
    def visit_custom_op_binary(self, element, operator, **kw):
        # user-defined op(): render with the caller-supplied opstring
        return self._generate_generic_binary(
            element, " " + operator.opstring + " ", **kw)
    def visit_custom_op_unary_operator(self, element, operator, **kw):
        # user-defined prefix operator
        return self._generate_generic_unary_operator(
            element, operator.opstring + " ", **kw)
    def visit_custom_op_unary_modifier(self, element, operator, **kw):
        # user-defined postfix modifier
        return self._generate_generic_unary_modifier(
            element, " " + operator.opstring, **kw)
    def _generate_generic_binary(self, binary, opstring, **kw):
        # "<left><opstring><right>"
        return binary.left._compiler_dispatch(self, **kw) + \
            opstring + \
            binary.right._compiler_dispatch(self, **kw)
    def _generate_generic_unary_operator(self, unary, opstring, **kw):
        # prefix form: "<opstring><element>"
        return opstring + unary.element._compiler_dispatch(self, **kw)
    def _generate_generic_unary_modifier(self, unary, opstring, **kw):
        # postfix form: "<element><opstring>"
        return unary.element._compiler_dispatch(self, **kw) + opstring
    @util.memoized_property
    def _like_percent_literal(self):
        # shared literal '%' wildcard used by the LIKE-based operators below
        return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
    def visit_contains_op_binary(self, binary, operator, **kw):
        # col LIKE '%' || <expr> || '%'
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__add__(binary.right).__add__(percent)
        return self.visit_like_op_binary(binary, operator, **kw)
    def visit_notcontains_op_binary(self, binary, operator, **kw):
        # col NOT LIKE '%' || <expr> || '%'
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__add__(binary.right).__add__(percent)
        return self.visit_notlike_op_binary(binary, operator, **kw)
    def visit_startswith_op_binary(self, binary, operator, **kw):
        # col LIKE <expr> || '%'  (note __radd__: expr precedes wildcard)
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__radd__(
            binary.right
        )
        return self.visit_like_op_binary(binary, operator, **kw)
    def visit_notstartswith_op_binary(self, binary, operator, **kw):
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__radd__(
            binary.right
        )
        return self.visit_notlike_op_binary(binary, operator, **kw)
    def visit_endswith_op_binary(self, binary, operator, **kw):
        # col LIKE '%' || <expr>
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__add__(binary.right)
        return self.visit_like_op_binary(binary, operator, **kw)
    def visit_notendswith_op_binary(self, binary, operator, **kw):
        binary = binary._clone()
        percent = self._like_percent_literal
        binary.right = percent.__add__(binary.right)
        return self.visit_notlike_op_binary(binary, operator, **kw)
    def visit_like_op_binary(self, binary, operator, **kw):
        # LIKE, with optional ESCAPE character rendered as a literal
        escape = binary.modifiers.get("escape", None)
        # TODO: use ternary here, not "and"/ "or"
        return '%s LIKE %s' % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw)) \
            + (
                ' ESCAPE ' +
                self.render_literal_value(escape, sqltypes.STRINGTYPE)
                if escape else ''
            )
    def visit_notlike_op_binary(self, binary, operator, **kw):
        escape = binary.modifiers.get("escape", None)
        return '%s NOT LIKE %s' % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw)) \
            + (
                ' ESCAPE ' +
                self.render_literal_value(escape, sqltypes.STRINGTYPE)
                if escape else ''
            )
    def visit_ilike_op_binary(self, binary, operator, **kw):
        # case-insensitive LIKE emulated via lower() on both sides
        escape = binary.modifiers.get("escape", None)
        return 'lower(%s) LIKE lower(%s)' % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw)) \
            + (
                ' ESCAPE ' +
                self.render_literal_value(escape, sqltypes.STRINGTYPE)
                if escape else ''
            )
    def visit_notilike_op_binary(self, binary, operator, **kw):
        escape = binary.modifiers.get("escape", None)
        return 'lower(%s) NOT LIKE lower(%s)' % (
            binary.left._compiler_dispatch(self, **kw),
            binary.right._compiler_dispatch(self, **kw)) \
            + (
                ' ESCAPE ' +
                self.render_literal_value(escape, sqltypes.STRINGTYPE)
                if escape else ''
            )
    def visit_between_op_binary(self, binary, operator, **kw):
        # BETWEEN, optionally SYMMETRIC (operand order-insensitive)
        symmetric = binary.modifiers.get("symmetric", False)
        return self._generate_generic_binary(
            binary, " BETWEEN SYMMETRIC "
            if symmetric else " BETWEEN ", **kw)
    def visit_notbetween_op_binary(self, binary, operator, **kw):
        symmetric = binary.modifiers.get("symmetric", False)
        return self._generate_generic_binary(
            binary, " NOT BETWEEN SYMMETRIC "
            if symmetric else " NOT BETWEEN ", **kw)
    def visit_bindparam(self, bindparam, within_columns_clause=False,
                        literal_binds=False,
                        skip_bind_expression=False,
                        **kwargs):
        # a type-level bind_expression wraps the parameter in SQL; render
        # that wrapper once (skip_bind_expression guards the recursion)
        if not skip_bind_expression and bindparam.type._has_bind_expression:
            bind_expression = bindparam.type.bind_expression(bindparam)
            return self.process(bind_expression,
                                skip_bind_expression=True)
        # render as an inline literal when requested, or when ANSI bind
        # rules disallow a placeholder in the columns clause
        if literal_binds or \
                (within_columns_clause and
                    self.ansi_bind_rules):
            if bindparam.value is None and bindparam.callable is None:
                raise exc.CompileError("Bind parameter '%s' without a "
                                       "renderable value not allowed here."
                                       % bindparam.key)
            return self.render_literal_bindparam(
                bindparam, within_columns_clause=True, **kwargs)
        name = self._truncate_bindparam(bindparam)
        if name in self.binds:
            existing = self.binds[name]
            if existing is not bindparam:
                # two distinct parameters sharing one name: reject when
                # either is unique() or crud-generated
                if (existing.unique or bindparam.unique) and \
                        not existing.proxy_set.intersection(
                            bindparam.proxy_set):
                    raise exc.CompileError(
                        "Bind parameter '%s' conflicts with "
                        "unique bind parameter of the same name" %
                        bindparam.key
                    )
                elif existing._is_crud or bindparam._is_crud:
                    raise exc.CompileError(
                        "bindparam() name '%s' is reserved "
                        "for automatic usage in the VALUES or SET "
                        "clause of this "
                        "insert/update statement. Please use a "
                        "name other than column name when using bindparam() "
                        "with insert() or update() (for example, 'b_%s')." %
                        (bindparam.key, bindparam.key)
                    )
        self.binds[bindparam.key] = self.binds[name] = bindparam
        return self.bindparam_string(name, **kwargs)
    def render_literal_bindparam(self, bindparam, **kw):
        # inline-render a bound parameter's effective value as a literal
        value = bindparam.effective_value
        return self.render_literal_value(value, bindparam.type)
    def render_literal_value(self, value, type_):
        """Render the value of a bind parameter as a quoted literal.
        This is used for statement sections that do not accept bind parameters
        on the target driver/database.
        This should be implemented by subclasses using the quoting services
        of the DBAPI.
        """
        processor = type_._cached_literal_processor(self.dialect)
        if processor:
            return processor(value)
        else:
            raise NotImplementedError(
                "Don't know how to literal-quote value %r" % value)
    def _truncate_bindparam(self, bindparam):
        # return (and memoize) the possibly-truncated render name for a
        # bound parameter
        if bindparam in self.bind_names:
            return self.bind_names[bindparam]
        bind_name = bindparam.key
        if isinstance(bind_name, elements._truncated_label):
            bind_name = self._truncated_identifier("bindparam", bind_name)
        # add to bind_names for translation
        self.bind_names[bindparam] = bind_name
        return bind_name
    def _truncated_identifier(self, ident_class, name):
        # produce an identifier within the dialect's label_length, replacing
        # the tail with a per-class hex counter when too long; results are
        # memoized per (ident_class, name)
        if (ident_class, name) in self.truncated_names:
            return self.truncated_names[(ident_class, name)]
        anonname = name.apply_map(self.anon_map)
        if len(anonname) > self.label_length:
            counter = self.truncated_names.get(ident_class, 1)
            truncname = anonname[0:max(self.label_length - 6, 0)] + \
                "_" + hex(counter)[2:]
            self.truncated_names[ident_class] = counter + 1
        else:
            truncname = anonname
        self.truncated_names[(ident_class, name)] = truncname
        return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(self, name, positional_names=None, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
return self.bindtemplate % {'name': name}
    def visit_cte(self, cte, asfrom=False, ashint=False,
                  fromhints=None,
                  **kwargs):
        # accumulate this CTE's WITH-clause text into self.ctes and return
        # the (possibly aliased) name to render at the point of use
        self._init_cte_state()
        if isinstance(cte.name, elements._truncated_label):
            cte_name = self._truncated_identifier("alias", cte.name)
        else:
            cte_name = cte.name
        if cte_name in self.ctes_by_name:
            existing_cte = self.ctes_by_name[cte_name]
            # we've generated a same-named CTE that we are enclosed in,
            # or this is the same CTE. just return the name.
            if cte in existing_cte._restates or cte is existing_cte:
                return self.preparer.format_alias(cte, cte_name)
            elif existing_cte in cte._restates:
                # we've generated a same-named CTE that is
                # enclosed in us - we take precedence, so
                # discard the text for the "inner".
                del self.ctes[existing_cte]
            else:
                raise exc.CompileError(
                    "Multiple, unrelated CTEs found with "
                    "the same name: %r" %
                    cte_name)
        self.ctes_by_name[cte_name] = cte
        if cte._cte_alias is not None:
            # this CTE aliases another; ensure the original is rendered
            orig_cte = cte._cte_alias
            if orig_cte not in self.ctes:
                self.visit_cte(orig_cte, **kwargs)
            cte_alias_name = cte._cte_alias.name
            if isinstance(cte_alias_name, elements._truncated_label):
                cte_alias_name = self._truncated_identifier(
                    "alias", cte_alias_name)
        else:
            orig_cte = cte
            cte_alias_name = None
        if not cte_alias_name and cte not in self.ctes:
            if cte.recursive:
                self.ctes_recursive = True
            text = self.preparer.format_alias(cte, cte_name)
            if cte.recursive:
                # recursive CTEs require an explicit column list
                if isinstance(cte.original, selectable.Select):
                    col_source = cte.original
                elif isinstance(cte.original, selectable.CompoundSelect):
                    col_source = cte.original.selects[0]
                else:
                    assert False
                recur_cols = [c for c in
                              util.unique_list(col_source.inner_columns)
                              if c is not None]
                text += "(%s)" % (", ".join(
                    self.preparer.format_column(ident)
                    for ident in recur_cols))
            if self.positional:
                # track this CTE's bind positions separately; they are
                # spliced to the front in _render_cte_clause()
                kwargs['positional_names'] = self.cte_positional[cte] = []
            text += " AS \n" + \
                cte.original._compiler_dispatch(
                    self, asfrom=True, **kwargs
                )
            if cte._suffixes:
                text += " " + self._generate_prefixes(
                    cte, cte._suffixes, **kwargs)
            self.ctes[cte] = text
        if asfrom:
            if cte_alias_name:
                text = self.preparer.format_alias(cte, cte_alias_name)
                text += self.get_render_as_alias_suffix(cte_name)
            else:
                return self.preparer.format_alias(cte, cte_name)
            return text
    def visit_alias(self, alias, asfrom=False, ashint=False,
                    iscrud=False,
                    fromhints=None, **kwargs):
        # an alias renders as "<original> AS <name>" in a FROM context,
        # as the bare name when targeted by a hint, and transparently
        # (just its original element) otherwise
        if asfrom or ashint:
            if isinstance(alias.name, elements._truncated_label):
                alias_name = self._truncated_identifier("alias", alias.name)
            else:
                alias_name = alias.name
            if ashint:
                return self.preparer.format_alias(alias, alias_name)
            elif asfrom:
                ret = alias.original._compiler_dispatch(self,
                                                        asfrom=True, **kwargs) + \
                    self.get_render_as_alias_suffix(
                        self.preparer.format_alias(alias, alias_name))
                if fromhints and alias in fromhints:
                    ret = self.format_from_hint_text(ret, alias,
                                                     fromhints[alias], iscrud)
                return ret
        else:
            return alias.original._compiler_dispatch(self, **kwargs)
    def get_render_as_alias_suffix(self, alias_name_text):
        # the " AS <name>" fragment appended after an aliased FROM element
        return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
if not self.dialect.case_sensitive:
keyname = keyname.lower()
self._result_columns.append((keyname, name, objects, type_))
    def _label_select_column(self, select, column,
                             populate_result_map,
                             asfrom, column_clause_args,
                             name=None,
                             within_columns_clause=True):
        """produce labeled columns present in a select()."""
        # a type-level column_expression wraps the column in SQL; make the
        # result map also target the original column
        if column.type._has_column_expression and \
                populate_result_map:
            col_expr = column.type.column_expression(column)
            add_to_result_map = lambda keyname, name, objects, type_: \
                self._add_to_result_map(
                    keyname, name,
                    objects + (column,), type_)
        else:
            col_expr = column
            if populate_result_map:
                add_to_result_map = self._add_to_result_map
            else:
                add_to_result_map = None
        # decide whether and how to apply a label to the expression
        if not within_columns_clause:
            result_expr = col_expr
        elif isinstance(column, elements.Label):
            if col_expr is not column:
                result_expr = _CompileLabel(
                    col_expr,
                    column.name,
                    alt_names=(column.element,)
                )
            else:
                result_expr = col_expr
        elif select is not None and name:
            # an explicit name from the enclosing select's columns map
            result_expr = _CompileLabel(
                col_expr,
                name,
                alt_names=(column._key_label,)
            )
        elif \
                asfrom and \
                isinstance(column, elements.ColumnClause) and \
                not column.is_literal and \
                column.table is not None and \
                not isinstance(column.table, selectable.Select):
            result_expr = _CompileLabel(col_expr,
                                        elements._as_truncated(column.name),
                                        alt_names=(column.key,))
        elif (
            not isinstance(column, elements.TextClause) and
            (
                not isinstance(column, elements.UnaryExpression) or
                column.wraps_column_expression
            ) and
            (
                not hasattr(column, 'name') or
                isinstance(column, functions.Function)
            )
        ):
            # unnamed expression: apply an anonymous label
            result_expr = _CompileLabel(col_expr, column.anon_label)
        elif col_expr is not column:
            # TODO: are we sure "column" has a .name and .key here ?
            # assert isinstance(column, elements.ColumnClause)
            result_expr = _CompileLabel(col_expr,
                                        elements._as_truncated(column.name),
                                        alt_names=(column.key,))
        else:
            result_expr = col_expr
        column_clause_args.update(
            within_columns_clause=within_columns_clause,
            add_to_result_map=add_to_result_map
        )
        return result_expr._compiler_dispatch(
            self,
            **column_clause_args
        )
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
    def _transform_select_for_nested_joins(self, select):
        """Rewrite any "a JOIN (b JOIN c)" expression as
        "a JOIN (select * from b JOIN c) AS anon", to support
        databases that can't parse a parenthesized join correctly
        (i.e. sqlite the main one).
        """
        cloned = {}
        column_translate = [{}]
        def visit(element, **kw):
            # memoized recursive clone; translated columns take precedence
            if element in column_translate[-1]:
                return column_translate[-1][element]
            elif element in cloned:
                return cloned[element]
            newelem = cloned[element] = element._clone()
            if newelem.is_selectable and newelem._is_join and \
                    isinstance(newelem.right, selectable.FromGrouping):
                # the "(b JOIN c)" right side: wrap it in an anonymous
                # labeled subquery and record column translations
                newelem._reset_exported()
                newelem.left = visit(newelem.left, **kw)
                right = visit(newelem.right, **kw)
                selectable_ = selectable.Select(
                    [right.element],
                    use_labels=True).alias()
                for c in selectable_.c:
                    c._key_label = c.key
                    c._label = c.name
                translate_dict = dict(
                    zip(newelem.right.element.c, selectable_.c)
                )
                # translating from both the old and the new
                # because different select() structures will lead us
                # to traverse differently
                translate_dict[right.element.left] = selectable_
                translate_dict[right.element.right] = selectable_
                translate_dict[newelem.right.element.left] = selectable_
                translate_dict[newelem.right.element.right] = selectable_
                # propagate translations that we've gained
                # from nested visit(newelem.right) outwards
                # to the enclosing select here. this happens
                # only when we have more than one level of right
                # join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
                for k, v in list(column_translate[-1].items()):
                    if v in translate_dict:
                        # remarkably, no current ORM tests (May 2013)
                        # hit this condition, only test_join_rewriting
                        # does.
                        column_translate[-1][k] = translate_dict[v]
                column_translate[-1].update(translate_dict)
                newelem.right = selectable_
                newelem.onclause = visit(newelem.onclause, **kw)
            elif newelem._is_from_container:
                # if we hit an Alias, CompoundSelect or ScalarSelect, put a
                # marker in the stack.
                kw['transform_clue'] = 'select_container'
                newelem._copy_internals(clone=visit, **kw)
            elif newelem.is_selectable and newelem._is_select:
                barrier_select = kw.get('transform_clue', None) == \
                    'select_container'
                # if we're still descended from an
                # Alias/CompoundSelect/ScalarSelect, we're
                # in a FROM clause, so start with a new translate collection
                if barrier_select:
                    column_translate.append({})
                kw['transform_clue'] = 'inside_select'
                newelem._copy_internals(clone=visit, **kw)
                if barrier_select:
                    del column_translate[-1]
            else:
                newelem._copy_internals(clone=visit, **kw)
            return newelem
        return visit(select)
    def _transform_result_map_for_nested_joins(
            self, select, transformed_select):
        # re-key the result map produced while compiling the rewritten
        # (join-flattened) select back onto the columns of the original
        # select, matching columns via their _key_label
        inner_col = dict((c._key_label, c) for
                         c in transformed_select.inner_columns)
        d = dict(
            (inner_col[c._key_label], c)
            for c in select.inner_columns
        )
        self._result_columns = [
            (key, name, tuple([d.get(col, col) for col in objs]), typ)
            for key, name, objs, typ in self._result_columns
        ]
    # baseline compile-stack entry used when rendering at the top level,
    # i.e. when self.stack is empty
    _default_stack_entry = util.immutabledict([
        ('correlate_froms', frozenset()),
        ('asfrom_froms', frozenset())
    ])
    def _display_froms_for_select(self, select, asfrom):
        # utility method to help external dialects
        # get the correct from list for a select.
        # specifically the oracle dialect needs this feature
        # right now.
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        correlate_froms = entry['correlate_froms']
        asfrom_froms = entry['asfrom_froms']
        if asfrom:
            # rendering as a subquery: correlate only against enclosing
            # FROMs that are not already our own
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms.difference(
                    asfrom_froms),
                implicit_correlate_froms=())
        else:
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms,
                implicit_correlate_froms=asfrom_froms)
        return froms
    def visit_select(self, select, asfrom=False, parens=True,
                     fromhints=None,
                     compound_index=0,
                     nested_join_translation=False,
                     select_wraps_for=None,
                     **kwargs):
        # for dialects that can't parse right-nested joins, rewrite the
        # statement and compile the rewritten form instead
        needs_nested_translation = \
            select.use_labels and \
            not nested_join_translation and \
            not self.stack and \
            not self.dialect.supports_right_nested_joins
        if needs_nested_translation:
            transformed_select = self._transform_select_for_nested_joins(
                select)
            text = self.visit_select(
                transformed_select, asfrom=asfrom, parens=parens,
                fromhints=fromhints,
                compound_index=compound_index,
                nested_join_translation=True, **kwargs
            )
        toplevel = not self.stack
        entry = self._default_stack_entry if toplevel else self.stack[-1]
        # populate the result map when outermost, first in a compound, or
        # a nested select that requested it
        populate_result_map = toplevel or \
            (
                compound_index == 0 and entry.get(
                    'need_result_map_for_compound', False)
            ) or entry.get('need_result_map_for_nested', False)
        # this was first proposed as part of #3372; however, it is not
        # reached in current tests and could possibly be an assertion
        # instead.
        if not populate_result_map and 'add_to_result_map' in kwargs:
            del kwargs['add_to_result_map']
        if needs_nested_translation:
            # result map was built against the rewritten select; re-key it
            # onto the original, then return the already-rendered text
            if populate_result_map:
                self._transform_result_map_for_nested_joins(
                    select, transformed_select)
            return text
        froms = self._setup_select_stack(select, entry, asfrom)
        column_clause_args = kwargs.copy()
        column_clause_args.update({
            'within_label_clause': False,
            'within_columns_clause': False
        })
        text = "SELECT " # we're off to a good start !
        if select._hints:
            hint_text, byfrom = self._setup_select_hints(select)
            if hint_text:
                text += hint_text + " "
        else:
            byfrom = None
        if select._prefixes:
            text += self._generate_prefixes(
                select, select._prefixes, **kwargs)
        text += self.get_select_precolumns(select, **kwargs)
        # the actual list of columns to print in the SELECT column list.
        inner_columns = [
            c for c in [
                self._label_select_column(
                    select,
                    column,
                    populate_result_map, asfrom,
                    column_clause_args,
                    name=name)
                for name, column in select._columns_plus_names
            ]
            if c is not None
        ]
        if populate_result_map and select_wraps_for is not None:
            # if this select is a compiler-generated wrapper,
            # rewrite the targeted columns in the result map
            wrapped_inner_columns = set(select_wraps_for.inner_columns)
            translate = dict(
                (outer, inner.pop()) for outer, inner in [
                    (
                        outer,
                        outer.proxy_set.intersection(wrapped_inner_columns))
                    for outer in select.inner_columns
                ] if inner
            )
            self._result_columns = [
                (key, name, tuple(translate.get(o, o) for o in obj), type_)
                for key, name, obj, type_ in self._result_columns
            ]
        text = self._compose_select_body(
            text, select, inner_columns, froms, byfrom, kwargs)
        if select._statement_hints:
            # statement-level hints for this dialect (or '*') go at the end
            per_dialect = [
                ht for (dialect_name, ht)
                in select._statement_hints
                if dialect_name in ('*', self.dialect.name)
            ]
            if per_dialect:
                text += " " + self.get_statement_hint_text(per_dialect)
        if self.ctes and toplevel:
            text = self._render_cte_clause() + text
        if select._suffixes:
            text += " " + self._generate_prefixes(
                select, select._suffixes, **kwargs)
        self.stack.pop(-1)
        if asfrom and parens:
            return "(" + text + ")"
        else:
            return text
    def _setup_select_hints(self, select):
        # build a mapping of FROM element -> rendered hint text for hints
        # matching this dialect (or '*'), plus the statement-level hint text
        byfrom = dict([
            (from_, hinttext % {
                'name': from_._compiler_dispatch(
                    self, ashint=True)
            })
            for (from_, dialect), hinttext in
            select._hints.items()
            if dialect in ('*', self.dialect.name)
        ])
        hint_text = self.get_select_hint_text(byfrom)
        return hint_text, byfrom
    def _setup_select_stack(self, select, entry, asfrom):
        # compute the rendered FROM list for this select and push a new
        # compile-stack entry carrying the correlation state downward
        correlate_froms = entry['correlate_froms']
        asfrom_froms = entry['asfrom_froms']
        if asfrom:
            # rendering as a subquery: correlate only against enclosing
            # FROMs that are not already our own
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms.difference(
                    asfrom_froms),
                implicit_correlate_froms=())
        else:
            froms = select._get_display_froms(
                explicit_correlate_froms=correlate_froms,
                implicit_correlate_froms=asfrom_froms)
        new_correlate_froms = set(selectable._from_objects(*froms))
        all_correlate_froms = new_correlate_froms.union(correlate_froms)
        new_entry = {
            'asfrom_froms': new_correlate_froms,
            'correlate_froms': all_correlate_froms,
            'selectable': select,
        }
        self.stack.append(new_entry)
        return froms
def _compose_select_body(
        self, text, select, inner_columns, froms, byfrom, kwargs):
    """Append the column list, FROM, WHERE, GROUP BY, HAVING, ORDER BY,
    LIMIT/OFFSET and FOR UPDATE clauses onto *text* and return it.

    Clause order here defines the rendered SQL order; do not reorder.
    """
    text += ', '.join(inner_columns)
    if froms:
        text += " \nFROM "
        if select._hints:
            # pass the per-FROM hint map down so each FROM element can
            # render its own hint text
            text += ', '.join(
                [f._compiler_dispatch(self, asfrom=True,
                                      fromhints=byfrom, **kwargs)
                 for f in froms])
        else:
            text += ', '.join(
                [f._compiler_dispatch(self, asfrom=True, **kwargs)
                 for f in froms])
    else:
        # no FROM elements; some dialects require a dummy FROM clause
        text += self.default_from()
    if select._whereclause is not None:
        t = select._whereclause._compiler_dispatch(self, **kwargs)
        if t:
            text += " \nWHERE " + t
    if select._group_by_clause.clauses:
        group_by = select._group_by_clause._compiler_dispatch(
            self, **kwargs)
        if group_by:
            text += " GROUP BY " + group_by
    if select._having is not None:
        t = select._having._compiler_dispatch(self, **kwargs)
        if t:
            text += " \nHAVING " + t
    if select._order_by_clause.clauses:
        text += self.order_by_clause(select, **kwargs)
    if (select._limit_clause is not None or
            select._offset_clause is not None):
        text += self.limit_clause(select, **kwargs)
    if select._for_update_arg is not None:
        text += self.for_update_clause(select, **kwargs)
    return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
    """Render the WITH clause for all accumulated CTEs.

    Returns the text to be *prepended* to the main statement.
    """
    if self.positional:
        # positional bind parameters belonging to the CTEs must precede
        # those of the main statement, in CTE declaration order
        self.positiontup = sum([
            self.cte_positional[cte]
            for cte in self.ctes], []) + \
            self.positiontup
    cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
    cte_text += ", \n".join(
        [txt for txt in self.ctes.values()]
    )
    cte_text += "\n "
    return cte_text
def get_cte_preamble(self, recursive):
    """Return the keyword that introduces a CTE list."""
    return "WITH RECURSIVE" if recursive else "WITH"
def get_select_precolumns(self, select, **kw):
    """Called when building a ``SELECT`` statement, position is just
    before column list.
    """
    if select._distinct:
        return "DISTINCT "
    return ""
def order_by_clause(self, select, **kw):
    """Render the ORDER BY clause, or '' when it compiles to nothing."""
    clause = select._order_by_clause._compiler_dispatch(self, **kw)
    return " ORDER BY " + clause if clause else ""
def for_update_clause(self, select, **kw):
    """Return the generic FOR UPDATE suffix; dialects override this."""
    return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
    """Render a RETURNING clause.

    The base compiler has no support; dialects that support RETURNING
    override this method.
    """
    raise exc.CompileError(
        "RETURNING is not supported by this "
        "dialect's statement compiler.")
def limit_clause(self, select, **kw):
    """Render LIMIT/OFFSET.

    An OFFSET without a LIMIT emits ``LIMIT -1`` first, the SQLite-style
    "no limit" sentinel.
    """
    parts = []
    if select._limit_clause is not None:
        parts.append("\n LIMIT " + self.process(select._limit_clause, **kw))
    if select._offset_clause is not None:
        if select._limit_clause is None:
            parts.append("\n LIMIT -1")
        parts.append(" OFFSET " + self.process(select._offset_clause, **kw))
    return "".join(parts)
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
                fromhints=None, **kwargs):
    """Render a table reference when used as a FROM element or hint
    target; a bare column-context table renders as the empty string.
    """
    if not (asfrom or ashint):
        return ""
    schema = getattr(table, "schema", None)
    if schema:
        ret = self.preparer.quote_schema(schema) + \
            "." + self.preparer.quote(table.name)
    else:
        ret = self.preparer.quote(table.name)
    if fromhints and table in fromhints:
        ret = self.format_from_hint_text(ret, table,
                                         fromhints[table], iscrud)
    return ret
def visit_join(self, join, asfrom=False, **kwargs):
    """Render ``left JOIN right ON onclause`` (LEFT OUTER when isouter)."""
    left = join.left._compiler_dispatch(self, asfrom=True, **kwargs)
    right = join.right._compiler_dispatch(self, asfrom=True, **kwargs)
    onclause = join.onclause._compiler_dispatch(self, **kwargs)
    keyword = " LEFT OUTER JOIN " if join.isouter else " JOIN "
    return left + keyword + right + " ON " + onclause
def visit_insert(self, insert_stmt, **kw):
    """Render an INSERT statement, covering the multi-VALUES,
    INSERT..SELECT, DEFAULT VALUES and RETURNING variants.
    """
    # push a fresh compilation frame; INSERT has no correlation context
    self.stack.append(
        {'correlate_froms': set(),
         "asfrom_froms": set(),
         "selectable": insert_stmt})
    self.isinsert = True
    # crud_params: list of (column, value-text) pairs from the crud module
    crud_params = crud._get_crud_params(self, insert_stmt, **kw)
    if not crud_params and \
            not self.dialect.supports_default_values and \
            not self.dialect.supports_empty_insert:
        raise exc.CompileError("The '%s' dialect with current database "
                               "version settings does not support empty "
                               "inserts." %
                               self.dialect.name)
    if insert_stmt._has_multi_parameters:
        if not self.dialect.supports_multivalues_insert:
            raise exc.CompileError(
                "The '%s' dialect with current database "
                "version settings does not support "
                "in-place multirow inserts." %
                self.dialect.name)
        # for multi-VALUES, the column list comes from the first row
        crud_params_single = crud_params[0]
    else:
        crud_params_single = crud_params
    preparer = self.preparer
    supports_default_values = self.dialect.supports_default_values
    text = "INSERT "
    if insert_stmt._prefixes:
        text += self._generate_prefixes(insert_stmt,
                                        insert_stmt._prefixes, **kw)
    text += "INTO "
    table_text = preparer.format_table(insert_stmt.table)
    if insert_stmt._hints:
        # only hints registered for '*' or this dialect apply
        dialect_hints = dict([
            (table, hint_text)
            for (table, dialect), hint_text in
            insert_stmt._hints.items()
            if dialect in ('*', self.dialect.name)
        ])
        if insert_stmt.table in dialect_hints:
            table_text = self.format_from_hint_text(
                table_text,
                insert_stmt.table,
                dialect_hints[insert_stmt.table],
                True
            )
    text += table_text
    if crud_params_single or not supports_default_values:
        text += " (%s)" % ', '.join([preparer.format_column(c[0])
                                     for c in crud_params_single])
    if self.returning or insert_stmt._returning:
        self.returning = self.returning or insert_stmt._returning
        returning_clause = self.returning_clause(
            insert_stmt, self.returning)
        if self.returning_precedes_values:
            text += " " + returning_clause
    if insert_stmt.select is not None:
        # INSERT ... SELECT form
        text += " %s" % self.process(self._insert_from_select, **kw)
    elif not crud_params and supports_default_values:
        text += " DEFAULT VALUES"
    elif insert_stmt._has_multi_parameters:
        # one parenthesized tuple per parameter row
        text += " VALUES %s" % (
            ", ".join(
                "(%s)" % (
                    ', '.join(c[1] for c in crud_param_set)
                )
                for crud_param_set in crud_params
            )
        )
    else:
        text += " VALUES (%s)" % \
            ', '.join([c[1] for c in crud_params])
    if self.returning and not self.returning_precedes_values:
        # returning_clause was assigned above whenever self.returning is set
        text += " " + returning_clause
    self.stack.pop(-1)
    return text
def update_limit_clause(self, update_stmt):
    """Hook for dialects (MySQL) to append LIMIT to an UPDATE; the base
    compiler emits nothing."""
    return None
def update_tables_clause(self, update_stmt, from_table,
                         extra_froms, **kw):
    """Render the initial table clause of an UPDATE statement.

    MySQL overrides this to include the extra FROM elements.
    """
    rendered = from_table._compiler_dispatch(
        self, asfrom=True, iscrud=True, **kw)
    return rendered
def update_from_clause(self, update_stmt,
                       from_table, extra_froms,
                       from_hints,
                       **kw):
    """Render the UPDATE..FROM clause from the extra FROM elements.

    MySQL and MSSQL override this.
    """
    rendered = [
        t._compiler_dispatch(self, asfrom=True,
                             fromhints=from_hints, **kw)
        for t in extra_froms
    ]
    return "FROM " + ', '.join(rendered)
def visit_update(self, update_stmt, **kw):
    """Render an UPDATE statement, including SET, UPDATE..FROM,
    WHERE, dialect LIMIT hooks and RETURNING.
    """
    # the target table participates in its own correlation context
    self.stack.append(
        {'correlate_froms': set([update_stmt.table]),
         "asfrom_froms": set([update_stmt.table]),
         "selectable": update_stmt})
    self.isupdate = True
    extra_froms = update_stmt._extra_froms
    text = "UPDATE "
    if update_stmt._prefixes:
        text += self._generate_prefixes(update_stmt,
                                        update_stmt._prefixes, **kw)
    table_text = self.update_tables_clause(update_stmt, update_stmt.table,
                                           extra_froms, **kw)
    # crud_params: list of (column, value-text) pairs for the SET clause
    crud_params = crud._get_crud_params(self, update_stmt, **kw)
    if update_stmt._hints:
        # only hints registered for '*' or this dialect apply
        dialect_hints = dict([
            (table, hint_text)
            for (table, dialect), hint_text in
            update_stmt._hints.items()
            if dialect in ('*', self.dialect.name)
        ])
        if update_stmt.table in dialect_hints:
            table_text = self.format_from_hint_text(
                table_text,
                update_stmt.table,
                dialect_hints[update_stmt.table],
                True
            )
    else:
        dialect_hints = None
    text += table_text
    text += ' SET '
    # when UPDATE..FROM is in play, some dialects need table-qualified
    # column names on the left side of each assignment
    include_table = extra_froms and \
        self.render_table_with_column_in_update_from
    text += ', '.join(
        c[0]._compiler_dispatch(self,
                                include_table=include_table) +
        '=' + c[1] for c in crud_params
    )
    if self.returning or update_stmt._returning:
        if not self.returning:
            self.returning = update_stmt._returning
        if self.returning_precedes_values:
            text += " " + self.returning_clause(
                update_stmt, self.returning)
    if extra_froms:
        extra_from_text = self.update_from_clause(
            update_stmt,
            update_stmt.table,
            extra_froms,
            dialect_hints, **kw)
        if extra_from_text:
            text += " " + extra_from_text
    if update_stmt._whereclause is not None:
        t = self.process(update_stmt._whereclause)
        if t:
            text += " WHERE " + t
    limit_clause = self.update_limit_clause(update_stmt)
    if limit_clause:
        text += " " + limit_clause
    if self.returning and not self.returning_precedes_values:
        text += " " + self.returning_clause(
            update_stmt, self.returning)
    self.stack.pop(-1)
    return text
@util.memoized_property
def _key_getters_for_crud_column(self):
    # delegated to the crud module; memoized so the getters are built
    # only once per compiler instance
    return crud._key_getters_for_crud_column(self)
def visit_delete(self, delete_stmt, **kw):
    """Render a DELETE statement with optional hints, WHERE and
    RETURNING clauses.
    """
    # the target table participates in its own correlation context
    self.stack.append({'correlate_froms': set([delete_stmt.table]),
                       "asfrom_froms": set([delete_stmt.table]),
                       "selectable": delete_stmt})
    self.isdelete = True
    text = "DELETE "
    if delete_stmt._prefixes:
        text += self._generate_prefixes(delete_stmt,
                                        delete_stmt._prefixes, **kw)
    text += "FROM "
    table_text = delete_stmt.table._compiler_dispatch(
        self, asfrom=True, iscrud=True)
    if delete_stmt._hints:
        # only hints registered for '*' or this dialect apply
        dialect_hints = dict([
            (table, hint_text)
            for (table, dialect), hint_text in
            delete_stmt._hints.items()
            if dialect in ('*', self.dialect.name)
        ])
        if delete_stmt.table in dialect_hints:
            table_text = self.format_from_hint_text(
                table_text,
                delete_stmt.table,
                dialect_hints[delete_stmt.table],
                True
            )
    else:
        dialect_hints = None
    text += table_text
    if delete_stmt._returning:
        self.returning = delete_stmt._returning
        if self.returning_precedes_values:
            text += " " + self.returning_clause(
                delete_stmt, delete_stmt._returning)
    if delete_stmt._whereclause is not None:
        t = delete_stmt._whereclause._compiler_dispatch(self)
        if t:
            text += " WHERE " + t
    if self.returning and not self.returning_precedes_values:
        text += " " + self.returning_clause(
            delete_stmt, delete_stmt._returning)
    self.stack.pop(-1)
    return text
def visit_savepoint(self, savepoint_stmt):
    """Render a SAVEPOINT statement."""
    name = self.preparer.format_savepoint(savepoint_stmt)
    return "SAVEPOINT " + name
def visit_rollback_to_savepoint(self, savepoint_stmt):
    """Render a ROLLBACK TO SAVEPOINT statement."""
    name = self.preparer.format_savepoint(savepoint_stmt)
    return "ROLLBACK TO SAVEPOINT " + name
def visit_release_savepoint(self, savepoint_stmt):
    """Render a RELEASE SAVEPOINT statement."""
    name = self.preparer.format_savepoint(savepoint_stmt)
    return "RELEASE SAVEPOINT " + name
class DDLCompiler(Compiled):
    """Compile DDL constructs (CREATE/DROP TABLE, INDEX, SEQUENCE,
    SCHEMA, constraints) into strings.
    """

    @util.memoized_property
    def sql_compiler(self):
        # a statement compiler used to render embedded SQL expressions
        # (server defaults, CHECK constraints, index expressions)
        return self.dialect.statement_compiler(self.dialect, None)

    @util.memoized_property
    def type_compiler(self):
        return self.dialect.type_compiler

    @property
    def preparer(self):
        return self.dialect.identifier_preparer

    def construct_params(self, params=None):
        # DDL statements carry no bound parameters
        return None

    def visit_ddl(self, ddl, **kwargs):
        # table events can substitute table and schema name
        context = ddl.context
        if isinstance(ddl.target, schema.Table):
            context = context.copy()
            preparer = self.dialect.identifier_preparer
            path = preparer.format_table_seq(ddl.target)
            if len(path) == 1:
                table, sch = path[0], ''
            else:
                table, sch = path[-1], path[0]
            context.setdefault('table', table)
            context.setdefault('schema', sch)
            context.setdefault('fullname', preparer.format_table(ddl.target))
        return self.sql_compiler.post_process_text(ddl.statement % context)

    def visit_create_schema(self, create):
        """Render CREATE SCHEMA."""
        schema = self.preparer.format_schema(create.element)
        return "CREATE SCHEMA " + schema

    def visit_drop_schema(self, drop):
        """Render DROP SCHEMA, optionally with CASCADE."""
        schema = self.preparer.format_schema(drop.element)
        text = "DROP SCHEMA " + schema
        if drop.cascade:
            text += " CASCADE"
        return text

    def visit_create_table(self, create):
        """Render CREATE TABLE with its columns and table constraints."""
        table = create.element
        preparer = self.dialect.identifier_preparer
        text = "\n" + " ".join(['CREATE'] +
                               table._prefixes +
                               ['TABLE',
                                preparer.format_table(table),
                                "("])
        separator = "\n"
        # if only one primary key, specify it along with the column
        first_pk = False
        for create_column in create.columns:
            column = create_column.element
            try:
                processed = self.process(create_column,
                                         first_pk=column.primary_key
                                         and not first_pk)
                if processed is not None:
                    text += separator
                    separator = ", \n"
                    text += "\t" + processed
                if column.primary_key:
                    first_pk = True
            except exc.CompileError as ce:
                # re-raise with table/column context for diagnostics
                util.raise_from_cause(
                    exc.CompileError(
                        util.u("(in table '%s', column '%s'): %s") %
                        (table.description, column.name, ce.args[0])
                    ))
        const = self.create_table_constraints(
            table, _include_foreign_key_constraints=
            create.include_foreign_key_constraints)
        if const:
            text += ", \n\t" + const
        text += "\n)%s\n\n" % self.post_create_table(table)
        return text

    def visit_create_column(self, create, first_pk=False):
        """Render one column definition; system columns render as None
        and are skipped by the caller."""
        column = create.element
        if column.system:
            return None
        text = self.get_column_specification(
            column,
            first_pk=first_pk
        )
        const = " ".join(self.process(constraint)
                         for constraint in column.constraints)
        if const:
            text += " " + const
        return text

    def create_table_constraints(
            self, table,
            _include_foreign_key_constraints=None):
        """Render the table-level constraint list for CREATE TABLE."""
        # On some DB order is significant: visit PK first, then the
        # other constraints (engine.ReflectionTest.testbasic failed on FB2)
        constraints = []
        if table.primary_key:
            constraints.append(table.primary_key)
        all_fkcs = table.foreign_key_constraints
        if _include_foreign_key_constraints is not None:
            omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
        else:
            omit_fkcs = set()
        constraints.extend([c for c in table._sorted_constraints
                            if c is not table.primary_key and
                            c not in omit_fkcs])
        # constraints with a _create_rule that vetoes them, or that are
        # deferred to ALTER on alter-capable dialects, are skipped here
        return ", \n\t".join(
            p for p in
            (self.process(constraint)
             for constraint in constraints
             if (
                 constraint._create_rule is None or
                 constraint._create_rule(self))
             and (
                 not self.dialect.supports_alter or
                 not getattr(constraint, 'use_alter', False)
             )) if p is not None
        )

    def visit_drop_table(self, drop):
        return "\nDROP TABLE " + self.preparer.format_table(drop.element)

    def visit_drop_view(self, drop):
        return "\nDROP VIEW " + self.preparer.format_table(drop.element)

    def _verify_index_table(self, index):
        # an index must be bound to a table before DDL can be emitted
        if index.table is None:
            raise exc.CompileError("Index '%s' is not associated "
                                   "with any table." % index.name)

    def visit_create_index(self, create, include_schema=False,
                           include_table_schema=True):
        """Render CREATE [UNIQUE] INDEX with its expression list."""
        index = create.element
        self._verify_index_table(index)
        preparer = self.preparer
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        text += "INDEX %s ON %s (%s)" \
            % (
                self._prepared_index_name(index,
                                          include_schema=include_schema),
                preparer.format_table(index.table,
                                      use_schema=include_table_schema),
                ', '.join(
                    self.sql_compiler.process(
                        expr, include_table=False, literal_binds=True) for
                    expr in index.expressions)
            )
        return text

    def visit_drop_index(self, drop):
        index = drop.element
        return "\nDROP INDEX " + self._prepared_index_name(
            index, include_schema=True)

    def _prepared_index_name(self, index, include_schema=False):
        """Quote (and if needed truncate) an index name, optionally
        schema-qualified."""
        if include_schema and index.table is not None and index.table.schema:
            schema = index.table.schema
            schema_name = self.preparer.quote_schema(schema)
        else:
            schema_name = None
        ident = index.name
        if isinstance(ident, elements._truncated_label):
            # auto-generated names are truncated deterministically with a
            # trailing md5 fragment so they stay unique
            max_ = self.dialect.max_index_name_length or \
                self.dialect.max_identifier_length
            if len(ident) > max_:
                ident = ident[0:max_ - 8] + \
                    "_" + util.md5_hex(ident)[-4:]
        else:
            self.dialect.validate_identifier(ident)
        index_name = self.preparer.quote(ident)
        if schema_name:
            index_name = schema_name + "." + index_name
        return index_name

    def visit_add_constraint(self, create):
        return "ALTER TABLE %s ADD %s" % (
            self.preparer.format_table(create.element.table),
            self.process(create.element)
        )

    def visit_create_sequence(self, create):
        """Render CREATE SEQUENCE with optional INCREMENT BY / START WITH."""
        text = "CREATE SEQUENCE %s" % \
            self.preparer.format_sequence(create.element)
        if create.element.increment is not None:
            text += " INCREMENT BY %d" % create.element.increment
        if create.element.start is not None:
            text += " START WITH %d" % create.element.start
        return text

    def visit_drop_sequence(self, drop):
        return "DROP SEQUENCE %s" % \
            self.preparer.format_sequence(drop.element)

    def visit_drop_constraint(self, drop):
        constraint = drop.element
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
        else:
            formatted_name = None
        if formatted_name is None:
            raise exc.CompileError(
                "Can't emit DROP CONSTRAINT for constraint %r; "
                "it has no name" % drop.element)
        return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
            self.preparer.format_table(drop.element.table),
            formatted_name,
            drop.cascade and " CASCADE" or ""
        )

    def get_column_specification(self, column, **kwargs):
        """Render '<name> <type> [DEFAULT ...] [NOT NULL]' for a column."""
        colspec = self.preparer.format_column(column) + " " + \
            self.dialect.type_compiler.process(
                column.type, type_expression=column)
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def post_create_table(self, table):
        # hook for dialects to append text after the closing paren
        return ''

    def get_column_default_string(self, column):
        """Render a server default as SQL text, or None when absent."""
        if isinstance(column.server_default, schema.DefaultClause):
            if isinstance(column.server_default.arg, util.string_types):
                return "'%s'" % column.server_default.arg
            else:
                return self.sql_compiler.process(
                    column.server_default.arg, literal_binds=True)
        else:
            return None

    def visit_check_constraint(self, constraint):
        """Render a table-level CHECK constraint."""
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
                                                         include_table=False,
                                                         literal_binds=True)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_column_check_constraint(self, constraint):
        """Render a column-level CHECK constraint; sqltext is emitted
        verbatim here rather than compiled."""
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        text += "CHECK (%s)" % constraint.sqltext
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_primary_key_constraint(self, constraint):
        """Render a PRIMARY KEY constraint; empty constraints render ''. """
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        text += "PRIMARY KEY "
        text += "(%s)" % ', '.join(self.preparer.quote(c.name)
                                   for c in constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def visit_foreign_key_constraint(self, constraint):
        """Render a FOREIGN KEY ... REFERENCES clause with MATCH,
        cascade and deferrability options."""
        preparer = self.dialect.identifier_preparer
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            if formatted_name is not None:
                text += "CONSTRAINT %s " % formatted_name
        # all elements of one FK constraint reference the same table
        remote_table = list(constraint.elements)[0].column.table
        text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
            ', '.join(preparer.quote(f.parent.name)
                      for f in constraint.elements),
            self.define_constraint_remote_table(
                constraint, remote_table, preparer),
            ', '.join(preparer.quote(f.column.name)
                      for f in constraint.elements)
        )
        text += self.define_constraint_match(constraint)
        text += self.define_constraint_cascades(constraint)
        text += self.define_constraint_deferrability(constraint)
        return text

    def define_constraint_remote_table(self, constraint, table, preparer):
        """Format the remote table clause of a CREATE CONSTRAINT clause."""
        return preparer.format_table(table)

    def visit_unique_constraint(self, constraint):
        """Render a UNIQUE constraint; empty constraints render ''. """
        if len(constraint) == 0:
            return ''
        text = ""
        if constraint.name is not None:
            formatted_name = self.preparer.format_constraint(constraint)
            text += "CONSTRAINT %s " % formatted_name
        text += "UNIQUE (%s)" % (
            ', '.join(self.preparer.quote(c.name)
                      for c in constraint))
        text += self.define_constraint_deferrability(constraint)
        return text

    def define_constraint_cascades(self, constraint):
        """Render ON DELETE / ON UPDATE actions."""
        text = ""
        if constraint.ondelete is not None:
            text += " ON DELETE %s" % constraint.ondelete
        if constraint.onupdate is not None:
            text += " ON UPDATE %s" % constraint.onupdate
        return text

    def define_constraint_deferrability(self, constraint):
        """Render DEFERRABLE / NOT DEFERRABLE and INITIALLY options."""
        text = ""
        if constraint.deferrable is not None:
            if constraint.deferrable:
                text += " DEFERRABLE"
            else:
                text += " NOT DEFERRABLE"
        if constraint.initially is not None:
            text += " INITIALLY %s" % constraint.initially
        return text

    def define_constraint_match(self, constraint):
        """Render the MATCH option of a foreign key constraint."""
        text = ""
        if constraint.match is not None:
            text += " MATCH %s" % constraint.match
        return text
class GenericTypeCompiler(TypeCompiler):
    """Render SQL type keywords.

    Uppercase ``visit_XYZ`` methods render the concrete SQL standard
    types; lowercase methods map SQLAlchemy's generic types onto them
    and are the usual dialect override points.
    """

    def visit_FLOAT(self, type_, **kw):
        return "FLOAT"

    def visit_REAL(self, type_, **kw):
        return "REAL"

    def visit_NUMERIC(self, type_, **kw):
        # precision/scale are optional and rendered only when present
        if type_.precision is None:
            return "NUMERIC"
        elif type_.scale is None:
            return "NUMERIC(%(precision)s)" % \
                {'precision': type_.precision}
        else:
            return "NUMERIC(%(precision)s, %(scale)s)" % \
                {'precision': type_.precision,
                 'scale': type_.scale}

    def visit_DECIMAL(self, type_, **kw):
        # same precision/scale handling as NUMERIC
        if type_.precision is None:
            return "DECIMAL"
        elif type_.scale is None:
            return "DECIMAL(%(precision)s)" % \
                {'precision': type_.precision}
        else:
            return "DECIMAL(%(precision)s, %(scale)s)" % \
                {'precision': type_.precision,
                 'scale': type_.scale}

    def visit_INTEGER(self, type_, **kw):
        return "INTEGER"

    def visit_SMALLINT(self, type_, **kw):
        return "SMALLINT"

    def visit_BIGINT(self, type_, **kw):
        return "BIGINT"

    def visit_TIMESTAMP(self, type_, **kw):
        return 'TIMESTAMP'

    def visit_DATETIME(self, type_, **kw):
        return "DATETIME"

    def visit_DATE(self, type_, **kw):
        return "DATE"

    def visit_TIME(self, type_, **kw):
        return "TIME"

    def visit_CLOB(self, type_, **kw):
        return "CLOB"

    def visit_NCLOB(self, type_, **kw):
        return "NCLOB"

    def _render_string_type(self, type_, name):
        # shared renderer for CHAR/VARCHAR-family: optional length and
        # optional COLLATE clause
        text = name
        if type_.length:
            text += "(%d)" % type_.length
        if type_.collation:
            text += ' COLLATE "%s"' % type_.collation
        return text

    def visit_CHAR(self, type_, **kw):
        return self._render_string_type(type_, "CHAR")

    def visit_NCHAR(self, type_, **kw):
        return self._render_string_type(type_, "NCHAR")

    def visit_VARCHAR(self, type_, **kw):
        return self._render_string_type(type_, "VARCHAR")

    def visit_NVARCHAR(self, type_, **kw):
        return self._render_string_type(type_, "NVARCHAR")

    def visit_TEXT(self, type_, **kw):
        return self._render_string_type(type_, "TEXT")

    def visit_BLOB(self, type_, **kw):
        return "BLOB"

    def visit_BINARY(self, type_, **kw):
        return "BINARY" + (type_.length and "(%d)" % type_.length or "")

    def visit_VARBINARY(self, type_, **kw):
        return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")

    def visit_BOOLEAN(self, type_, **kw):
        return "BOOLEAN"

    # generic (lowercase) types delegate to their SQL-standard renderer

    def visit_large_binary(self, type_, **kw):
        return self.visit_BLOB(type_, **kw)

    def visit_boolean(self, type_, **kw):
        return self.visit_BOOLEAN(type_, **kw)

    def visit_time(self, type_, **kw):
        return self.visit_TIME(type_, **kw)

    def visit_datetime(self, type_, **kw):
        return self.visit_DATETIME(type_, **kw)

    def visit_date(self, type_, **kw):
        return self.visit_DATE(type_, **kw)

    def visit_big_integer(self, type_, **kw):
        return self.visit_BIGINT(type_, **kw)

    def visit_small_integer(self, type_, **kw):
        return self.visit_SMALLINT(type_, **kw)

    def visit_integer(self, type_, **kw):
        return self.visit_INTEGER(type_, **kw)

    def visit_real(self, type_, **kw):
        return self.visit_REAL(type_, **kw)

    def visit_float(self, type_, **kw):
        return self.visit_FLOAT(type_, **kw)

    def visit_numeric(self, type_, **kw):
        return self.visit_NUMERIC(type_, **kw)

    def visit_string(self, type_, **kw):
        return self.visit_VARCHAR(type_, **kw)

    def visit_unicode(self, type_, **kw):
        return self.visit_VARCHAR(type_, **kw)

    def visit_text(self, type_, **kw):
        return self.visit_TEXT(type_, **kw)

    def visit_unicode_text(self, type_, **kw):
        return self.visit_TEXT(type_, **kw)

    def visit_enum(self, type_, **kw):
        # generic Enum renders as VARCHAR; dialects with native enums override
        return self.visit_VARCHAR(type_, **kw)

    def visit_null(self, type_, **kw):
        raise exc.CompileError("Can't generate DDL for %r; "
                               "did you forget to specify a "
                               "type on this Column?" % type_)

    def visit_type_decorator(self, type_, **kw):
        # unwrap TypeDecorator to its dialect-specific implementation
        return self.process(type_.type_engine(self.dialect), **kw)

    def visit_user_defined(self, type_, **kw):
        return type_.get_col_spec(**kw)
class IdentifierPreparer(object):

    """Handle quoting and case-folding of identifiers based on options."""

    # identifiers in this set always require quoting
    reserved_words = RESERVED_WORDS

    # regex matching identifiers that need no quoting
    legal_characters = LEGAL_CHARACTERS

    # characters that may not begin an unquoted identifier
    illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS

    def __init__(self, dialect, initial_quote='"',
                 final_quote=None, escape_quote='"', omit_schema=False):
        """Construct a new ``IdentifierPreparer`` object.

        initial_quote
          Character that begins a delimited identifier.

        final_quote
          Character that ends a delimited identifier. Defaults to
          `initial_quote`.

        escape_quote
          Character used to escape an embedded quote character
          (doubled when escaping).

        omit_schema
          Prevent prepending schema name. Useful for databases that do
          not support schemae.
        """
        self.dialect = dialect
        self.initial_quote = initial_quote
        self.final_quote = final_quote or self.initial_quote
        self.escape_quote = escape_quote
        self.escape_to_quote = self.escape_quote * 2
        self.omit_schema = omit_schema
        # cache of identifier -> quoted form, built lazily by quote()
        self._strings = {}

    def _escape_identifier(self, value):
        """Escape an identifier.

        Subclasses should override this to provide database-dependent
        escaping behavior.
        """
        return value.replace(self.escape_quote, self.escape_to_quote)

    def _unescape_identifier(self, value):
        """Canonicalize an escaped identifier.

        Subclasses should override this to provide database-dependent
        unescaping behavior that reverses _escape_identifier.
        """
        return value.replace(self.escape_to_quote, self.escape_quote)

    def quote_identifier(self, value):
        """Quote an identifier.

        Subclasses should override this to provide database-dependent
        quoting behavior.
        """
        return self.initial_quote + \
            self._escape_identifier(value) + \
            self.final_quote

    def _requires_quotes(self, value):
        """Return True if the given identifier requires quoting."""
        lc_value = value.lower()
        return (lc_value in self.reserved_words
                or value[0] in self.illegal_initial_characters
                or not self.legal_characters.match(util.text_type(value))
                # mixed case requires quoting to preserve case
                or (lc_value != value))

    def quote_schema(self, schema, force=None):
        """Conditionally quote a schema.

        Subclasses can override this to provide database-dependent
        quoting behavior for schema names.

        the 'force' flag should be considered deprecated.

        """
        return self.quote(schema, force)

    def quote(self, ident, force=None):
        """Conditionally quote an identifier.

        the 'force' flag should be considered deprecated.

        """
        # the passed 'force' is deliberately discarded; the identifier's
        # own .quote attribute (quoted_name) now controls forcing
        force = getattr(ident, "quote", None)

        if force is None:
            if ident in self._strings:
                return self._strings[ident]
            else:
                if self._requires_quotes(ident):
                    self._strings[ident] = self.quote_identifier(ident)
                else:
                    self._strings[ident] = ident
                return self._strings[ident]
        elif force:
            return self.quote_identifier(ident)
        else:
            return ident

    def format_sequence(self, sequence, use_schema=True):
        """Quote a sequence name, optionally schema-qualified."""
        name = self.quote(sequence.name)
        if (not self.omit_schema and use_schema and
                sequence.schema is not None):
            name = self.quote_schema(sequence.schema) + "." + name
        return name

    def format_label(self, label, name=None):
        return self.quote(name or label.name)

    def format_alias(self, alias, name=None):
        return self.quote(name or alias.name)

    def format_savepoint(self, savepoint, name=None):
        return self.quote(name or savepoint.ident)

    @util.dependencies("sqlalchemy.sql.naming")
    def format_constraint(self, naming, constraint):
        """Quote a constraint name, resolving deferred naming-convention
        names; returns None for names deferred to 'no name'."""
        if isinstance(constraint.name, elements._defer_name):
            name = naming._constraint_name_for_table(
                constraint, constraint.table)
            if name:
                return self.quote(name)
            elif isinstance(constraint.name, elements._defer_none_name):
                return None
        return self.quote(constraint.name)

    def format_table(self, table, use_schema=True, name=None):
        """Prepare a quoted table and schema name."""

        if name is None:
            name = table.name
        result = self.quote(name)
        if not self.omit_schema and use_schema \
                and getattr(table, "schema", None):
            result = self.quote_schema(table.schema) + "." + result
        return result

    def format_schema(self, name, quote=None):
        """Prepare a quoted schema name."""

        return self.quote(name, quote)

    def format_column(self, column, use_table=False,
                      name=None, table_name=None):
        """Prepare a quoted column name."""

        if name is None:
            name = column.name
        if not getattr(column, 'is_literal', False):
            if use_table:
                return self.format_table(
                    column.table, use_schema=False,
                    name=table_name) + "." + self.quote(name)
            else:
                return self.quote(name)
        else:
            # literal textual elements get stuck into ColumnClause a lot,
            # which shouldn't get quoted
            if use_table:
                return self.format_table(
                    column.table, use_schema=False,
                    name=table_name) + '.' + name
            else:
                return name

    def format_table_seq(self, table, use_schema=True):
        """Format table name and schema as a tuple."""

        # Dialects with more levels in their fully qualified references
        # ('database', 'owner', etc.) could override this and return
        # a longer sequence.
        if not self.omit_schema and use_schema and \
                getattr(table, 'schema', None):
            return (self.quote_schema(table.schema),
                    self.format_table(table, use_schema=False))
        else:
            return (self.format_table(table, use_schema=False), )

    @util.memoized_property
    def _r_identifiers(self):
        # regex that splits dotted, possibly-quoted identifier chains
        initial, final, escaped_final = \
            [re.escape(s) for s in
             (self.initial_quote, self.final_quote,
              self._escape_identifier(self.final_quote))]
        r = re.compile(
            r'(?:'
            r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
            r'|([^\.]+))(?=\.|$))+' %
            {'initial': initial,
             'final': final,
             'escaped': escaped_final})
        return r

    def unformat_identifiers(self, identifiers):
        """Unpack 'schema.table.column'-like strings into components."""

        r = self._r_identifiers
        return [self._unescape_identifier(i)
                for i in [a or b for a, b in r.findall(identifiers)]]
| ppmt/Crust | flask/lib/python2.7/site-packages/sqlalchemy/sql/compiler.py | Python | gpl-2.0 | 99,638 | [
"VisIt"
] | 27486f76c83eac46e518b3d99ebd9a57e8441a6855f34e06715583a988e86be3 |
import os
import os.path
import random
import re
import stat
import subprocess
import sys
import time
from html.parser import HTMLParser
def out(message, *args, **kwargs):
    """Format *message* with the given arguments and print it to stdout."""
    formatted = message.format(*args, **kwargs)
    print(formatted)
def log(message, *args, **kwargs):
    """Format *message* with the given arguments and print it to stderr."""
    formatted = message.format(*args, **kwargs)
    print(formatted, file=sys.stderr)
def err(*args, **kwargs):
    """Log a formatted message to stderr, then terminate the process."""
    log(*args, **kwargs)
    # exit with default status (None -> success code); callers treat this
    # as a fatal-error abort
    sys.exit()
def int2str(n):
    """Return *n* rendered with comma thousands separators.

    The original hand-rolled ``divmod`` version emitted only one comma
    group, so any value >= 1,000,000 rendered incorrectly (e.g.
    ``1234567`` became ``'1234,567'``).  The ``','`` format option groups
    every three digits and also handles negative values.
    """
    return '{:,}'.format(n)
class TableParser(HTMLParser):
    """Extract the first *numTables* HTML tables from a file.

    After construction, ``self.tables`` is a list of tables, each a list
    of rows, each row a list of cell-content strings (cell HTML is
    partially preserved: non-glossary <a href> links keep their href,
    all other tags are stripped of attributes).  Parsing starts at the
    table following *startTag* (optionally matched on attributes).
    """

    def handle_starttag(self, tag, attributes):
        if self.tableDepth == 0:
            # outside any table: first wait for the configured start
            # marker, then latch onto the next <table>
            if tag == self.startTag and (
                    self.startTagAttributes is None or
                    self.startTagAttributes == dict(attributes)):
                self.startTag = None
            if tag == "table" and self.startTag is None:
                self.tableDepth = 1
                self.tableRows = []
                self.currentRow = None
                self.currentCol = None
        else:
            if tag == "td":
                # only cells of the outermost table open a new column
                if self.tableDepth == 1:
                    if self.currentCol is not None:
                        self.currentRow.append(self.currentCol)
                    self.currentCol = ""
                return
            elif tag == "tr":
                if self.tableDepth == 1:
                    # flush any open column/row before starting a new row
                    if self.currentCol is not None:
                        self.currentRow.append(self.currentCol)
                        self.currentCol = None
                    if self.currentRow:
                        self.tableRows.append(self.currentRow)
                    self.currentRow = []
                return
            elif tag == "table":
                # nested table: track depth so its cells are treated as
                # literal content of the current cell
                self.tableDepth += 1
            if self.currentCol is not None:
                if tag == "a":
                    # keep only non-glossary hyperlink targets
                    attributes = [(k, v) for k, v in attributes
                                  if k == "href" and "/glossary" not in v]
                else:
                    attributes = []
                self.currentCol += "<{}{}>".format(tag,
                    "".join([" {}=\"{}\"".format(k, v) for k, v in attributes]))

    def handle_endtag(self, tag):
        if self.tableDepth == 0:
            return
        if tag == "td":
            if self.tableDepth == 1:
                if self.currentCol is not None:
                    self.currentRow.append(self.currentCol)
                    self.currentCol = None
                return
        elif tag == "tr":
            if self.tableDepth == 1:
                if self.currentCol is not None:
                    self.currentRow.append(self.currentCol)
                    self.currentCol = None
                if self.currentRow:
                    self.tableRows.append(self.currentRow)
                    self.currentRow = None
                return
        elif tag == "table":
            self.tableDepth -= 1
            if self.tableDepth == 0:
                # outermost table closed: flush pending cell/row and
                # record the finished table
                if self.currentCol is not None:
                    self.currentRow.append(self.currentCol)
                    self.currentCol = None
                if self.currentRow:
                    self.tableRows.append(self.currentRow)
                    self.currentRow = None
                self.tables.append(self.tableRows)
                self.numTables -= 1
                return
        if self.currentCol is not None:
            self.currentCol += "</{}>\n".format(tag)

    def handle_startendtag(self, tag, attributes):
        # self-closing tags inside a cell are kept with attributes dropped
        if self.tableDepth > 0:
            if self.currentCol is not None:
                attributes = []
                self.currentCol += "<{}{}/>".format(tag,
                    "".join([" {}=\"{}\"".format(k, v) for k, v in attributes]))

    def handle_data(self, data):
        if self.tableDepth > 0:
            if self.currentCol is not None:
                self.currentCol += data

    def handle_entityref(self, name):
        # preserve &ntilde; literally instead of decoding it
        if name == 'ntilde':
            self.handle_data('&' + name + ';')

    def __init__(self, fileName, numTables=1, startTag="table", startTagAttributes=None):
        # NOTE(review): HTMLParser.__init__ is never called; the parser is
        # initialized via reset() with convert_charrefs set manually —
        # confirm this stays compatible with the html.parser version in use.
        self.convert_charrefs = False
        self.reset()
        self.numTables = numTables
        self.startTag = startTag
        self.startTagAttributes = startTagAttributes
        self.tableDepth = 0
        self.tables = []
        fileObj = open(fileName, encoding="ascii", errors="backslashreplace")
        # feed in 1000-char chunks and stop as soon as enough tables
        # have been collected
        while self.numTables > 0:
            bytes = fileObj.read(1000)
            if bytes == "":
                break
            self.feed(bytes)
        fileObj.close()
class TableReader(object):
    """Iterator over the rows of the first matching <table> in an HTML file.

    Scans the file lazily with a small string buffer (see readUntil), finds
    the opening <table> tag (optionally matching *tableAttributes*), then
    yields one list of cell strings per <tr>.  Errors are reported through
    the module-level err() with file/row/column context.
    """
    def err(self, message, *args, **kwargs):
        # Close the file and report a parse error with positional context.
        self.fileObj.close()
        # Work out which tag we were positioned after, for the error text.
        if self.rowNum == 0:
            prevTag = "<{}>".format(self.TABLE)
        elif self.colNum == 0:
            prevTag = self.endTag_TR
        elif self.colNum == 1:
            prevTag = self.tag_TR
        else:
            prevTag = self.colEndTag
        # Prefix: base file name plus row/column numbers when known.
        prefix = self.fileObj.name
        i = prefix.rfind("/")
        if i >= 0:
            prefix = prefix[i + 1:]
        if self.rowNum > 0:
            prefix += ", row " + str(self.rowNum)
        if self.colNum > 0:
            prefix += ", column " + str(self.colNum)
        prefix += ": "
        # Delegates to the module-level err() defined elsewhere in this file.
        err(prefix + message, prevTag=prevTag, *args, **kwargs)
    def __init__(self, fileName, upper=False, tableAttributes=None):
        # upper=True parses files whose tags are upper-case (TABLE/TR/TD/TH).
        self.bytes = ""
        self.rowNum = 0
        self.colNum = 0
        self.fileObj = open(fileName, encoding="ascii", errors="backslashreplace")
        if upper:
            self.TABLE, self.TR, self.TD, self.TH = ("TABLE", "TR", "TD", "TH")
        else:
            self.TABLE, self.TR, self.TD, self.TH = ("table", "tr", "td", "th")
        self.tag_TR = "<" + self.TR + ">"
        self.endTag_TR = "</" + self.TR + ">"
        self.endTag_TD = "</" + self.TD + ">"
        self.endTag_TH = "</" + self.TH + ">"
        # Skip ahead to the first <table> whose attribute string matches
        # tableAttributes (None = accept any table).
        while True:
            self.readUntil("<" + self.TABLE, discard=True)
            b = self.readUntil(">", errMsg="Can't find '>' for {prevTag}").rstrip()
            if tableAttributes is None:
                break
            if len(b) == 0:
                if tableAttributes == "":
                    break
            elif b[0] in " \n\r\t":
                if tableAttributes == b.lstrip():
                    break
    def __iter__(self):
        return self
    def readUntil(self, untilStr, errMsg="Can't find '{untilStr}'", bufSize=1000, discard=False):
        """Consume input up to and including *untilStr*.

        Returns the text preceding untilStr (or None when discard=True).
        Reads the file in bufSize chunks, keeping an overlap of
        len(untilStr)-1 characters so a match spanning a chunk boundary is
        still found.  Calls self.err (which raises/exits) at EOF.
        """
        bytes = self.bytes
        bytesLen = len(bytes)
        untilLen = len(untilStr)
        start = 0
        while True:
            i = bytes.find(untilStr, start)
            if i >= 0:
                self.bytes = bytes[i + untilLen:]
                return None if discard else bytes[:i]
            newbytes = self.fileObj.read(bufSize)
            if newbytes == "":
                self.err(errMsg, untilStr=untilStr)
            # Resume the search just before the tail that might contain a
            # partial match of untilStr.
            start = bytesLen - untilLen + 1
            if start < 0:
                start = 0
            if discard:
                # Drop the already-searched prefix to bound memory use.
                # NOTE(review): bytesLen is advanced by bufSize even when the
                # final read returns fewer characters; it is only used to
                # compute the conservative overlap, so this appears harmless.
                bytes = bytes[start:] + newbytes
                bytesLen += bufSize - start
                start = 0
            else:
                bytes += newbytes
                bytesLen += bufSize
    def __next__(self):
        """Return the next row as a list of cell strings, or raise StopIteration."""
        if self.fileObj is None:
            raise StopIteration()
        # Expect either the next <tr ...> or the closing </table>.
        b = self.readUntil("<", errMsg="Can't find '<' for next tag after {prevTag}")
        if b.strip() != "":
            self.err("Expected only whitespace between {prevTag} and next tag")
        b = self.readUntil(">", errMsg="Can't find '>' for next tag after {prevTag}")
        if len(b) >= 2 and b[:2] == self.TR and (len(b) == 2 or b[2] == " "):
            self.rowNum += 1
        elif b == "/" + self.TABLE:
            self.fileObj.close()
            self.fileObj = None
            raise StopIteration()
        else:
            self.err("Expected either {} or </{}> after {prevTag}", self.tag_TR, self.TABLE)
        row = self.readUntil(self.endTag_TR).strip()
        # Some pages nest an extra <tr>; strip any leading repeats.
        while row.startswith(self.tag_TR):
            row = row[len(self.tag_TR):].lstrip()
        columns = []
        while row != "":
            self.colNum += 1
            i = row.find("<")
            if i < 0:
                self.err("Can't find '<' for next tag after {prevTag}")
            if row[:i].strip() != "":
                self.err("Expected only whitespace between {prevTag} and next tag")
            row = row[i + 1:]
            i = row.find(">")
            if i < 0:
                self.err("Can't find '>' for next tag after {prevTag}")
            tag = row[:i]
            row = row[i + 1:]
            # Drop any attributes from the cell tag.
            i = tag.find(" ")
            if i >= 0:
                tag = tag[:i]
            if tag == self.TD:
                endTag = self.endTag_TD
            elif tag == self.TH:
                endTag = self.endTag_TH
            else:
                self.err("Expected either <{}> or <{}> after {prevTag}", self.TD, self.TH)
            i = row.find(endTag)
            if i < 0:
                self.err("Can't find {}", endTag)
            self.colEndTag = endTag
            col = row[:i]
            # Both "</td>" and "</th>" (and their upper-case forms) are 5 chars.
            row = row[i + 5:]
            # NOTE(review): the 6-character slices below don't match a
            # 1-character " " prefix/suffix test; the literals were
            # presumably "&nbsp;" (6 chars) before entity decoding mangled
            # this file -- confirm against the original source.
            while col.startswith(" "):
                col = col[6:]
            while col.endswith(" "):
                col = col[:-6]
            columns.append(col.replace("'", ""))
        self.colNum = 0
        return columns
class RE(object):
    """Precompiled regular expressions shared across the scrapers."""
    numLT1k = re.compile(r'^[1-9][0-9]{0,2}$')       # 1-999, no separator
    numGE1k = re.compile(r'^[1-9][0-9]?,[0-9]{3}$')  # 1,000-99,999 with comma
    numLT10k = re.compile(r'^[1-9][0-9]{0,3}$')      # 1-9999, no separator
    numGE10k = re.compile(r'^[1-9][0-9],[0-9]{3}$')  # 10,000-99,999 with comma
    htmlTag = re.compile(r'<[^>]*>')                 # any single HTML tag
    whitespace = re.compile(r'\s{2,}')               # runs of 2+ spaces
    nonAlphaNum = re.compile(r'[^-0-9A-Za-z]')       # anything but [-0-9A-Za-z]
def toFeet(meters):
    """Convert meters to feet, rounded to the nearest whole foot."""
    feet = meters / 0.3048
    return int(feet + 0.5)
def toFeetRoundDown(meters):
    """Convert meters to feet, truncating toward zero."""
    feet = meters / 0.3048
    return int(feet)
def toMeters(feet):
    """Convert feet to meters, rounded to the nearest whole meter."""
    meters = feet * 0.3048
    return int(meters + 0.5)
def str2IntLoJ(s, description, peakName):
    """Parse a Lists-of-John number string ("0", "999", or "12,345") to int.

    Reports a fatal error via err() when the string matches no pattern.
    """
    if RE.numLT1k.match(s):
        return int(s)
    if RE.numGE1k.match(s):
        thousands, rest = s.split(",")
        return int(thousands) * 1000 + int(rest)
    if s == "0":
        return 0
    err("{} '{}' ({}) doesn't match expected pattern", description, s, peakName)
def str2IntPb(s, description, peak):
    """Parse a Peakbagger number string ("0", "9999", or "12,345") to int.

    Reports a fatal error via err() when the string matches no pattern.
    """
    if RE.numLT10k.match(s):
        return int(s)
    if RE.numGE10k.match(s):
        thousands, rest = s.split(",")
        return int(thousands) * 1000 + int(rest)
    if s == "0":
        return 0
    err("{} {} doesn't match expected pattern: {}", peak.fmtIdName, description, s)
class ObjectDiff(object):
    """Attribute-level comparison of two objects (via vars()).

    Records which attributes exist only on one side and which common
    attributes differ in value (ignoring names in *allowNotEq*).
    Truthy when any difference was found.
    """
    def __init__(self, a, b, allowNotEq=()):
        attrsA = vars(a)
        attrsB = vars(b)
        self.notEq = [k for k, v in attrsA.items()
                      if k in attrsB and v != attrsB[k] and k not in allowNotEq]
        self.onlyA = [k for k in attrsA if k not in attrsB]
        self.onlyB = [k for k in attrsB if k not in attrsA]
    def __bool__(self):
        return any((self.notEq, self.onlyA, self.onlyB))
    def message(self, nameA, nameB, suffix=""):
        """Human-readable summary of the differences (or sameness)."""
        if not self:
            return "Objects {} and {} are the same{}".format(nameA, nameB, suffix)
        lines = ["Objects {} and {} are different{}".format(nameA, nameB, suffix)]
        if self.onlyA:
            lines.append("Only {} has these attributes: {}".format(nameA, ", ".join(self.onlyA)))
        if self.onlyB:
            lines.append("Only {} has these attributes: {}".format(nameB, ", ".join(self.onlyB)))
        if self.notEq:
            lines.append("These attributes have different values: " + ", ".join(self.notEq))
        return "\n".join(lines)
class ElevationPb(object):
    """Peakbagger elevation: a min/max pair of feet values.

    isRange is set when the two differ; __str__ appends "+" in that case.
    """
    classId = "Pb"
    def __init__(self, minFeet, maxFeet):
        self.feet = minFeet
        self.maxFeet = maxFeet
        self.isRange = minFeet < maxFeet
    def __str__(self):
        suffix = "+" if self.isRange else ""
        return int2str(self.feet) + suffix
    def __eq__(self, other):
        return (self.feet, self.maxFeet) == (other.feet, other.maxFeet)
    def __ne__(self, other):
        return (self.feet, self.maxFeet) != (other.feet, other.maxFeet)
    def diff(self, e):
        """Difference vs. a table peak *e* (expects e.elevationFeet, e.isRange)."""
        mismatch = "" if self.isRange == e.isRange else " and range mismatch"
        return "({}){}".format(self.feet - e.elevationFeet, mismatch)
    html = __str__
class SimpleElevation(object):
    """Single-valued elevation (feet) with equality and diff helpers."""
    def __init__(self, feet):
        self.feet = feet
    def __str__(self):
        return int2str(self.feet)
    def __eq__(self, other):
        return self.feet == other.feet
    def __ne__(self, other):
        return not (self.feet == other.feet)
    def diff(self, e):
        """Signed difference vs. a table peak *e* (expects e.elevationFeet)."""
        delta = self.feet - e.elevationFeet
        return "({})".format(delta)
    html = __str__
class ElevationLoJ(SimpleElevation):
    # Elevation as reported by Lists of John.
    classId = "LoJ"
class ElevationVR(SimpleElevation):
    # Elevation as reported by the "VR" source.
    classId = "VR"
# Country display names -> short codes used internally.
countryNameMap = {
    "Mexico": "MX",
    "United States": "US",
}
# State/province display names -> codes; Mexican states carry an "MX-"
# prefix, US states use the bare two-letter postal code.
stateNameMap = {
    "Baja California": "MX-BCN",
    "Sonora": "MX-SON",
    "Arizona": "AZ",
    "California": "CA",
    "Colorado": "CO",
    "Idaho": "ID",
    "Montana": "MT",
    "Nevada": "NV",
    "New Mexico": "NM",
    "Oregon": "OR",
    "Utah": "UT",
    "Washington": "WA",
    "Wyoming": "WY",
}
class LandMgmtArea(object):
    """Base class for a land-management area referenced by peaks.

    Subclasses (LandMgmtAreaPb/LandMgmtAreaLoJ) supply areaLookup,
    highPointSuffix, and normalizeName.  Tracks how many peaks reference
    the area and which peak is its high point.
    """
    # Wilderness name (without " Wilderness") -> the NPS unit designation,
    # or a tuple of full park names when the wilderness spans several units.
    npsWilderness = {
        "Death Valley": "National Park",
        "Joshua Tree": "National Park",
        "Lassen Volcanic": "National Park",
        "Mojave": "National Preserve",
        "Organ Pipe Cactus": "NM",
        "Pinnacles": "National Park",
        "Sequoia-Kings Canyon": ("Kings Canyon National Park", "Sequoia National Park"),
        "Yosemite": "National Park",
        "Zion": "National Park",
    }
    def __init__(self, name):
        self.count = 0
        self.name = name
        self.highPoint = None
        self.nps = None
        if name.endswith(" Wilderness"):
            park = self.npsWilderness.get(name[:-11])
            if park is not None:
                # Note the precedence: nps is either "<base> <park>" (str)
                # or the tuple of full park names unchanged.
                self.nps = name[:-11] + " " + park if isinstance(park, str) else park
    @classmethod
    def add(self, name, peak, isHighPoint=False):
        """Register *peak* in the (shared, per-subclass) area named *name*.

        Creates the area on first reference; when isHighPoint is set, keeps
        the highest of the claimed high points and logs discrepancies.
        Returns the area object.
        """
        area = self.areaLookup.get(name)
        if area is None:
            self.areaLookup[name] = area = self(name)
        area.count += 1
        if isHighPoint:
            hp = area.highPoint
            if hp is None:
                area.highPoint = peak
            elif peak.elevation.feet > hp.elevation.feet:
                out("{} Overriding HP for {} (> {})", peak.fmtIdName, name, hp.name)
                area.highPoint = peak
            elif peak.elevation.feet < hp.elevation.feet:
                out("{} Ignoring HP for {} (< {})", peak.fmtIdName, name, hp.name)
            else:
                out("{} Ignoring HP for {} (= {})", peak.fmtIdName, name, hp.name)
        return area
    @classmethod
    def addAll(self, peak):
        """Convert peak.landManagement from name strings to area objects.

        High-point suffixes are stripped and recorded, names are normalized
        per subclass, duplicates are logged, and NPS-related areas are moved
        to the front of the list.
        """
        landAreas = []
        highPointSuffix = self.highPointSuffix
        highPointSuffixLen = len(highPointSuffix)
        for name in peak.landManagement:
            isHighPoint = name.endswith(highPointSuffix)
            if isHighPoint:
                name = name[:-highPointSuffixLen]
            name = self.normalizeName(name, peak)
            area = self.add(name, peak, isHighPoint)
            if area in landAreas:
                out("{} Duplicate land '{}'", peak.fmtIdName, name)
            elif area.nps:
                landAreas.insert(0, area)
            else:
                landAreas.append(area)
        peak.landManagement = landAreas
class LandMgmtAreaPb(LandMgmtArea):
    """Land-management areas as named on Peakbagger."""
    highPointSuffix = " (Highest Point)"
    # Shared registry of all areas created via add() for this source.
    areaLookup = {}
    # Peakbagger spelling -> the canonical name used by this tool.
    nameLookup = {
        "Antelope Valley California Poppy Reserve "
            "State Natural Reserve": "Antelope Valley California Poppy Reserve",
        "Desert National Wildlife Range": "Desert National Wildlife Refuge",
        "Giant Sequoia NM": "Giant Sequoia National Monument",
        "Hart Mountain National Antelope Refuge": "Hart Mountain NAR",
        "Hawthorne Army Ammunition Depot": "Hawthorne Army Depot",
        "Indian Peak State Game Management Area": "Indian Peaks WMA",
        "Lake Mead National Recreation Area": "Lake Mead NRA",
        "Lake Tahoe State Park": "Lake Tahoe Nevada State Park",
        "Mono Basin NSA": "Mono Basin National Forest Scenic Area",
        "Mount Eddy RNA": "Mount Eddy Research Natural Area",
        "Mount Saint Helens National Volcanic Monument":"Mount St. Helens National Volcanic Monument",
        "Mount Saint Helens NVM": "Mount St. Helens National Volcanic Monument",
        "Organ Pipe Cactus National Monument": "Organ Pipe Cactus NM",
        "Providence Mountains State Recreation Area": "Providence Mountains SRA",
        "Red Rock Canyon National Conservation Area": "Red Rock Canyon NCA",
        "Steens Mountain National Recreation Lands": "Steens Mountain CMPA",
    }
    @classmethod
    def normalizeName(self, name, peak):
        # "X Wilderness Area" -> "X Wilderness", then apply the alias table.
        if name.endswith(" Wilderness Area"):
            name = name[:-5]
        return self.nameLookup.get(name, name)
class LandMgmtAreaLoJ(LandMgmtArea):
    """Land-management areas as named on Lists of John."""
    highPointSuffix = " Highpoint"
    # Shared registry of all areas created via add() for this source.
    areaLookup = {}
    # LoJ spelling -> the canonical name used by this tool.
    nameLookup = {
        "Arapaho National Forest": "Arapaho and Roosevelt National Forest",
        "Challis National Forest": "Salmon-Challis National Forest",
        "Humboldt National Forest": "Humboldt-Toiyabe National Forest",
        "Palen/McCoy Wilderness": "Palen-McCoy Wilderness",
        "Pike National Forest": "Pike and San Isabel National Forest",
        "Shasta National Forest": "Shasta-Trinity National Forest",
        "Toiyabe National Forest": "Humboldt-Toiyabe National Forest",
        "Trinity National Forest": "Shasta-Trinity National Forest",
        "Uinta National Forest": "Uinta-Wasatch-Cache National Forest",
        "Wasatch National Forest": "Uinta-Wasatch-Cache National Forest",
        "Winema National Forest": "Fremont-Winema National Forest",
    }
    @classmethod
    def normalizeName(self, name, peak):
        # Only a closed set of suffixes is accepted; anything else is fatal.
        if name.endswith((" Wilderness", " National Forest", " National Park", " WSA")):
            pass
        elif name.endswith(" Wilderness Study Area"):
            name = name[:-22] + " WSA"
        else:
            err("{} Unexpected land \"{}\"", peak.fmtIdName, name)
        return self.nameLookup.get(name, name)
def formatTime(timestamp):
ymdhms = time.localtime(timestamp)[0:6]
return "{}-{:02}-{:02} {:02}:{:02}:{:02}".format(*ymdhms)
def zipLongest(*iterables):
    """Round-robin zip that DROPS exhausted iterators.

    Unlike itertools.zip_longest, no fill value is used: each yielded tuple
    contains one item from every iterator that still has items, so the
    tuples shrink as inputs run dry.  Stops when all inputs are exhausted.
    """
    pending = [iter(obj) for obj in iterables]
    while pending:
        batch = []
        survivors = []
        for it in pending:
            try:
                item = next(it)
            except StopIteration:
                continue
            batch.append(item)
            survivors.append(it)
        if not survivors:
            return
        yield tuple(batch)
        pending = survivors
def loadURLs(loadLists):
    """Download the (url, filename) pairs in *loadLists* with curl.

    Lists are shuffled and interleaved round-robin (via zipLongest), with a
    random 7-14 second sleep between rounds to pace requests.  Each target
    file is made writable before the download and read-only after it.
    Aborts the whole run on the first curl failure.
    """
    random.seed()
    for loadList in loadLists:
        random.shuffle(loadList)
    # e.g. "95/247" -- progress denominator per source list.
    listLengths = "/".join([str(len(loadList)) for loadList in loadLists])
    mode444 = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
    mode644 = stat.S_IWUSR | mode444
    for i, loadList in enumerate(zipLongest(*loadLists)):
        if i != 0:
            # Random pause between rounds (7.5 to 14.5 -> int 7..14 seconds).
            sleepTime = int(random.random() * 7 + 7.5)
            log("{}/{} Sleeping for {} seconds", i, listLengths, sleepTime)
            time.sleep(sleepTime)
        for url, filename in loadList:
            if os.path.exists(filename):
                # Make an existing cached copy writable so curl can replace it.
                os.chmod(filename, mode644)
            command = ["/usr/local/opt/curl/bin/curl", "-o", filename, url]
            print(*command)
            rc = subprocess.call(command)
            if rc != 0:
                print("Exit code", rc)
                return
            # Mark the freshly downloaded file read-only.
            os.chmod(filename, mode444)
def getLoadLists(pl):
    """Return one (url, filename) download list per source (LoJ, then Pb),
    covering only peaks whose HTML files are not cached on disk."""
    loadLists = []
    for peakClass in (PeakLoJ, PeakPb):
        missing = []
        loadLists.append(missing)
        for peak in peakClass.getPeaks(pl.id):
            filename = peak.getPeakFileName(peak.id)
            if os.path.exists(filename):
                continue
            missing.append((peak.getPeakURL(peak.id), filename))
    return loadLists
def getLoadListsFromTable(pl):
    """Return one (url, filename) download list per source (LoJ, then Pb)
    for peaks in *pl* whose cached files are missing or older than 180 days.

    A peak is skipped when it has no ID for the source, or when its ID
    starts with "-" (apparently a marker for "not on this source" --
    TODO confirm against the ID conventions elsewhere in the file).
    """
    loadLists = []
    # Files modified within the last 180 days are considered fresh.
    cutoffTime = time.time() - 180 * 24 * 60 * 60
    cutoffTimeStr = formatTime(cutoffTime)
    for peakClass in (PeakLoJ, PeakPb):
        loadList = []
        loadLists.append(loadList)
        for section in pl.sections:
            for peak in section.peaks:
                peakId = getattr(peak, peakClass.classAttrId, None)
                if peakId is None or peakId[0] == "-":
                    continue
                filename = peakClass.getPeakFileName(peakId)
                if os.path.exists(filename):
                    modTime = os.stat(filename).st_mtime
                    if modTime > cutoffTime:
                        continue
                    # NOTE(review): replace('"', '"') is a no-op as written;
                    # it was presumably replace('&quot;', '"') before HTML
                    # entity decoding mangled this file -- confirm upstream.
                    out("{:5} {:24} {} Last mod ({}) > 180 days ago ({})",
                        peak.id,
                        peak.name.replace('"', '"'),
                        peakClass.classId,
                        formatTime(modTime),
                        cutoffTimeStr)
                loadList.append((peakClass.getPeakURL(peakId), filename))
    return loadLists
class TablePeak(object):
    """Base class for peaks scraped from a source's HTML list page.

    Subclasses supply classId/classTitle, columnMap (header -> (regexp,
    attribute names)), numPeaks (expected row counts per list), and the
    postProcess hook.
    """
    @classmethod
    def getListFileName(self, peakListId, maxProm=False):
        # e.g. data/peaklists/sps/pb.html or .../pb-max.html
        return "data/peaklists/{}/{}{}.html".format(
            peakListId.lower(), self.classId.lower(), "-max" if maxProm else "")
    @classmethod
    def getPeaks(self, peakListId, maxProm=False):
        """Parse the cached list page into peak objects.

        Returns [] when the file isn't cached.  The header row selects and
        orders the columns via columnMap; each data cell must match its
        column's regexp, whose groups are stored as peak attributes.
        Asserts the final count against numPeaks.
        """
        fileName = self.getListFileName(peakListId, maxProm)
        if not os.path.exists(fileName):
            return []
        table = TableReader(fileName, **getattr(self, "tableReaderArgs", {}))
        # First row is the header; map each header cell to a column spec.
        row = table.__next__()
        columns = []
        for colNum, colStr in enumerate(row):
            colStr = RE.htmlTag.sub("", colStr)
            col = self.columnMap.get(colStr, None)
            if col is None:
                table.colNum = colNum + 1
                table.err("Unrecognized column name:\n{}", colStr)
            columns.append(col)
        peaks = []
        for row in table:
            if len(row) != len(columns):
                table.err("Unexpected number of columns")
            peak = self()
            for colNum, (colStr, (regexp, attributes)) in enumerate(zip(row, columns)):
                m = regexp.match(colStr)
                if m is None:
                    table.colNum = colNum + 1
                    table.err("Doesn't match expected pattern:\n{}", colStr)
                if attributes is None:
                    # Validated-only column: the pattern must capture nothing.
                    assert regexp.groups == 0
                else:
                    values = m.groups()
                    assert len(attributes) == len(values)
                    for attr, value in zip(attributes, values):
                        setattr(peak, attr, value)
            peak.postProcess(peakListId)
            peaks.append(peak)
        assert len(peaks) == self.numPeaks[peakListId]
        return peaks
    @classmethod
    def getAttr(self, attr, peak):
        """Fetch *attr* from peak's per-source sub-peak, or None if absent."""
        peak2 = getattr(peak, self.classAttrPeak, None)
        if peak2 is None:
            return None
        return getattr(peak2, attr, None)
    def checkLand(self, peak):
        """Cross-check this source's land areas against the table peak's."""
        land1 = peak.landManagement
        land1 = {} if land1 is None else {area.name: area for area in land1}
        def getArea(name):
            # NPS-expected areas are looked up but kept; others are consumed.
            return land1.get(name) if self.expectLandNPS(name) else land1.pop(name, None)
        def getAreaNPS(nps):
            # nps is either a single name or a tuple of candidate names.
            if isinstance(nps, str):
                return getArea(nps)
            for name in nps:
                area = getArea(name)
                if area:
                    return area
            return None
        for area2 in self.landManagement:
            area1 = getAreaNPS(area2.nps) if area2.nps else land1.pop(area2.name, None)
            if area1 is None:
                out("{} '{}' not in table", peak.fmtIdName, area2.name)
            elif area1.isHighPoint(peak) != (area2.highPoint is self):
                out("{} High Point mismatch ({})", peak.fmtIdName, area2.name)
        # Whatever remains in the table wasn't listed by this source.
        for name, area in sorted(land1.items()):
            if self.expectLand(area):
                out("{} '{}' not on {}", peak.fmtIdName, name, self.classTitle)
    def checkDataSheet(self, peak):
        """Cross-check this source's NGS datasheet against the table peak's."""
        dataSheets = peak.getDataSheets()
        if dataSheets is None:
            return
        ds = self.dataSheet
        if ds is None:
            if dataSheets:
                print(self.fmtIdName, "Datasheet not on", self.classTitle)
        else:
            if ds not in dataSheets:
                print(self.fmtIdName, "Datasheet", ds, "not in table")
class PeakPb(TablePeak):
    """Peak data scraped from Peakbagger.com list and peak pages."""
    classId = 'Pb'
    classTitle = 'Peakbagger'
    classAttrId = 'peakbaggerId'
    classAttrPeak = 'peakbaggerPeak'
@classmethod
def getPeakFileName(self, peakId):
return "data/peakfiles/pb/{}/p{}.html".format(peakId[0], peakId)
@classmethod
def getPeakURL(self, peakId):
return "https://peakbagger.com/peak.aspx?pid={}".format(peakId)
@classmethod
def expectLand(self, area):
return not (area.name.startswith("BLM ") or area.landClass in ("landCity", "landEBRPD", "landMROSD"))
    @classmethod
    def expectLandNPS(self, name):
        # Peakbagger is expected to list every NPS-related area.
        return True
tableReaderArgs = dict(tableAttributes='class="gray"')
columnMap = {
'Rank': (
re.compile('^(?:[1-9][0-9]*\\.)?$'),
None
),
'Peak': (
re.compile('^<a href=peak\\.aspx\\?pid=([1-9][0-9]*)>([- A-Za-z]+)</a>$'),
('id', 'name')
),
'Section': (
re.compile('^(?:([1-9]|[0-9]{2})(?:\\.| -) )?([A-Z][a-z]+'
'(?:(?: | - |-)[A-Z]?[a-z]+)*(?:\\.? [1-9][0-9]*)?)$'),
('sectionNumber', 'sectionName')
),
'Elev-Ft': (
re.compile('^((?:[1-9][0-9],[0-9]{3})|(?:[1-9][0-9]{3}))$'),
('elevation',)
),
'Range (Level 5)': (
re.compile('^<a href=range\\.aspx\\?rid=([1-9][0-9]*)>([- A-Za-z]+)</a>$'),
('rangeId', 'rangeName')
),
'Prom-Ft': (
re.compile('^((?:[1-9][0-9],[0-9]{3})|(?:[1-9][0-9]{0,3})|0|)$'),
('prominence',)
),
'Ascents': (
re.compile('^[1-9][0-9]*$'),
None
),
}
columnMap['Elev-Ft(Opt)'] = columnMap['Elev-Ft']
columnMap['Prom-Ft(Opt)'] = columnMap['Prom-Ft']
numPeaks = {
'DPS': 95,
'GBP': 115,
'HPS': 280,
'LPC': 86,
'NPC': 73,
'OGUL': 63,
'SPS': 247,
}
NAME_MAP = {
# Desert Peaks Section:
'13410': ('Chuckwalla Peak', 'Bunch Benchmark'),
'1514': ('Eagle Mountain', 'Eagle Mountains HP'),
'3810': ('Granite Mountain', 'Granite Benchmark'),
'3804': ('Granite Mountain', 'Granite Peak'),
'3806': ('Old Woman Mountain', 'Old Woman Mountains HP'),
'13411': ('Spectre Peak', 'Spectre Point'),
'13408': ('Stepladder Mountains', 'Stepladder Mountains HP'),
'4173': ('Superstition Benchmark', 'Superstition Mountain'),
# Hundred Peaks Section:
'2882': ('Black Mountain', 'Black Mountain #5'),
'2867': ('Cannell Point', 'Cannel Point'),
'1457': ('Granite Mountain', 'Granite Mountain #2'),
'1465': ('Monument Peak', 'Monument Peak #1'),
'1508': ('Rabbit Peak', 'Rabbit Peak #2'),
'44743': ('Toro Peak-West Peak', 'Toro Peak West'),
# Great Basin Peaks / Nevada Peaks Club:
'60151': ('Baker Peak-West Summit', 'Baker Peak'),
'3573': ('Baker Peak', 'Baker Peak East'),
'3321': ('Duffer Peak-North Peak', 'Duffer Peak'),
'3322': ('Duffer Peak', 'Duffer Peak South'),
'3394': ('Granite Peak', 'Granite Peak (Humboldt)'),
'3577': ('Granite Peak', 'Granite Peak (Snake Range)'),
'3312': ('Granite Peak', 'Granite Peak (Washoe)'),
'3609': ('Mount Grant', 'Mount Grant (West)'),
'3498': ('Mount Jefferson-North Summit', 'Mount Jefferson North'),
'17460': ('Petersen Mountains HP', 'Petersen Mountain'),
# Sierra Peaks Section:
'69023': ('Adams Peak-West Peak', 'Adams Peak'),
'13541': ('Devils Crags', 'Devil\'s Crag #1'),
'2650': ('Mount Morgan', 'Mount Morgan (North)'),
'2662': ('Mount Morgan', 'Mount Morgan (South)'),
'2652': ('Mount Stanford', 'Mount Stanford (North)'),
'2786': ('Mount Stanford', 'Mount Stanford (South)'),
'13544': ('Pilot Knob', 'Pilot Knob (North)'),
'2868': ('Pilot Knob', 'Pilot Knob (South)'),
'2569': ('Pyramid Peak', 'Pyramid Peak (North)'),
'2754': ('Pyramid Peak', 'Pyramid Peak (South)'),
'13517': ('Sawtooth Peak', 'Sawtooth Peak (North)'),
'13507': ('Sawtooth Peak', 'Sawtooth Peak (South)'),
# Tahoe Ogul Peaks:
'26371': ('Silver Peak', 'Silver Peak (Desolation)'),
'53297': ('Silver Peak-Southwest Summit', 'Silver Peak Southwest'),
# Lower Peaks Committee
'18343': ('El Montanon', 'El Montañon'),
# Other Sierra Peaks:
'37148': ('Gambler\'s Special', 'Gamblers Special Peak'),
'27997': ('Maggies Peaks-South Summit', 'Maggies Peaks South'),
'2548': ('Mount Lola-North Ridge Peak', 'Mount Lola North'),
'16774': ('Mount Lamarck North', 'Northwest Lamarck'),
'83454': ('Peak 10570', 'Peak 3222m'),
'83353': ('Pk 10597', 'Peak 3230m'),
'36720': ('Peak 3560', 'Peak 3560m+'),
'38937': ('Shepherd Crest', 'Shepherd Crest East'),
'26162': ('Silver Peak - Northeast Summit', 'Silver Peak Northeast'),
'43761': ('Peak 9980', 'Sirretta Peak North'),
'36459': ('Snow Valley Peak-East Peak', 'Snow Valley Peak East'),
'24114': ('The Sisters', 'The Sisters East'),
'36717': ('Volcanic Ridge', 'Volcanic Ridge West'),
'28151': ('White Mountain', 'White Mountain (Sonora Pass)'),
'38787': ('White Mountain', 'White Mountain (Tioga Pass)'),
# Other Desert Peaks:
'75734': ('Antelope Buttes HP', 'Antelope Buttes'),
# Other California Peaks:
'77712': ('Maguire Peaks-East Summit', 'Maguire Peaks East'),
'27020': ('Maguire Peak', 'Maguire Peaks West'),
'53478': ('Monument Peak North', 'Monument Peak'),
'1183': ('Mount Saint Helena-East Peak', 'Mount Saint Helena East'),
'40000': ('Mount Saint Helena-South Peak', 'Mount Saint Helena South'),
'53383': ('Mount Saint Helena-Southeast Peak', 'Mount Saint Helena Southeast'),
'1158': ('Mount Tamalpais-East Peak', 'Mount Tamalpais East Peak'),
'16816': ('Mount Tamalpais-Middle Peak', 'Mount Tamalpais Middle Peak'),
'1159': ('Mount Tamalpais-West Peak', 'Mount Tamalpais West Peak'),
'68787': ('Peak 1380', 'Peak 1390'),
'75602': ('Peak 2600', 'Peak 2600+'),
'54010': ('Pine Ridge HP', 'Pine Ridge'),
'1174': ('Snow Mountain', 'Snow Mountain East'),
# Other Western Peaks:
'30726': ('Mount Nebo-South Peak', 'Mount Nebo South'),
'2353': ('Mount Saint Helens', 'Mount St. Helens'),
'3523': ('Ruby Dome-East Peak', 'Ruby Pyramid'),
'25632': ('Sourdough Mountain-Lookout Site', 'Sourdough Mountain Lookout'),
'23494': ('Trail Canyon Saddle Peak', 'Trail Canyon Peak'),
'32935': ('Mount Evans-West Peak', 'West Evans'),
'2454': ('Wizard Island Peak', 'Wizard Island'),
}
@classmethod
def normalizeName(self, name, peak_id=None):
if name.endswith(' High Point'):
name = name[:-10] + 'HP'
if peak_id:
old_and_new_name = self.NAME_MAP.get(peak_id)
if old_and_new_name:
if name != old_and_new_name[0]:
out('Pb name ({}) not as expected ({})', name, old_and_new_name[0])
return old_and_new_name[1]
return name
ELEVATION_MAP = {
# Pb DPS Elevation Adjustments:
#
# - Needle Peak
# See LoJ DPS Elevation Adjustments, except that a possible reason for Pb's 5,801'
# is that the 1768.8m spot elevation from the 7.5' topo was rounded down to 1768m.
# 1768m = 5800.525' which is 5,801' when rounded to the nearest foot.
#
# - Stepladder Mountains HP
# See LoJ DPS Elevation Adjustments. Pb seems to have done the same as LoJ, except
# that instead of rounding down (2939.6' => 2939'), Pb rounded to the nearest foot
# (2939.6' => 2940')
'3665': (5801, 5801, 5804, 5804), # Needle Peak: 1769m
'13408': (2940, 2940, 2920, 2953), # Stepladder Mountains HP: 890m-900m
# Pb SPS Elevation Adjustments:
#
# - Basin Mountain
# See LoJ SPS Elevation Adjustments. Pb did the same.
#
# - Highland Peak
# All of the Ebbetts Pass 7.5' topos show a spot elevation of 10,935'
# All of the Markleeville 15' topos show a spot elevation of 10,934'
# The 1985 Smith Valley 1:100,000 topo doesn't show a spot elevation.
# The 1889, 1891, and 1893 Markleeville 1:125,000 maps show a spot elevation of 10,955'
# All of the Walker Lake 1:250,000 maps show spot elevations of either 10,935' or 10,955'
# How does Pb get 10,936'?
#
# - Kearsarge Peak
# The 1994 Kearsarge Peak 7.5' topo doesn't show a spot elevation.
# The highest contour is at 3840m, and the contour interval is 20m.
# The 1985 and 1992 Kearsarge Peak 7.5' topos show a spot elevation of 3846m = 12,618'
# The Mt. Pinchot 15' topos show a spot elevation of 12,598'
# The 1978 Mount Whitney 1:100,000 topo shows a spot elevation of 3840m = 12,598'
# The 1937 Mt. Whitney 1:125,000 topos show a spot elevation of 12,650'
# The 1:250,000 topos show a spot elevation of 12,650'
# How does Pb get 12,615'?
#
# - Kern Peak
# None of the maps on topoView (1:24,000, 1:62,500, 1:100k, 1:125k, and 1:250k) show a
# spot elevation of 11,480'. It's likely that Pb just didn't set the optimistic elevation.
# The highest contour on the 7.5' topos is at 11,480', and the contour interval is 40'.
#
# - Mount Carillon
# Pb's elevation of 13,553' implies that there's a map showing a spot elevation of either
# 13,553' or 4131m, but none of the maps currently on topoView show this.
# The 1985, 1993, and 1994 editions of the Mount Whitney 7.5' topo don't show a spot elevation
# for Mount Carillon. The highest contour is at 4120m, and the interval is 20m.
# All of the Mount Whitney 15' topos (3x 1956, 2x 1967) show a spot elevation of 13,552'
# The 1978/1990 Mount Whitney 1:100,000 topo doesn't show a spot elevation.
# The highest contour is at 4100m, and the contour interval is 50m.
# The 1907, 1919, and 1937 Mt. Whitney 1:125,000 topos show a spot elevation of 13,571'
# None of the Fresno 1:250,000 maps show a spot elevation, nor do they label Mount Carillon.
#
# - Mount Jordan
# Very similar to Mount Carillon:
# Pb's elevation of 13,343' implies that there's a map showing a spot elevation of either
# 13,343' or 4067m, but none of the maps currently on topoView show this.
# The Mt. Brewer 7.5' quads don't show a spot elevation.
# The highest contour is at 4060m, and the interval is 20m.
# The Mount Whitney 15' quads show a spot elevation of 13,344'
# The 1978/1990 Mount Whitney 1:100,000 topo doesn't show a spot elevation.
# The 1907, 1919, and 1937 Mt. Whitney 1:125,000 maps show a spot elevation of 13,316'
# None of the Fresno 1:250,000 maps show a spot elevation, nor do they label Mount Jordan.
#
# - Mount Williamson
# Pb's elevation of 14,373' implies that there's a map showing a spot elevation of either
# 14,373' or 4381m, but none of the maps currently on topoView show this.
# The 1984, 1993, and 1994 Mt. Williamson 7.5' topos don't show a spot elevation.
# The highest contour is at 4380m, and the interval is 20m.
# All of the Mount Whitney 15' topos (3x 1956, 2x 1967) show a spot elevation of 14,375'
# The 1978/1990 Mount Whitney 1:100,000 topo shows a spot elevation of 4382m = 14376.6'
# The 1907, 1919, and 1937 Mt. Whitney 1:125,000 topos show a spot elevation of 14,384'
# The 1948-1960 Fresno 1:250,000 maps show a spot elevation of 14,384'
# The 1962 Fresno 1:250,000 maps show a spot elevation of 14,375'
#
# - Sierra Buttes
# The Sierra City 7.5' topos show a spot elevation of 8,591'
# The Sierra City 15' topos show a spot elevation of 8,587'
# The 1979/1990 Portola 1:100,000 topo doesn't show a spot elevation.
# The highest contour is at 2550m, and the interval is 50m.
# The 1891-1897 Downieville 1:125,000 maps show a spot elevation of 8,615'
# The 1958 Chico 1:250,000 maps show a spot elevation of 8,587'
# So perhaps Pb got 8,590' from NGS Data Sheet "Sierra" (KS1520). However, that's the
# NAVD 88 elevation which "was computed by applying the VERTCON shift value to the
# NGVD 29 height" which is given as 8,587'. Since the vertical datum for primary
# elevations on Pb is NGVD 29, it seems that either 8,591' or 8,587' should be used.
'2678': (13181, 13200, 13181, 13181), # Basin Mountain
'2582': (10936, 10936, 10935, 10935), # Highland Peak
'13530': (12615, 12615, 12618, 12618), # Kearsarge Peak
'2860': (11480, 11480, 11480, 11520), # Kern Peak
'2824': (13553, 13553, 13552, 13552), # Mount Carillon
'2791': (13343, 13343, 13344, 13344), # Mount Jordan
'2814': (14373, 14373, 14375, 14375), # Mount Williamson
'13567': ( 8590, 8590, 8591, 8591), # Sierra Buttes
# Pb HPS Elevation Adjustments:
'1480': (10839, 10839, 10804, 10804), # San Jacinto Peak
# Pb Elevation Adjustments for Great Basin Peaks / Nevada Peaks Club
#
# - Bull Mountain
# "Field observations by visitors to this mountain indicate that the main point shown here is
# about 8 or 9 feet higher than the nearby Dunn Benchmark. So the elevation of the large contour
# to the east of the benchmark is shown as 9934 feet. The terrain is very gentle and most
# peakbaggers will visit both spots anyway."
# [https://peakbagger.com/peak.aspx?pid=3440]
#
# - Hays Canyon Peak
# "Field observations show that the highest point on this flat-topped mountain is near the
# 7916-foot benchmark. The two tiny 7920-foot contours north of the benchmark are about 10 feet
# lower than the benchmark knoll. These contour are likely a map error, but the benchmark
# elevation might be too low, too. Here, the elevation is given as 7920 feet, assuming the
# benchmark is 4 feet below the high point."
# [https://peakbagger.com/peak.aspx?pid=3304]
#
# - Green Mountain (NPC)
# The last 0 in 10680 was likely misread as an 8 (a contour line bisects it on the 7.5' topo).
'3440': ( 9934, 9934, 9920, 9960), # Bull Mountain
'3321': ( 9400, 9440, 9397, 9397), # Duffer Peak
'3312': ( 8980, 8980, 8960, 9000), # Granite Peak (Washoe)
'24372': (10688, 10688, 10680, 10680), # Green Mountain
'3304': ( 7920, 7920, 7916, 7916), # Hays Canyon Peak
'34519': (10248, 10248, 10240, 10280), # Morey Peak North
# Pb Elevation Adjustments for Other Desert Peaks:
'3782': (4777, 4777, 4757, 4790), # Kelso Peak: 1450m-1460m
# Pb Elevation Adjustments for Other Sierra Peaks:
#
# - Mount Starr
# "Field observations by climbers have shown that the highest point on Mount Starr is south
# of the point marked 12,835' on the topographic map. A point on the ridge to the south is
# approximately five feet higher and thus the summit of this peak."
# [https://peakbagger.com/peak.aspx?pid=2660]
'38609': (12051, 12051, 12077, 12077), # Duck Lake Peak: 3681m
'39925': ( 8083, 8123, 8083, 8083), # Grouse Mountain
'36666': (11353, 11353, 11447, 11447), # Lost World Peak: 3489m
'38937': (12040, 12080, 12000, 12040), # Shepherd Crest East
'2660': (12840, 12840, 12835, 12835), # Mount Starr
'83454': (10570, 10570, 10571, 10571), # Peak 3222m
'36720': (11680, 11680, 11680, 11745), # Peak 3560m+: 3560m-3580m
# Pb Elevation Adjustments for Other California Peaks:
'30637': ( 920, 920, 900, 925), # Hawk Hill
'24165': ( 780, 820, 760, 800), # Islay Hill
'33891': (1728, 1728, 1720, 1760), # Mine Hill
'1158': (2572, 2574, 2571, 2571), # Mount Tamalpais East Peak
'16816': (2518, 2520, 2480, 2520), # Mount Tamalpais Middle Peak
'1159': (2576, 2578, 2560, 2600), # Mount Tamalpais West Peak
'68787': (1380, 1400, 1390, 1390), # Peak 1390
'54010': (3028, 3028, 3000, 3040), # Pine Ridge
'1155': (1342, 1344, 1336, 1336), # Point Reyes Hill
'55390': (3456, 3456, 3455, 3455), # Post Summit
'64361': ( 937, 937, 925, 950), # Slacker Hill
'50114': (1434, 1434, 1430, 1430), # White Hill
}
PROMINENCE_MAP = {
# --------------------------
# Pb Prominence Adjustments
# --------------------------
#
# - East Ord Mountain (DPS)
# The contour interval at the saddle is 40', not 20'. So the saddle range is 4680'-4640', not
# 4680'-4660'. Thus the maximum prominence is raised by 20'. LoJ also uses 4660' for the saddle.
#
# - Signal Peak (DPS)
# The minimum saddle elevation can be raised from 1380' to 1390', thus reducing the maximum
# prominence by 10' to 3487'. The main contour interval on the 7.5' quad (Lone Mountain, AZ)
# is 20 feet, giving a saddle range of 1380'-1400', but there's a supplementary contour on
# both downward-sloping sides (east and west) of the saddle at 1390'. LoJ also uses 1395' for
# the average saddle elevation.
'13419': (3346, 3476, 3346, 3478), # DPS Cerro Pescadores 1020m, 1060m
'13407': (1488, 1508, 1488, 1528), # DPS East Ord Mountain
'4066': (3477, 3497, 3477, 3487), # DPS Signal Peak
'3321': ( 40, 120, 0, 80), # GBP Duffer Peak
'18343': (1333, 1333, 1308, 1358), # LPC El Montañon
'39129': ( 402, 442, 442, 482), # OCAP Ring Mountain 602'-160', 602'-120'
'64361': ( 274, 274, 262, 287), # OCAP Slacker Hill 937'-675', 937'-650'
'50114': ( 308, 310, 314, 354), # OCAP White Hill 1434'-1120', 1434'-1080'
'37148': ( 263, 393, 262, 394), # OSP Gamblers Special Peak 80m, 120m
'83454': ( 431, 431, 399, 465), # OSP Peak 3222m 10570'-3100m, 10570'-3080m
'83353': ( 394, 394, 361, 427), # OSP Peak 3230m 3230m-3120m, 3230m-3100m
'21601': ( 885, 925, 845, 885), # OSP Trail Peak 11605'-10760', 11605'-10720'
'21603': ( 132, 262, 131, 262), # OSP Two Teats 40m, 80m
'58159': ( 896, 896, 879, 912), # OWP Billy Goat Peak 1748m-1480m, 1748m-1470m
'2868': ( 720, 800, 680, 760), # SPS Pilot Knob (South)
}
def postProcess(self, peakListId=None):
def str2int(s):
return int(s) if len(s) <= 4 else int(s[:-4]) * 1000 + int(s[-3:])
self.elevation = str2int(self.elevation)
self.prominence = str2int(self.prominence) if len(self.prominence) > 0 else None
def postProcess2(self, maxPeak):
    """Fold *maxPeak* (the max-values counterpart fetched by getPeaks) into this peak.

    Applies the PROMINENCE_MAP and ELEVATION_MAP per-peak corrections,
    warning via out() whenever Pb's published values differ from the
    expected old values, then stores elevation as an ElevationPb range
    and prominence as a (min, max) tuple.
    """
    self.name = self.normalizeName(self.name, self.id)
    elev_min = self.elevation
    prom_min = self.prominence
    elev_max = maxPeak.elevation
    prom_max = maxPeak.prominence
    proms = self.PROMINENCE_MAP.get(self.id)
    if proms:
        old_min, old_max, new_min, new_max = proms
        # Sanity-check that Pb still publishes the values the correction
        # was written against before overriding them.
        if old_min != prom_min or old_max != prom_max:
            out('Pb prominences ({}, {}) not as expected ({}, {}) for {}',
                prom_min, prom_max, old_min, old_max, self.name)
        prom_min = new_min
        prom_max = new_max
    elevs = self.ELEVATION_MAP.get(self.id)
    if elevs:
        old_min, old_max, new_min, new_max = elevs
        if old_min != elev_min or old_max != elev_max:
            out('Pb elevations ({}, {}) not as expected ({}, {}) for {}',
                elev_min, elev_max, old_min, old_max, self.name)
        # Shift prominence by the same delta as elevation so the
        # implied saddle elevation is unchanged.
        prom_min += new_min - elev_min
        prom_max += new_max - elev_max
        elev_min = new_min
        elev_max = new_max
    if elev_min > elev_max:
        err("Pb: Max elevation ({}) must be >= min elevation ({}) for {}",
            elev_max, elev_min, self.name)
    if prom_min > prom_max:
        err("Pb: Max prominence ({}) must be >= min prominence ({}) for {}",
            prom_max, prom_min, self.name)
    self.elevation = ElevationPb(elev_min, elev_max)
    self.prominence = (prom_min, prom_max)
@classmethod
def getPeaks(self, peakListId):
    """Return the peaks for *peakListId*.

    The base-class loader is called twice (min variant and, with the extra
    True flag, max variant); the two copies must agree on every field
    except elevation and prominence, and each max peak is folded into its
    min peak via postProcess2().
    """
    super_getPeaks = super(PeakPb, self).getPeaks
    minPeaks = super_getPeaks(peakListId)
    maxPeaks = super_getPeaks(peakListId, True)
    maxPeaks = {p.id: p for p in maxPeaks}
    for peak in minPeaks:
        maxPeak = maxPeaks[peak.id]
        diff = ObjectDiff(peak, maxPeak, allowNotEq=("elevation", "prominence"))
        if diff:
            err(diff.message("minPeak", "maxPeak",
                " for Pb ID {} ({})".format(peak.id, peak.name)))
        peak.postProcess2(maxPeak)
    return minPeaks
# Ownership row: an optional "Land: ..." part and an optional
# "Wilderness/Special Area: ..." part, separated by <br/>.
landPattern = re.compile(
    "^(?:Land: ([- '()./A-Za-z]+))?(?:<br/>)?"
    "(?:Wilderness/Special Area: ([- ()/A-Za-z]+))?$"
)
def readLandManagement(self, land):
    """Parse the Ownership row into self.landManagement (a list of area names)."""
    land = land.replace("\\xe2\\x80\\x99", "'") # Tohono O'odham Nation
    # Keep this name intact across the "/" split below.
    land = land.replace("Palen/McCoy", "Palen-McCoy")
    m = self.landPattern.match(land)
    if m is None:
        err("{} Land doesn't match pattern:\n{}", self.fmtIdName, land)
    land, wilderness = m.groups()
    if land is not None:
        self.landManagement.extend(land.split("/"))
    if wilderness is not None:
        self.landManagement.extend(wilderness.split("/"))
# Pb shows the elevation in both units: pattern 1 is feet-first, pattern 2 is
# meters-first. The second group ("+") marks a contour (range) elevation.
elevationPattern1 = re.compile(
    "^<h2>Elevation: ([1-9][0-9]{2,3}|[1-9][0-9],[0-9]{3})(\\+?) feet, ([1-9][0-9]{2,3})\\2 meters</h2>$"
)
elevationPattern2 = re.compile(
    "^<h2>Elevation: ([1-9][0-9]{2,3})(\\+?) meters, ([1-9][0-9]{2,3}|[1-9][0-9],[0-9]{3})\\2 feet</h2>$"
)
def readElevation(self, maxPeak, html):
    """Parse the <h2>Elevation ...</h2> header and cross-check feet vs meters."""
    m = self.elevationPattern1.match(html)
    if m is None:
        m = self.elevationPattern2.match(html)
        if m is None:
            err("{} Elevation doesn't match pattern:\n{}", self.fmtIdName, html)
        meters, isRange, feet = m.groups()
    else:
        feet, isRange, meters = m.groups()
    isRange = (isRange == "+")
    feet = str2IntPb(feet, "Elevation in feet", self)
    meters = str2IntPb(meters, "Elevation in meters", self)
    if toMeters(feet) != meters:
        err("{} Elevation in feet ({}) != {} meters", self.fmtIdName, feet, meters)
    self.elevation = feet
    # A "+" elevation is a contour value: the true maximum is unknown here
    # and is filled in later from the Elevation Info row (readElevationInfo).
    maxPeak.elevation = None if isRange else feet
elevationRangePattern = re.compile(
    "^Elevation range:([,0-9]+) - ([,0-9]+) (ft|m)<br/>"
)
def readElevationInfo(self, maxPeak, html):
    """Parse the "Elevation range" row to set maxPeak.elevation (in feet)."""
    m = self.elevationRangePattern.match(html)
    if m is None:
        # Best effort: without a range, fall back to the single elevation.
        print(self.fmtIdName, "Elevation Info doesn't match pattern")
        maxPeak.elevation = self.elevation
        return
    minElev, maxElev, elevUnit = m.groups()
    minElev = str2IntPb(minElev, "Minimum elevation", self)
    maxElev = str2IntPb(maxElev, "Maximum elevation", self)
    if elevUnit == "m":
        minElev = toFeet(minElev)
        maxElev = toFeet(maxElev)
    # NOTE(review): assert is stripped under -O; the range's lower bound is
    # expected to equal the previously parsed elevation.
    assert minElev == self.elevation
    maxPeak.elevation = maxElev
# Prominence row: optional Key Col Page link, clean and optimistic prominence
# (each in two units, tied together by backreferences \3 and \5), optional
# line parent, then the key col name and its elevation range.
prominencePattern = re.compile(
    "^(?:<a href=\"KeyCol\\.aspx\\?pid=([1-9][0-9]*)\">Key Col Page</a>\n"
    "\\(Detailed prominence information\\)<br/>)?<a>Clean Prominence</a>\n"
    ": ([,0-9]+) (ft|m)/([,0-9]+) (ft|m)<br/><a>Optimistic Prominence</a>\n"
    ": ([,0-9]+) \\3/([,0-9]+) \\5<br/><a>(?:Line Parent</a>\n"
    ": <a href=\"peak\\.aspx\\?pid=([1-9][0-9]*)\">([- 0-9A-Za-z]+)</a>\n<br/><a>)?Key Col</a>\n"
    ": ([A-Z][a-z]+(?:[- /][A-Za-z]+)*(?:, [A-Z]{2})?)?([,0-9]+) \\3/([,0-9]+) \\5$"
)
def readProminence(self, maxPeak, html):
    """Parse the Prominence row: clean prominence -> self, optimistic -> maxPeak."""
    m = self.prominencePattern.match(html)
    if m is None:
        err("{} Prominence doesn't match pattern:\n{}", self.fmtIdName, html)
    (
        peakId,
        minProm1, unit1,
        minProm2, unit2,
        maxProm1,
        maxProm2,
        lineParentId,
        lineParentName,
        keyColName,
        maxSaddleElev1,
        maxSaddleElev2,
    ) = m.groups()
    minProm1 = str2IntPb(minProm1, "Clean prominence ({})".format(unit1), self)
    minProm2 = str2IntPb(minProm2, "Clean prominence ({})".format(unit2), self)
    maxProm1 = str2IntPb(maxProm1, "Optimistic prominence ({})".format(unit1), self)
    maxProm2 = str2IntPb(maxProm2, "Optimistic prominence ({})".format(unit2), self)
    maxSaddleElev1 = str2IntPb(maxSaddleElev1, "Max saddle elevation ({})".format(unit1), self)
    maxSaddleElev2 = str2IntPb(maxSaddleElev2, "Max saddle elevation ({})".format(unit2), self)
    if unit1 == "ft":
        assert unit2 == "m"
    else:
        assert unit2 == "ft"
        # Pb listed meters first; swap so the *1 variables hold feet.
        minProm1, minProm2 = minProm2, minProm1
        maxProm1, maxProm2 = maxProm2, maxProm1
        maxSaddleElev1, maxSaddleElev2 = maxSaddleElev2, maxSaddleElev1
    if peakId != self.id and peakId is not None:
        err("{} Pb ID from Key Col Page link ({}) != {}", self.fmtIdName, peakId, self.id)
    self.prominence = minProm1
    maxPeak.prominence = maxProm1
def readLatLng(self, html):
    """Parse "lat, lng (Dec Deg)" from the second <br/>-separated field.

    Stores latitude/longitude as strings rounded to 5 decimal places.
    """
    field = html.split("<br/>")[1]
    assert field.endswith(" (Dec Deg)")
    lat, lng = field[:-10].split(", ")
    self.latitude = str(round(float(lat), 5))
    self.longitude = str(round(float(lng), 5))
rangePattern = re.compile(
    '^(Range[23456]|Continent): <a href="range\\.aspx\\?rid=([1-9][0-9]*)">'
    '((?:Mc|Le)?[A-Z][a-z]+(?: U\\.S\\.)?(?:[- ][A-Z]?[a-z]+)*)(?: \\(Highest Point\\))?</a>$'
)
def readRanges(self, rangeLines):
    """Parse the Ranges rows into self.rangeList, verifying the hierarchy labels.

    The first entry must be labeled "Continent"; subsequent entries must be
    labeled "Range2", "Range3", ... in order.
    """
    rangeList = []
    for line in rangeLines:
        if line == "":
            continue
        m = self.rangePattern.match(line)
        if m is None:
            err("{} Range doesn't match pattern:\n{}", self.fmtIdName, line)
        label, rangeId, rangeName = m.groups()
        rangeList.append((rangeId, rangeName))
        expectedLabel = "Continent" if len(rangeList) == 1 else "Range" + str(len(rangeList))
        if label != expectedLabel:
            err("{} Unexpected range label:\n{}", self.fmtIdName, line)
    # Index 4 is the Range5-level entry.
    # NOTE(review): this assumes every peak page lists at least 5 levels;
    # an IndexError would occur otherwise — confirm against the data.
    self.rangeId, self.rangeName = rangeList[4]
    self.rangeList = rangeList
# Topo Map row patterns, keyed by country code (US 1:24,000 / MX 1:50,000).
quadPattern = {
    'US': re.compile('^([A-Za-z]+(?: [A-Za-z]+)*) O[34][0-9]1[0-9][0-9][a-h][1-8] 1:24,000$'),
    'MX': re.compile('^([A-Za-z]+(?: [A-Za-z]+)*) [A-Z][0-9]{2}[A-Z][0-9]{2} 1:50,000$'),
}
def readQuad(self, html):
    """Parse the Topo Map row; the pattern used depends on the first country code."""
    m = self.quadPattern[self.country[0]].match(html)
    if m is None:
        err("{} Topo Map doesn't match pattern:\n{}", self.fmtIdName, html)
    self.quadName = m.group(1)
def readName(self, html):
    """Extract the peak name sitting between the first and second <img> tags.

    A trailing "<br/>" is stripped from the extracted name.
    """
    prefix = "\n<br/><iframe></iframe>\n<br/><img>"
    if not html.startswith(prefix):
        err("{} Maps HTML doesn't match pattern:\n{}", self.fmtIdName, html)
    end = html.find("<img>", 34)
    if end < 0:
        err("{} Maps HTML doesn't match pattern:\n{}", self.fmtIdName, html)
    name = html[34:end]
    if name.endswith("<br/>"):
        name = name[:-5]
    self.name = name
def readCountry(self, countries):
    """Map country names to codes via countryNameMap into self.country."""
    self.country = []
    for country in countries:
        # Strip the " (Highest Point)" suffix before the lookup.
        if country.endswith(" (Highest Point)"):
            country = country[:-16]
        code = countryNameMap.get(country)
        if code is None:
            err("{} Cannot get country code for {}", self.fmtIdName, country)
        self.country.append(code)
def readState(self, states):
    """Map state/province names to codes via stateNameMap into self.state."""
    self.state = []
    for state in states:
        if state.endswith(" (Highest Point)"):
            state = state[:-16]
        code = stateNameMap.get(state)
        if code is None:
            err("{} Cannot get state code for {}", self.fmtIdName, state)
        self.state.append(code)
def readPeakFile(self, fileName):
    """Parse a saved Pb peak page (three tables) and populate this peak.

    Table 1 holds the elevation header; table 2 holds location rows;
    table 3 holds prominence/ranges/ownership/map rows. A temporary
    maxPeak accumulates the optimistic values, then postProcess2 merges it.
    """
    tables = TableParser(fileName, numTables=3, startTag="h1").tables
    maxPeak = PeakPb()
    self.readElevation(maxPeak, tables[0][0][1]) # First table, first row, second column
    for row in tables[1]:
        if row[0] == "Elevation Info:":
            # Only needed when the header gave a "+" (contour) elevation.
            if maxPeak.elevation is None:
                self.readElevationInfo(maxPeak, row[1])
        elif row[0] == "Latitude/Longitude (WGS84)":
            self.readLatLng(row[1])
        elif row[0] == "Country":
            self.readCountry(row[1].split("<br/>"))
        elif row[0] == "State/Province":
            self.readState(row[1].split("<br/>"))
    self.landManagement = []
    for row in tables[2]:
        if row[0] == "<a>Prominence</a>\n":
            self.readProminence(maxPeak, row[1])
        elif row[0] == "Ranges":
            self.readRanges(row[1].split("<br/>"))
        elif row[0] == "Ownership":
            self.readLandManagement(row[1])
        elif row[0] == "Topo Map":
            self.readQuad(row[1])
        elif row[0].startswith("<b>Dynamic Map</b>"):
            # The name follows the 18-character "<b>Dynamic Map</b>" prefix.
            self.readName(row[0][18:])
    self.postProcess2(maxPeak)
    LandMgmtAreaPb.addAll(self)
def compare(self, other):
    """Report (via out) fields that differ between two PeakPb instances.

    Prominence endpoints may differ by up to 2 feet without being
    reported — presumably rounding slack between sources (confirm).
    """
    for attr in ("id", "name", "elevation", "rangeId", "rangeName"):
        v1 = getattr(self, attr)
        v2 = getattr(other, attr)
        if v1 != v2:
            out("{} {} doesn't match: {} != {}", self.fmtIdName, attr, v1, v2)
    min1, max1 = self.prominence
    min2, max2 = other.prominence
    if not (abs(min2-min1) in (0,1,2) and abs(max2-max1) in (0,1,2)):
        out("{} Prominence doesn't match: {} != {}", self.fmtIdName,
            self.prominence, other.prominence)
def copyListFields(self, other):
    """Copy list-specific fields from *other*, defaulting missing ones to None."""
    self.prominence = getattr(other, "prominence", None)
    self.sectionNumber = getattr(other, "sectionNumber", None)
    self.sectionName = getattr(other, "sectionName", None)
class PeakLoJ(TablePeak):
    """A peak as scraped from Lists of John (listsofjohn.com).

    Parses both list-table rows (columnMap) and saved individual peak pages
    (peakFilePatterns), normalizes peak names to this project's conventions
    (NAME_MAP), and applies the per-peak elevation and saddle-elevation
    corrections tabulated in elevationMap / saddleElevationMap.
    """
    classId = 'LoJ'
    classTitle = 'Lists of John'
    classAttrId = 'listsOfJohnId'
    classAttrPeak = 'listsOfJohnPeak'
    @classmethod
    def getPeakFileName(self, peakId):
        """Return the local cache path for a LoJ peak page (sharded by first digit)."""
        return "data/peakfiles/loj/{}/p{}.html".format(peakId[0], peakId)
    @classmethod
    def getPeakURL(self, peakId):
        """Return the listsofjohn.com URL for *peakId*."""
        return "https://listsofjohn.com/peak/{}".format(peakId)
    @classmethod
    def expectLand(self, area):
        """Return True for land-management names expected from LoJ pages."""
        name = area.name
        return (name.endswith((" National Forest", " National Park")) or
            name.endswith(" Wilderness") and not name.endswith(" Regional", 0, -11))
    @classmethod
    def expectLandNPS(self, name):
        """Return True if *name* is a National Park (NPS-managed)."""
        return name.endswith(" National Park")
    # A peak name is either a capitalized name (optionally ", Suffix",
    # "-Subpeak", or " (HP)"), a quoted unofficial name, or a bare elevation
    # (optionally "-Subpeak").
    peakNamePattern = (' *('
        '(?:[A-Z][- &\\.0-9;A-Za-z]+(?:, [A-Z][a-z]+)?(?:-[A-Z][ A-Za-z]+)?(?: \\(HP\\))?)|'
        '(?:"[A-Z][- &0-9;A-Za-z]+")|'
        '(?:[1-9][0-9]+(?:-[A-Z][ A-Za-z]+)?))'
    )
    peakNameRegExp = re.compile('^' + peakNamePattern + '$')
    # Column header -> (pattern, attribute names) for LoJ list tables.
    # A None attribute tuple means the column is validated but ignored.
    columnMap = {
        '# in list': (
            re.compile('^[1-9][0-9]*$'),
            None
        ),
        'Name': (
            re.compile(
                '^<b><a href="/peak/([1-9][0-9]*)" target="_blank">' +
                peakNamePattern + '</a></b>$'
            ),
            ('id', 'name')
        ),
        'Elevation': (
            re.compile('^([,0-9]+)\'$'),
            ('elevation',)
        ),
        'Saddle': (
            re.compile(
                '^<b><a href="/qmap\\?'
                'lat=(-?[0-9]{1,2}\\.[0-9]{4})&'
                'lon=(-?[0-9]{1,3}\\.[0-9]{4})&z=15" '
                'target="_blank">([,0-9]+)\'</a> </b>$'
            ),
            ('saddleLat', 'saddleLng', 'saddleElev')
        ),
        'Prominence': (
            re.compile('^([,0-9]+)\'$'),
            ('prominence',)
        ),
        'Line Parent': (
            peakNameRegExp,
            ('lineParent',)
        ),
        'Isolation': (
            re.compile('^([0-9]+\\.[0-9]{2})$'),
            ('isolation',)
        ),
        'Proximate Parent': (
            peakNameRegExp,
            ('proximateParent',)
        ),
        'State': (
            re.compile('^([A-Z]{2}(?:, [A-Z]{2})*)$'),
            ('state',)
        ),
        'Counties': (
            re.compile(
                '^([A-Z][a-z]+(?: [A-Z][a-z]+)*(?: &'
                ' [A-Z][a-z]+(?: [A-Z][a-z]+)*)*)$'
            ),
            ('counties',)
        ),
        'Quadrangle': (
            re.compile(
                '^<a href="/quad\\?q=([0-9]+)" target="_blank">([A-Za-z]+(?: [A-Za-z]+)*)</a>'
                ' - <a href="/qmap\\?Q=\\1" target="_blank">Map</a>$'
            ),
            ('quadId', 'quadName')
        ),
        'Section': (
            re.compile('^(?:(?:([1-9][0-9]?)\\. )?([A-Z][a-z]+(?:[- ][A-Z]?[a-z]+)+'
                '(?: [1-9][0-9]*)?))?$'),
            ('sectionNumber', 'sectionName')
        ),
    }
    # Expected number of peaks on each LoJ custom list (sanity check).
    numPeaks = {
        # DPS: My list has 99 peaks. Missing from the LoJ list are
        # (1) the four Mexican peaks, one of which (Cerro Pescadores) is now delisted
        # (2) the three delisted US peaks (Maturango, Argus, and Navajo)
        'DPS': 92,
        'GBP': 115,
        'HPS': 281,
        'LPC': 86,
        'NPC': 73,
        'OGUL': 63,
        'SPS': 246, # Pilot Knob (North) is missing from the LoJ SPS list.
    }
    # Errata for the LoJ SPS list (https://listsofjohn.com/customlists?lid=60):
    #
    # - Mount Morgan (13,001') (known as Mount Morgan (North) on the SPS list) is listed in
    #   section 17 (Bear Creek Spire Area). It should be in section 18 (Mono Creek to Mammoth).
    #
    # - Pilot Knob (12,245') (known as Pilot Knob (North) on the SPS list) is entirely omitted.
    #   It should be in section 16 (Humphreys Basin and West).
    #
    # LoJ peak ID -> (expected LoJ name, name to use on our lists).
    # normalizeName() warns when the LoJ name no longer matches the first element.
    NAME_MAP = {
        # Desert Peaks Section:
        '16922': ('Avawatz Mountains HP', 'Avawatz Peak'),
        '59670': ('Canyon Benchmark', 'Canyon Point'),
        '17444': ('Eagle Benchmark', 'Eagle Mountains HP'),
        '16947': ('Glass Mountain HP', 'Glass Mountain'),
        '61927': ('Jacumba Benchmark', 'Jacumba Mountain'),
        '58232': ('Mitchell Benchmark', 'Mitchell Point'),
        '63948': ('Mopah Peaks, East', 'Mopah Point'),
        '16692': ('Mount Jefferson-South Summit', 'Mount Jefferson'),
        '16911': ('New York Two Benchmark', 'New York Mountains HP'),
        '16861': ('Nopah Benchmark', 'Nopah Range HP'),
        '59903': ('Pahrump Benchmark', 'Pahrump Point'),
        '61031': ('Rosa Benchmark', 'Rosa Point'),
        '58208': ('Sandy Benchmark', 'Sandy Point'),
        '17198': ('Spectre Benchmark', 'Spectre Point'),
        '17315': ('Resting Spring Range HP', 'Stewart Point'),
        '75458': ('Superstition Peak', 'Superstition Mountain'),
        # Hundred Peaks Section:
        '57892': ('Black Mountain', 'Black Mountain #5'),
        '57239': ('Cannel Benchmark', 'Cannel Point'),
        '60072': ('Granite Mountain', 'Granite Mountain #2'),
        '60327': ('Little Berdoo Benchmark', 'Little Berdoo Peak'),
        '59130': ('Monument Peak', 'Monument Peak #1'),
        '60146': ('Inspiration Benchmark', 'Mount Inspiration'),
        '58667': ('Rabbit Peak', 'Rabbit Peak #2'),
        '160158': ('Toro West Peak', 'Toro Peak West'),
        '60890': ('Warren Benchmark', 'Warren Point'),
        # Great Basin Peaks / Nevada Peaks Club:
        '40688': ('Peak 12305', 'Baker Peak East'),
        '16757': ('Granite Peak', 'Granite Peak (Humboldt)'),
        '40715': ('Granite Peak', 'Granite Peak (Snake Range)'),
        '16887': ('Granite Peak', 'Granite Peak (Washoe)'),
        '16801': ('Mount Grant', 'Mount Grant (West)'),
        '40691': ('Mount Jefferson-North Summit', 'Mount Jefferson North'),
        '17032': ('Muddy Mountains HP', 'Muddy Benchmark'),
        '40712': ('Peak 11340', 'Thomas Peak'),
        # Sierra Peaks Section:
        '56234': ('Coyote Peaks, East', 'Coyote Peaks'),
        '32590': ('Devils Crags', 'Devil\'s Crag #1'),
        '32410': ('Mount Morgan', 'Mount Morgan (North)'),
        '17177': ('Mount Morgan', 'Mount Morgan (South)'),
        '32463': ('Mount Stanford', 'Mount Stanford (North)'),
        '32248': ('Mount Stanford', 'Mount Stanford (South)'),
        '32662': ('Pilot Knob', 'Pilot Knob (North)'),
        '59191': ('Pilot Knob', 'Pilot Knob (South)'),
        '17179': ('Pyramid Peak', 'Pyramid Peak (North)'),
        '32483': ('Pyramid Peak', 'Pyramid Peak (South)'),
        '32622': ('Sawtooth Peak', 'Sawtooth Peak (North)'),
        '57438': ('Sawtooth Peak', 'Sawtooth Peak (South)'),
        '57076': ('Sierra Buttes, North', 'Sierra Buttes'),
        '56326': ('Three Sisters, East', 'Three Sisters'),
        # Tahoe Ogul Peaks:
        '56905': ('Silver Peak', 'Silver Peak (Desolation)'),
        '160214': ('Silver Peak', 'Silver Peak Southwest'),
        '56930': ('Twin Peaks, East', 'Twin Peaks'),
        # Other Sierra Peaks:
        '32721': ('Peak 12076', 'Duck Lake Peak'),
        '56086': ('Peak 11446', 'Lost World Peak'),
        '57026': ('Maggies Peaks, South', 'Maggies Peaks South'),
        '56503': ('Peak 9970', 'Mariuolumne Dome'),
        '32307': ('Peak 13464', 'Northwest Lamarck'),
        '56427': ('Peak 10213', 'Peak 3113m'),
        '56335': ('Peak 10570', 'Peak 3222m'),
        '56330': ('Peak 10597', 'Peak 3230m'),
        '56004': ('Peak 11712', 'Peak 3560m+'),
        '32393': ('Peak 13074', 'Rosco Peak'),
        '32731': ('Shepherd Crest, East', 'Shepherd Crest East'),
        '56257': ('Silver Peak', 'Silver Peak Northeast'),
        '56489': ('Peak 9980', 'Sirretta Peak North'),
        '56141': ('Volcanic Ridge, East', 'Volcanic Ridge East'),
        '56075': ('Volcanic Ridge, West', 'Volcanic Ridge West'),
        '56098': ('White Mountain', 'White Mountain (Sonora Pass)'),
        '32725': ('White Mountain', 'White Mountain (Tioga Pass)'),
        # Other California Peaks:
        '214878': ('Maguire Peaks, East', 'Maguire Peaks East'),
        '68664': ('Maguire Peaks, West', 'Maguire Peaks West'),
        '68054': ('North Peak', 'Montara Mountain'),
        '171141': ('Peak 2620', 'Monument Peak'),
        '214916': ('South Peak', 'Mount Saint Helena South'),
        '66302': ('East Peak', 'Mount Tamalpais East Peak'),
        '66499': ('Middle Peak', 'Mount Tamalpais Middle Peak'),
        '17267': ('Mount Tamalpais', 'Mount Tamalpais West Peak'),
        '66173': ('Peak 2620', 'Peak 2600+'),
        '16722': ('South Yolla Bolly', 'South Yolla Bolly Mountain'),
        '70389': ('Twin Peaks, South', 'Twin Peaks'),
        # Other Western Peaks:
        '16737': ('Mount Saint Helens', 'Mount St. Helens'),
        '140352': ('South Peak', 'Mount Nebo South'),
    }
    @classmethod
    def normalizeName(self, name, peak_id=None):
        """Convert a LoJ-style name to this project's conventional form.

        Handles quoted unofficial names, bare elevations ("12305" ->
        "Peak 12305"), inverted forms ("Whitney, Mount", "Table, The"),
        and " (HP)" suffixes; finally applies NAME_MAP by peak ID.
        """
        if name[0] == '"':
            assert name[-1] == '"'
            name = name[1:-1]
        elif name[0] in '123456789':
            name = 'Peak ' + name
        i = name.find(', Mount')
        if i > 0:
            if i + 7 == len(name):
                name = 'Mount ' + name[:-7]
            elif name[i + 7] == '-':
                # "Whitney, Mount-East" -> "Mount Whitney-East"
                name = 'Mount ' + name[:i] + name[i + 7:]
        elif name.endswith(', The'):
            name = 'The ' + name[:-5]
        elif name.endswith(' (HP)'):
            # "Foo (HP)" -> "Foo HP" (the space before "(" is kept)
            name = name[:-4] + 'HP'
        if peak_id:
            old_and_new_name = self.NAME_MAP.get(peak_id)
            if old_and_new_name:
                if name != old_and_new_name[0]:
                    out('LoJ name ({}) not as expected ({})', name, old_and_new_name[0])
                return old_and_new_name[1]
        return name
    # LoJ peak ID -> (expected LoJ elevation, corrected elevation).
    # Float corrections are metric and converted with toFeetRoundDown()
    # in postProcess(); integer corrections are already in feet.
    elevationMap = {
        # LoJ SPS Elevation Adjustments:
        #
        # - Adams Peak (8,199) vs 8,197 (topo):
        #   "East Summit determined higher than west by 2 feet using photo pixel analysis."
        #   [https://listsofjohn.com/peak/17460]
        #
        # - Basin Mountain (13,190) vs 13,181 (topo):
        #   "East Summit is higher. Elevation is interpolation of spot 13181 and next highest contour at 13200."
        #   [https://listsofjohn.com/peak/32365]
        #
        # - Mount Baxter (13,140) vs 13,136 (4004m) (topo):
        #   "This location shown higher than 4004m spot elevation on historical maps and appears higher from
        #   photographs. Elevation is estimated."
        #   [https://listsofjohn.com/peak/32376]
        #
        # - Mount Mills (13,460) vs 13,451 (topo):
        #   "This summit observed higher than location to the north with spot elevation 13,451' -
        #   contour missing from map."
        #   [https://listsofjohn.com/peak/32310]
        #
        # - Mount Morrison (12,296) vs 12,277 (3742m) (topo):
        #   Perhaps the 3742m spot elevation was misread as 3748m? The 3600m contour passes through
        #   the 2 in such a way that it may look like an 8 at first glance.
        #
        #   Or the 3742m spot elevation from the 1983 1:24k topo was converted to feet, rounded down,
        #   and then 20 feet (half of a typical 40-foot contour interval) were added because the more
        #   recent 1994 topo doesn't show the spot elevation (even though the contour interval is 20
        #   meters, not 40 feet, and, of course, the highest contour is 3740m, not 3742m).
        #
        # - Seven Gables (13,074) vs 13,075 (15' topo)
        #   "13,080+40' contour on map is erroneous. NW summit is highest and shown as 3,985m on 1:100k map."
        #   [https://listsofjohn.com/peak/32383]
        #   The 1978/1988 Bishop 1:100,000 topo does indeed show a spot elevation of 3985m for Seven Gables,
        #   and 3985m = 13,074'. However, this topo says that it was "Compiled from USGS 1:62 500-scale
        #   topographic maps dated 1949-1962" and "Elevations shown to the nearest meter". So it seems likely
        #   that the elevation for Seven Gables was taken from the 15' topo which shows a spot elevation of
        #   13,075', converted to meters (3985.26m), and rounded to the nearest meter. Converting this
        #   rounded value back to feet and rounding down to the nearest foot gives the elevation used by LoJ
        #   which is one foot less than that on the 15' topo. Thus, the elevation shown on the 15' topo
        #   seems a sliver more accurate to me.
        #
        # - South Guard
        #   The Mt. Brewer 7.5' topos all show a spot elevation of 4033m = 13231.6' which, by LoJ's standard
        #   of rounding down, should be listed as 13,231', but LoJ uses 13,232'.
        #   The Mount Whitney 15' topos all show a spot elevation of 13,224'.
        #   The 1978/1990 Mount Whitney 1:100,000 topo doesn't show a spot elevation.
        #   The Fresno 1:250,000 maps also don't show a spot elevation.
        #   However, the 1907-1937 Mount Whitney 30' (1:125,000) quads show a spot elevation of 13,232' for
        #   the peak directly east of South Guard Lake! Did LoJ get the elevation from one of these maps?
        #   I added support for 30' quads so I don't have to make an adjustment for this peak.
        '17460': ( 8199, 8197 ), # Adams Peak
        '32365': (13190, 13181 ), # Basin Mountain
        '32339': (13281, 4048.0), # Deerhorn Mountain: didn't round down, 4048m = 13280.8' [1]
        '32333': (13327, 4062.0), # Joe Devel Peak: didn't round down, 4062m = 13326.8' [2]
        '32376': (13140, 4004.0), # Mount Baxter
        '32361': (13186, 4019.0), # Mount Hitchcock: didn't round down, 4019m = 13185.7' [2]
        '32310': (13460, 13451 ), # Mount Mills
        '32646': (12296, 3742.0), # Mount Morrison
        '32311': (13422, 4091.0), # Mount Newcomb: didn't round down, 4091m = 13421.9' [2]
        '17720': (14088, 4294.0), # Mount Russell: didn't round down, 4294m = 14087.9' [2]
        '32383': (13074, 13075 ), # Seven Gables
        '32414': (12976, 3955.0), # Temple Crag: didn't round down, 3955m = 12975.7' [3]
        '32250': (13947, 4251.0), # Trojan Peak: didn't round down, 4251m = 13946.85' [4]
        '32292': (13563, 4134.0), # Tunnabora Peak: didn't round down, 4134m = 13562.99' [2]
        # [1] 1993 Mt. Brewer 7.5'
        # [2] 1993 Mount Whitney 7.5'
        # [3] 1990 Split Mtn. 7.5'
        # [4] 1993 Mt. Williamson 7.5'
        # LoJ DPS Elevation Adjustments:
        #
        # - Boundary Peak
        #   The 7.5' and 15' topos show a spot elevation of 13,140'.
        #   The 1:250,000 topos show either a spot elevation of 13,145' or no spot elevation.
        #   The 1:100,000 topos show a spot elevation of 4005m (13,140').
        #   So how does LoJ get 13,143'?
        #
        # - Needle Peak
        #   The 7.5' topo shows a spot elevation of 1768.8m = 5803.15'
        #   The 15' topos show a spot elevation of 5,805'
        #   The 1:100,000 topos show a spot elevation of 1769m = 5803.8'
        #   The 1:250,000 topos show spot elevations of either 5,782' or 5,805'
        #   How does LoJ get 5,802'?
        #   On the 7.5' topo, the top of the second 8 of the spot elevation is partially cut off by the
        #   old Death Valley National Monument boundary line. Perhaps it was misread as a 6?
        #   1768.6m does round down to 5,802'
        #
        # - Stepladder Mountains HP
        #   "This location is higher than contour with spot elevation 892m. Elevation is interpolation of
        #   spot elevation and next higher contour." (892m + 900m) / 2 / 0.3048 m/ft = 2939.6 ft
        #   [https://listsofjohn.com/peak/65349]
        '17755': (13143, 13140 ), # Boundary Peak
        '59286': ( 6169, 6168 ), # East Ord Mountain
        '59803': ( 5802, 1768.8), # Needle Peak
        '17121': ( 5325, 1623.0), # Old Woman Mountains HP
        '17198': ( 4483, 4482 ), # Spectre Point
        '65349': ( 2939, 895.0), # Stepladder Mountains HP
        # Other LoJ Elevation Adjustments:
        #
        # - Eagle Peak (GBP)
        #   All the 7.5' topos on topoView show a spot elevation of 9,892'.
        #   There aren't any 15' topos available on topoView for that area.
        #   The 1:250,000 topos show a spot elevation of either 9,892' or 9,883'.
        #   The 1:100,000 topo doesn't show a spot elevation. It doesn't even name the peak.
        #   The highest contour is at 3000m, and the interval is 50m.
        #   So the average is 3025m which is 9,924' rounded down.
        #   I'm guessing there's a 7.5' topo that doesn't show a spot elevation?
        #   In that case the highest contour is at 9880', and the interval is 40'.
        #   So the average would be 9900'.
        #
        # - Kumiva Peak (GBP / NPC)
        #   The LoJ page for Kumiva Peak has the following note: "1:250k map has spot elevation 8237,
        #   consistent with 2511 meters on 1:100k map." [https://listsofjohn.com/peak/16838]
        #   8237' is approximately 2510.6m and thus rounds to 2511m, and 2511m is approximately 8238.2'
        #   and thus rounds to 8238'. However, the 1:100k map was "Compiled from USGS 1:24 000 and
        #   1:62 500-scale topographic maps dated 1964-1981", and the 1:62,500-scale topo shows a spot
        #   elevation of 8237'. So 8237' seems more correct than 8238'.
        #
        # - Verdi Peak (NPC)
        #   "This contour [the north summit] determined higher than contour with spot elevation 11,074'.
        #   Elevation [11,077'] is interpolation of spot elevation and next higher contour [11,080']."
        #   [https://listsofjohn.com/peak/40725]
        '16762': ( 9900, 9892 ), # GBP Eagle Peak
        '16838': ( 8238, 8237 ), # GBP Kumiva Peak
        '40725': (11077, 11074 ), # NPC Verdi Peak
        '56325': (10615, 10616 ), # OSP Mount Ian Campbell
        '46804': ( 5735, 1748.0), # OWP Billy Goat Peak: didn't round down
    }
    # LoJ peak ID -> (expected LoJ saddle elevation, corrected saddle elevation).
    # Same convention as elevationMap; the correction also adjusts prominence.
    saddleElevationMap = {
        '16689': ( 6580, 6590 ), # DPS Humphreys Peak: maybe used 6600'-40'/2, but the interval is only 20'
        '16831': ( 3582, 1092.1), # DPS Kingston Peak: maybe used 1092m
        '32333': (12894, 3930.0), # SPS Joe Devel Peak: didn't round down
        '32361': (12697, 3870.0), # SPS Mount Hitchcock: didn't round down
        '56335': (10138, 3090.0), # OSP Peak 3222m: didn't round down
        '70792': ( 189, 140 ), # OCAP Ring Mountain: saddle is between 120' and 160'
        '70366': ( 663, 662 ), # OCAP Slacker Hill: saddle is between 650' and 675'
    }
    def postProcess(self, peakListId=None):
        """Convert scraped strings to numbers and apply per-peak corrections.

        Elevation/saddle corrections shift the prominence by the same
        delta so prominence == elevation - saddleElev stays true.
        """
        self.elevation = str2IntLoJ(self.elevation, 'Elevation', self.name)
        self.saddleElev = str2IntLoJ(self.saddleElev, 'Saddle elevation', self.name)
        self.prominence = str2IntLoJ(self.prominence, 'Prominence', self.name)
        # NOTE(review): assert is stripped under -O; consider raising instead.
        assert self.prominence == self.elevation - self.saddleElev
        if isinstance(self.state, str):
            self.state = self.state.split(', ')
        if isinstance(self.counties, str):
            self.counties = self.counties.split(' & ')
        self.isolation = float(self.isolation)
        self.saddleLat = float(self.saddleLat)
        self.saddleLng = float(self.saddleLng)
        self.name = self.normalizeName(self.name, self.id)
        self.lineParent = self.normalizeName(self.lineParent)
        self.proximateParent = self.normalizeName(self.proximateParent)
        adjElev = self.elevationMap.get(self.id)
        if adjElev is not None:
            oldElev, adjElev = adjElev
            if self.elevation != oldElev:
                out('LoJ elevation ({}) not as expected ({}) for {}',
                    self.elevation, oldElev, self.name)
            # Float adjustments are metric spot elevations.
            if isinstance(adjElev, float):
                adjElev = toFeetRoundDown(adjElev)
            elevDiff = adjElev - self.elevation
            assert elevDiff != 0
            self.elevation = adjElev
            self.prominence += elevDiff
        self.elevation = ElevationLoJ(self.elevation)
        adjElev = self.saddleElevationMap.get(self.id)
        if adjElev is not None:
            oldElev, adjElev = adjElev
            if self.saddleElev != oldElev:
                out('LoJ saddle elevation ({}) not as expected ({}) for {}',
                    self.saddleElev, oldElev, self.name)
            if isinstance(adjElev, float):
                adjElev = toFeetRoundDown(adjElev)
            elevDiff = adjElev - self.saddleElev
            assert elevDiff != 0
            self.saddleElev = adjElev
            # A higher saddle means less prominence.
            self.prominence -= elevDiff
        if peakListId == 'NPC':
            assert 'NV' in self.state
    # Parent link on a peak page: either a /peak/<id> link or a /mapf coordinate link.
    parentPeakPattern = re.compile(
        '^<a href="/(?:(?:peak/([1-9][0-9]*))|'
        '(?:mapf\\?lat=-?[0-9]{1,2}\\.[0-9]{1,4}&lon=-?[0-9]{1,3}\\.[0-9]{1,4}))">' +
        peakNamePattern + '</a>$'
    )
    # Label -> (pattern, attribute names) for "Label: value" lines on a saved
    # LoJ peak page (see readPeakFile).
    peakFilePatterns = {
        "Coords": (
            re.compile("^([0-9]{1,2}\\.[0-9]{1,4})N, ([0-9]{1,3}\\.[0-9]{1,4})W"),
            ("latitude", "longitude")
        ),
        "County": (
            re.compile('^<a href="/county/[1-9][0-9]*">([A-Z][a-z]+(?: [A-Z][a-z]+)*)</a>$'),
            ("counties",)
        ),
        "Elevation": (
            re.compile("^ ([1-9][0-9]?(?:,[0-9]{3}|[0-9]?))'$"),
            ("elevation",)
        ),
        "Isolation": (
            re.compile("^([0-9]+\\.[0-9]{2}) miles$"),
            ("isolation",)
        ),
        "LineParent": (
            parentPeakPattern,
            ("lineParentId", "lineParent")
        ),
        "ProximateParent": (
            parentPeakPattern,
            ("proximateParentId", "proximateParent")
        ),
        "Quad": (
            re.compile(
                '^<a href="/quad\\?q=([1-9][0-9]*)">((?:Mc)?[A-Z][a-z]+(?: [A-Z]?[a-z]+)*'
                '(?: (?:OE)?[SN][WE]| [A-D])?)</a>$'
            ),
            ("quadId", "quadName")
        ),
        "Rise": (
            re.compile("^([1-9][0-9]?(?:,[0-9]{3}|[0-9]?))'$"),
            ("prominence",)
        ),
        "Saddle": (
            re.compile(
                "^<a href=\"/(?:qmap|mapf)\\?"
                "lat=(-?[0-9]{1,2}\\.[0-9]{1,4})&"
                "lon=(-?[0-9]{1,3}\\.[0-9]{1,4})&z=15\">([,0-9]+)'</a>$"
            ),
            ("saddleLat", "saddleLng", "saddleElev")
        ),
        "YDSClass": (
            re.compile(
                '^ ((?:[1-4]\\+?)|(?:5\\.[0-9](?: A1)?))'
                ' <a href="/class\\?Id=([1-9][0-9]*)">Discussion</a>'
            ),
            ("ydsClass", "id")
        ),
    }
    # "City" lines use the same pattern as "County" lines.
    peakFilePatterns["City"] = peakFilePatterns["County"]
    peakFileLine1Pattern = re.compile("^<b>" + peakNamePattern + "</b> <b>([A-Z]{2}(?:, [A-Z]{2})*)</b>")
    peakFileLabelPattern = re.compile("^[A-Z][A-Za-z]+$")
    dataSheetPattern = re.compile("<a href=\"http://www\\.ngs\\.noaa\\.gov/cgi-bin/ds_mark\\.prl\\?"
        "PidBox=([A-Z]{2}[0-9]{4})\">Datasheet</a>")
    def readPeakFile_DataSheet(self, line):
        """Extract the NGS datasheet ID from *line* into self.dataSheet, if present."""
        if "Datasheet" not in line:
            return
        if "(No Datasheet)" in line:
            return
        if self.dataSheet is not None:
            err("{} More than one datasheet!", self.fmtIdName)
        m = self.dataSheetPattern.search(line)
        if m is None:
            err("{} Datasheet pattern doesn't match: {}", self.fmtIdName, line)
        self.dataSheet = m.group(1)
    def readPeakFile_Counties(self, line):
        """Parse a multi-county line into self.counties."""
        self.counties = []
        regExp = self.peakFilePatterns["County"][0]
        for html in line.split(" "):
            m = regExp.match(html)
            if m is None:
                err("{} County doesn't match pattern: {}", self.fmtIdName, html)
            self.counties.append(m.group(1))
    def readPeakFile(self, fileName):
        """Parse a saved LoJ peak page and populate this peak.

        The page's first table cell is split on <br> into "Label: value"
        lines which are matched against peakFilePatterns. Between the
        "Isolation" line and the "YDS Class"/"US Steepness Rank" lines,
        non-label lines are collected as land-management names (getLand).
        """
        lines = (TableParser(fileName).tables[0][0][0] # First table, first row, first column
            .replace("\r", "")
            .replace("\n", "")
            .replace(" <a><img></a>", "")
            .replace("<small>", "").replace("</small>", "")
            .replace("<font>", "").replace("</font>", "")
            .replace("<div>", "").replace("</div>", "")
            .split("<br>"))
        line = lines.pop(0)
        m = self.peakFileLine1Pattern.match(line)
        if m is None:
            err("{} Line 1 doesn't match pattern: {}", self.fmtIdName, line)
        self.name, self.state = m.groups()
        self.state = self.state.split(", ")
        self.country = ["US"]
        self.ydsClass = None
        self.dataSheet = None
        self.landManagement = []
        getLand = False
        # Line 1 may also carry the datasheet link.
        self.readPeakFile_DataSheet(line)
        for line in lines:
            if getLand:
                if line.startswith("YDS Class:"):
                    # Fall through to the label parsing below.
                    getLand = False
                else:
                    line = RE.htmlTag.sub("", line)
                    if line == "Submit YDS Class Rating":
                        getLand = False
                        break
                    if line.startswith("US Steepness Rank:"):
                        getLand = False
                    else:
                        if line.endswith((" County Highpoint", " City Highpoint")):
                            continue
                        self.landManagement.append(line)
                    continue
            if ":" not in line:
                line = RE.htmlTag.sub("", line)
                if line == "Submit YDS Class Rating":
                    break
                continue
            label, value = line.split(":", 1)
            label = label.replace(" ", "")
            m = self.peakFileLabelPattern.match(label)
            if m is None:
                log("{} Skipping label \"{}\"", self.fmtIdName, label)
                continue
            if label == "Isolation":
                # Land-management lines follow the Isolation line.
                getLand = True
            pattern = self.peakFilePatterns.get(label)
            if pattern is None:
                if label == "Counties":
                    self.readPeakFile_Counties(value)
                elif label == "AlternateNames":
                    self.readPeakFile_DataSheet(value)
                continue
            pattern, attributes = pattern
            m = pattern.match(value)
            if m is None:
                # Parent values sometimes use the list-table format instead.
                if label in ("LineParent", "ProximateParent"):
                    pattern, attributes = self.columnMap[label[:-6] + " Parent"]
                    m = pattern.match(value)
                if m is None:
                    log("{} {} doesn't match pattern: {}", self.fmtIdName, label, value)
                    continue
            values = m.groups()
            assert len(attributes) == len(values)
            for attr, value in zip(attributes, values):
                setattr(self, attr, value)
            if label == "YDSClass":
                # YDS Class is the last field of interest on the page.
                break
        self.postProcess()
        LandMgmtAreaLoJ.addAll(self)
    def compare(self, other):
        """Report (via out) fields that differ between two PeakLoJ instances."""
        for attr in ("id", "name", "elevation", "prominence", "saddleLat", "saddleLng", "saddleElev",
            "lineParent", "proximateParent", "isolation", "state", "counties", "quadId", "quadName"):
            v1 = getattr(self, attr)
            v2 = getattr(other, attr)
            if v1 != v2:
                out("{} {} doesn't match: {} != {}", self.fmtIdName, attr, v1, v2)
    def copyListFields(self, other):
        """Copy list-specific fields from *other*, defaulting missing ones to None."""
        for attr in ("sectionNumber", "sectionName"):
            setattr(self, attr, getattr(other, attr, None))
class PeakBB(object):
    # Peak data scraped from Bob Burd's (snwburd.com) peak-list map pages.
    classId = "BB"
    classTitle = "Bob Burd"
    classAttrId = "bobBurdId"
    classAttrPeak = "bobBurdPeak"
    # Regexes for the first four "<br>"-separated fields of a map-marker
    # line: peak link/name, elevation, latitude/longitude, prominence.
    patterns = (
        (
            re.compile("^<a href=/dayhikes/peak/([1-9][0-9]*)>([ #\\(\\),0-9A-Za-z]+)</a>$"),
            ("id", "name")
        ),
        (
            re.compile("^([1-9][0-9]?(?:,[0-9]{3}|[0-9]?))ft$"),
            ("elevation",)
        ),
        (
            re.compile("^([0-9]{1,2}\\.[0-9]{1,4})N (-[0-9]{1,3}\\.[0-9]{1,4})W$"),
            ("latitude", "longitude"),
        ),
        (
            re.compile("^prom\\. - ([1-9][0-9]?(?:,[0-9]{3}|[0-9]?))ft$"),
            ("prominence",)
        ),
    )
    # The trailing field holds " - "-separated links to other sites;
    # linkMap gives (our attribute, expected URL prefix) per site code.
    linkPattern = re.compile("^<a href=//([^0-9]+)([1-9][0-9]*)>([A-Z][A-Za-z]*)</a>$")
    linkMap = {
        "LoJ": ("listsOfJohnId", "www.listsofjohn.com/peak/"),
        "PB": ("peakbaggerId", "peakbagger.com/peak.aspx?pid="),
        "SP": ("summitpostId", "www.summitpost.org/mountain/"),
    }
    # Expected total peak count per supported list (asserted in getPeaks).
    numPeaks = {
        "GBP": 120,
    }
    # Peaks missing from the scraped page, added by hand per list ID.
    ListAdditions = {
        "GBP": ((
            ("id", "24904"),
            ("name", "Duffer Peak South"),
            ("latitude", "41.6574"),
            ("longitude", "-118.7323"),
            ("elevation", 9428),
            ("prominence", 4139),
            ("listsOfJohnId", "16781"), # Pine Forest Range HP
            ("peakbaggerId", "3322"),
            ("summitpostId", "518638"),
        ),(
            ("id", "33951"),
            ("name", "Baker Peak East"),
            ("latitude", "38.9687"),
            ("longitude", "-114.3092"),
            ("elevation", 12305),
            ("prominence", 496),
            ("listsOfJohnId", "40688"), # Peak 12305
            ("peakbaggerId", "3573"),
            ("summitpostId", "153395"),
        ),(
            ("id", "34060"),
            ("name", "Morey Peak North"),
            ("latitude", "38.6305"),
            ("longitude", "-116.2859"),
            ("elevation", 10260),
            ("prominence", 2600),
            ("listsOfJohnId", "17187"), # Hot Creek Range HP
            ("peakbaggerId", "34519"),
            ("summitpostId", "577683"),
        ),(
            ("id", "34255"),
            ("name", "Chocolate Peak"),
            ("latitude", "39.3532"),
            ("longitude", "-119.8970"),
            ("elevation", 9402),
            ("prominence", 262),
            ("listsOfJohnId", "41109"),
            ("peakbaggerId", "30104"),
            ("summitpostId", "351277"),
        ),(
            ("id", "59012"),
            ("name", "Bull Mountain"),
            ("latitude", "41.9105"),
            ("longitude", "-113.3658"),
            ("elevation", 9940),
            ("prominence", 3744),
            ("listsOfJohnId", "16828"),
            ("peakbaggerId", "3440"),
            ("summitpostId", "183282"),
        ),
        ),
    }
    # BB peak IDs to drop from the scraped page, per list ID.
    ListDeletions = {
    }
    # Per-peak attribute overrides applied after scraping, keyed by BB ID.
    PeakMods = {
        "6259": (("name", "Mount Jefferson"),), # Mount Jefferson-South (11,941')
        "9008": (("name", "Mount Grant (West)"),), # Mount Grant (11,300')
        "10043": (("name", "Granite Peak (Washoe)"),), # Granite Peak (8,980')
        "10219": (("name", "Granite Peak (Humboldt)"),), # Granite Peak (9,732')
        "10287": (("name", "Mount Jefferson North"),), # Mount Jefferson-North (11,814')
    }
    @classmethod
    def normalizeName(self, name):
        # Rewrite BB naming conventions to ours:
        # "X, Mount" -> "Mount X"; "X BM" -> "X Benchmark"; drop " Ridge".
        if name.endswith(", Mount"):
            name = "Mount " + name[:-7]
        elif name.endswith(" BM"):
            name = name[:-2] + "Benchmark"
        elif name.endswith(" Ridge"):
            name = name[:-6]
        return name
    def __init__(self):
        # External-site IDs default to None; filled in from the links field.
        self.listsOfJohnId = None
        self.peakbaggerId = None
        self.summitpostId = None
    @classmethod
    def getPeaks(self, peakListId):
        # Parse the saved BB list page and return {name: PeakBB}.
        def str2int(s):
            # "1,234" -> 1234; plain short numbers pass through.
            return int(s) if len(s) < 4 else int(s[:-4]) * 1000 + int(s[-3:])
        fileName = "data/peaklists/{}/bb.html".format(peakListId.lower())
        if not os.path.exists(fileName):
            return []
        additions = self.ListAdditions.get(peakListId, ())
        deletions = self.ListDeletions.get(peakListId, ())
        peaks = []
        htmlFile = open(fileName)
        for line in htmlFile:
            # NOTE(review): slicing 11 chars against an 8-char literal
            # suggests leading whitespace was lost from this string in
            # the copy — confirm against the unmangled source.
            if line[:11] != " PMarker":
                continue
            # The marker's quoted payload: strip styling tags, then take
            # the first five <br>-separated fields.
            lines = (line.split("\"")[1]
                .replace(" style='font-size:9px;color:gray'", "")
                .replace("<span>", "").replace("</span>", "")
                .split("<br>")[:5])
            links = lines.pop().split(" - ")[1:]
            peak = PeakBB()
            peaks.append(peak)
            for line, (regExp, attributes) in zip(lines, self.patterns):
                m = regExp.match(line)
                if m is None:
                    err("Line doesn't match pattern: {}", line)
                values = m.groups()
                assert len(attributes) == len(values)
                for attr, value in zip(attributes, values):
                    setattr(peak, attr, value)
            peak.elevation = str2int(peak.elevation)
            peak.prominence = str2int(peak.prominence)
            peak.name = self.normalizeName(peak.name)
            # Cross-site links (LoJ/Pb/SP) carry the corresponding IDs.
            for line in links:
                m = self.linkPattern.match(line)
                if m is None:
                    err("Link doesn't match pattern: {}", line)
                linkPrefix, peakId, siteAbbr = m.groups()
                attr, expectedPrefix = self.linkMap[siteAbbr]
                if linkPrefix != expectedPrefix:
                    err("Unexpected link prefix: {}", linkPrefix)
                setattr(peak, attr, peakId)
            if peak.id in deletions:
                del peaks[-1]
                continue
            mods = self.PeakMods.get(peak.id)
            if mods is not None:
                for attr, value in mods:
                    setattr(peak, attr, value)
        htmlFile.close()
        # Hand-maintained additions for peaks the page omits.
        for addition in additions:
            peak = PeakBB()
            peaks.append(peak)
            for attr, value in addition:
                setattr(peak, attr, value)
        name2peak = {}
        for peak in peaks:
            peak.elevation = ElevationLoJ(peak.elevation)
            peakInMap = name2peak.setdefault(peak.name, peak)
            if peakInMap is not peak:
                err("BB name {} used for both ID {} ({}') and ID {} ({}')!",
                    peak.name,
                    peakInMap.id,
                    peakInMap.elevation,
                    peak.id,
                    peak.elevation)
        assert len(peaks) == self.numPeaks[peakListId]
        return name2peak
class PeakVR(object):
    # Peak data scraped from vulgarianramblers.org (California 13er tables).
    classId = "VR"
    classTitle = "Vulgarian Ramblers"
    classAttrId = "vulgarianRamblersId"
    classAttrPeak = "vulgarianRamblersPeak"
    # HTML-table column header -> (cell regex, attributes set from groups).
    columnMap = {
        "Rank": (
            re.compile("^([1-9][0-9]*)?$"),
            ("rank",)
        ),
        "Peak Name": (
            re.compile(
                "<a id='peak_UID_[1-9][0-9]*' href='\\./peak_detail\\.php\\?peak_name="
                "([A-Z][-%0-9A-Za-z]+)'>([- &\\(\\)\\.0-9;A-Za-z]+)</a> "
            ),
            ("linkName", "name")
        ),
        "Elevation": (
            re.compile(
                "^<a +href='[^']+'>(1[234],[0-9]{3})' or ([34][0-9]{3})m"
                "(?:<span [^>]+>([ ',0-9a-z]+)</span>)?</a>$"
            ),
            ("elevationFeet", "elevationMeters", "elevationTooltip")
        ),
        "Prominence": (
            re.compile(
                "^([1-9][0-9]{1,2}')|(?:<a [^>]+>((?:300m\\+)|(?:[1-9][0-9]{1,2}'))"
                "<span [^>]+>([ '0-9A-Za-z]+)</span></a>)$"
            ),
            ("prominence", "promWithTooltip", "promTooltip")
        ),
    }
    # VR names that differ from the names we use.
    nameMap = {
        "Black Mountain (South)": "Black Mountain",
        "CalTech Peak": "Caltech Peak",
        "Twin Peaks": "Peak 3981m",
        "UTM888455": "Rosco Peak",
    }
    def postProcess(self):
        # Normalize a freshly-scraped row: clean the name, verify the
        # link name and the feet/meters pair, convert string fields.
        self.id = None
        name = self.name
        if name.startswith("“"):
            assert name.endswith("”")
            # NOTE(review): slicing 7 chars suggests the original source
            # matched 7-char HTML entities (&ldquo;/&rdquo;) rather than
            # the curly-quote characters shown in this copy — confirm.
            name = name[7:-7]
        self.name = name = name.replace("’", "'")
        if name.startswith("Mt. "):
            name = name[4:]
            self.name = "Mount " + name
        # Percent-encode non-alphanumerics to reproduce VR's link-name
        # scheme, then check it against the scraped link name.
        name = RE.nonAlphaNum.sub(lambda m: "%{:02x}".format(ord(m.group())), name)
        if name != self.linkName:
            err("Unexpected link name '{}' for '{}'", self.linkName, self.name)
        feet = self.elevationFeet
        feet = int(feet[:-4]) * 1000 + int(feet[-3:])  # e.g. "13,120" -> 13120
        if toMeters(feet) != int(self.elevationMeters):
            err("Elevation in feet ({}) != elevation in meters ({}) for '{}'",
                self.elevationFeet, self.elevationMeters, self.name)
        self.elevation = ElevationVR(feet)
        if self.prominence is None:
            # First regex alternative didn't match; use the tooltip form.
            self.prominence = self.promWithTooltip
        self.name = self.nameMap.get(self.name, self.name)
        if self.rank is not None:
            self.rank = int(self.rank)
    def __str__(self):
        # Render as the HTML table cell linking back to the VR peak page.
        return '<td><a href="http://vulgarianramblers.org/peak_detail.php?peak_name={}">{}</a></td>\n'.format(
            self.linkName, "VR" if self.rank is None else "#" + str(self.rank))
    @classmethod
    def readTable(self, fileName, numPeaks, searchStr=None):
        # Parse one VR HTML table into a list of `numPeaks` instances.
        # `searchStr`, if given, positions the reader at a sub-table.
        table = TableReader(fileName,
            tableAttributes="id='peak_list_ID' class=\"peak_list\" align=\"center\"")
        if searchStr is not None:
            table.readUntil(searchStr, discard=True)
        table.readUntil("</tr>", discard=True)
        row = table.__next__()
        # Map each header cell to its (regex, attributes) column spec.
        columns = []
        for colNum, colStr in enumerate(row):
            colStr = RE.htmlTag.sub("", colStr)
            colStr = RE.whitespace.sub("\n", colStr)
            colStr = colStr[:colStr.find("\n")]
            col = self.columnMap.get(colStr, None)
            if col is None:
                table.colNum = colNum + 1
                table.err("Unrecognized column name:\n{}", colStr)
            columns.append(col)
        peaks = []
        for row in table:
            if len(row) != len(columns):
                table.err("Unexpected number of columns")
            peak = self()
            for colNum, (colStr, (regexp, attributes)) in enumerate(zip(row, columns)):
                m = regexp.match(colStr)
                if m is None:
                    table.colNum = colNum + 1
                    table.err("Doesn't match expected pattern:\n{}", colStr)
                if attributes is None:
                    assert regexp.groups == 0
                else:
                    values = m.groups()
                    assert len(attributes) == len(values)
                    for attr, value in zip(attributes, values):
                        setattr(peak, attr, value)
            peak.postProcess()
            peaks.append(peak)
            if len(peaks) == numPeaks:
                break
        return peaks
    @classmethod
    def getPeaks(self, peakListId=None):
        # Ranked CA 13ers plus the two "failing" (sub-criteria) tables.
        peaks1 = self.readTable("data/peaklists/vr/ca_13ers.html", 147)
        peaks2 = self.readTable("data/peaklists/vr/non_13ers.html", 19, "Marginal Failing Peaks")
        peaks3 = self.readTable("data/peaklists/vr/non_13ers.html", 58, "Clearly Failing Peaks")
        return peaks1 + peaks2 + peaks3
    @classmethod
    def getAttr(self, attr, peak):
        # Return `attr` from the VR peak linked to `peak`, or None.
        peak2 = getattr(peak, self.classAttrPeak, None)
        if peak2 is None:
            return None
        return getattr(peak2, attr, None)
def matchElevation(peak, elevation):
    # Print one line per candidate elevation when `elevation` has no
    # exact match among the peak's elevations; silent on an exact match.
    prefix = "{} {:7} {{:7}}".format(peak.fmtIdName, str(elevation))
    exact, close = peak.matchElevation(elevation)
    if exact:
        return
    if close:
        for e, result in close:
            print(prefix.format(e.getElevation()), result)
        return
    for e in peak.elevations:
        print(prefix.format(e.getElevation()), "No match", elevation.diff(e))
class MatchByName(object):
    """Index a peak list's peaks by name (and otherName) so peaks scraped
    from other websites can be matched back to ours.

    Names used by more than one peak map to a list instead of a single
    peak; get() refuses to match such ambiguous names.
    """
    def __init__(self, pl):
        name2peak = {}
        def put(name, peak):
            # First peak claims the name; later peaks demote it to a list.
            item = name2peak.get(name)
            if not item:
                name2peak[name] = peak
            elif isinstance(item, list):
                item.append(peak)
            else:
                name2peak[name] = [item, peak]
        for section in pl.sections:
            for peak in section.peaks:
                name = peak.name
                # Quoted (unofficial) names are wrapped in "&quot;" HTML
                # entities; strip them (len("&quot;") == 6, hence [6:-6]).
                # The literal here was garbled in transit and is restored
                # from that slice length.
                if name.startswith("&quot;"):
                    assert name.endswith("&quot;")
                    name = name[6:-6]
                peak.matchName = name
                peak.fmtIdName = "{:5} {:24}".format(peak.id, name)
                put(name, peak)
                if peak.otherName:
                    put(peak.otherName, peak)
        self.name2peak = name2peak
        self.id = pl.id
    def get(self, peak2):
        """Return the list peak matching peak2 by name (linking peak2 to
        it via peak2's class attribute), or None if absent/ambiguous."""
        peak = self.name2peak.get(peak2.name)
        if peak:
            if not isinstance(peak, list):
                # Cross-check the stored site ID against peak2's ID.
                peakId = getattr(peak, peak2.classAttrId, None)
                if peakId != peak2.id:
                    err("ID ({}) doesn't match {} ({}) for {}",
                        peak2.id, peak2.classAttrId, peakId, peak2.name)
                setattr(peak, peak2.classAttrPeak, peak2)
                return peak
            log("Peak name '{}' is not unique!", peak2.name)
        return None
def printTitle(title):
    # Print `title` inside a fixed 60-column ASCII box:
    # +-----…-----+ / | title … | / +-----…-----+
    box_width = 60
    horizontal = "+" + "-" * (box_width - 2) + "+"
    padding = " " * (box_width - 4 - len(title))
    print(horizontal)
    print("| " + title + padding + " |")
    print(horizontal)
def checkElevation(pl, peakClass):
    # Report elevation mismatches between our list and `peakClass` data.
    printTitle("Elevations - " + peakClass.classTitle)
    all_peaks = (pk for sec in pl.sections for pk in sec.peaks)
    for pk in all_peaks:
        elev = peakClass.getAttr("elevation", pk)
        if elev is not None:
            matchElevation(pk, elev)
def checkProminence(pl, setProm=False):
    # Compare each peak's prominence value(s) against LoJ and Pb data,
    # printing mismatches and a summary; with setProm=True, adopt the
    # Pb average when it equals the LoJ value.
    printTitle("Prominences")
    numMatchPb = 0
    numMatchLoJ = 0
    numMatchBoth = 0
    numMatchNone = 0
    def getMatchLoJ(prom, promLoJ):
        # True on match, else a human-readable reason string.
        if promLoJ is None:
            return "not listed"
        if not isinstance(prom, int):
            prom = prom.avgFeet(toFeet=toFeetRoundDown)
        if prom == promLoJ:
            return True
        return "{} != {}".format(prom, promLoJ)
    def getMatchPb(prom, promPb):
        # Pb stores a (min, max) pair; an int prom matches their average,
        # a prominence object matches the pair directly.
        if promPb is None:
            return "not listed"
        if isinstance(prom, int):
            minPb, maxPb = promPb
            if prom == (minPb + maxPb) // 2:
                return True
            return "{} != ({} + {})/2".format(prom, minPb, maxPb)
        prom = prom.minMaxPb()
        if prom == promPb:
            return True
        return "{} != {}".format(prom, promPb)
    for section in pl.sections:
        for peak in section.peaks:
            newProm = None
            promLoJ = PeakLoJ.getAttr("prominence", peak)
            promPb = PeakPb.getAttr("prominence", peak)
            for prom in peak.prominences:
                matchLoJ = getMatchLoJ(prom, promLoJ)
                matchPb = getMatchPb(prom, promPb)
                source = None
                promObj = None
                if not isinstance(prom, int):
                    promObj = prom
                    prom = promObj.avgFeet()
                if matchLoJ is True:
                    if matchPb is True:
                        numMatchBoth += 1
                        source = "LoJ/Pb"
                    else:
                        numMatchLoJ += 1
                        source = "LoJ"
                elif matchPb is True:
                    numMatchPb += 1
                    source = "Pb"
                    if promObj is None:
                        print(peak.fmtIdName, "{:6} ".format(prom), end=" ")
                        print("Matches Pb but not LoJ [{}]".format(matchLoJ))
                else:
                    numMatchNone += 1
                    print(peak.fmtIdName, "{:6} ".format(prom), end=" ")
                    # A plain-int prom equal to Pb's min or max, where the
                    # Pb average equals LoJ, is a fixable near-miss.
                    if promObj is None and promLoJ is not None and promPb is not None:
                        minPb, maxPb = promPb
                        avgPb = (minPb + maxPb) // 2
                        if avgPb == promLoJ and len(peak.prominences) == 1:
                            if prom == minPb:
                                newProm = (avgPb, "min")
                                break
                            if prom == maxPb:
                                newProm = (avgPb, "max")
                                break
                    print("Matches neither LoJ [{}] nor Pb [{}]".format(matchLoJ, matchPb))
                # NOTE(review): indentation reconstructed from a
                # whitespace-stripped copy — confirm this source check
                # sits inside the per-prominence loop.
                if promObj is not None and source != promObj.source:
                    print(peak.fmtIdName, "{:6} ".format(prom), end=" ")
                    print("Source should be {} instead of {}".format(source, promObj.source))
            if newProm is not None:
                newProm, promType = newProm
                if setProm:
                    out("Setting to {} [LoJ={}, Pb={}]", newProm, promLoJ, promPb)
                    peak.prominences = [newProm]
                else:
                    out("Matches {}Pb and avgPb == LoJ", promType)
    printTitle("Prominences: LoJ/Pb={}, LoJ={}, Pb={}, None={}".format(
        numMatchBoth, numMatchLoJ, numMatchPb, numMatchNone))
def checkThirteeners(pl, setVR=False):
    # Cross-check peaks at or above 13,000' against their Vulgarian
    # Ramblers links; with setVR=True, fill in a missing column entry.
    printTitle("Thirteeners")
    for section in pl.sections:
        for peak in section.peaks:
            is13er = any(e.elevationFeet >= 13000 for e in peak.elevations)
            vr = getattr(peak, PeakVR.classAttrPeak, None)
            if vr is None:
                if is13er:
                    print(peak.fmtIdName, "Missing VR link")
                continue
            if not is13er:
                print(peak.fmtIdName, "Unexpected VR link")
            colVR = peak.column12
            if colVR is None:
                if setVR:
                    peak.column12 = vr
            elif vr.rank != colVR.rank or vr.linkName != colVR.name:
                out("{} VR rank/link {}/{} doesn't match {}/{}",
                    peak.fmtIdName, colVR.rank, colVR.name, vr.rank, vr.linkName)
def checkData(pl, setProm=False, setVR=False):
    # Cross-check peak list `pl` against LoJ, Pb, and (for Sierra lists)
    # VR: map scraped peaks to ours by name, then verify each saved peak
    # file, and finally run the elevation/prominence/13er checks.
    verbose = pl.id not in ('HPS', 'LPC')
    peakClasses = [PeakLoJ, PeakPb]
    if pl.id in ('SPS', 'OSP'):
        peakClasses.append(PeakVR)
    peakMap = MatchByName(pl)
    for peakClass in peakClasses:
        printTitle("Getting Peaks - " + peakClass.classTitle)
        verbose = verbose and peakClass is not PeakVR
        numMapped = 0
        peaks = peakClass.getPeaks(pl.id)
        for peak in peaks:
            if peakMap.get(peak):
                numMapped += 1
            elif verbose:
                out("Cannot map '{}' ({})", peak.name, peak.elevation)
        out("Mapped {}/{} peaks", numMapped, len(peaks))
    for peakClass in (PeakLoJ, PeakPb):
        printTitle("Reading Peak Files - " + peakClass.classTitle)
        haveList = pl.id in peakClass.numPeaks
        for section in pl.sections:
            for peak in section.peaks:
                # IDs starting with "-" are provisional; skip them.
                peakId = getattr(peak, peakClass.classAttrId, None)
                if peakId is None or peakId[0] == "-":
                    continue
                fileName = peakClass.getPeakFileName(peakId)
                if not os.path.exists(fileName):
                    continue
                peak2 = peakClass()
                peak2.id = peakId
                peak2.fmtIdName = peak.fmtIdName
                peak2.readPeakFile(fileName)
                peak3 = getattr(peak, peakClass.classAttrPeak, None)
                # NOTE(review): nesting reconstructed from a whitespace-
                # stripped copy; compare() runs only on already-linked peaks.
                if peak3 is None:
                    if haveList:
                        out("{} Not in {} list on {}",
                            peak.fmtIdName, pl.id, peakClass.classId)
                    peak3 = peakMap.get(peak2)
                    if peak3 is None:
                        out("{} Name doesn't match ({})",
                            peak.fmtIdName, peak2.name)
                        setattr(peak, peakClass.classAttrPeak, peak2)
                    elif peak3 is not peak:
                        out("{} Name matches a different peak ({})!",
                            peak.fmtIdName, peak2.name)
                        continue
                else:
                    peak2.compare(peak3)
                peak2.checkLand(peak)
                if peakClass is PeakPb:
                    # The CC flag should be set exactly for Sierra Nevada
                    # peaks north of latitude 35.36.
                    haveFlag = "CC" in peak.flags
                    wantFlag = (peak2.rangeList[2] == ("126", "Sierra Nevada") and
                        float(peak.latitude) > 35.36)
                    if haveFlag != wantFlag:
                        out("{} Should {}able CC flag", peak.fmtIdName,
                            "en" if wantFlag else "dis")
                else:
                    peak2.checkDataSheet(peak)
                    if peak.quad is not None:
                        if peak.quad != peak2.quadName:
                            out("{} Quad '{}' != '{}'", peak.fmtIdName,
                                peak2.quadName, peak.quad)
                if peak.country != peak2.country:
                    out('{} data-country should be "{}" instead of "{}"',
                        peak.fmtIdName, "/".join(peak2.country), "/".join(peak.country))
                if peak.state != peak2.state:
                    out('{} data-state should be "{}" instead of "{}"',
                        peak.fmtIdName, "/".join(peak2.state), "/".join(peak.state))
    for peakClass in peakClasses:
        checkElevation(pl, peakClass)
    checkProminence(pl, setProm)
    if PeakVR in peakClasses:
        checkThirteeners(pl, setVR)
def loadPeakFiles(pl):
    # Download the individual peak pages referenced by the list's HTML
    # table (the table-driven variant superseded getLoadLists below).
    loadURLs(getLoadListsFromTable(pl))
    # loadURLs(getLoadLists(pl))
def loadPeakListFiles(pl):
    # Download the LoJ and Pb list pages for peak list `pl`, skipping
    # files that are already on disk.
    loadList_LoJ = []
    loadList_Pb = []
    listURL_LoJ = 'https://listsofjohn.com/customlists?lid='
    listURL_Pb = 'https://peakbagger.com/list.aspx?lid='
    # Site-specific numeric list IDs keyed by our peak-list ID.
    listId_LoJ = {
        'DPS': 1183,
        'GBP': 715,
        'HPS': 709,
        'LPC': 712,
        'NPC': 1411,
        'OGUL': 113,
        'SPS': 60,
    }
    listId_Pb = {
        'DPS': 5053,
        'GBP': 5056,
        'HPS': 5052,
        'LPC': 5054,
        'NPC': 5006,
        'OGUL': 5055,
        'SPS': 5051,
    }
    def add(loadList, url, filename):
        # Queue (url, filename) unless it was already downloaded.
        if os.path.exists(filename):
            print(filename, 'already exists')
        else:
            loadList.append((url, filename))
    listId = listId_LoJ.get(pl.id)
    if listId:
        url = listURL_LoJ + str(listId)
        add(loadList_LoJ, url, PeakLoJ.getListFileName(pl.id))
    listId = listId_Pb.get(pl.id)
    if listId:
        url = listURL_Pb + str(listId)
        add(loadList_Pb, url, PeakPb.getListFileName(pl.id))
        # Pb also serves an "optional peaks" variant of each list page.
        add(loadList_Pb, url + '&pt=opt', PeakPb.getListFileName(pl.id, True))
    loadLists = []
    if loadList_LoJ: loadLists.append(loadList_LoJ)
    if loadList_Pb: loadLists.append(loadList_Pb)
    if loadLists:
        loadURLs(loadLists)
# Manual per-peak attribute overrides applied after list creation:
# {peak list ID: {peak name: ((attribute, value), ...)}}.
PeakAttributes = {
    "GBP": {
        "Arc Dome": (("isEmblem", True),),
        "Boundary Peak": (("isEmblem", True),),
        "Cache Peak": (("isEmblem", True),),
        "Charleston Peak": (("isEmblem", True),),
        "Duffer Peak South": (("otherName", "Pine Forest Range HP"),),
        "Hayden Peak": (("otherName", "Cinnabar Mountain"),),
        "Ibapah Peak": (("isEmblem", True),),
        "McCullough Mountain": (("isEmblem", True),),
        "Morey Peak North": (("otherName", "Hot Creek Range HP"),),
        "Mount Rose": (("isEmblem", True),),
        "Navajo Mountain": (("delisted", True),),
        "Tule Peak": (("isEmblem", True),),
        "Warner Peak": (("isEmblem", True),),
        "Wheeler Peak": (("isEmblem", True),),
        "White Mountain Peak": (("isEmblem", True),),
        "Yellow Peak": (("otherName", "Bald Mountain"),),
    },
}
def readListFile(peakListId):
    """Parse data/peaklists/<id>/<id>.txt into sections.

    The file alternates "N. Section Name" headers (each followed by a
    blank line) with blank-line-terminated runs of "N.M Peak Name" rows.
    Returns [(sectionName, [(peakId, peakName), ...]), ...].
    Malformed input is reported via err() (assumed to raise/exit).
    """
    sectionPattern = re.compile("^([1-9][0-9]?)\\. ([- &,;0-9A-Za-z]+)$")
    peakPattern = re.compile("^([1-9][0-9]?)\\.([1-9][0-9]?[ab]?) +([#&'0-9;A-Za-z]+(?: [#&'()0-9;A-Za-z]+)*)")
    fileName = "data/peaklists/{0}/{0}.txt".format(peakListId.lower())
    sections = []
    expectedSectionNumber = 0
    # `with` guarantees the file is closed even when err() raises
    # (the original leaked the handle on any parse error).
    with open(fileName) as listFile:
        for line in listFile:
            expectedSectionNumber += 1
            m = sectionPattern.match(line)
            if m is None:
                err("Section header doesn't match pattern:\n{}", line)
            sectionNumber, sectionName = m.groups()
            if int(sectionNumber) != expectedSectionNumber:
                err("Expected section number {}:\n{}", expectedSectionNumber, line)
            if listFile.__next__() != "\n":
                err("Expected empty line after section header:\n{}", line)
            peaks = []
            # Peak numbering may fork: after "1" comes "2" or "2a";
            # "2a" must be followed by "2b", then "3" or "3a", etc.
            expectedPeakNumber = ("1", "1a")
            sections.append((sectionName, peaks))
            for line in listFile:
                if line == "\n":
                    break
                m = peakPattern.match(line)
                if m is None:
                    err("Peak line doesn't match pattern:\n{}", line)
                sectionNumber, peakNumber, peakName = m.groups()
                if int(sectionNumber) != expectedSectionNumber:
                    err("Expected section number {}:\n{}", expectedSectionNumber, line)
                if peakNumber not in expectedPeakNumber:
                    err("Expected peak number {}:\n{}", " or ".join(expectedPeakNumber), line)
                peakId = "{}.{}".format(sectionNumber, peakNumber)
                peaks.append((peakId, peakName))
                if peakNumber[-1] == "a":
                    expectedPeakNumber = (peakNumber[:-1] + "b",)
                else:
                    peakNumber = int(peakNumber[:-1] if peakNumber[-1] == "b" else peakNumber)
                    peakNumber = str(peakNumber + 1)
                    expectedPeakNumber = (peakNumber, peakNumber + "a")
    return sections
def createBBMap(peakLists):
    # Build {Bob Burd ID: primary peak} across all lists, where the
    # primary peak is the one that owns its data (dataFrom is None).
    id_to_peak = {}
    all_peaks = (peak
        for pl in peakLists.values()
        for section in pl.sections
        for peak in section.peaks)
    for peak in all_peaks:
        if peak.bobBurdId is None or peak.dataFrom is not None:
            continue
        claimed = id_to_peak.setdefault(peak.bobBurdId, peak)
        if claimed is not peak:
            err("BB ID {} used for both {} {} ({}) and {} {} ({})!",
                peak.bobBurdId,
                claimed.peakList.id,
                claimed.id,
                claimed.name,
                peak.peakList.id,
                peak.id,
                peak.name)
    return id_to_peak
def setPeak(peak, peakClass, idMap):
    # Load and attach the LoJ/Pb counterpart of `peak` (downloading its
    # page on demand), cross-checking it against the scraped list entry
    # popped from idMap. Attaches None when the site ID is missing or
    # provisional (starts with "-").
    peak2Id = getattr(peak, peakClass.classAttrId, None)
    if peak2Id is None:
        log("{:5} {:24} Missing {} ID", peak.id, peak.name, peakClass.classId)
        peak2 = None
    elif peak2Id[0] == "-":
        log("{:5} {:24} Skipping provisional {} ID {}", peak.id, peak.name, peakClass.classId, peak2Id)
        peak2 = None
    else:
        fileName = peakClass.getPeakFileName(peak2Id)
        if not os.path.exists(fileName):
            loadURLs([[(peakClass.getPeakURL(peak2Id), fileName)]])
        peak2 = peakClass()
        peak2.id = peak2Id
        peak2.fmtIdName = "{:5} {:24} {}".format(peak.id, peak.name, peakClass.classId)
        peak2.readPeakFile(fileName)
        # Consume the matching list entry so leftovers can be reported.
        mapPeak = idMap.pop(peak2Id, None)
        if mapPeak is None:
            log("{} list doesn't include ID {}", peak2.fmtIdName, peak2Id)
        else:
            peak2.compare(mapPeak)
            peak2.copyListFields(mapPeak)
    setattr(peak, peakClass.classAttrPeak, peak2)
def printPeaks(pl):
    # Dump the list: a numbered section header plus one formatted
    # row per peak (state, county, and Pb mountain range).
    for num, section in enumerate(pl.sections, start=1):
        if num > 1:
            print()
        print("{}. {}".format(num, section.name))
        print()
        for peak in section.peaks:
            loj = peak.listsOfJohnPeak
            out("{:5} {:28} {} {:12} {}", peak.id, peak.name,
                loj.state[0],
                loj.counties[0],
                peak.peakbaggerPeak.rangeList[-1][1])
def createList(pl, peakLists, peakClass, sectionClass, setLandManagement, verbose=False):
    # Build pl.sections from the list's text file, cross-linking each
    # peak with its Bob Burd / Peakbagger / Lists of John counterparts
    # and reusing data from peaks already present on other lists.
    sections = readListFile(pl.id)
    peakAttributes = PeakAttributes.get(pl.id, {})
    mapPb = {p.id: p for p in PeakPb.getPeaks(pl.id)}
    mapLoJ = {p.id: p for p in PeakLoJ.getPeaks(pl.id)}
    bbIdMap = createBBMap(peakLists)
    bbNameMap = PeakBB.getPeaks(pl.id)
    pl.sections = []
    for sectionName, sectionPeaks in sections:
        section = sectionClass(pl, sectionName)
        pl.sections.append(section)
        for peakId, peakName in sectionPeaks:
            # Bob Burd's page is the authoritative name -> IDs source.
            peakBB = bbNameMap.get(peakName)
            if peakBB is None:
                err("{} {} not found in bbNameMap!", peakId, peakName)
            fmtIdName = "{:5} {:24}".format(peakId, peakName)
            existingPeak = bbIdMap.get(peakBB.id)
            if existingPeak is None:
                # Brand-new peak: seed it with the IDs scraped from BB.
                peak = peakClass(section)
                peak.name = peakName
                peak.bobBurdId = peakBB.id
                peak.listsOfJohnId = peakBB.listsOfJohnId
                peak.peakbaggerId = peakBB.peakbaggerId
                peak.summitpostId = peakBB.summitpostId
                peak.summitpostName = "mountain"
            elif existingPeak.peakList is pl:
                if verbose:
                    log("{} Using existing data", fmtIdName)
                peak = existingPeak
            else:
                # Same peak already on another list: copy its data over.
                if verbose:
                    log("{} Using existing data from {} {}", fmtIdName,
                        existingPeak.peakList.id, existingPeak.id)
                peak = peakClass(section)
                peak.dataFrom = existingPeak.fromId()
                for i, p in enumerate(existingPeak.dataAlsoPeaks):
                    if p.peakList is pl:
                        del existingPeak.dataAlsoPeaks[i]
                        break
                peak.copyFrom(existingPeak)
            peak.id = peakId
            setPeak(peak, PeakPb, mapPb)
            setPeak(peak, PeakLoJ, mapLoJ)
            section.peaks.append(peak)
            for attr, value in peakAttributes.get(peakName, ()):
                setattr(peak, attr, value)
            if existingPeak is None:
                # Populate location/elevation/prominence from Pb and LoJ,
                # cross-checking them against each other and against BB.
                peakPb = peak.peakbaggerPeak
                peakLoJ = peak.listsOfJohnPeak
                peak.latitude = peakPb.latitude
                peak.longitude = peakPb.longitude
                peak.elevations = [peakPb.elevation]
                peak.prominences = [peakLoJ.prominence]
                if peakBB.elevation != peakLoJ.elevation:
                    log("{} BB elevation ({}) != LoJ elevation ({})",
                        fmtIdName, peakBB.elevation, peakLoJ.elevation)
                if peakBB.prominence != peakLoJ.prominence:
                    log("{} BB prominence ({}) != LoJ prominence ({})",
                        fmtIdName, peakBB.prominence, peakLoJ.prominence)
                if peakPb.state != peakLoJ.state:
                    log("{} Pb state ({}) != LoJ state ({})", fmtIdName,
                        "/".join(peakPb.state), "/".join(peakLoJ.state))
                if peak.country != peakPb.country:
                    peak.country = peakPb.country
                if peak.state != peakPb.state:
                    peak.state = peakPb.state
                if peakLoJ.ydsClass is not None:
                    peak.grade = peakLoJ.ydsClass
                setLandManagement(peak)
            else:
                if peak.name != peakName:
                    log("Existing peak name ({}) doesn't match {}!", peak.name, peakName)
                fromId = peak.fromId()
                # If this list outranks (sorts before) the list the data
                # came from, this copy becomes the primary.
                if peak.dataFrom is not None and pl.sortkey < existingPeak.peakList.sortkey:
                    peak.dataFrom = None
                    # NOTE(review): removing `peak` from its own
                    # dataAlsoPeaks implies that list includes self —
                    # confirm against the Peak class (indentation was
                    # reconstructed from a whitespace-stripped copy).
                    peak.dataAlsoPeaks.remove(peak)
                    peak.dataAlsoPeaks.append(existingPeak)
                if peak.dataFrom is None:
                    for p in peak.dataAlsoPeaks:
                        if p.dataFrom != fromId:
                            log('{} Set {} {} data-from="{}"',
                                fmtIdName, p.peakList.id, p.id, fromId)
                else:
                    alsoList = [p for p in peak.dataAlsoPeaks if p is not peak]
                    alsoList.append(existingPeak)
                    for p in alsoList:
                        if fromId not in p.dataAlso:
                            log('{} Add "{}" to data-also for {} {}',
                                fmtIdName, fromId, p.peakList.id, p.id)
        section.setDataAttributes()
    # Scraped list entries never matched to one of our peaks.
    for peak in mapPb.values():
        log("Pb peak {} ({}) not used!", peak.id, peak.name)
    for peak in mapLoJ.values():
        log("LoJ peak {} ({}) not used!", peak.id, peak.name)
| nightjuggler/peaks | sps_create.py | Python | mit | 99,988 | [
"VisIt"
] | 127146907db4ede29e74ca66fd59ef530d09c5e5cdadddfdd0fcd4cd28f1497e |
#
# @file TestXMLToken.py
# @brief XMLToken unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Michael Hucka <mhucka@caltech.edu>
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/xml/test/TestXMLToken.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestXMLToken(unittest.TestCase):
def test_XMLToken_attribute_add_remove(self):
    # Generated libSBML test (see file header: DO NOT EDIT) — this copy
    # only adds comments. Exercises XMLToken attribute add / overwrite /
    # remove with plain names, namespaced triples, and un-prefixed attrs.
    triple = libsbml.XMLTriple("test","","")
    attr = libsbml.XMLAttributes()
    token = libsbml.XMLToken(triple,attr)
    xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
    xt2 = libsbml.XMLTriple("name2", "http://name2.org/", "p2")
    xt3 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
    xt1a = libsbml.XMLTriple("name1", "http://name1a.org/", "p1a")
    xt2a = libsbml.XMLTriple("name2", "http://name2a.org/", "p2a")
    # Add two namespaced attributes (by parts and by triple) and verify
    # every accessor, including lookups without a URI, which fail.
    token.addAttr( "name1", "val1", "http://name1.org/", "p1")
    token.addAttr(xt2, "val2")
    self.assert_( token.getAttributesLength() == 2 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name1" != token.getAttrName(0) ) == False )
    self.assert_( ( "val1" != token.getAttrValue(0) ) == False )
    self.assert_( ( "http://name1.org/" != token.getAttrURI(0) ) == False )
    self.assert_( ( "p1" != token.getAttrPrefix(0) ) == False )
    self.assert_( ( "name2" != token.getAttrName(1) ) == False )
    self.assert_( ( "val2" != token.getAttrValue(1) ) == False )
    self.assert_( ( "http://name2.org/" != token.getAttrURI(1) ) == False )
    self.assert_( ( "p2" != token.getAttrPrefix(1) ) == False )
    self.assert_( token.getAttrValue( "name1") == "" )
    self.assert_( token.getAttrValue( "name2") == "" )
    self.assert_( ( "val1" != token.getAttrValue( "name1", "http://name1.org/") ) == False )
    self.assert_( ( "val2" != token.getAttrValue( "name2", "http://name2.org/") ) == False )
    self.assert_( ( "val1" != token.getAttrValue(xt1) ) == False )
    self.assert_( ( "val2" != token.getAttrValue(xt2) ) == False )
    self.assert_( token.hasAttr(-1) == False )
    self.assert_( token.hasAttr(2) == False )
    self.assert_( token.hasAttr(0) == True )
    self.assert_( token.hasAttr( "name1", "http://name1.org/") == True )
    self.assert_( token.hasAttr( "name2", "http://name2.org/") == True )
    self.assert_( token.hasAttr( "name3", "http://name3.org/") == False )
    self.assert_( token.hasAttr(xt1) == True )
    self.assert_( token.hasAttr(xt2) == True )
    self.assert_( token.hasAttr(xt3) == False )
    # Add an attribute with no namespace/prefix.
    token.addAttr( "noprefix", "val3")
    self.assert_( token.getAttributesLength() == 3 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "noprefix" != token.getAttrName(2) ) == False )
    self.assert_( ( "val3" != token.getAttrValue(2) ) == False )
    self.assert_( token.getAttrURI(2) == "" )
    self.assert_( token.getAttrPrefix(2) == "" )
    self.assert_( ( "val3" != token.getAttrValue( "noprefix") ) == False )
    self.assert_( ( "val3" != token.getAttrValue( "noprefix", "") ) == False )
    self.assert_( token.hasAttr( "noprefix" ) == True )
    self.assert_( token.hasAttr( "noprefix", "") == True )
    # Re-adding with an existing triple overwrites the value in place.
    token.addAttr(xt1, "mval1")
    token.addAttr( "name2", "mval2", "http://name2.org/", "p2")
    self.assert_( token.getAttributesLength() == 3 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name1" != token.getAttrName(0) ) == False )
    self.assert_( ( "mval1" != token.getAttrValue(0) ) == False )
    self.assert_( ( "http://name1.org/" != token.getAttrURI(0) ) == False )
    self.assert_( ( "p1" != token.getAttrPrefix(0) ) == False )
    self.assert_( ( "name2" != token.getAttrName(1) ) == False )
    self.assert_( ( "mval2" != token.getAttrValue(1) ) == False )
    self.assert_( ( "http://name2.org/" != token.getAttrURI(1) ) == False )
    self.assert_( ( "p2" != token.getAttrPrefix(1) ) == False )
    self.assert_( token.hasAttr(xt1) == True )
    self.assert_( token.hasAttr( "name1", "http://name1.org/") == True )
    # Same for the un-prefixed attribute.
    token.addAttr( "noprefix", "mval3")
    self.assert_( token.getAttributesLength() == 3 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "noprefix" != token.getAttrName(2) ) == False )
    self.assert_( ( "mval3" != token.getAttrValue(2) ) == False )
    self.assert_( token.getAttrURI(2) == "" )
    self.assert_( token.getAttrPrefix(2) == "" )
    self.assert_( token.hasAttr( "noprefix") == True )
    self.assert_( token.hasAttr( "noprefix", "") == True )
    # Same name but a different namespace appends new entries.
    token.addAttr(xt1a, "val1a")
    token.addAttr(xt2a, "val2a")
    self.assert_( token.getAttributesLength() == 5 )
    self.assert_( ( "name1" != token.getAttrName(3) ) == False )
    self.assert_( ( "val1a" != token.getAttrValue(3) ) == False )
    self.assert_( ( "http://name1a.org/" != token.getAttrURI(3) ) == False )
    self.assert_( ( "p1a" != token.getAttrPrefix(3) ) == False )
    self.assert_( ( "name2" != token.getAttrName(4) ) == False )
    self.assert_( ( "val2a" != token.getAttrValue(4) ) == False )
    self.assert_( ( "http://name2a.org/" != token.getAttrURI(4) ) == False )
    self.assert_( ( "p2a" != token.getAttrPrefix(4) ) == False )
    self.assert_( ( "val1a" != token.getAttrValue( "name1", "http://name1a.org/") ) == False )
    self.assert_( ( "val2a" != token.getAttrValue( "name2", "http://name2a.org/") ) == False )
    self.assert_( ( "val1a" != token.getAttrValue(xt1a) ) == False )
    self.assert_( ( "val2a" != token.getAttrValue(xt2a) ) == False )
    # Remove by triple, by name/URI, and by plain name; verify the
    # remaining attributes shift down each time.
    token.removeAttr(xt1a)
    token.removeAttr(xt2a)
    self.assert_( token.getAttributesLength() == 3 )
    token.removeAttr( "name1", "http://name1.org/")
    self.assert_( token.getAttributesLength() == 2 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "name2" != token.getAttrName(0) ) == False )
    self.assert_( ( "mval2" != token.getAttrValue(0) ) == False )
    self.assert_( ( "http://name2.org/" != token.getAttrURI(0) ) == False )
    self.assert_( ( "p2" != token.getAttrPrefix(0) ) == False )
    self.assert_( ( "noprefix" != token.getAttrName(1) ) == False )
    self.assert_( ( "mval3" != token.getAttrValue(1) ) == False )
    self.assert_( token.getAttrURI(1) == "" )
    self.assert_( token.getAttrPrefix(1) == "" )
    self.assert_( token.hasAttr( "name1", "http://name1.org/") == False )
    token.removeAttr(xt2)
    self.assert_( token.getAttributesLength() == 1 )
    self.assert_( token.isAttributesEmpty() == False )
    self.assert_( ( "noprefix" != token.getAttrName(0) ) == False )
    self.assert_( ( "mval3" != token.getAttrValue(0) ) == False )
    self.assert_( token.getAttrURI(0) == "" )
    self.assert_( token.getAttrPrefix(0) == "" )
    self.assert_( token.hasAttr(xt2) == False )
    self.assert_( token.hasAttr( "name2", "http://name2.org/") == False )
    token.removeAttr( "noprefix")
    self.assert_( token.getAttributesLength() == 0 )
    self.assert_( token.isAttributesEmpty() == True )
    self.assert_( token.hasAttr( "noprefix" ) == False )
    self.assert_( token.hasAttr( "noprefix", "") == False )
    # Translated C-API cleanup: drop the wrapper references.
    _dummyList = [ token ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt3 ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt1a ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ xt2a ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
    _dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
    pass
def test_XMLToken_attribute_set_clear(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
nattr = libsbml.XMLAttributes()
xt1 = libsbml.XMLTriple("name1", "http://name1.org/", "p1")
xt2 = libsbml.XMLTriple("name2", "http://name2.org/", "p2")
xt3 = libsbml.XMLTriple("name3", "http://name3.org/", "p3")
xt4 = libsbml.XMLTriple("name4", "http://name4.org/", "p4")
xt5 = libsbml.XMLTriple("name5", "http://name5.org/", "p5")
nattr.add(xt1, "val1")
nattr.add(xt2, "val2")
nattr.add(xt3, "val3")
nattr.add(xt4, "val4")
nattr.add(xt5, "val5")
token.setAttributes(nattr)
self.assert_( token.getAttributesLength() == 5 )
self.assert_( token.isAttributesEmpty() == False )
self.assert_( ( "name1" != token.getAttrName(0) ) == False )
self.assert_( ( "val1" != token.getAttrValue(0) ) == False )
self.assert_( ( "http://name1.org/" != token.getAttrURI(0) ) == False )
self.assert_( ( "p1" != token.getAttrPrefix(0) ) == False )
self.assert_( ( "name2" != token.getAttrName(1) ) == False )
self.assert_( ( "val2" != token.getAttrValue(1) ) == False )
self.assert_( ( "http://name2.org/" != token.getAttrURI(1) ) == False )
self.assert_( ( "p2" != token.getAttrPrefix(1) ) == False )
self.assert_( ( "name3" != token.getAttrName(2) ) == False )
self.assert_( ( "val3" != token.getAttrValue(2) ) == False )
self.assert_( ( "http://name3.org/" != token.getAttrURI(2) ) == False )
self.assert_( ( "p3" != token.getAttrPrefix(2) ) == False )
self.assert_( ( "name4" != token.getAttrName(3) ) == False )
self.assert_( ( "val4" != token.getAttrValue(3) ) == False )
self.assert_( ( "http://name4.org/" != token.getAttrURI(3) ) == False )
self.assert_( ( "p4" != token.getAttrPrefix(3) ) == False )
self.assert_( ( "name5" != token.getAttrName(4) ) == False )
self.assert_( ( "val5" != token.getAttrValue(4) ) == False )
self.assert_( ( "http://name5.org/" != token.getAttrURI(4) ) == False )
self.assert_( ( "p5" != token.getAttrPrefix(4) ) == False )
ntriple = libsbml.XMLTriple("test2","http://test2.org/","p2")
token.setTriple(ntriple)
self.assert_( ( "test2" != token.getName() ) == False )
self.assert_( ( "http://test2.org/" != token.getURI() ) == False )
self.assert_( ( "p2" != token.getPrefix() ) == False )
token.clearAttributes()
self.assert_( token.getAttributesLength() == 0 )
self.assert_( token.isAttributesEmpty() != False )
_dummyList = [ nattr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ ntriple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt2 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt3 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt4 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ xt5 ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_chars(self):
token = libsbml.XMLToken("This is text")
self.assert_( token.isElement() == False )
self.assert_( token.isEnd() == False )
self.assert_( token.isStart() == False )
self.assert_( token.isText() == True )
self.assert_( token.isEOF() == False )
self.assert_( ( "This is text" != token.getCharacters() ) == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_create(self):
token = libsbml.XMLToken()
self.assert_( token != None )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
triple = libsbml.XMLTriple("attr", "uri", "prefix")
token = libsbml.XMLToken(triple)
self.assert_( token != None )
self.assert_( ( "attr" != token.getName() ) == False )
self.assert_( ( "prefix" != token.getPrefix() ) == False )
self.assert_( ( "uri" != token.getURI() ) == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
attr = libsbml.XMLAttributes()
self.assert_( attr != None )
attr.add( "attr2", "value")
token = libsbml.XMLToken(triple,attr)
self.assert_( token != None )
returnattr = token.getAttributes()
self.assert_( ( "attr2" != returnattr.getName(0) ) == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_fields(self):
triple = libsbml.XMLTriple("attr", "uri", "prefix")
token = libsbml.XMLToken(triple)
self.assert_( token.isElement() == True )
self.assert_( token.isEnd() == True )
self.assert_( token.isStart() == False )
self.assert_( token.isText() == False )
self.assert_( token.isEOF() == False )
self.assert_( ( "attr" != token.getName() ) == False )
self.assert_( ( "uri" != token.getURI() ) == False )
self.assert_( ( "prefix" != token.getPrefix() ) == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_namespace_add(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
self.assert_( token.getNamespacesLength() == 0 )
self.assert_( token.isNamespacesEmpty() == True )
token.addNamespace( "http://test1.org/", "test1")
self.assert_( token.getNamespacesLength() == 1 )
self.assert_( token.isNamespacesEmpty() == False )
token.addNamespace( "http://test2.org/", "test2")
self.assert_( token.getNamespacesLength() == 2 )
self.assert_( token.isNamespacesEmpty() == False )
token.addNamespace( "http://test1.org/", "test1a")
self.assert_( token.getNamespacesLength() == 3 )
self.assert_( token.isNamespacesEmpty() == False )
token.addNamespace( "http://test1.org/", "test1a")
self.assert_( token.getNamespacesLength() == 3 )
self.assert_( token.isNamespacesEmpty() == False )
self.assert_( (token.getNamespaceIndex( "http://test1.org/") == -1) == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_namespace_get(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
token.addNamespace( "http://test6.org/", "test6")
token.addNamespace( "http://test7.org/", "test7")
token.addNamespace( "http://test8.org/", "test8")
token.addNamespace( "http://test9.org/", "test9")
self.assert_( token.getNamespacesLength() == 9 )
self.assert_( token.getNamespaceIndex( "http://test1.org/") == 0 )
self.assert_( ( "test2" != token.getNamespacePrefix(1) ) == False )
self.assert_( ( "test1" != token.getNamespacePrefix( "http://test1.org/") ) == False )
self.assert_( ( "http://test2.org/" != token.getNamespaceURI(1) ) == False )
self.assert_( ( "http://test2.org/" != token.getNamespaceURI( "test2") ) == False )
self.assert_( token.getNamespaceIndex( "http://test1.org/") == 0 )
self.assert_( token.getNamespaceIndex( "http://test2.org/") == 1 )
self.assert_( token.getNamespaceIndex( "http://test5.org/") == 4 )
self.assert_( token.getNamespaceIndex( "http://test9.org/") == 8 )
self.assert_( token.getNamespaceIndex( "http://testX.org/") == -1 )
self.assert_( token.hasNamespaceURI( "http://test1.org/") != False )
self.assert_( token.hasNamespaceURI( "http://test2.org/") != False )
self.assert_( token.hasNamespaceURI( "http://test5.org/") != False )
self.assert_( token.hasNamespaceURI( "http://test9.org/") != False )
self.assert_( token.hasNamespaceURI( "http://testX.org/") == False )
self.assert_( token.getNamespaceIndexByPrefix( "test1") == 0 )
self.assert_( token.getNamespaceIndexByPrefix( "test5") == 4 )
self.assert_( token.getNamespaceIndexByPrefix( "test9") == 8 )
self.assert_( token.getNamespaceIndexByPrefix( "testX") == -1 )
self.assert_( token.hasNamespacePrefix( "test1") != False )
self.assert_( token.hasNamespacePrefix( "test5") != False )
self.assert_( token.hasNamespacePrefix( "test9") != False )
self.assert_( token.hasNamespacePrefix( "testX") == False )
self.assert_( token.hasNamespaceNS( "http://test1.org/", "test1") != False )
self.assert_( token.hasNamespaceNS( "http://test5.org/", "test5") != False )
self.assert_( token.hasNamespaceNS( "http://test9.org/", "test9") != False )
self.assert_( token.hasNamespaceNS( "http://testX.org/", "testX") == False )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_namespace_remove(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
self.assert_( token.getNamespacesLength() == 5 )
token.removeNamespace(4)
self.assert_( token.getNamespacesLength() == 4 )
token.removeNamespace(3)
self.assert_( token.getNamespacesLength() == 3 )
token.removeNamespace(2)
self.assert_( token.getNamespacesLength() == 2 )
token.removeNamespace(1)
self.assert_( token.getNamespacesLength() == 1 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 0 )
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
self.assert_( token.getNamespacesLength() == 5 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 4 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 3 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 2 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 1 )
token.removeNamespace(0)
self.assert_( token.getNamespacesLength() == 0 )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_namespace_remove_by_prefix(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
self.assert_( token.getNamespacesLength() == 5 )
token.removeNamespace( "test1")
self.assert_( token.getNamespacesLength() == 4 )
token.removeNamespace( "test2")
self.assert_( token.getNamespacesLength() == 3 )
token.removeNamespace( "test3")
self.assert_( token.getNamespacesLength() == 2 )
token.removeNamespace( "test4")
self.assert_( token.getNamespacesLength() == 1 )
token.removeNamespace( "test5")
self.assert_( token.getNamespacesLength() == 0 )
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
self.assert_( token.getNamespacesLength() == 5 )
token.removeNamespace( "test5")
self.assert_( token.getNamespacesLength() == 4 )
token.removeNamespace( "test4")
self.assert_( token.getNamespacesLength() == 3 )
token.removeNamespace( "test3")
self.assert_( token.getNamespacesLength() == 2 )
token.removeNamespace( "test2")
self.assert_( token.getNamespacesLength() == 1 )
token.removeNamespace( "test1")
self.assert_( token.getNamespacesLength() == 0 )
token.addNamespace( "http://test1.org/", "test1")
token.addNamespace( "http://test2.org/", "test2")
token.addNamespace( "http://test3.org/", "test3")
token.addNamespace( "http://test4.org/", "test4")
token.addNamespace( "http://test5.org/", "test5")
self.assert_( token.getNamespacesLength() == 5 )
token.removeNamespace( "test3")
self.assert_( token.getNamespacesLength() == 4 )
token.removeNamespace( "test1")
self.assert_( token.getNamespacesLength() == 3 )
token.removeNamespace( "test4")
self.assert_( token.getNamespacesLength() == 2 )
token.removeNamespace( "test5")
self.assert_( token.getNamespacesLength() == 1 )
token.removeNamespace( "test2")
self.assert_( token.getNamespacesLength() == 0 )
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def test_XMLToken_namespace_set_clear(self):
triple = libsbml.XMLTriple("test","","")
attr = libsbml.XMLAttributes()
token = libsbml.XMLToken(triple,attr)
ns = libsbml.XMLNamespaces()
self.assert_( token.getNamespacesLength() == 0 )
self.assert_( token.isNamespacesEmpty() == True )
ns.add( "http://test1.org/", "test1")
ns.add( "http://test2.org/", "test2")
ns.add( "http://test3.org/", "test3")
ns.add( "http://test4.org/", "test4")
ns.add( "http://test5.org/", "test5")
token.setNamespaces(ns)
self.assert_( token.getNamespacesLength() == 5 )
self.assert_( token.isNamespacesEmpty() == False )
self.assert_( ( "test1" != token.getNamespacePrefix(0) ) == False )
self.assert_( ( "test2" != token.getNamespacePrefix(1) ) == False )
self.assert_( ( "test3" != token.getNamespacePrefix(2) ) == False )
self.assert_( ( "test4" != token.getNamespacePrefix(3) ) == False )
self.assert_( ( "test5" != token.getNamespacePrefix(4) ) == False )
self.assert_( ( "http://test1.org/" != token.getNamespaceURI(0) ) == False )
self.assert_( ( "http://test2.org/" != token.getNamespaceURI(1) ) == False )
self.assert_( ( "http://test3.org/" != token.getNamespaceURI(2) ) == False )
self.assert_( ( "http://test4.org/" != token.getNamespaceURI(3) ) == False )
self.assert_( ( "http://test5.org/" != token.getNamespaceURI(4) ) == False )
token.clearNamespaces()
self.assert_( token.getNamespacesLength() == 0 )
_dummyList = [ ns ]; _dummyList[:] = []; del _dummyList
_dummyList = [ token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ attr ]; _dummyList[:] = []; del _dummyList
pass
def suite():
  """Build the TestSuite containing every TestXMLToken test."""
  all_tests = unittest.TestSuite()
  all_tests.addTest(unittest.makeSuite(TestXMLToken))
  return all_tests
if __name__ == "__main__":
  # Exit status 0 on success, 1 on any test failure/error.
  outcome = unittest.TextTestRunner(verbosity=1).run(suite())
  sys.exit(0 if outcome.wasSuccessful() else 1)
| TheCoSMoCompany/biopredyn | Prototype/src/libsbml-5.10.0/src/bindings/python/test/xml/TestXMLToken.py | Python | bsd-3-clause | 24,842 | [
"VisIt"
] | 08411f7e6e6258021970021b2c59e5ca3fcd45f40e2c3de0e8a3dd1311eb3817 |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Provides a class for interacting with KPath classes to
generate high-symmetry k-paths using different conventions.
"""
import itertools
from warnings import warn
import networkx as nx
import numpy as np
from pymatgen.symmetry.kpath import (
KPathBase,
KPathLatimerMunro,
KPathSeek,
KPathSetyawanCurtarolo,
)
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.electronic_structure.core import Spin
__author__ = "Jason Munro"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Jason Munro"
__email__ = "jmunro@lbl.gov"
__status__ = "Development"
__date__ = "March 2020"
class HighSymmKpath(KPathBase):
    """
    This class generates path along high symmetry lines in the
    Brillouin zone according to different conventions.
    The class is designed to be used with a specific primitive
    cell setting. The definitions for the primitive cell
    used can be found in: Computational Materials Science,
    49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010.
    The space group analyzer can be used to produce the correct
    primitive structure
    (method get_primitive_standard_structure(international_monoclinic=False)).
    Ensure input structure is correct before 'get_kpoints()' method is used.
    See individual KPath classes for details on specific conventions.
    """

    def __init__(
        self,
        structure,
        has_magmoms=False,
        magmom_axis=None,
        path_type="setyawan_curtarolo",
        symprec=0.01,
        angle_tolerance=5,
        atol=1e-5,
    ):
        """
        Args:
            structure (Structure): Structure object
            has_magmoms (boolean): Whether the input structure contains
                magnetic moments as site properties with the key 'magmom.'
                Values may be in the form of 3-component vectors given in
                the basis of the input lattice vectors, in
                which case the spin axis will default to a_3, the third
                real-space lattice vector (this triggers a warning).
            magmom_axis (list or numpy array): 3-component vector specifying
                direction along which magnetic moments given as scalars
                should point. If all magnetic moments are provided as
                vectors then this argument is not used.
            path_type (string): Chooses which convention to use to generate
                the high symmetry path. Options are: 'setyawan_curtarolo', 'hinuma',
                'latimer_munro' for the Setyawan & Curtarolo, Hinuma et al., and
                Latimer & Munro conventions. Choosing 'all' will generate one path
                with points from all three conventions. Equivalent labels between
                each will also be generated. Order will always be Latimer & Munro,
                Setyawan & Curtarolo, and Hinuma et al. Lengths for each of the paths
                will also be generated and output as a list. Note for 'all' the user
                will have to alter the labels on their own for plotting.
            symprec (float): Tolerance for symmetry finding
            angle_tolerance (float): Angle tolerance for symmetry finding.
            atol (float): Absolute tolerance used to determine symmetric
                equivalence of points and lines on the BZ.
        """
        super().__init__(structure, symprec=symprec, angle_tolerance=angle_tolerance, atol=atol)
        self._path_type = path_type
        self._equiv_labels = None
        self._path_lengths = None
        self._label_index = None
        if path_type != "all":
            # Single-convention path: delegate to the matching KPath class.
            if path_type == "latimer_munro":
                self._kpath = self._get_lm_kpath(has_magmoms, magmom_axis, symprec, angle_tolerance, atol).kpath
            elif path_type == "setyawan_curtarolo":
                self._kpath = self._get_sc_kpath(symprec, angle_tolerance, atol).kpath
            elif path_type == "hinuma":
                hin_dat = self._get_hin_kpath(symprec, angle_tolerance, atol, not has_magmoms)
                self._kpath = hin_dat.kpath
                # Keep the transformation matrix used for the Hinuma basis change.
                self._hin_tmat = hin_dat._tmat
        else:
            if has_magmoms:
                raise ValueError("Cannot select 'all' with non-zero magmoms.")
            # Generate all three conventions and concatenate them into one
            # path. Labels are replaced by integer indices so points with the
            # same symbol in different conventions stay distinguishable.
            lm_bs = self._get_lm_kpath(has_magmoms, magmom_axis, symprec, angle_tolerance, atol)
            # Reciprocal point group, reused for label-equivalence matching.
            rpg = lm_bs._rpg
            sc_bs = self._get_sc_kpath(symprec, angle_tolerance, atol)
            hin_bs = self._get_hin_kpath(symprec, angle_tolerance, atol, not has_magmoms)
            index = 0
            cat_points = {}
            label_index = {}
            num_path = []
            self._path_lengths = []
            for bs in [lm_bs, sc_bs, hin_bs]:
                # Assign a running integer index to every kpoint of this convention.
                for key, value in enumerate(bs.kpath["kpoints"]):
                    cat_points[index] = bs.kpath["kpoints"][value]
                    label_index[index] = value
                    index += 1
                # Total number of labeled points in this convention's path,
                # reported via the path_lengths property.
                total_points_path = 0
                for seg in bs.kpath["path"]:
                    total_points_path += len(seg)
                # Rewrite each path segment in terms of the new integer
                # indices; only indices belonging to the current convention
                # (the last len(kpoints) entries of label_index) are searched.
                for block in bs.kpath["path"]:
                    new_block = []
                    for label in block:
                        for ind in range(
                            len(label_index) - len(bs.kpath["kpoints"]),
                            len(label_index),
                        ):
                            if label_index[ind] == label:
                                new_block.append(ind)
                    num_path.append(new_block)
                self._path_lengths.append(total_points_path)
            self._label_index = label_index
            self._kpath = {"kpoints": cat_points, "path": num_path}
            self._equiv_labels = self._get_klabels(lm_bs, sc_bs, hin_bs, rpg)

    @property
    def path_type(self):
        """
        Returns:
            The type of kpath chosen
        """
        return self._path_type

    @property
    def label_index(self):
        """
        Returns:
            The correspondence between numbers and kpoint symbols for the
            combined kpath generated when path_type = 'all'. None otherwise.
        """
        return self._label_index

    @property
    def equiv_labels(self):
        """
        Returns:
            The correspondence between the kpoint symbols in the Latimer and
            Munro convention, Setyawan and Curtarolo, and Hinuma
            conventions respectively. Only generated when path_type = 'all'.
        """
        return self._equiv_labels

    @property
    def path_lengths(self):
        """
        Returns:
            List of lengths of the Latimer and Munro, Setyawan and Curtarolo, and Hinuma
            conventions in the combined HighSymmKpath object when path_type = 'all' respectively.
            None otherwise.
        """
        return self._path_lengths

    def _get_lm_kpath(self, has_magmoms, magmom_axis, symprec, angle_tolerance, atol):
        """
        Returns:
            Latimer and Munro k-path with labels.
        """
        return KPathLatimerMunro(self._structure, has_magmoms, magmom_axis, symprec, angle_tolerance, atol)

    def _get_sc_kpath(self, symprec, angle_tolerance, atol):
        """
        Returns:
            Setyawan and Curtarolo k-path with labels.
        """
        kpath = KPathSetyawanCurtarolo(self._structure, symprec, angle_tolerance, atol)
        # Expose the standardized cells used by this convention.
        self.prim = kpath.prim
        self.conventional = kpath.conventional
        self.prim_rec = kpath.prim_rec
        self._rec_lattice = self.prim_rec
        return kpath

    def _get_hin_kpath(self, symprec, angle_tolerance, atol, tri):
        """
        Returns:
            Hinuma et al. k-path with labels, with the kpoints transformed
            into the basis of the reciprocal lattice of the input structure.
        """
        bs = KPathSeek(self._structure, symprec, angle_tolerance, atol, tri)
        kpoints = bs.kpath["kpoints"]
        tmat = bs._tmat
        # Transform each fractional kpoint from the Hinuma (SeeK-path)
        # primitive basis back to the input structure's reciprocal basis.
        for key in kpoints:
            kpoints[key] = np.dot(np.transpose(np.linalg.inv(tmat)), kpoints[key])
        bs.kpath["kpoints"] = kpoints
        self._rec_lattice = self._structure.lattice.reciprocal_lattice
        warn(
            "K-path from the Hinuma et al. convention has been transformed to the basis of the reciprocal lattice \
of the input structure. Use `KPathSeek` for the path in the original author-intended basis."
        )
        return bs

    def _get_klabels(self, lm_bs, sc_bs, hin_bs, rpg):
        """
        Returns:
            labels (dict): Dictionary of equivalent labels for paths if 'all' is chosen.
                If an exact kpoint match cannot be found, symmetric equivalency will be
                searched for and indicated with an asterisk in the equivalent label.
                If an equivalent label can still not be found, or the point is not in
                the explicit kpath, its equivalent label will be set to itself in the output.
        """
        lm_path = lm_bs.kpath
        sc_path = sc_bs.kpath
        hin_path = hin_bs.kpath
        n_op = len(rpg)
        # Compare every ordered pair of conventions in both directions.
        pairs = itertools.permutations(
            [{"setyawan_curtarolo": sc_path}, {"latimer_munro": lm_path}, {"hinuma": hin_path}], r=2
        )
        labels = {"setyawan_curtarolo": {}, "latimer_munro": {}, "hinuma": {}}
        for (a, b) in pairs:
            [(a_type, a_path)] = list(a.items())
            [(b_type, b_path)] = list(b.items())
            # Score each point-group operation by how many of a's kpoints it
            # maps exactly onto kpoints of b; the best-scoring operation is
            # then used as the alignment between the two conventions.
            sc_count = np.zeros(n_op)
            for o_num in range(0, n_op):
                a_tr_coord = []
                for (label_a, coord_a) in a_path["kpoints"].items():
                    a_tr_coord.append(np.dot(rpg[o_num], coord_a))
                for coord_a in a_tr_coord:
                    for key, value in b_path["kpoints"].items():
                        if np.allclose(value, coord_a, atol=self._atol):
                            sc_count[o_num] += 1
                            break
            a_to_b_labels = {}
            unlabeled = {}
            for (label_a, coord_a) in a_path["kpoints"].items():
                coord_a_t = np.dot(rpg[np.argmax(sc_count)], coord_a)
                assigned = False
                for (label_b, coord_b) in b_path["kpoints"].items():
                    if np.allclose(coord_b, coord_a_t, atol=self._atol):
                        a_to_b_labels[label_a] = label_b
                        assigned = True
                        break
                if not assigned:
                    unlabeled[label_a] = coord_a
            # Second pass: for points the best operation could not match, try
            # every operation and mark the resulting label with an asterisk.
            for (label_a, coord_a) in unlabeled.items():
                for op in rpg:
                    coord_a_t = np.dot(op, coord_a)
                    key = [
                        key
                        for key, value in b_path["kpoints"].items()
                        if np.allclose(value, coord_a_t, atol=self._atol)
                    ]
                    if key != []:
                        a_to_b_labels[label_a] = key[0][0] + "^{*}"
                        break
                if key == []:
                    # No symmetric equivalent found: map the label to itself.
                    a_to_b_labels[label_a] = label_a
            labels[a_type][b_type] = a_to_b_labels
        return labels

    @staticmethod
    def get_continuous_path(bandstructure):
        """
        Obtain a continuous version of an input path using graph theory.
        This routine will attempt to add connections between nodes of
        odd-degree to ensure a Eulerian path can be formed. Initial
        k-path must be able to be converted to a connected graph. See
        npj Comput Mater 6, 112 (2020). 10.1038/s41524-020-00383-7
        for more details.
        Args:
            bandstructure (BandstructureSymmLine): BandstructureSymmLine object.
        Returns:
            bandstructure (BandstructureSymmLine): New BandstructureSymmLine object with continuous path.
        """
        # Build an undirected graph whose nodes are labeled kpoints and whose
        # edges are the original path branches (consecutive label pairs).
        G = nx.Graph()
        labels = []
        for point in bandstructure.kpoints:
            if point.label is not None:
                labels.append(point.label)
        plot_axis = []
        for i in range(int(len(labels) / 2)):
            G.add_edges_from([(labels[2 * i], labels[(2 * i) + 1])])
            plot_axis.append((labels[2 * i], labels[(2 * i) + 1]))
        # Eulerize duplicates edges so every node has even degree, which
        # guarantees a closed walk traversing every branch.
        G_euler = nx.algorithms.euler.eulerize(G)
        G_euler_circuit = nx.algorithms.euler.eulerian_circuit(G_euler)
        # distances_map records, for every edge of the Euler circuit, which
        # original branch it corresponds to and whether it is traversed
        # in reverse (True).
        distances_map = []
        kpath_euler = []
        for edge_euler in G_euler_circuit:
            kpath_euler.append(edge_euler)
            for edge_reg in plot_axis:
                if edge_euler == edge_reg:
                    distances_map.append((plot_axis.index(edge_reg), False))
                elif edge_euler[::-1] == edge_reg:
                    distances_map.append((plot_axis.index(edge_reg), True))
        if bandstructure.is_spin_polarized:
            spins = [Spin.up, Spin.down]
        else:
            spins = [Spin.up]
        # Re-assemble kpoints, eigenvalues and projections branch by branch,
        # reversing slices where the Euler circuit walks a branch backwards.
        new_kpoints = []
        new_bands = {spin: [np.array([]) for _ in range(bandstructure.nb_bands)] for spin in spins}
        new_projections = {spin: [[] for _ in range(bandstructure.nb_bands)] for spin in spins}
        for entry in distances_map:
            if not entry[1]:
                branch = bandstructure.branches[entry[0]]
                start = branch["start_index"]
                stop = branch["end_index"] + 1
                step = 1
            else:
                branch = bandstructure.branches[entry[0]]
                start = branch["end_index"]
                stop = branch["start_index"] - 1
                step = -1
            # kpoints
            new_kpoints += [point.frac_coords for point in bandstructure.kpoints[start:stop:step]]
            # eigenvals
            for spin in spins:
                for n, band in enumerate(bandstructure.bands[spin]):
                    new_bands[spin][n] = np.concatenate((new_bands[spin][n], band[start:stop:step]))
            # projections
            for spin in spins:
                for n, band in enumerate(bandstructure.projections[spin]):
                    new_projections[spin][n] += band[start:stop:step].tolist()
        for spin in spins:
            new_projections[spin] = np.array(new_projections[spin])
        new_labels_dict = {label: point.frac_coords for label, point in bandstructure.labels_dict.items()}
        new_bandstructure = BandStructureSymmLine(
            kpoints=new_kpoints,
            eigenvals=new_bands,
            lattice=bandstructure.lattice_rec,
            efermi=bandstructure.efermi,
            labels_dict=new_labels_dict,
            structure=bandstructure.structure,
            projections=new_projections,
        )
        return new_bandstructure
| vorwerkc/pymatgen | pymatgen/symmetry/bandstructure.py | Python | mit | 14,766 | [
"pymatgen"
] | 2642c4c78615ea329383d198d779d50f4b1a87797b957e80b760c8e5aac180f1 |
#!/usr/bin/env python
import numpy
import scipy.linalg
from pyscf import lib
from mpi4pyscf.lib import logger
from mpi4pyscf.tools import mpi
class DistributedDIIS(lib.diis.DIIS):
    """DIIS extrapolation with error vectors distributed over MPI ranks.

    Each rank keeps only its local slice of every vector; the DIIS B-matrix
    (inner products of error vectors, accumulated in ``self._H``) is summed
    across all ranks before the extrapolation coefficients are solved for.
    """

    def _store(self, key, value):
        # Lazily open the scratch HDF5 file. When an explicit filename was
        # given, append a per-rank suffix so ranks never write to the same
        # file; otherwise fall back to the base-class anonymous tmpfile,
        # only when the vector is too large to keep in memory.
        if self._diisfile is None:
            if isinstance(self.filename, str):
                filename = self.filename + '__rank' + str(mpi.rank)
                self._diisfile = lib.H5TmpFile(filename, 'w')
            elif not (self.incore or value.size < lib.diis.INCORE_SIZE):
                self._diisfile = lib.H5TmpFile(self.filename, 'w')
        return lib.diis.DIIS._store(self, key, value)

    def extrapolate(self, nd=None):
        """Solve the DIIS equations and return the extrapolated vector.

        Args:
            nd (int, optional): Number of stored vectors to use. Defaults
                to all currently stored vectors.

        Returns:
            numpy.ndarray: The locally-held slice of the extrapolated vector.

        Raises:
            RuntimeError: If no vectors have been stored yet.
        """
        if nd is None:
            nd = self.get_num_vec()
        if nd == 0:
            raise RuntimeError('No vector found in DIIS object.')
        # Assemble the (nd+1) x (nd+1) DIIS matrix. Row/column 0 carry the
        # Lagrange-multiplier constraint; the inner block of local inner
        # products is summed over all MPI ranks to form the global B-matrix.
        h = self._H[:nd+1,:nd+1].copy()
        h[1:,1:] = mpi.comm.allreduce(self._H[1:nd+1,1:nd+1])
        g = numpy.zeros(nd+1, h.dtype)
        g[0] = 1
        w, v = scipy.linalg.eigh(h)
        if numpy.any(abs(w)<1e-14):
            # Near-singular B-matrix: solve in the well-conditioned
            # eigen-subspace instead of inverting directly.
            logger.debug(self, 'Singularity found in DIIS error vector space.')
            idx = abs(w)>1e-14
            c = numpy.dot(v[:,idx]*(1./w[idx]), numpy.dot(v[:,idx].T.conj(), g))
        else:
            try:
                c = numpy.linalg.solve(h, g)
            except numpy.linalg.linalg.LinAlgError as e:
                logger.warn(self, ' diis singular, eigh(h) %s', w)
                raise e
        logger.debug1(self, 'diis-c %s', c)
        # Linear combination of the stored local vectors with coefficients
        # c[1:], accumulated block-wise to bound memory use.
        xnew = None
        for i, ci in enumerate(c[1:]):
            xi = self.get_vec(i)
            if xnew is None:
                xnew = numpy.zeros(xi.size, c.dtype)
            for p0, p1 in lib.prange(0, xi.size, lib.diis.BLOCK_SIZE):
                xnew[p0:p1] += xi[p0:p1] * ci
        return xnew

    def restore(self, filename, inplace=True):
        '''Read diis contents from a diis file and replace the attributes of
        current diis object if needed, then construct the vector.
        '''
        # The on-disk files carry a per-rank suffix; strip any suffix from
        # the given name and re-append this rank's own.
        filename_base = filename.split('__rank')[0]
        filename = filename_base + '__rank' + str(mpi.rank)
        val = lib.diis.DIIS.restore(self, filename, inplace)
        if inplace:
            # Keep the rank-independent base name so _store re-derives
            # per-rank filenames consistently.
            self.filename = filename_base
        return val
def restore(filename):
    '''Restore/construct a distributed diis object based on a diis file.

    Args:
        filename (str): Base name of the DIIS file (the per-rank ``__rank``
            suffix is handled by :meth:`DistributedDIIS.restore`).

    Returns:
        DistributedDIIS: A DIIS object whose state was loaded from file.
    '''
    # Bug fix: the bare name ``DIIS`` is undefined in this module (only
    # ``lib.diis.DIIS`` is importable), so ``DIIS()`` raised NameError.
    # Instantiate this module's DistributedDIIS class instead.
    return DistributedDIIS().restore(filename)
| sunqm/mpi4pyscf | mpi4pyscf/lib/diis.py | Python | gpl-3.0 | 2,393 | [
"PySCF"
] | d0099d740a5997ca7443ec9521e7aca8baadbad9f6d9942a46e6bac83787ae1e |
from enum import IntEnum
from itertools import chain
from collections import namedtuple, defaultdict
from rdkit import Chem
from ._base import Descriptor
from ._util import parse_enum
from ._atomic_property import AtomicProperty
__all__ = ("Chi",)
class ChiType(IntEnum):
    """Classification of a molecular subgraph used by the Chi descriptors."""

    __slots__ = ()

    path = 1
    cluster = 2
    path_cluster = 3
    chain = 4

    @property
    def as_argument(self):
        """Name used when this type is passed as a descriptor argument."""
        return self.name

    @property
    def short(self):
        """Abbreviated tag used in descriptor names (e.g. 'pc')."""
        return ("p", "c", "pc", "ch")[self.value - 1]

    @property
    def long(self):
        """Human-readable name of this chi type (e.g. 'Chi path-cluster')."""
        suffix = {1: "path", 2: "cluster", 3: "path-cluster", 4: "chain"}[self.value]
        return "Chi " + suffix
class DFS(object):
    """Depth-first traversal over one bond-induced subgraph of a molecule.

    Usage: construct once per molecule, call :meth:`reset` with a list of
    bond indices to load a subgraph, then call the instance to classify
    that subgraph as a :class:`ChiType`.
    """

    __slots__ = (
        "mol",
        "visited",
        "vis_edges",
        "is_chain",
        "degrees",
        "bonds",
        "neighbors",
    )

    def __init__(self, mol):
        # Cache every bond's endpoint atom indices once, so reset() only
        # needs to index into this list per subgraph.
        self.mol = mol
        self.visited = set()
        self.vis_edges = set()
        self.degrees = set()
        self.neighbors = defaultdict(set)
        self.bonds = [
            (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) for b in self.mol.GetBonds()
        ]

    def reset(self, use_bonds):
        # Clear all traversal state and rebuild the adjacency map from the
        # given bond indices (indices into the cached bond list).
        ns = self.neighbors
        bs = self.bonds
        self.is_chain = False
        self.visited.clear()
        self.vis_edges.clear()
        self.degrees.clear()
        ns.clear()
        for i in range(len(use_bonds)):
            a, b = bs[use_bonds[i]]
            ns[a].add(b)
            ns[b].add(a)
        self.neighbors = ns

    @property
    def nodes(self):
        # Atom indices participating in the currently loaded subgraph.
        return list(self.neighbors.keys())

    def _dfs(self, u):
        # Recursive DFS from node u; records each node's degree and detects
        # back edges (cycles).
        neighbors = self.neighbors[u]
        self.visited.add(u)
        self.degrees.add(len(neighbors))
        for v in neighbors:
            # Normalize the edge key so (u, v) and (v, u) map to one entry.
            ek = (v, u) if u > v else (u, v)
            if v not in self.visited:
                self.vis_edges.add(ek)
                self._dfs(v)
            elif ek not in self.vis_edges:
                # An unused edge to an already-visited node closes a cycle.
                self.vis_edges.add(ek)
                self.is_chain = True

    def __call__(self):
        """Traverse the loaded subgraph and return its ChiType."""
        self._dfs(next(iter(self.neighbors.keys())))
        if self.is_chain:
            # Contains a cycle -> "chain" (ring) type.
            t = ChiType.chain
        elif not self.degrees - {1, 2}:
            # Only degree-1/degree-2 nodes: a simple path.
            t = ChiType.path
        elif 2 in self.degrees:
            # Branch point(s) plus degree-2 nodes: path-cluster.
            t = ChiType.path_cluster
        else:
            # Branch point(s) with only terminal neighbors: pure cluster.
            t = ChiType.cluster
        return t
class ChiBase(Descriptor):
    """Common base for Chi descriptors; works on the hydrogen-suppressed graph."""

    __slots__ = ()

    # Chi descriptors are defined on the heavy-atom (hydrogen-suppressed) graph.
    explicit_hydrogens = False
# Container bucketing subgraph node-sets by their ChiType, one field per type.
ChiBonds = namedtuple("ChiBonds", ["chain", "path", "path_cluster", "cluster"])
class ChiCache(ChiBase):
    """Shared dependency that enumerates all subgraphs with a fixed number
    of bonds once per molecule and classifies each one by ChiType.
    """

    __slots__ = ("_order",)

    def parameters(self):
        return (self._order,)

    def __init__(self, order):
        # order: number of bonds in each enumerated subgraph.
        self._order = order

    def calculate(self):
        chain = []
        path = []
        path_cluster = []
        cluster = []
        dfs = DFS(self.mol)
        # FindAllSubgraphsOfLengthN yields bond-index tuples; load each into
        # the DFS classifier and bucket its node set by the detected type.
        for bonds in Chem.FindAllSubgraphsOfLengthN(self.mol, self._order):
            dfs.reset(bonds)
            typ = dfs()
            nodes = dfs.nodes
            if typ == ChiType.chain:
                chain.append(nodes)
            elif typ == ChiType.path:
                path.append(nodes)
            elif typ == ChiType.path_cluster:
                path_cluster.append(nodes)
            else:
                cluster.append(nodes)
        return ChiBonds(chain, path, path_cluster, cluster)
class Chi(ChiBase):
    r"""chi descriptor.

    :type type: str
    :param type: one of chi_types

    :type prop: str or function
    :param prop: :ref:`atomic_properties`

    :type averaged: bool
    :param averaged: averaged by number of subgraphs

    :returns: NaN when

        * any atomic properties <= 0
        * averaged and :math:`N_{\chi} = 0`
    """

    since = "1.0.0"
    __slots__ = ("_type", "_order", "_prop", "_averaged")

    # Valid values for the ``type`` constructor argument.
    chi_types = tuple(t.name for t in ChiType)

    # Atomic properties used by the preset descriptor set:
    # sigma-electron delta ('d') and valence-electron delta ('dv').
    _deltas = ["d", "dv"]

    def description(self):
        """Return a human-readable description of this descriptor instance."""
        return "{}-ordered {}{} weighted by {}".format(
            self._order,
            "averaged " if self._averaged else "",
            self._type.long,
            self._prop.get_long(),
        )

    @classmethod
    def preset(cls, version):
        """Yield the standard set of Chi descriptors for both delta properties."""
        return chain(
            (cls(ChiType.chain, l, a) for a in cls._deltas for l in range(3, 8)),
            (cls(ChiType.cluster, l, a) for a in cls._deltas for l in range(3, 7)),
            (cls(ChiType.path_cluster, l, a) for a in cls._deltas for l in range(4, 7)),
            (
                cls(ChiType.path, l, a, m)
                for a in cls._deltas
                for m in [False, True]
                for l in range(8)
            ),
        )

    def __str__(self):
        # Descriptor name, e.g. "AXp-2d" for the averaged 2-ordered path chi.
        prop = self._prop.as_argument
        ct = self._type.short
        averaged = "A" if self._averaged else ""
        return "{}X{}-{}{}".format(averaged, ct, self._order, prop)

    def parameters(self):
        return self._type, self._order, self._prop, self._averaged

    def __init__(self, type="path", order=0, prop="d", averaged=False):
        self._type = parse_enum(ChiType, type)
        self._order = order
        self._prop = AtomicProperty(self.explicit_hydrogens, prop)
        self._averaged = averaged

    def dependencies(self):
        # Always needs the atomic property; subgraph enumeration is only
        # required for order > 0 (order 0 uses single atoms).
        d = {"P": self._prop}
        if self._order > 0:
            d["chi"] = ChiCache(self._order)
        return d

    def calculate(self, P, chi=None):
        if self._order <= 0:
            # Zero order: every atom by itself counts as a "path" subgraph.
            chi = ChiBonds([], [{a.GetIdx()} for a in self.mol.GetAtoms()], [], [])
        x = 0.0
        node_sets = getattr(chi, self._type.name)
        # Sum 1/sqrt(product of atomic properties) over all subgraphs
        # of the requested type.
        for nodes in node_sets:
            c = 1
            for node in nodes:
                c *= P[node]
            if c <= 0:
                self.fail(ValueError("some properties less then or equal to 0"))
            x += c ** -0.5
        if self._averaged:
            with self.rethrow_zerodiv():
                x /= len(node_sets)
        return x

    rtype = float

    _extra_docs = ("chi_types",)
| mordred-descriptor/mordred | mordred/Chi.py | Python | bsd-3-clause | 6,216 | [
"RDKit"
] | 4e9392df7d937b0297956e894b8c9252a6516df1e0b4baf6a1c3607cb14cd04f |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides class to generate and analyze interfacial reactions.
"""
import warnings
import matplotlib.pylab as plt
import numpy as np
from pymatgen.core.composition import Composition
from pymatgen.analysis.phase_diagram import GrandPotentialPhaseDiagram
from pymatgen.analysis.reaction_calculator import Reaction
__author__ = "Yihan Xiao"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Yihan Xiao"
__email__ = "eric.xyh2011@gmail.com"
__status__ = "Production"
__date__ = "Aug 15 2017"
class InterfacialReactivity:
    """
    An object encompassing all relevant data for interface reactions.
    """

    # Conversion factor: 1 eV per formula unit = 96.4853 kJ/mol.
    EV_TO_KJ_PER_MOL = 96.4853

    def __init__(
        self,
        c1,
        c2,
        pd,
        norm=True,
        include_no_mixing_energy=False,
        pd_non_grand=None,
        use_hull_energy=False,
    ):
        """
        Args:
            c1 (Composition): Composition object for reactant 1.
            c2 (Composition): Composition object for reactant 2.
            pd (PhaseDiagram): PhaseDiagram object or GrandPotentialPhaseDiagram
                object built from all elements in composition c1 and c2.
            norm (bool): Whether or not the total number of atoms in composition
                of reactant will be normalized to 1.
            include_no_mixing_energy (bool): No_mixing_energy for a reactant is the
                opposite number of its energy above grand potential convex hull. In
                cases where reactions involve elements reservoir, this param
                determines whether no_mixing_energy of reactants will be included
                in the final reaction energy calculation. By definition, if pd is
                not a GrandPotentialPhaseDiagram object, this param is False.
            pd_non_grand (PhaseDiagram): PhaseDiagram object but not
                GrandPotentialPhaseDiagram object built from elements in c1 and c2.
            use_hull_energy (bool): Whether or not use the convex hull energy for
                a given composition for reaction energy calculation. If false,
                the energy of ground state structure will be used instead.
                Note that in case when ground state can not be found for a
                composition, convex hull energy will be used associated with a
                warning message.
        """
        self.grand = isinstance(pd, GrandPotentialPhaseDiagram)

        # if include_no_mixing_energy is True, pd should be a
        # GrandPotentialPhaseDiagram object and pd_non_grand should be given.
        if include_no_mixing_energy and not self.grand:
            raise ValueError("Please provide grand phase diagram to compute" " no_mixing_energy!")
        if include_no_mixing_energy and not pd_non_grand:
            raise ValueError("Please provide non-grand phase diagram to " "compute no_mixing_energy!")
        if self.grand and use_hull_energy and not pd_non_grand:
            raise ValueError("Please provide non-grand phase diagram if" " you want to use convex hull energy.")

        # Keeps copy of original compositions.
        self.c1_original = c1
        self.c2_original = c2

        # Two sets of composition attributes for two processing conditions:
        # normalization with and without exluding element(s) from reservoir.
        self.c1 = c1
        self.c2 = c2
        self.comp1 = c1
        self.comp2 = c2

        self.norm = norm
        self.pd = pd
        self.pd_non_grand = pd_non_grand
        self.use_hull_energy = use_hull_energy

        # Factor is the compositional ratio between composition self.c1 and
        # processed composition self.comp1. E.g., factor for
        # Composition('SiO2') and Composition('O') is 2.0. This factor will
        # be used to convert mixing ratio in self.comp1 - self.comp2
        # tie line to that in self.c1 - self.c2 tie line.
        self.factor1 = 1
        self.factor2 = 1

        if self.grand:
            # Excludes element(s) from reservoir.
            self.comp1 = Composition({k: v for k, v in c1.items() if k not in pd.chempots})
            self.comp2 = Composition({k: v for k, v in c2.items() if k not in pd.chempots})
            # Calculate the factors in case where self.grand = True and
            # self.norm = True.
            factor1 = self.comp1.num_atoms / c1.num_atoms
            factor2 = self.comp2.num_atoms / c2.num_atoms

        if self.norm:
            self.c1 = c1.fractional_composition
            self.c2 = c2.fractional_composition
            self.comp1 = self.comp1.fractional_composition
            self.comp2 = self.comp2.fractional_composition
            if self.grand:
                # Only when self.grand = True and self.norm = True
                # will self.factor be updated.
                self.factor1 = factor1
                self.factor2 = factor2

        # Computes energies for reactants in different scenarios.
        if not self.grand:
            if self.use_hull_energy:
                self.e1 = self.pd.get_hull_energy(self.comp1)
                self.e2 = self.pd.get_hull_energy(self.comp2)
            else:
                # Use entry energy as reactant energy if no reservoir
                # is present.
                self.e1 = InterfacialReactivity._get_entry_energy(self.pd, self.comp1)
                self.e2 = InterfacialReactivity._get_entry_energy(self.pd, self.comp2)
        else:
            if include_no_mixing_energy:
                # Computing grand potentials needs compositions containing
                # element(s) from reservoir, so self.c1 and self.c2 are used.
                self.e1 = self._get_grand_potential(self.c1)
                self.e2 = self._get_grand_potential(self.c2)
            else:
                self.e1 = self.pd.get_hull_energy(self.comp1)
                self.e2 = self.pd.get_hull_energy(self.comp2)

    @staticmethod
    def _get_entry_energy(pd, composition):
        """
        Finds the lowest entry energy for entries matching the composition.
        Entries with non-negative formation energies are excluded. If no
        entry is found, use the convex hull energy for the composition.

        Args:
            pd (PhaseDiagram): PhaseDiagram object.
            composition (Composition): Composition object that the target
                entry should match.

        Returns:
            The lowest entry energy among entries matching the composition.
        """
        candidate = [
            i.energy_per_atom
            for i in pd.qhull_entries
            if i.composition.fractional_composition == composition.fractional_composition
        ]

        if not candidate:
            warnings.warn(
                "The reactant " + composition.reduced_formula + " has no matching entry with negative formation"
                " energy, instead convex hull energy for this"
                " composition will be used for reaction energy "
                "calculation. "
            )
            return pd.get_hull_energy(composition)
        min_entry_energy = min(candidate)
        # Scale per-atom energy back up to the full composition.
        return min_entry_energy * composition.num_atoms

    def _get_grand_potential(self, composition):
        """
        Computes the grand potential Phi at a given composition and
        chemical potential(s).

        Args:
            composition (Composition): Composition object.

        Returns:
            Grand potential at a given composition at chemical potential(s).
        """
        if self.use_hull_energy:
            grand_potential = self.pd_non_grand.get_hull_energy(composition)
        else:
            grand_potential = InterfacialReactivity._get_entry_energy(self.pd_non_grand, composition)

        # Phi = E - sum_i (mu_i * n_i) over reservoir elements.
        grand_potential -= sum([composition[e] * mu for e, mu in self.pd.chempots.items()])

        if self.norm:
            # Normalizes energy to the composition excluding element(s)
            # from reservoir.
            grand_potential /= sum([composition[el] for el in composition if el not in self.pd.chempots])

        return grand_potential

    def _get_energy(self, x):
        """
        Computes reaction energy in eV/atom at mixing ratio x : (1-x) for
        self.comp1 : self.comp2.

        Args:
            x (float): Mixing ratio x of reactants, a float between 0 and 1.

        Returns:
            Reaction energy.
        """
        # Hull energy of the mixture minus the linear combination of the
        # reactant energies.
        return self.pd.get_hull_energy(self.comp1 * x + self.comp2 * (1 - x)) - self.e1 * x - self.e2 * (1 - x)

    def _get_reaction(self, x):
        """
        Generates balanced reaction at mixing ratio x : (1-x) for
        self.comp1 : self.comp2.

        Args:
            x (float): Mixing ratio x of reactants, a float between 0 and 1.

        Returns:
            Reaction object.
        """
        mix_comp = self.comp1 * x + self.comp2 * (1 - x)
        decomp = self.pd.get_decomposition(mix_comp)

        # Uses original composition for reactants.
        if np.isclose(x, 0):
            reactant = [self.c2_original]
        elif np.isclose(x, 1):
            reactant = [self.c1_original]
        else:
            reactant = list(set([self.c1_original, self.c2_original]))

        if self.grand:
            # Reservoir elements may also act as reactants.
            reactant += [Composition(e.symbol) for e, v in self.pd.chempots.items()]
        product = [Composition(k.name) for k, v in decomp.items()]
        reaction = Reaction(reactant, product)

        x_original = self._get_original_composition_ratio(reaction)
        if np.isclose(x_original, 1):
            reaction.normalize_to(self.c1_original, x_original)
        else:
            reaction.normalize_to(self.c2_original, 1 - x_original)
        return reaction

    def _get_elmt_amt_in_rxt(self, rxt):
        """
        Computes total number of atoms in a reaction formula for elements
        not in external reservoir. This method is used in the calculation
        of reaction energy per mol of reaction formula.

        Args:
            rxt (Reaction): a reaction.

        Returns:
            Total number of atoms for non_reservoir elements.
        """
        return sum([rxt.get_el_amount(e) for e in self.pd.elements])

    def get_products(self):
        """
        List of formulas of potential products. E.g., ['Li','O2','Mn'].
        """
        products = set()
        for _, _, _, react, _ in self.get_kinks():
            products = products.union({k.reduced_formula for k in react.products})
        return list(products)

    @staticmethod
    def _convert(x, factor1, factor2):
        """
        Converts mixing ratio x in comp1 - comp2 tie line to that in
        c1 - c2 tie line.

        Args:
            x (float): Mixing ratio x in comp1 - comp2 tie line, a float
                between 0 and 1.
            factor1 (float): Compositional ratio between composition c1 and
                processed composition comp1. E.g., factor for
                Composition('SiO2') and Composition('O') is 2.0.
            factor2 (float): Compositional ratio between composition c2 and
                processed composition comp2.

        Returns:
            Mixing ratio in c1 - c2 tie line, a float between 0 and 1.
        """
        return x * factor2 / ((1 - x) * factor1 + x * factor2)

    @staticmethod
    def _reverse_convert(x, factor1, factor2):
        """
        Converts mixing ratio x in c1 - c2 tie line to that in
        comp1 - comp2 tie line.

        Args:
            x (float): Mixing ratio x in c1 - c2 tie line, a float between
                0 and 1.
            factor1 (float): Compositional ratio between composition c1 and
                processed composition comp1. E.g., factor for
                Composition('SiO2') and Composition('O') is 2.
            factor2 (float): Compositional ratio between composition c2 and
                processed composition comp2.

        Returns:
            Mixing ratio in comp1 - comp2 tie line, a float between 0 and 1.
        """
        return x * factor1 / ((1 - x) * factor2 + x * factor1)

    def get_kinks(self):
        """
        Finds all the kinks in mixing ratio where reaction products changes
        along the tie line of composition self.c1 and composition self.c2.

        Returns:
            Zip object of tuples (index, mixing ratio,
                                  reaction energy per atom in eV/atom,
                                  reaction formula,
                                  reaction energy per mol of reaction
                                  formula in kJ/mol).
        """
        c1_coord = self.pd.pd_coords(self.comp1)
        c2_coord = self.pd.pd_coords(self.comp2)

        n1 = self.comp1.num_atoms
        n2 = self.comp2.num_atoms

        critical_comp = self.pd.get_critical_compositions(self.comp1, self.comp2)
        x_kink, energy_kink, react_kink, energy_per_rxt_formula = [], [], [], []

        if all(c1_coord == c2_coord):
            # Degenerate case: both reactants map to the same point in
            # phase-diagram coordinates; only the endpoints are kinks.
            x_kink = [0, 1]
            energy_kink = [self._get_energy(x) for x in x_kink]
            react_kink = [self._get_reaction(x) for x in x_kink]
            num_atoms = [(x * self.comp1.num_atoms + (1 - x) * self.comp2.num_atoms) for x in x_kink]
            energy_per_rxt_formula = [
                energy_kink[i]
                * self._get_elmt_amt_in_rxt(react_kink[i])
                / num_atoms[i]
                * InterfacialReactivity.EV_TO_KJ_PER_MOL
                for i in range(2)
            ]
        else:
            for i in reversed(critical_comp):
                # Gets mixing ratio x at kinks.
                c = self.pd.pd_coords(i)
                x = np.linalg.norm(c - c2_coord) / np.linalg.norm(c1_coord - c2_coord)
                # Modifies mixing ratio in case compositions self.comp1 and
                # self.comp2 are not normalized.
                x = x * n2 / (n1 + x * (n2 - n1))
                n_atoms = x * self.comp1.num_atoms + (1 - x) * self.comp2.num_atoms
                # Converts mixing ratio in comp1 - comp2 tie line to that in
                # c1 - c2 tie line.
                x_converted = InterfacialReactivity._convert(x, self.factor1, self.factor2)
                x_kink.append(x_converted)
                # Gets reaction energy at kinks
                normalized_energy = self._get_energy(x)
                energy_kink.append(normalized_energy)
                # Gets balanced reaction at kinks
                rxt = self._get_reaction(x)
                react_kink.append(rxt)
                rxt_energy = normalized_energy * self._get_elmt_amt_in_rxt(rxt) / n_atoms
                energy_per_rxt_formula.append(rxt_energy * InterfacialReactivity.EV_TO_KJ_PER_MOL)

        index_kink = range(1, len(critical_comp) + 1)
        return zip(index_kink, x_kink, energy_kink, react_kink, energy_per_rxt_formula)

    def get_critical_original_kink_ratio(self):
        """
        Returns a list of molar mixing ratio for each kink between ORIGINAL
        (instead of processed) reactant compositions. This is the
        same list as mixing ratio obtained from get_kinks method
        if self.norm = False.

        Returns:
            A list of floats representing molar mixing ratios between
            the original reactant compositions for each kink.
        """
        ratios = []
        if self.c1_original == self.c2_original:
            return [0, 1]
        reaction_kink = [k[3] for k in self.get_kinks()]
        for rxt in reaction_kink:
            ratios.append(abs(self._get_original_composition_ratio(rxt)))
        return ratios

    def _get_original_composition_ratio(self, reaction):
        """
        Returns the molar mixing ratio between the reactants with ORIGINAL (
        instead of processed) compositions for a reaction.

        Args:
            reaction (Reaction): Reaction object that contains the original
                reactant compositions.

        Returns:
            The molar mixing ratio between the original reactant
            compositions for a reaction.
        """
        if self.c1_original == self.c2_original:
            return 1
        # A reactant absent from the balanced reaction contributes zero.
        c1_coeff = reaction.get_coeff(self.c1_original) if self.c1_original in reaction.reactants else 0
        c2_coeff = reaction.get_coeff(self.c2_original) if self.c2_original in reaction.reactants else 0
        return c1_coeff * 1.0 / (c1_coeff + c2_coeff)

    def labels(self):
        """
        Returns a dictionary containing kink information:
        {index: 'x= mixing_ratio energy= reaction_energy reaction_equation'}.
        E.g., {1: 'x= 0.0 energy = 0.0 Mn -> Mn',
               2: 'x= 0.5 energy = -15.0 O2 + Mn -> MnO2',
               3: 'x= 1.0 energy = 0.0 O2 -> O2'}.
        """
        return {
            j: "x= " + str(round(x, 4)) + " energy in eV/atom = " + str(round(energy, 4)) + " " + str(reaction)
            for j, x, energy, reaction, _ in self.get_kinks()
        }

    def plot(self):
        """
        Plots reaction energy as a function of mixing ratio x in
        self.c1 - self.c2 tie line using pylab.

        Returns:
            Pylab object that plots reaction energy as a function of
            mixing ratio x.
        """
        plt.rcParams["xtick.major.pad"] = "6"
        plt.rcParams["ytick.major.pad"] = "6"
        plt.rcParams["axes.linewidth"] = 2
        npoint = 1000
        xs = np.linspace(0, 1, npoint)

        # Converts sampling points in self.c1 - self.c2 tie line to those in
        # self.comp1 - self.comp2 tie line.
        xs_reverse_converted = InterfacialReactivity._reverse_convert(xs, self.factor1, self.factor2)
        energies = [self._get_energy(x) for x in xs_reverse_converted]
        plt.plot(xs, energies, "k-")

        # Marks kinks and minimum energy point.
        kinks = self.get_kinks()
        _, x_kink, energy_kink, _, _ = zip(*kinks)
        plt.scatter(x_kink, energy_kink, marker="o", c="blue", s=20)
        plt.scatter(self.minimum()[0], self.minimum()[1], marker="*", c="red", s=300)

        # Labels kinks with indices. Labels are made draggable
        # in case of overlapping.
        for index, x, energy, _, _ in kinks:
            plt.annotate(
                index,
                xy=(x, energy),
                xytext=(5, 30),
                textcoords="offset points",
                ha="right",
                va="bottom",
                arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=0"),
            ).draggable()
        plt.xlim([-0.05, 1.05])
        if self.norm:
            plt.ylabel("Energy (eV/atom)")
        else:
            plt.ylabel("Energy (eV/f.u.)")
        plt.xlabel("$x$ in $x$ {} + $(1-x)$ {}".format(self.c1.reduced_formula, self.c2.reduced_formula))
        return plt

    def minimum(self):
        """
        Finds the minimum reaction energy E_min and corresponding
        mixing ratio x_min.

        Returns:
            Tuple (x_min, E_min).
        """
        return min([(x, energy) for _, x, energy, _, _ in self.get_kinks()], key=lambda i: i[1])

    def get_no_mixing_energy(self):
        """
        Generates the opposite number of energy above grand potential
        convex hull for both reactants.

        Returns:
            [(reactant1, no_mixing_energy1),(reactant2,no_mixing_energy2)].
        """
        assert self.grand == 1, "Please provide grand potential phase diagram for computing no_mixing_energy!"
        energy1 = self.pd.get_hull_energy(self.comp1) - self._get_grand_potential(self.c1)
        energy2 = self.pd.get_hull_energy(self.comp2) - self._get_grand_potential(self.c2)
        unit = "eV/f.u."
        if self.norm:
            unit = "eV/atom"
        return [
            (self.c1_original.reduced_formula + " ({0})".format(unit), energy1),
            (self.c2_original.reduced_formula + " ({0})".format(unit), energy2),
        ]

    @staticmethod
    def get_chempot_correction(element, temp, pres):
        """
        Get the normalized correction term Δμ for chemical potential of a gas
        phase consisting of element at given temperature and pressure,
        referenced to that in the standard state (T_std = 298.15 K,
        P_std = 1 bar). The gas phase is limited to be one of O2, N2, Cl2,
        F2, H2. Calculation formula can be found in the documentation of
        Materials Project website.

        Args:
            element (string): The string representing the element.
            temp (float): The temperature of the gas phase.
            pres (float): The pressure of the gas phase.

        Returns:
            The correction of chemical potential in eV/atom of the gas
            phase at given temperature and pressure.
        """
        if element not in ["O", "N", "Cl", "F", "H"]:
            return 0

        std_temp = 298.15
        std_pres = 1e5
        ideal_gas_const = 8.3144598

        # Cp and S at standard state in J/(K.mol). Data from
        # https://janaf.nist.gov/tables/O-029.html
        # https://janaf.nist.gov/tables/N-023.html
        # https://janaf.nist.gov/tables/Cl-073.html
        # https://janaf.nist.gov/tables/F-054.html
        # https://janaf.nist.gov/tables/H-050.html
        Cp_dict = {"O": 29.376, "N": 29.124, "Cl": 33.949, "F": 31.302, "H": 28.836}
        S_dict = {"O": 205.147, "N": 191.609, "Cl": 223.079, "F": 202.789, "H": 130.680}

        Cp_std = Cp_dict[element]
        S_std = S_dict[element]

        PV_correction = ideal_gas_const * temp * np.log(pres / std_pres)
        TS_correction = (
            -Cp_std * (temp * np.log(temp) - std_temp * np.log(std_temp))
            + Cp_std * (temp - std_temp) * (1 + np.log(std_temp))
            - S_std * (temp - std_temp)
        )

        dG = PV_correction + TS_correction
        # Convert to eV/molecule unit.
        dG /= 1000 * InterfacialReactivity.EV_TO_KJ_PER_MOL
        # Normalize by number of atoms in the gas molecule. For elements
        # considered, the gas molecules are all diatomic.
        dG /= 2
        return dG
| gmatteo/pymatgen | pymatgen/analysis/interface_reactions.py | Python | mit | 22,139 | [
"pymatgen"
] | bcdaf63101d02a11f8894c2ffa9675513eda93b7f345d28a4e63186a2a44773a |
# This file is part of Androguard.
#
# Copyright (C) 2012, Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class IRForm(object):
    """Base node of the decompiler's intermediate representation.

    Provides conservative defaults for every query; concrete IR nodes
    override only what applies to them.
    """

    def __init__(self):
        # Maps variable names to the IR nodes they currently stand for.
        self.var_map = {}
        self.type = None

    # --- predicates: safest default answers ---------------------------
    def is_call(self):
        return False

    def is_cond(self):
        return False

    def is_const(self):
        return False

    def is_ident(self):
        return False

    def is_propagable(self):
        return True

    def has_side_effect(self):
        return False

    # --- type handling ------------------------------------------------
    def get_type(self):
        return self.type

    def set_type(self, _type):
        self.type = _type

    # --- use/def information ------------------------------------------
    def get_used_vars(self):
        return []

    def get_rhs(self):
        return []

    def get_lhs(self):
        return None

    def remove_defined_var(self):
        pass

    def replace(self, old, new):
        # Subclasses with operands must implement substitution.
        raise NotImplementedError('replace not implemented in %s' % self)

    def visit(self, visitor):
        pass
class Constant(IRForm):
    """Literal value in the IR (numeric, boolean, string or class literal)."""

    def __init__(self, value, atype, int_value=None):
        # NOTE: deliberately does not call IRForm.__init__ (no var_map).
        self.v = 'c%s' % value
        self.cst = value
        # cst2 keeps the raw integer form when one is supplied.
        self.cst2 = value if int_value is None else int_value
        self.type = atype

    def is_const(self):
        return True

    def is_call(self):
        return False

    def get_used_vars(self):
        return []

    def get_int_value(self):
        return self.cst2

    def get_type(self):
        return self.type

    def visit(self, visitor, to_int=False):
        if self.type == 'Z':
            # booleans are stored as 0/1
            return visitor.visit_constant('false' if self.cst == 0 else 'true')
        if self.type == 'class':
            return visitor.visit_base_class(self.cst)
        if to_int:
            return visitor.visit_constant(self.cst2)
        return visitor.visit_constant(self.cst)

    def __str__(self):
        return 'CST_%s' % repr(self.cst)
class BaseClass(IRForm):
    """Reference to a class by name (used for static member access)."""

    def __init__(self, name):
        # does not call IRForm.__init__; holds only the class name
        self.v = 'c%s' % name
        self.cls = name

    def is_const(self):
        return True

    def visit(self, visitor):
        return visitor.visit_base_class(self.cls)

    def __str__(self):
        return 'BASECLASS_%s' % self.cls
class Variable(IRForm):
    """A named local variable in the IR."""

    def __init__(self, value):
        # does not call IRForm.__init__ (no var_map on plain variables)
        self.v = value
        self.declared = False
        self.type = None

    def is_ident(self):
        return True

    def is_call(self):
        return False

    def get_used_vars(self):
        return [self.v]

    def value(self):
        return self.v

    def visit(self, visitor):
        return visitor.visit_variable(self)

    def visit_decl(self, visitor):
        return visitor.visit_decl(self)

    def __str__(self):
        return 'VAR_%s' % self.v
class Param(Variable):
    """Method parameter: a variable that is typed and declared on entry."""

    def __init__(self, value, atype):
        super(Param, self).__init__(value)
        # parameters never need an explicit declaration statement
        self.declared = True
        self.type = atype

    def visit(self, visitor):
        return visitor.visit_param(self.v)

    def __str__(self):
        return 'PARAM_%s' % self.v
class ThisParam(Param):
    """The implicit receiver (`this`) of an instance method."""

    def __init__(self, value, atype):
        super(ThisParam, self).__init__(value, atype)

    def is_const(self):
        return True

    def get_used_vars(self):
        # `this` is never counted as a use
        return []

    def visit(self, visitor):
        return visitor.visit_this()

    def __str__(self):
        return 'THIS'
class AssignExpression(IRForm):
    """Assignment of an arbitrary expression to a variable."""

    def __init__(self, lhs, rhs):
        super(AssignExpression, self).__init__()
        self.lhs = lhs.v
        self.rhs = rhs
        self.var_map[lhs.v] = lhs
        # the target inherits the expression's type
        lhs.set_type(rhs.get_type())

    # All queries delegate to the right-hand side expression.
    def is_call(self):
        return self.rhs.is_call()

    def is_propagable(self):
        return self.rhs.is_propagable()

    def has_side_effect(self):
        return self.rhs.has_side_effect()

    def get_used_vars(self):
        return self.rhs.get_used_vars()

    def get_rhs(self):
        return self.rhs

    def get_lhs(self):
        return self.lhs

    def remove_defined_var(self):
        self.lhs = None

    def replace(self, old, new):
        self.rhs.replace(old, new)

    def visit(self, visitor):
        return visitor.visit_assign(self.var_map.get(self.lhs), self.rhs)

    def __str__(self):
        return 'ASSIGN(%s, %s)' % (self.var_map.get(self.lhs), self.rhs)
class MoveExpression(IRForm):
    """Copy of one variable into another (``lhs = rhs``)."""

    def __init__(self, lhs, rhs):
        super(MoveExpression, self).__init__()
        self.lhs = lhs.v
        self.rhs = rhs.v
        self.var_map.update([(lhs.v, lhs), (rhs.v, rhs)])
        lhs.set_type(rhs.get_type())

    def has_side_effect(self):
        return False

    def is_call(self):
        return self.var_map[self.rhs].is_call()

    def get_used_vars(self):
        return self.var_map[self.rhs].get_used_vars()

    def get_rhs(self):
        return self.var_map[self.rhs]

    def get_lhs(self):
        return self.lhs

    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_move(v_m[self.lhs], v_m[self.rhs])

    def replace(self, old, new):
        v_m = self.var_map
        rhs = v_m[self.rhs]
        if not (rhs.is_const() or rhs.is_ident()):
            # composite expression: substitute recursively
            rhs.replace(old, new)
            return
        if new.is_ident():
            # rebind the source under its new name
            v_m[new.value()] = new
            v_m.pop(old)
            self.rhs = new.value()
        else:
            v_m[old] = new

    def __str__(self):
        v_m = self.var_map
        return '%s = %s' % (v_m.get(self.lhs), v_m.get(self.rhs))
class MoveResultExpression(MoveExpression):
    """Move of an invoke result into a variable."""

    def __init__(self, lhs, rhs):
        super(MoveResultExpression, self).__init__(lhs, rhs)

    # Unlike a plain move, propagability and side effects depend on the
    # wrapped call expression.
    def is_propagable(self):
        return self.var_map[self.rhs].is_propagable()

    def has_side_effect(self):
        return self.var_map[self.rhs].has_side_effect()

    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_move_result(v_m[self.lhs], v_m[self.rhs])

    def __str__(self):
        v_m = self.var_map
        return '%s = %s' % (v_m.get(self.lhs), v_m.get(self.rhs))
class ArrayStoreInstruction(IRForm):
    """Store into an array element: ``array[index] = rhs``.

    Args:
        rhs: IR node producing the stored value.
        array: IR node for the array reference.
        index: IR node for the element index.
        _type: type descriptor of the stored element.
    """

    def __init__(self, rhs, array, index, _type):
        super(ArrayStoreInstruction, self).__init__()
        self.rhs = rhs.v
        self.array = array.v
        self.index = index.v
        self.var_map.update([(rhs.v, rhs), (array.v, array), (index.v, index)])
        self.type = _type

    def has_side_effect(self):
        # writes memory
        return True

    def get_used_vars(self):
        v_m = self.var_map
        lused_vars = v_m[self.array].get_used_vars()
        lused_vars.extend(v_m[self.index].get_used_vars())
        lused_vars.extend(v_m[self.rhs].get_used_vars())
        return list(set(lused_vars))

    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_astore(v_m[self.array],
                                    v_m[self.index], v_m[self.rhs])

    def replace(self, old, new):
        """Substitute variable `old` with node `new` in all three operands."""
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_const() or arg.is_ident()):
                arg.replace(old, new)
            else:
                if new.is_ident():
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.rhs == old:
                        self.rhs = new.value()
                    if self.array == old:
                        self.array = new.value()
                    if self.index == old:
                        # BUGFIX: this branch previously assigned to
                        # self.array, corrupting the array operand instead
                        # of updating the index operand.
                        self.index = new.value()
                else:
                    v_m[old] = new
        else:
            for arg in (v_m[self.array], v_m[self.index], v_m[self.rhs]):
                if not (arg.is_const() or arg.is_ident()):
                    arg.replace(old, new)

    def __str__(self):
        v_m = self.var_map
        return '%s[%s] = %s' % (v_m[self.array], v_m[self.index], v_m[self.rhs])
class StaticInstruction(IRForm):
    """Write to a static field: ``cls.name = rhs``."""

    def __init__(self, rhs, klass, ftype, name):
        super(StaticInstruction, self).__init__()
        self.rhs = rhs.v
        self.cls = klass
        self.ftype = ftype
        self.name = name
        self.var_map[rhs.v] = rhs

    def has_side_effect(self):
        return True

    def get_lhs(self):
        # the target is a field, not a local variable
        return None

    def get_used_vars(self):
        return self.var_map[self.rhs].get_used_vars()

    def visit(self, visitor):
        return visitor.visit_put_static(
            self.cls, self.name, self.var_map[self.rhs])

    def replace(self, old, new):
        v_m = self.var_map
        rhs = v_m[self.rhs]
        if not (rhs.is_const() or rhs.is_ident()):
            rhs.replace(old, new)
            return
        if new.is_ident():
            v_m[new.value()] = new
            v_m.pop(old)
            self.rhs = new.value()
        else:
            v_m[old] = new

    def __str__(self):
        return '%s.%s = %s' % (self.cls, self.name, self.var_map[self.rhs])
class InstanceInstruction(IRForm):
    """Write to an instance field: ``lhs.name = rhs``."""

    def __init__(self, rhs, lhs, klass, atype, name):
        super(InstanceInstruction, self).__init__()
        self.lhs = lhs.v
        self.rhs = rhs.v
        self.atype = atype
        self.cls = klass
        self.name = name
        self.var_map.update([(lhs.v, lhs), (rhs.v, rhs)])

    def has_side_effect(self):
        # mutates the target object
        return True

    def get_used_vars(self):
        # both the receiver and the stored value count as uses
        v_m = self.var_map
        lused_vars = v_m[self.lhs].get_used_vars()
        lused_vars.extend(v_m[self.rhs].get_used_vars())
        return list(set(lused_vars))

    def get_lhs(self):
        # a field write defines no local variable
        return None

    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_put_instance(v_m[self.lhs],
                                          self.name, v_m[self.rhs])

    def replace(self, old, new):
        # Substitute `old` with `new` in receiver and value operands.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_const() or arg.is_ident()):
                arg.replace(old, new)
            else:
                if new.is_ident():
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.lhs == old:
                        self.lhs = new.value()
                    if self.rhs == old:
                        self.rhs = new.value()
                else:
                    v_m[old] = new
        else:
            # `old` is nested inside a composite operand
            for arg in (v_m[self.lhs], v_m[self.rhs]):
                if not (arg.is_const() or arg.is_ident()):
                    arg.replace(old, new)

    def __str__(self):
        v_m = self.var_map
        return '%s.%s = %s' % (v_m[self.lhs], self.name, v_m[self.rhs])
class NewInstance(IRForm):
    """Allocation of a new object of a given type."""

    def __init__(self, ins_type):
        super(NewInstance, self).__init__()
        self.type = ins_type

    def get_type(self):
        return self.type

    def get_used_vars(self):
        return []

    def replace(self, old, new):
        # nothing to substitute: an allocation has no operands
        pass

    def visit(self, visitor):
        return visitor.visit_new(self.type)

    def __str__(self):
        return 'NEW(%s)' % self.type
class InvokeInstruction(IRForm):
    """Method invocation: ``base.name(args)``."""

    def __init__(self, clsname, name, base, rtype, ptype, args):
        super(InvokeInstruction, self).__init__()
        self.cls = clsname
        self.name = name
        self.base = base.v
        # rtype: return type descriptor; ptype: parameter type descriptors
        self.rtype = rtype
        self.ptype = ptype
        self.args = [arg.v for arg in args]
        self.var_map[base.v] = base
        for arg in args:
            self.var_map[arg.v] = arg

    def get_type(self):
        # a constructor call "returns" the type of the receiver it builds
        if self.name == '<init>':
            return self.var_map[self.base].get_type()
        return self.rtype

    def is_call(self):
        return True

    def has_side_effect(self):
        return True

    def replace(self, old, new):
        # Substitute `old` with `new` in the receiver and every argument.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_ident() or arg.is_const()):
                arg.replace(old, new)
            else:
                if new.is_ident():
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.base == old:
                        self.base = new.value()
                    # swap every occurrence of `old` in the argument list
                    for idx, arg in enumerate(self.args):
                        if arg == old:
                            self.args.pop(idx)
                            self.args.insert(idx, new.value())
                else:
                    v_m[old] = new
        else:
            # `old` is nested inside a composite receiver or argument
            base = v_m[self.base]
            if not (base.is_ident() or base.is_const()):
                base.replace(old, new)
            for arg in self.args:
                cnt = v_m[arg]
                if not (cnt.is_ident() or cnt.is_const()):
                    cnt.replace(old, new)

    def get_used_vars(self):
        v_m = self.var_map
        lused_vars = []
        for arg in self.args:
            lused_vars.extend(v_m[arg].get_used_vars())
        lused_vars.extend(v_m[self.base].get_used_vars())
        return list(set(lused_vars))

    def visit(self, visitor):
        v_m = self.var_map
        largs = [v_m[arg] for arg in self.args]
        return visitor.visit_invoke(self.name, v_m[self.base], self.rtype,
                                    self.ptype, largs)

    def __str__(self):
        v_m = self.var_map
        return '%s.%s(%s)' % (v_m[self.base], self.name,
                              ', '.join('%s' % v_m[i] for i in self.args))
class InvokeRangeInstruction(InvokeInstruction):
    """invoke-*/range variant: the receiver is the first register in args."""

    def __init__(self, clsname, name, rtype, ptype, args):
        # NOTE: pops the receiver off the caller's args list in place.
        base = args.pop(0)
        super(InvokeRangeInstruction, self).__init__(clsname, name, base,
                                                     rtype, ptype, args)
class InvokeDirectInstruction(InvokeInstruction):
    """invoke-direct variant (constructors and private methods)."""

    def __init__(self, clsname, name, base, rtype, ptype, args):
        super(InvokeDirectInstruction, self).__init__(clsname, name, base,
                                                      rtype, ptype, args)
class InvokeStaticInstruction(InvokeInstruction):
    """invoke-static variant: the base is a class reference, not a value."""

    def __init__(self, clsname, name, base, rtype, ptype, args):
        super(InvokeStaticInstruction, self).__init__(clsname, name, base,
                                                      rtype, ptype, args)

    def get_used_vars(self):
        # unlike instance calls, the class-reference base is not a used var
        v_m = self.var_map
        used = set()
        for arg in self.args:
            used.update(v_m[arg].get_used_vars())
        return list(used)
class ReturnInstruction(IRForm):
    """Return statement, with or without a value."""

    def __init__(self, arg):
        super(ReturnInstruction, self).__init__()
        # self.arg holds the variable name, or None for `return-void`.
        self.arg = arg
        if arg is not None:
            self.var_map[arg.v] = arg
            self.arg = arg.v

    def get_used_vars(self):
        if self.arg is None:
            return []
        return self.var_map[self.arg].get_used_vars()

    def get_lhs(self):
        return None

    def visit(self, visitor):
        if self.arg is None:
            return visitor.visit_return_void()
        return visitor.visit_return(self.var_map[self.arg])

    def replace(self, old, new):
        v_m = self.var_map
        arg = v_m[self.arg]
        if not (arg.is_const() or arg.is_ident()):
            arg.replace(old, new)
            return
        if new.is_ident():
            v_m[new.value()] = new
            v_m.pop(old)
            self.arg = new.value()
        else:
            v_m[old] = new

    def __str__(self):
        if self.arg is not None:
            return 'RETURN(%s)' % self.var_map.get(self.arg)
        return 'RETURN'
class NopExpression(IRForm):
    """No-operation placeholder."""

    def __init__(self):
        # intentionally skips IRForm.__init__: a nop carries no state
        pass

    def get_used_vars(self):
        return []

    def get_lhs(self):
        return None

    def visit(self, visitor):
        return visitor.visit_nop()
class SwitchExpression(IRForm):
    """Switch on a variable; `branch` holds the case table."""

    def __init__(self, src, branch):
        super(SwitchExpression, self).__init__()
        self.src = src.v
        self.branch = branch
        self.var_map[src.v] = src

    def get_used_vars(self):
        return self.var_map[self.src].get_used_vars()

    def visit(self, visitor):
        return visitor.visit_switch(self.var_map[self.src])

    def replace(self, old, new):
        v_m = self.var_map
        src = v_m[self.src]
        if not (src.is_const() or src.is_ident()):
            src.replace(old, new)
            return
        if new.is_ident():
            v_m[new.value()] = new
            v_m.pop(old)
            self.src = new.value()
        else:
            v_m[old] = new

    def __str__(self):
        return 'SWITCH(%s)' % (self.var_map[self.src])
class CheckCastExpression(IRForm):
    """check-cast of a value to a target type."""

    def __init__(self, arg, _type):
        super(CheckCastExpression, self).__init__()
        self.arg = arg.v
        self.var_map[arg.v] = arg
        self.type = _type

    def get_used_vars(self):
        return self.var_map[self.arg].get_used_vars()

    def visit(self, visitor):
        return visitor.visit_check_cast(self.var_map[self.arg], self.type)

    def replace(self, old, new):
        v_m = self.var_map
        arg = v_m[self.arg]
        if not (arg.is_const() or arg.is_ident()):
            arg.replace(old, new)
            return
        if new.is_ident():
            v_m[new.value()] = new
            v_m.pop(old)
            self.arg = new.value()
        else:
            v_m[old] = new
class ArrayExpression(IRForm):
    """Marker base class for array-related IR expressions."""
    def __init__(self):
        super(ArrayExpression, self).__init__()
class ArrayLoadExpression(ArrayExpression):
    """IR node for `aget`: load ``array[index]`` with element type `_type`."""
    def __init__(self, arg, index, _type):
        super(ArrayLoadExpression, self).__init__()
        self.array = arg.v
        self.idx = index.v
        self.var_map.update([(arg.v, arg), (index.v, index)])
        self.type = _type
    def get_used_vars(self):
        # Union of the variables used by the array and the index operands.
        v_m = self.var_map
        lused_vars = v_m[self.array].get_used_vars()
        lused_vars.extend(v_m[self.idx].get_used_vars())
        return list(set(lused_vars))
    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_aload(v_m[self.array], v_m[self.idx])
    def get_type(self):
        # Strip one leading '[' from the array descriptor -> element type.
        return self.var_map[self.array].get_type()[1:]
    def replace(self, old, new):
        # Substitute `old` with `new` in the array and/or index operands.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_ident() or arg.is_const()):
                # Composite operand: recurse into it.
                arg.replace(old, new)
            else:
                if new.is_ident():
                    # Re-key the map and update whichever operands matched.
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.array == old:
                        self.array = new.value()
                    if self.idx == old:
                        self.idx = new.value()
                else:
                    # `new` is an expression: store it under the old key.
                    v_m[old] = new
        else:
            # `old` is not a direct operand: recurse into composite operands.
            for arg in (self.array, self.idx):
                cnt = v_m[arg]
                if not (cnt.is_ident() or cnt.is_const()):
                    cnt.replace(old, new)
    def __str__(self):
        v_m = self.var_map
        return 'ARRAYLOAD(%s, %s)' % (v_m[self.array], v_m[self.idx])
class ArrayLengthExpression(ArrayExpression):
    """IR node for `array-length` of the array referenced by `array`."""
    def __init__(self, array):
        super(ArrayLengthExpression, self).__init__()
        self.array = array.v
        self.var_map[array.v] = array
    def get_type(self):
        # array-length always yields an int.
        return 'I'
    def get_used_vars(self):
        return self.var_map[self.array].get_used_vars()
    def visit(self, visitor):
        return visitor.visit_alength(self.var_map[self.array])
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the array operand.
        v_m = self.var_map
        array = v_m[self.array]
        if not (array.is_const() or array.is_ident()):
            # Composite operand: recurse into it.
            array.replace(old, new)
        else:
            if new.is_ident():
                v_m[new.value()] = new
                # Bugfix/consistency: drop the stale entry for the replaced
                # variable, as every sibling replace() does; the original
                # leaked the old key in var_map.
                v_m.pop(old)
                self.array = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
    def __str__(self):
        return 'ARRAYLEN(%s)' % (self.var_map[self.array])
class NewArrayExpression(ArrayExpression):
    """IR node for `new-array`: allocate an array of `atype`, size `asize`."""
    def __init__(self, asize, atype):
        super(NewArrayExpression, self).__init__()
        self.size = asize.v
        self.type = atype
        self.var_map[asize.v] = asize
    def is_propagable(self):
        # An allocation must not be duplicated by copy propagation.
        return False
    def get_used_vars(self):
        return self.var_map[self.size].get_used_vars()
    def visit(self, visitor):
        return visitor.visit_new_array(self.type, self.var_map[self.size])
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the size operand.
        v_m = self.var_map
        size = v_m[self.size]
        if not (size.is_const() or size.is_ident()):
            # Composite operand: recurse into it.
            size.replace(old, new)
        else:
            if new.is_ident():
                # Re-key the map from the old variable to the new one.
                v_m[new.value()] = new
                v_m.pop(old)
                self.size = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
    def __str__(self):
        return 'NEWARRAY_%s[%s]' % (self.type, self.var_map[self.size])
class FilledArrayExpression(ArrayExpression):
    """IR node for `filled-new-array`: build an array from operand `args`.

    NOTE(review): unlike sibling classes, ``self.size`` stores the raw size
    operand (not its ``.v`` key) and is passed to the visitor unchanged --
    confirm this asymmetry is intended.
    """
    def __init__(self, asize, atype, args):
        super(FilledArrayExpression, self).__init__()
        self.size = asize
        self.type = atype
        self.args = []
        for arg in args:
            self.var_map[arg.v] = arg
            self.args.append(arg.v)
    def get_used_vars(self):
        # Union of the variables used by every element operand.
        lused_vars = []
        for arg in self.args:
            lused_vars.extend(self.var_map[arg].get_used_vars())
        return list(set(lused_vars))
    def replace(self, old, new):
        # Substitute `old` with `new` in the element operands.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_ident() or arg.is_const()):
                # Composite operand: recurse into it.
                arg.replace(old, new)
            else:
                if new.is_ident():
                    # Re-key the map and rewrite matching argument slots,
                    # preserving their positions with pop/insert.
                    v_m[new.value()] = new
                    v_m.pop(old)
                    for idx, arg in enumerate(self.args):
                        if arg == old:
                            self.args.pop(idx)
                            self.args.insert(idx, new.value())
                else:
                    # `new` is an expression: store it under the old key.
                    v_m[old] = new
        else:
            # `old` is not a direct operand: recurse into composite operands.
            for arg in self.args:
                cnt = v_m[arg]
                if not (cnt.is_ident() or cnt.is_const()):
                    cnt.replace(old, new)
    def visit(self, visitor):
        v_m = self.var_map
        largs = [v_m[arg] for arg in self.args]
        return visitor.visit_filled_new_array(self.type, self.size, largs)
class FillArrayExpression(ArrayExpression):
    """IR node for `fill-array-data`: bulk-initialize array `reg` from the
    payload `value`."""
    def __init__(self, reg, value):
        super(FillArrayExpression, self).__init__()
        key = reg.v
        self.reg = key
        self.var_map[key] = reg
        self.value = value
    def is_propagable(self):
        # Filling an array is a side effect; never propagate it.
        return False
    def get_rhs(self):
        return self.reg
    def get_used_vars(self):
        target = self.var_map[self.reg]
        return target.get_used_vars()
    def visit(self, visitor):
        target = self.var_map[self.reg]
        return visitor.visit_fill_array(target, self.value)
class RefExpression(IRForm):
    """Base class for IR nodes that act on a single object reference `ref`."""
    def __init__(self, ref):
        super(RefExpression, self).__init__()
        self.ref = ref.v
        self.var_map[ref.v] = ref
    def is_propagable(self):
        # Reference operations have side effects; do not propagate them.
        return False
    def get_used_vars(self):
        return self.var_map[self.ref].get_used_vars()
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the reference operand.
        v_m = self.var_map
        ref = v_m[self.ref]
        if not (ref.is_const() or ref.is_ident()):
            # Composite operand: recurse into it.
            ref.replace(old, new)
        else:
            if new.is_ident():
                # Re-key the map from the old variable to the new one.
                v_m[new.value()] = new
                v_m.pop(old)
                self.ref = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
class MoveExceptionExpression(RefExpression):
    """IR node for `move-exception`: bind the caught exception to `ref`."""
    def __init__(self, ref, _type):
        super(MoveExceptionExpression, self).__init__(ref)
        self.type = _type
        ref.set_type(_type)
    def get_lhs(self):
        # The exception register is defined here.
        return self.ref
    def has_side_effect(self):
        return True
    def get_used_vars(self):
        # Defines a variable but reads none.
        return []
    def visit(self, visitor):
        exc = self.var_map[self.ref]
        return visitor.visit_move_exception(exc)
    def __str__(self):
        return 'MOVE_EXCEPT %s' % self.var_map[self.ref]
class MonitorEnterExpression(RefExpression):
    """IR node for `monitor-enter`: acquire the lock held by `ref`."""
    def __init__(self, ref):
        super(MonitorEnterExpression, self).__init__(ref)
    def visit(self, visitor):
        target = self.var_map[self.ref]
        return visitor.visit_monitor_enter(target)
class MonitorExitExpression(RefExpression):
    """IR node for `monitor-exit`: release the lock held by `ref`."""
    def __init__(self, ref):
        super(MonitorExitExpression, self).__init__(ref)
    def visit(self, visitor):
        target = self.var_map[self.ref]
        return visitor.visit_monitor_exit(target)
class ThrowExpression(RefExpression):
    """IR node for `throw`: raise the exception object in `ref`."""
    def __init__(self, ref):
        super(ThrowExpression, self).__init__(ref)
    def visit(self, visitor):
        exc = self.var_map[self.ref]
        return visitor.visit_throw(exc)
class BinaryExpression(IRForm):
    """IR node for a binary operation ``arg1 op arg2`` of type `_type`."""
    def __init__(self, op, arg1, arg2, _type):
        super(BinaryExpression, self).__init__()
        self.op = op
        self.arg1 = arg1.v
        self.arg2 = arg2.v
        self.var_map.update([(arg1.v, arg1), (arg2.v, arg2)])
        self.type = _type
    def has_side_effect(self):
        # Side effects bubble up from either operand.
        v_m = self.var_map
        return (v_m[self.arg1].has_side_effect() or
                v_m[self.arg2].has_side_effect())
    def get_used_vars(self):
        # Union of the variables used by both operands.
        v_m = self.var_map
        lused_vars = v_m[self.arg1].get_used_vars()
        lused_vars.extend(v_m[self.arg2].get_used_vars())
        return list(set(lused_vars))
    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_binary_expression(self.op, v_m[self.arg1],
                                               v_m[self.arg2])
    def replace(self, old, new):
        # Substitute `old` with `new` in one or both operands.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_const() or arg.is_ident()):
                # Composite operand: recurse into it.
                arg.replace(old, new)
            else:
                if new.is_ident():
                    # Re-key the map and update whichever operands matched.
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.arg1 == old:
                        self.arg1 = new.value()
                    if self.arg2 == old:
                        self.arg2 = new.value()
                else:
                    # `new` is an expression: store it under the old key.
                    v_m[old] = new
        else:
            # `old` is not a direct operand: recurse into composite operands.
            for arg in (v_m[self.arg1], v_m[self.arg2]):
                if not (arg.is_ident() or arg.is_const()):
                    arg.replace(old, new)
    def __str__(self):
        v_m = self.var_map
        return '(%s %s %s)' % (self.op, v_m[self.arg1], v_m[self.arg2])
class BinaryCompExpression(BinaryExpression):
    """Binary comparison (cmp/cmpl/cmpg) rendered as a condition node."""
    def __init__(self, op, arg1, arg2, _type):
        super(BinaryCompExpression, self).__init__(op, arg1, arg2, _type)
    def visit(self, visitor):
        operands = self.var_map
        return visitor.visit_cond_expression(
            self.op, operands[self.arg1], operands[self.arg2])
class BinaryExpression2Addr(BinaryExpression):
    """Two-address binary op (`op dest, arg`): identical semantics to the
    three-address form, with `dest` doubling as the first operand."""
    def __init__(self, op, dest, arg, _type):
        super(BinaryExpression2Addr, self).__init__(op, dest, arg, _type)
class BinaryExpressionLit(BinaryExpression):
    """Binary op with a literal operand; result type is always int ('I')."""
    def __init__(self, op, arg1, arg2):
        super(BinaryExpressionLit, self).__init__(op, arg1, arg2, 'I')
class UnaryExpression(IRForm):
    """IR node for a unary operation ``op arg`` of type `_type`."""
    def __init__(self, op, arg, _type):
        super(UnaryExpression, self).__init__()
        self.op = op
        self.arg = arg.v
        self.var_map[arg.v] = arg
        self.type = _type
    def get_type(self):
        # Result type follows the operand, not self.type.
        return self.var_map[self.arg].get_type()
    def get_used_vars(self):
        return self.var_map[self.arg].get_used_vars()
    def visit(self, visitor):
        return visitor.visit_unary_expression(self.op, self.var_map[self.arg])
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the operand.
        v_m = self.var_map
        arg = v_m[self.arg]
        if not (arg.is_const() or arg.is_ident()):
            # Composite operand: recurse into it.
            arg.replace(old, new)
        elif old in v_m:
            if new.is_ident():
                # Re-key the map from the old variable to the new one.
                v_m[new.value()] = new
                v_m.pop(old)
                self.arg = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
    def __str__(self):
        return '(%s, %s)' % (self.op, self.var_map[self.arg])
class CastExpression(UnaryExpression):
    """Primitive cast: `op` is the cast opcode, `atype` the target type."""
    def __init__(self, op, atype, arg):
        super(CastExpression, self).__init__(op, arg, atype)
    def get_type(self):
        # Result type is the cast target, not the operand type.
        return self.type
    def get_used_vars(self):
        operand = self.var_map[self.arg]
        return operand.get_used_vars()
    def visit(self, visitor):
        operand = self.var_map[self.arg]
        return visitor.visit_cast(self.op, operand)
    def __str__(self):
        return 'CAST_%s(%s)' % (self.op, self.var_map[self.arg])
# Map each comparison operator to its logical negation; used to invert a
# branch condition when restructuring control flow.
CONDS = dict([
    ('==', '!='),
    ('!=', '=='),
    ('<', '>='),
    ('<=', '>'),
    ('>=', '<'),
    ('>', '<='),
])
class ConditionalExpression(IRForm):
    """IR node for a two-operand branch condition ``arg1 op arg2``."""
    def __init__(self, op, arg1, arg2):
        super(ConditionalExpression, self).__init__()
        self.op = op
        self.arg1 = arg1.v
        self.arg2 = arg2.v
        self.var_map.update([(arg1.v, arg1), (arg2.v, arg2)])
    def get_lhs(self):
        # A condition defines nothing.
        return None
    def is_cond(self):
        return True
    def get_used_vars(self):
        # Union of the variables used by both operands.
        v_m = self.var_map
        lused_vars = v_m[self.arg1].get_used_vars()
        lused_vars.extend(v_m[self.arg2].get_used_vars())
        return list(set(lused_vars))
    def neg(self):
        # Invert the comparison using the CONDS negation table.
        self.op = CONDS[self.op]
    def visit(self, visitor):
        v_m = self.var_map
        return visitor.visit_cond_expression(self.op, v_m[self.arg1],
                                             v_m[self.arg2])
    def replace(self, old, new):
        # Substitute `old` with `new` in one or both operands.
        v_m = self.var_map
        if old in v_m:
            arg = v_m[old]
            if not (arg.is_const() or arg.is_ident()):
                # Composite operand: recurse into it.
                arg.replace(old, new)
            else:
                if new.is_ident():
                    # Re-key the map and update whichever operands matched.
                    v_m[new.value()] = new
                    v_m.pop(old)
                    if self.arg1 == old:
                        self.arg1 = new.value()
                    if self.arg2 == old:
                        self.arg2 = new.value()
                else:
                    # `new` is an expression: store it under the old key.
                    v_m[old] = new
        else:
            # `old` is not a direct operand: recurse into composite operands.
            for arg in (v_m[self.arg1], v_m[self.arg2]):
                if not (arg.is_ident() or arg.is_const()):
                    arg.replace(old, new)
    def __str__(self):
        v_m = self.var_map
        return 'COND(%s, %s, %s)' % (self.op, v_m[self.arg1], v_m[self.arg2])
class ConditionalZExpression(IRForm):
    """IR node for a compare-with-zero condition (``arg op 0``)."""
    def __init__(self, op, arg):
        super(ConditionalZExpression, self).__init__()
        self.op = op
        self.arg = arg.v
        self.var_map[arg.v] = arg
    def get_lhs(self):
        # A condition defines nothing.
        return None
    def is_cond(self):
        return True
    def get_used_vars(self):
        return self.var_map[self.arg].get_used_vars()
    def neg(self):
        # Invert the comparison using the CONDS negation table.
        self.op = CONDS[self.op]
    def visit(self, visitor):
        return visitor.visit_condz_expression(self.op, self.var_map[self.arg])
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the tested operand.
        v_m = self.var_map
        arg = v_m[self.arg]
        if not (arg.is_const() or arg.is_ident()):
            # Composite operand: recurse into it.
            arg.replace(old, new)
        elif old in v_m:
            if new.is_ident():
                # Re-key the map from the old variable to the new one.
                v_m[new.value()] = new
                v_m.pop(old)
                self.arg = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
    def __str__(self):
        return '(IS%s0, %s)' % (self.op, self.var_map[self.arg])
class InstanceExpression(IRForm):
    """IR node for an instance-field read: ``arg.name`` declared in class
    `klass` with field type `ftype`."""
    def __init__(self, arg, klass, ftype, name):
        super(InstanceExpression, self).__init__()
        self.arg = arg.v
        self.cls = klass
        self.ftype = ftype
        self.name = name
        self.var_map[arg.v] = arg
    def get_type(self):
        # The field's declared type.
        return self.ftype
    def get_used_vars(self):
        return self.var_map[self.arg].get_used_vars()
    def visit(self, visitor):
        return visitor.visit_get_instance(self.var_map[self.arg], self.name)
    def replace(self, old, new):
        # Substitute variable `old` with node `new` in the object operand.
        v_m = self.var_map
        arg = v_m[self.arg]
        if not (arg.is_const() or arg.is_ident()):
            # Composite operand: recurse into it.
            arg.replace(old, new)
        elif old in v_m:
            if new.is_ident():
                # Re-key the map from the old variable to the new one.
                v_m[new.value()] = new
                v_m.pop(old)
                self.arg = new.value()
            else:
                # `new` is an expression: store it under the old key.
                v_m[old] = new
    def __str__(self):
        return '%s.%s' % (self.var_map[self.arg], self.name)
class StaticExpression(IRForm):
    """IR node for a static-field read: ``cls_name.field_name`` of type
    `field_type`."""
    def __init__(self, cls_name, field_type, field_name):
        super(StaticExpression, self).__init__()
        self.cls = cls_name
        self.ftype = field_type
        self.name = field_name
    def get_type(self):
        return self.ftype
    def visit(self, visitor):
        return visitor.visit_get_static(self.cls, self.name)
    def replace(self, old, new):
        # A static field access references no local variables.
        pass
    def __str__(self):
        return '%s.%s' % (self.cls, self.name)
| andymg/androguard | androguard/decompiler/dad/instruction.py | Python | apache-2.0 | 33,388 | [
"VisIt"
] | 90e70b544b90f4c83cc6cfe3e2f2661821a68ebb2dc0ac4f4243472972dcb78c |
"""
Model grism spectra in individual FLTs
"""
import os
from collections import OrderedDict
import copy
import numpy as np
import scipy.ndimage as nd
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
from astropy.table import Table
import astropy.wcs as pywcs
import astropy.units as u
#import stwcs
# Helper functions from a document written by Pirzkal, Brammer & Ryan
from . import grismconf
from . import utils
# from .utils_c import disperse
# from .utils_c import interp
from . import GRIZLI_PATH
# Interpolation scheme used when resampling segmentation images.
# NOTE(review): the original comment said "Would prefer 'nearest' but that
# occasionally segment faults out", yet the value *is* 'nearest' -- the
# comment and value contradict each other; confirm which is intended.
SEGMENTATION_INTERP = 'nearest'
# PHOTFLAM inverse sensitivities: convert HST count rates (e-/s) to
# f_lambda flux densities (erg/s/cm2/A).  (Fixed typo: "Flamba".)
# Entries without a physical calibration (GRISM/G150 for Roman, G800L,
# G280) are placeholders; VISTAH is scaled from F160W.
photflam_list = {'F098M': 6.0501324882418389e-20,
                 'F105W': 3.038658152508547e-20,
                 'F110W': 1.5274130068787271e-20,
                 'F125W': 2.2483414275260141e-20,
                 'F140W': 1.4737154005353565e-20,
                 'F160W': 1.9275637653833683e-20,
                 'F435W': 3.1871480286278679e-19,
                 'F606W': 7.8933594352047833e-20,
                 'F775W': 1.0088466875014488e-19,
                 'F814W': 7.0767633156044843e-20,
                 'VISTAH': 1.9275637653833683e-20*0.95,
                 'GRISM': 1.e-20,
                 'G150': 1.e-20,
                 'G800L': 1.,
                 'G280': 1.,
                 'F444W': 1.e-20}
# Filter pivot wavelengths in Angstroms (HST, VISTA, Roman grisms, and
# JWST NIRCam/NIRISS bandpasses).
photplam_list = {'F098M': 9864.722728110915,
                 'F105W': 10551.046906405772,
                 'F110W': 11534.45855553774,
                 'F125W': 12486.059785775655,
                 'F140W': 13922.907350356367,
                 'F160W': 15369.175708965562,
                 'F435W': 4328.256914042873,
                 'F606W': 5921.658489236346,
                 'F775W': 7693.297933335407,
                 'F814W': 8058.784799323767,
                 'VISTAH': 1.6433e+04,
                 'GRISM': 1.6e4,  # WFIRST/Roman
                 'G150': 1.46e4,  # WFIRST/Roman
                 'G800L': 7.4737026e3,
                 'G280': 3651.,
                 'F070W': 7.043e+03,  # NIRCam
                 'F090W': 9.023e+03,
                 'F115W': 1.150e+04,  # NIRISS
                 'F150W': 1.493e+04,  # NIRISS
                 'F200W': 1.993e+04,  # NIRISS
                 'F150W2': 1.658e+04,
                 'F140M': 1.405e+04,
                 'F158M': 1.582e+04,  # NIRISS
                 'F162M': 1.627e+04,
                 'F182M': 1.845e+04,
                 'F210M': 2.096e+04,
                 'F164N': 1.645e+04,
                 'F187N': 1.874e+04,
                 'F212N': 2.121e+04,
                 'F277W': 2.758e+04,
                 'F356W': 3.568e+04,
                 'F444W': 4.404e+04,
                 'F322W2': 3.232e+04,
                 'F250M': 2.503e+04,
                 'F300M': 2.987e+04,
                 'F335M': 3.362e+04,
                 'F360M': 3.624e+04,
                 'F380M': 3.825e+04,  # NIRISS
                 'F410M': 4.082e+04,
                 'F430M': 4.280e+04,
                 'F460M': 4.626e+04,
                 'F480M': 4.816e+04,
                 'F323N': 3.237e+04,
                 'F405N': 4.052e+04,
                 'F466N': 4.654e+04,
                 'F470N': 4.708e+04}
# character to skip clearing line on STDOUT printing
#no_newline = '\x1b[1A\x1b[1M'
# Demo (never executed): how the photflam_list / photplam_list tables above
# were computed with pysynphot.  Kept for reference only.
if False:
    import pysynphot as S
    n = 1.e-20
    spec = S.FlatSpectrum(n, fluxunits='flam')
    photflam_list = {}
    photplam_list = {}
    # WFC3/IR filters + grisms
    for filter in ['F098M', 'F105W', 'F110W', 'F125W', 'F140W', 'F160W', 'G102', 'G141']:
        bp = S.ObsBandpass('wfc3,ir,{0}'.format(filter.lower()))
        photplam_list[filter] = bp.pivot()
        obs = S.Observation(spec, bp)
        photflam_list[filter] = n/obs.countrate()
    # ACS/WFC filters
    for filter in ['F435W', 'F606W', 'F775W', 'F814W']:
        bp = S.ObsBandpass('acs,wfc1,{0}'.format(filter.lower()))
        photplam_list[filter] = bp.pivot()
        obs = S.Observation(spec, bp)
        photflam_list[filter] = n/obs.countrate()
class GrismDisperser(object):
def __init__(self, id=0, direct=None,
segmentation=None, origin=[500, 500],
xcenter=0., ycenter=0., pad=0, grow=1, beam='A',
conf=['WFC3', 'F140W', 'G141'], scale=1.,
fwcpos=None, MW_EBV=0., yoffset=0):
"""Object for computing dispersed model spectra
Parameters
----------
id : int
Only consider pixels in the segmentation image with value `id`.
Default of zero to match the default empty segmentation image.
direct : `~numpy.ndarray`
Direct image cutout in f_lambda units (i.e., e-/s times PHOTFLAM).
Default is a trivial zeros array.
segmentation : `~numpy.ndarray` (float32) or None
Segmentation image. If None, create a zeros array with the same
shape as `direct`.
origin : [int, int]
`origin` defines the lower left pixel index (y,x) of the `direct`
cutout from a larger detector-frame image
xcenter, ycenter : float, float
Sub-pixel centering of the exact center of the object, relative
to the center of the thumbnail. Needed for getting exact
wavelength grid correct for the extracted 2D spectra.
pad : int
Offset between origin = [0,0] and the true lower left pixel of the
detector frame. This can be nonzero for cases where one creates
a direct image that extends beyond the boundaries of the nominal
detector frame to model spectra at the edges.
grow : int >= 1
Interlacing factor.
beam : str
Spectral order to compute. Must be defined in `self.conf.beams`
conf : [str, str, str] or `grismconf.aXeConf` object.
Pre-loaded aXe-format configuration file object or if list of
strings determine the appropriate configuration filename with
`grismconf.get_config_filename` and load it.
scale : float
Multiplicative factor to apply to the modeled spectrum from
`compute_model`.
fwcpos : float
Rotation position of the NIRISS filter wheel
MW_EBV : float
Galactic extinction
yoffset : float
Cross-dispersion offset to apply to the trace
Attributes
----------
sh : 2-tuple
shape of the direct array
sh_beam : 2-tuple
computed shape of the 2D spectrum
seg : `~numpy.array`
segmentation array
lam : `~numpy.array`
wavelength along the trace
ytrace : `~numpy.array`
y pixel center of the trace. Has same dimensions as sh_beam[1].
sensitivity : `~numpy.array`
conversion factor from native e/s to f_lambda flux densities
lam_beam, ytrace_beam, sensitivity_beam : `~numpy.array`
Versions of the above attributes defined for just the specific
pixels of the pixel beam, not the full 2D extraction.
modelf, model : `~numpy.array`, `~numpy.ndarray`
2D model spectrum. `model` is linked to `modelf` with "reshape",
the later which is a flattened 1D array where the fast
calculations are actually performed.
model : `~numpy.ndarray`
2D model spectrum linked to `modelf` with reshape.
slx_parent, sly_parent : slice
slices defined relative to `origin` to match the location of the
computed 2D spectrum.
total_flux : float
Total f_lambda flux in the thumbail within the segmentation
region.
"""
self.id = id
# lower left pixel of the `direct` array in native detector
# coordinates
self.origin = origin
self.pad = pad
self.grow = grow
# Galactic extinction
self.MW_EBV = MW_EBV
self.init_galactic_extinction(self.MW_EBV)
self.fwcpos = fwcpos
self.scale = scale
# Direct image
if direct is None:
direct = np.zeros((20, 20), dtype=np.float32)
self.direct = direct
self.sh = self.direct.shape
if self.direct.dtype is not np.float32:
self.direct = np.cast[np.float32](self.direct)
# Segmentation image, defaults to all zeros
if segmentation is None:
#self.seg = np.zeros_like(self.direct, dtype=np.float32)
empty = np.zeros_like(self.direct, dtype=np.float32)
self.set_segmentation(empty)
else:
self.set_segmentation(segmentation.astype(np.float32))
# Initialize attributes
self.spectrum_1d = None
self.is_cgs = False
self.xc = self.sh[1]/2+self.origin[1]
self.yc = self.sh[0]/2+self.origin[0]
# Sub-pixel centering of the exact center of the object, relative
# to the center of the thumbnail
self.xcenter = xcenter
self.ycenter = ycenter
self.beam = beam
# Config file
if isinstance(conf, list):
conf_f = grismconf.get_config_filename(*conf)
self.conf = grismconf.load_grism_config(conf_f)
else:
self.conf = conf
# Get Pixel area map (xxx need to add test for WFC3)
self.PAM_value = self.get_PAM_value(verbose=False)
#print('xxx PAM!')
self.process_config()
self.yoffset = yoffset
if yoffset != 0:
#print('yoffset!', yoffset)
self.add_ytrace_offset(yoffset)
def set_segmentation(self, seg_array):
"""
Set Segmentation array and `total_flux`.
"""
self.seg = seg_array*1
self.seg_ids = list(np.unique(self.seg))
try:
self.total_flux = self.direct[self.seg == self.id].sum()
if self.total_flux == 0:
self.total_flux = 1
except:
self.total_flux = 1.
def init_galactic_extinction(self, MW_EBV=0., R_V=utils.MW_RV):
"""
Initialize Fitzpatrick 99 Galactic extinction
Parameters
----------
MW_EBV : float
Local E(B-V)
R_V : float
Relation between specific and total extinction,
``a_v = r_v * ebv``.
Returns
-------
Sets `self.MW_F99` attribute, which is a callable function that
returns the extinction for a supplied array of wavelengths.
If MW_EBV <= 0, then sets `self.MW_F99 = None`.
"""
self.MW_F99 = None
if MW_EBV > 0:
self.MW_F99 = utils.MW_F99(MW_EBV*R_V, r_v=R_V)
def process_config(self):
"""Process grism config file
Parameters
----------
none
Returns
-------
Sets attributes that define how the dispersion is computed. See the
attributes list for `~grizli.model.GrismDisperser`.
"""
from .utils_c import interp
# Get dispersion parameters at the reference position
self.dx = self.conf.dxlam[self.beam] # + xcenter #-xoff
if self.grow > 1:
self.dx = np.arange(self.dx[0]*self.grow, self.dx[-1]*self.grow)
xoff = 0.
if ('G14' in self.conf.conf_file) & (self.beam == 'A'):
xoff = -0.5 # necessary for WFC3/IR G141, v4.32
# xoff = 0. # suggested by ACS
# xoff = -2.5 # test
self.xoff = xoff
self.ytrace_beam, self.lam_beam = self.conf.get_beam_trace(
x=(self.xc+self.xcenter-self.pad)/self.grow,
y=(self.yc+self.ycenter-self.pad)/self.grow,
dx=(self.dx+self.xcenter*0+self.xoff)/self.grow,
beam=self.beam, fwcpos=self.fwcpos)
self.ytrace_beam *= self.grow
# Integer trace
# Add/subtract 20 for handling int of small negative numbers
dyc = np.cast[int](self.ytrace_beam+20)-20+1
# Account for pixel centering of the trace
self.yfrac_beam = self.ytrace_beam - np.floor(self.ytrace_beam)
# Interpolate the sensitivity curve on the wavelength grid.
ysens = self.lam_beam*0
so = np.argsort(self.lam_beam)
conf_sens = self.conf.sens[self.beam]
if self.MW_F99 is not None:
MWext = 10**(-0.4*(self.MW_F99(conf_sens['WAVELENGTH']*u.AA)))
else:
MWext = 1.
ysens[so] = interp.interp_conserve_c(self.lam_beam[so],
conf_sens['WAVELENGTH'],
conf_sens['SENSITIVITY']*MWext,
integrate=1, left=0, right=0)
self.lam_sort = so
# Needs term of delta wavelength per pixel for flux densities
# dl = np.abs(np.append(self.lam_beam[1] - self.lam_beam[0],
# np.diff(self.lam_beam)))
# ysens *= dl#*1.e-17
self.sensitivity_beam = ysens
# Initialize the model arrays
self.NX = len(self.dx)
self.sh_beam = (self.sh[0], self.sh[1]+self.NX)
self.modelf = np.zeros(np.product(self.sh_beam), dtype=np.float32)
self.model = self.modelf.reshape(self.sh_beam)
self.idx = np.arange(self.modelf.size,
dtype=np.int64).reshape(self.sh_beam)
# Indices of the trace in the flattened array
self.x0 = np.array(self.sh, dtype=np.int64) // 2
self.x0 -= 1 # zero index!
self.dxpix = self.dx - self.dx[0] + self.x0[1] # + 1
try:
self.flat_index = self.idx[dyc + self.x0[0], self.dxpix]
except IndexError:
#print('Index Error', id, dyc.dtype, self.dxpix.dtype, self.x0[0], self.xc, self.yc, self.beam, self.ytrace_beam.max(), self.ytrace_beam.min())
raise IndexError
# Trace, wavelength, sensitivity across entire 2D array
self.dxfull = np.arange(self.sh_beam[1], dtype=int)
self.dxfull += self.dx[0]-self.x0[1]
# self.ytrace, self.lam = self.conf.get_beam_trace(x=self.xc,
# y=self.yc, dx=self.dxfull, beam=self.beam)
self.ytrace, self.lam = self.conf.get_beam_trace(
x=(self.xc+self.xcenter-self.pad)/self.grow,
y=(self.yc+self.ycenter-self.pad)/self.grow,
dx=(self.dxfull+self.xcenter+xoff)/self.grow,
beam=self.beam, fwcpos=self.fwcpos)
self.ytrace *= self.grow
ysens = self.lam*0
so = np.argsort(self.lam)
ysens[so] = interp.interp_conserve_c(self.lam[so],
conf_sens['WAVELENGTH'],
conf_sens['SENSITIVITY']*MWext,
integrate=1, left=0, right=0)
# dl = np.abs(np.append(self.lam[1] - self.lam[0],
# np.diff(self.lam)))
# ysens *= dl#*1.e-17
self.sensitivity = ysens
# Slices of the parent array based on the origin parameter
self.slx_parent = slice(self.origin[1] + self.dxfull[0] + self.x0[1],
self.origin[1] + self.dxfull[-1] + self.x0[1]+1)
self.sly_parent = slice(self.origin[0], self.origin[0] + self.sh[0])
# print 'XXX wavelength: %s %s %s' %(self.lam[-5:], self.lam_beam[-5:], dl[-5:])
def add_ytrace_offset(self, yoffset):
"""Add an offset in Y to the spectral trace
Parameters
----------
yoffset : float
Y-offset to apply
"""
from .utils_c.interp import interp_conserve_c
self.ytrace_beam, self.lam_beam = self.conf.get_beam_trace(
x=(self.xc+self.xcenter-self.pad)/self.grow,
y=(self.yc+self.ycenter-self.pad)/self.grow,
dx=(self.dx+self.xcenter*0+self.xoff)/self.grow,
beam=self.beam, fwcpos=self.fwcpos)
self.ytrace_beam *= self.grow
self.yoffset = yoffset
self.ytrace_beam += yoffset
# Integer trace
# Add/subtract 20 for handling int of small negative numbers
dyc = np.cast[int](self.ytrace_beam+20)-20+1
# Account for pixel centering of the trace
self.yfrac_beam = self.ytrace_beam - np.floor(self.ytrace_beam)
try:
self.flat_index = self.idx[dyc + self.x0[0], self.dxpix]
except IndexError:
# print 'Index Error', id, self.x0[0], self.xc, self.yc, self.beam, self.ytrace_beam.max(), self.ytrace_beam.min()
raise IndexError
# Trace, wavelength, sensitivity across entire 2D array
self.ytrace, self.lam = self.conf.get_beam_trace(
x=(self.xc+self.xcenter-self.pad)/self.grow,
y=(self.yc+self.ycenter-self.pad)/self.grow,
dx=(self.dxfull+self.xcenter+self.xoff)/self.grow,
beam=self.beam, fwcpos=self.fwcpos)
self.ytrace *= self.grow
self.ytrace += yoffset
# Reset sensitivity
ysens = self.lam_beam*0
so = np.argsort(self.lam_beam)
conf_sens = self.conf.sens[self.beam]
if self.MW_F99 is not None:
MWext = 10**(-0.4*(self.MW_F99(conf_sens['WAVELENGTH']*u.AA)))
else:
MWext = 1.
ysens[so] = interp_conserve_c(self.lam_beam[so],
conf_sens['WAVELENGTH'],
conf_sens['SENSITIVITY']*MWext,
integrate=1, left=0, right=0)
self.lam_sort = so
self.sensitivity_beam = ysens
# Full array
ysens = self.lam*0
so = np.argsort(self.lam)
ysens[so] = interp_conserve_c(self.lam[so],
conf_sens['WAVELENGTH'],
conf_sens['SENSITIVITY']*MWext,
integrate=1, left=0, right=0)
self.sensitivity = ysens
    def compute_model(self, id=None, thumb=None, spectrum_1d=None,
                      in_place=True, modelf=None, scale=None, is_cgs=False,
                      apply_sensitivity=True, reset=True):
        """Compute a model 2D grism spectrum

        Parameters
        ----------
        id : int
            Only consider pixels in the segmentation image (`self.seg`) with
            values equal to `id`.

        thumb : `~numpy.ndarray` with shape = `self.sh` or None
            Optional direct image.  If `None` then use `self.direct`.

        spectrum_1d : [`~numpy.array`, `~numpy.array`] or None
            Optional 1D template [wave, flux] to use for the 2D grism model.
            If `None`, then implicitly assumes flat f_lambda spectrum.

        in_place : bool
            If True, put the 2D model in `self.model` and `self.modelf`,
            otherwise put the output in a clean array or preformed `modelf`.

        modelf : `~numpy.array` with shape = `self.sh_beam`
            Preformed (flat) array to which the 2D model is added, if
            `in_place` is False.

        scale : float or None
            Multiplicative factor to apply to the modeled spectrum.

        is_cgs : bool
            Units of `spectrum_1d` fluxes are f_lambda cgs.

        apply_sensitivity : bool
            Multiply the dispersed model by the sensitivity curve so the
            result is in detector count-rate units.

        reset : bool
            If True, zero the target array before dispersing; if False,
            accumulate onto its current contents.

        Returns
        -------
        model : `~numpy.ndarray`
            If `in_place` is False, returns the 2D model spectrum.  Otherwise
            the result is stored in `self.model` and `self.modelf`.
        """
        from .utils_c import disperse
        from .utils_c import interp

        if id is None:
            id = self.id
            total_flux = self.total_flux
        else:
            self.id = id
            total_flux = self.direct[self.seg == id].sum()

        # Template (1D) spectrum interpolated onto the wavelength grid
        if in_place:
            self.spectrum_1d = spectrum_1d

        if scale is None:
            scale = self.scale
        else:
            self.scale = scale

        if spectrum_1d is not None:
            xspec, yspec = spectrum_1d
            scale_spec = self.sensitivity_beam*0.
            int_func = interp.interp_conserve_c
            # Conserve flux while resampling the template onto lam_beam
            scale_spec[self.lam_sort] = int_func(self.lam_beam[self.lam_sort],
                                                 xspec, yspec)*scale
        else:
            # Flat f_lambda template: a simple scalar scaling
            scale_spec = scale

        self.is_cgs = is_cgs
        if is_cgs:
            # Normalize so the direct-image flux sets the absolute scale
            scale_spec /= total_flux

        # Output data, fastest is to compute in place but doesn't zero-out
        # previous result
        if in_place:
            self.modelf *= (1-reset)  # multiply by 0 resets, by 1 accumulates
            modelf = self.modelf
        else:
            if modelf is None:
                modelf = self.modelf*(1-reset)

        # Optionally use a different direct image
        if thumb is None:
            thumb = self.direct
        else:
            if thumb.shape != self.sh:
                print("""
Error: `thumb` must have the same dimensions as the direct image! ({0:d},{1:d})
                """.format(self.sh[0], self.sh[1]))
                return False

        # Now compute the dispersed spectrum using the C helper
        if apply_sensitivity:
            sens_curve = self.sensitivity_beam
        else:
            sens_curve = 1.

        # Skip wavelengths that contribute nothing to the model
        nonz = (sens_curve*scale_spec) != 0

        if (nonz.sum() > 0) & (id in self.seg_ids):
            status = disperse.disperse_grism_object(thumb, self.seg,
                                          np.float32(id),
                                          self.flat_index[nonz],
                                          self.yfrac_beam[nonz].astype(np.float64),
                                          (sens_curve*scale_spec)[nonz].astype(np.float64),
                                          modelf,
                                          self.x0,
                                          np.array(self.sh, dtype=np.int64),
                                          self.x0,
                                          np.array(self.sh_beam, dtype=np.int64))

        #print('yyy PAM')
        # Correct for the pixel area map
        modelf /= self.PAM_value  # = self.get_PAM_value()

        if not in_place:
            return modelf
        else:
            self.model = modelf.reshape(self.sh_beam)
            return True
def init_optimal_profile(self, seg_ids=None):
"""Initilize optimal extraction profile
"""
if seg_ids is None:
ids = [self.id]
else:
ids = seg_ids
for i, id in enumerate(ids):
if hasattr(self, 'psf_params'):
m_i = self.compute_model_psf(id=id, in_place=False)
else:
m_i = self.compute_model(id=id, in_place=False)
#print('Add {0} to optimal profile'.format(id))
if i == 0:
m = m_i
else:
m += m_i
m = m.reshape(self.sh_beam)
m[m < 0] = 0
self.optimal_profile = m/m.sum(axis=0)
def optimal_extract(self, data, bin=0, ivar=1., weight=1.):
"""`Horne (1986) <http://adsabs.harvard.edu/abs/1986PASP...98..609H>`_ optimally-weighted 1D extraction
Parameters
----------
data : `~numpy.ndarray` with shape `self.sh_beam`
2D data to extract
bin : int, optional
Simple boxcar averaging of the output 1D spectrum
ivar : float or `~numpy.ndarray` with shape `self.sh_beam`
Inverse variance array or scalar float that multiplies the
optimal weights
weight : TBD
Returns
-------
wave, opt_flux, opt_rms : `~numpy.array`
`wave` is the wavelength of 1D array
`opt_flux` is the optimally-weighted 1D extraction
`opt_rms` is the weighted uncertainty of the 1D extraction
All are optionally binned in wavelength if `bin` > 1.
"""
import scipy.ndimage as nd
if not hasattr(self, 'optimal_profile'):
self.init_optimal_profile()
if data.shape != self.sh_beam:
print("""
`data` ({0},{1}) must have the same shape as the data array ({2},{3})
""".format(data.shape[0], data.shape[1], self.sh_beam[0],
self.sh_beam[1]))
return False
if not isinstance(ivar, float):
if ivar.shape != self.sh_beam:
print("""
`ivar` ({0},{1}) must have the same shape as the data array ({2},{3})
""".format(ivar.shape[0], ivar.shape[1], self.sh_beam[0],
self.sh_beam[1]))
return False
num = self.optimal_profile*data*ivar*weight
den = self.optimal_profile**2*ivar*weight
opt_flux = num.sum(axis=0)/den.sum(axis=0)
opt_var = 1./den.sum(axis=0)
if bin > 1:
kern = np.ones(bin, dtype=float)/bin
opt_flux = nd.convolve(opt_flux, kern)[bin // 2::bin]
opt_var = nd.convolve(opt_var, kern**2)[bin // 2::bin]
wave = self.lam[bin // 2::bin]
else:
wave = self.lam
opt_rms = np.sqrt(opt_var)
opt_rms[opt_var == 0] = 0
return wave, opt_flux, opt_rms
def trace_extract(self, data, r=0, bin=0, ivar=1., dy0=0):
"""Aperture extraction along the trace
Parameters
----------
data : array-like
Data array with dimenions equivalent to those of `self.model`
r : int
Radius of of the aperture to extract, in pixels. The extraction
will be performed from `-r` to `+r` pixels below and above the
central pixel of the trace.
bin : int, optional
Simple boxcar averaging of the output 1D spectrum
ivar : float or `~numpy.ndarray` with shape `self.sh_beam`
Inverse variance array or scalar float that multiplies the
optimal weights
dy0 : float
Central pixel to extract, relative to the central pixel of
the trace
Returns
-------
wave, opt_flux, opt_rms : `~numpy.array`
`wave` is the wavelength of 1D array
`opt_flux` is the 1D aperture extraction
`opt_rms` is the uncertainty of the 1D extraction, derived from
the sum of the pixel variances within the aperture
All are optionally binned in wavelength if `bin` > 1.
"""
dy = np.cast[int](np.round(self.ytrace+dy0))
aper = np.zeros_like(self.model)
y0 = self.sh_beam[0] // 2
for d in range(-r, r+1):
for i in range(self.sh_beam[1]):
aper[y0+d+dy[i]-1, i] = 1
var = 1./ivar
if not np.isscalar(ivar):
var[ivar == 0] = 0
opt_flux = np.sum(data*aper, axis=0)
opt_var = np.sum(var*aper, axis=0)
if bin > 1:
kern = np.ones(bin, dtype=float)/bin
opt_flux = nd.convolve(opt_flux, kern)[bin // 2::bin]
opt_var = nd.convolve(opt_var, kern**2)[bin // 2::bin]
wave = self.lam[bin // 2::bin]
else:
wave = self.lam
opt_rms = np.sqrt(opt_var)
return wave, opt_flux, opt_rms
def contained_in_full_array(self, full_array):
"""Check if subimage slice is fully contained within larger array
"""
sh = full_array.shape
if (self.sly_parent.start < 0) | (self.slx_parent.start < 0):
return False
if (self.sly_parent.stop >= sh[0]) | (self.slx_parent.stop >= sh[1]):
return False
return True
def add_to_full_image(self, data, full_array):
"""Add spectrum cutout back to the full array
`data` is *added* to `full_array` in place, so, for example, to
subtract `self.model` from the full array, call the function with
>>> self.add_to_full_image(-self.model, full_array)
Parameters
----------
data : `~numpy.ndarray` shape `self.sh_beam` (e.g., `self.model`)
Spectrum cutout
full_array : `~numpy.ndarray`
Full detector array, where the lower left pixel of `data` is given
by `origin`.
"""
if self.contained_in_full_array(full_array):
full_array[self.sly_parent, self.slx_parent] += data
else:
sh = full_array.shape
xpix = np.arange(self.sh_beam[1])
xpix += self.origin[1] + self.dxfull[0] + self.x0[1]
ypix = np.arange(self.sh_beam[0])
ypix += self.origin[0]
okx = (xpix >= 0) & (xpix < sh[1])
oky = (ypix >= 0) & (ypix < sh[1])
if (okx.sum() == 0) | (oky.sum() == 0):
return False
sly = slice(ypix[oky].min(), ypix[oky].max()+1)
slx = slice(xpix[okx].min(), xpix[okx].max()+1)
full_array[sly, slx] += data[oky, :][:, okx]
# print sly, self.sly_parent, slx, self.slx_parent
return True
def cutout_from_full_image(self, full_array):
"""Get beam-sized cutout from a full image
Parameters
----------
full_array : `~numpy.ndarray`
Array of the size of the parent array from which the cutout was
extracted. If possible, the function first tries the slices with
>>> sub = full_array[self.sly_parent, self.slx_parent]
and then computes smaller slices for cases where the beam spectrum
falls off the edge of the parent array.
Returns
-------
cutout : `~numpy.ndarray`
Array with dimensions of `self.model`.
"""
# print self.sly_parent, self.slx_parent, full_array.shape
if self.contained_in_full_array(full_array):
data = full_array[self.sly_parent, self.slx_parent]
else:
sh = full_array.shape
###
xpix = np.arange(self.sh_beam[1])
xpix += self.origin[1] + self.dxfull[0] + self.x0[1]
ypix = np.arange(self.sh_beam[0])
ypix += self.origin[0]
okx = (xpix >= 0) & (xpix < sh[1])
oky = (ypix >= 0) & (ypix < sh[1])
if (okx.sum() == 0) | (oky.sum() == 0):
return False
sly = slice(ypix[oky].min(), ypix[oky].max()+1)
slx = slice(xpix[okx].min(), xpix[okx].max()+1)
data = self.model*0.
data[oky, :][:, okx] += full_array[sly, slx]
return data
def twod_axis_labels(self, wscale=1.e4, limits=None, mpl_axis=None):
"""Set 2D wavelength (x) axis labels based on spectral parameters
Parameters
----------
wscale : float
Scale factor to divide from the wavelength units. The default
value of 1.e4 results in wavelength ticks in microns.
limits : None, list = `[x0, x1, dx]`
Will automatically use the whole wavelength range defined by the
spectrum. To change, specify `limits = [x0, x1, dx]` to
interpolate `self.beam.lam_beam` between x0*wscale and x1*wscale.
mpl_axis : `matplotlib.axes._axes.Axes`
Plotting axis to place the labels, e.g.,
>>> fig = plt.figure()
>>> mpl_axis = fig.add_subplot(111)
Returns
-------
Nothing if `mpl_axis` is supplied, else pixels and wavelengths of the
tick marks.
"""
xarr = np.arange(len(self.lam))
if limits:
xlam = np.arange(limits[0], limits[1], limits[2])
xpix = np.interp(xlam, self.lam/wscale, xarr)
else:
xlam = np.unique(np.cast[int](self.lam / 1.e4*10)/10.)
xpix = np.interp(xlam, self.lam/wscale, xarr)
if mpl_axis is None:
return xpix, xlam
else:
mpl_axis.set_xticks(xpix)
mpl_axis.set_xticklabels(xlam)
def twod_xlim(self, x0, x1=None, wscale=1.e4, mpl_axis=None):
"""Set wavelength (x) axis limits on a 2D spectrum
Parameters
----------
x0 : float or list/tuple of floats
minimum or (min,max) of the plot limits
x1 : float or None
max of the plot limits if x0 is a float
wscale : float
Scale factor to divide from the wavelength units. The default
value of 1.e4 results in wavelength ticks in microns.
mpl_axis : `matplotlib.axes._axes.Axes`
Plotting axis to place the labels.
Returns
-------
Nothing if `mpl_axis` is supplied else pixels the desired wavelength
limits.
"""
if isinstance(x0, list) | isinstance(x0, tuple):
x0, x1 = x0[0], x0[1]
xarr = np.arange(len(self.lam))
xpix = np.interp([x0, x1], self.lam/wscale, xarr)
if mpl_axis:
mpl_axis.set_xlim(xpix)
else:
return xpix
    def x_init_epsf(self, flat_sensitivity=False, psf_params=None, psf_filter='F140W', yoff=0.0, skip=0.5, get_extended=False, seg_mask=True):
        """Initialize ePSF fitting for point sources

        Builds the sparse design matrix `self.A_psf` of effective-PSF
        profiles sampled along the spectral trace, the corresponding
        wavelengths `self.lam_psf`, and the integrated sensitivity
        `self.psf_sensitivity`, then renormalizes the ePSF model and
        initializes the optimal-extraction profile.

        Parameters
        ----------
        flat_sensitivity : bool
            If True, use a flat sensitivity (just the wavelength gradient)
            instead of the grism sensitivity curve.
        psf_params : list or None
            ePSF parameters.  A 2-element list is treated as (dx, dy)
            offsets; a 3-element list as (flux, dx, dy).  Defaults to
            `[self.total_flux, 0., 0.]` when None.
        psf_filter : str
            Filter used to evaluate the ePSF (stored as `self.psf_filter`).
        yoff : float
            Extra cross-dispersion offset added to the trace position.
        skip : float
            Sampling step, in pixels along the trace, between PSF samples.
        get_extended : bool
            Include the extended ePSF component in the evaluation.
        seg_mask : bool
            Multiply each PSF sample by the (dilated) segmentation mask of
            `self.id`.
        """
        import scipy.sparse
        import scipy.ndimage

        #print('SKIP: {0}'.format(skip))

        EPSF = utils.EffectivePSF()
        if psf_params is None:
            self.psf_params = [self.total_flux, 0., 0.]
        else:
            self.psf_params = psf_params

        if self.psf_params[0] is None:
            self.psf_params[0] = self.total_flux  # /photflam_list[psf_filter]

        # Origin in unpadded detector coordinates
        origin = np.array(self.origin) - np.array(self.pad)

        self.psf_yoff = yoff
        self.psf_filter = psf_filter

        self.psf = EPSF.get_ePSF(self.psf_params, sci=self.psf_sci,
                                 ivar=self.psf_ivar, origin=origin,
                                 shape=self.sh, filter=psf_filter,
                                 get_extended=get_extended)

        #print('XXX', self.psf_params[0], self.psf.sum())

        # self.psf_params[0] /= self.psf.sum()
        # self.psf /= self.psf.sum()

        # Center in detector coords
        y0, x0 = np.array(self.sh)/2.-1

        # Detector position of the PSF center; the parameter layout
        # depends on whether flux is included (3 params) or not (2)
        if len(self.psf_params) == 2:
            xd = x0+self.psf_params[0] + origin[1]
            yd = y0+self.psf_params[1] + origin[0]
        else:
            xd = x0+self.psf_params[1] + origin[1]
            yd = y0+self.psf_params[2] + origin[0]

        # Get wavelength array: ePSFs evaluated at three WFC3/IR filter
        # pivot wavelengths for interpolation in wavelength
        psf_xy_lam = []
        psf_ext_lam = []
        for i, filter in enumerate(['F105W', 'F125W', 'F160W']):
            psf_xy_lam.append(EPSF.get_at_position(x=xd, y=yd, filter=filter))
            psf_ext_lam.append(EPSF.extended_epsf[filter])

        filt_ix = np.arange(3)
        filt_lam = np.array([1.0551, 1.2486, 1.5369])*1.e4

        yp_beam, xp_beam = np.indices(self.sh_beam)

        # Sample positions along the trace, every `skip` pixels
        xarr = np.arange(0, self.lam_beam.shape[0], skip)
        xarr = xarr[xarr <= self.lam_beam.shape[0]-1]
        xbeam = np.arange(self.lam_beam.shape[0])*1.

        #xbeam += 1.

        # yoff = 0 #-0.15
        psf_model = self.model*0.
        A_psf = []
        lam_psf = []

        if len(self.psf_params) == 2:
            lam_offset = self.psf_params[0]  # self.sh[1]/2 - self.psf_params[1] - 1
        else:
            lam_offset = self.psf_params[1]  # self.sh[1]/2 - self.psf_params[1] - 1

        self.lam_offset = lam_offset

        for xi in xarr:
            # Trace position and wavelength at this sample
            yi = np.interp(xi, xbeam, self.ytrace_beam)
            li = np.interp(xi, xbeam, self.lam_beam)
            if len(self.psf_params) == 2:
                dx = xp_beam-self.psf_params[0]-xi-x0
                dy = yp_beam-self.psf_params[1]-yi+yoff-y0
            else:
                dx = xp_beam-self.psf_params[1]-xi-x0
                dy = yp_beam-self.psf_params[2]-yi+yoff-y0

            # wavelength-dependent: interpolate between the filter ePSFs,
            # clipping to the first/last filter outside the range
            ii = np.interp(li, filt_lam, filt_ix, left=-1, right=10)
            if ii == -1:
                psf_xy_i = psf_xy_lam[0]*1
                psf_ext_i = psf_ext_lam[0]*1
            elif ii == 10:
                psf_xy_i = psf_xy_lam[2]*1
                psf_ext_i = psf_ext_lam[2]*1
            else:
                ni = int(ii)
                f = 1-(li-filt_lam[ni])/(filt_lam[ni+1]-filt_lam[ni])
                psf_xy_i = f*psf_xy_lam[ni] + (1-f)*psf_xy_lam[ni+1]
                psf_ext_i = f*psf_ext_lam[ni] + (1-f)*psf_ext_lam[ni+1]

            if not get_extended:
                psf_ext_i = None

            psf = EPSF.eval_ePSF(psf_xy_i, dx, dy, extended_data=psf_ext_i)
            if len(self.psf_params) > 2:
                psf *= self.psf_params[0]

            #print(xi, psf.sum())

            if seg_mask:
                # Dilated segmentation mask of this object, resampled to
                # the beam frame
                segm = nd.maximum_filter((self.seg == self.id)*1., size=7)
                #yps, xps = np.indices(self.sh)
                seg_i = nd.map_coordinates(segm, np.array([dx+x0, dy+y0]), order=1, mode='constant', cval=0.0, prefilter=True) > 0
            else:
                seg_i = 1

            A_psf.append((psf*seg_i).flatten())
            lam_psf.append(li)

        # Sensitivity
        self.lam_psf = np.array(lam_psf)

        #photflam = photflam_list[psf_filter]
        photflam = 1

        if flat_sensitivity:
            psf_sensitivity = np.abs(np.gradient(self.lam_psf))*photflam
        else:
            sens = self.conf.sens[self.beam]

            # so = np.argsort(self.lam_psf)
            # s_i = interp.interp_conserve_c(self.lam_psf[so], sens['WAVELENGTH'], sens['SENSITIVITY'], integrate=1)
            # psf_sensitivity = s_i*0.
            # psf_sensitivity[so] = s_i

            # Milky Way extinction applied to the sensitivity curve
            if self.MW_F99 is not None:
                MWext = 10**(-0.4*(self.MW_F99(sens['WAVELENGTH']*u.AA)))
            else:
                MWext = 1.

            psf_sensitivity = self.get_psf_sensitivity(sens['WAVELENGTH'], sens['SENSITIVITY']*MWext)

        self.psf_sensitivity = psf_sensitivity
        self.A_psf = scipy.sparse.csr_matrix(np.array(A_psf).T)

        # self.init_extended_epsf()

        self.PAM_value = self.get_PAM_value()
        self.psf_scale_to_data = 1.
        self.psf_renorm = 1.

        self.renormalize_epsf_model()

        self.init_optimal_profile()
def get_psf_sensitivity(self, wave, sensitivity):
"""
Integrate the sensitivity curve to the wavelengths for the
PSF model
"""
from .utils_c import interp
so = np.argsort(self.lam_psf)
s_i = interp.interp_conserve_c(self.lam_psf[so], wave, sensitivity, integrate=1)
psf_sensitivity = s_i*0.
psf_sensitivity[so] = s_i
return psf_sensitivity
    def renormalize_epsf_model(self, spectrum_1d=None, verbose=False):
        """
        Ensure normalization of the ePSF model is correct

        Compares the summed 2D model for an input (by default flat)
        spectrum against the integrated sensitivity and rescales the
        design matrix `self.A_psf` in place so that the two agree.  Also
        records the Pixel Area Map scaling as `self.psf_scale_to_data`
        and the raw factor as `self.psf_renorm`.

        Parameters
        ----------
        spectrum_1d : [wave, flux] list or None
            Spectrum used for the normalization; a flat f-lambda spectrum
            over the full wavelength grid is generated when None.
        verbose : bool
            Print the renormalization factor.

        Returns
        -------
        False if `self.A_psf` has not been initialized, else None.
        """
        from .utils_c import interp

        if not hasattr(self, 'A_psf'):
            print('ePSF not initialized')
            return False

        if spectrum_1d is None:
            # Flat f-lambda spectrum padded slightly past the grid edges
            dl = 0.1
            flat_x = np.arange(self.lam.min()-10, self.lam.max()+10, dl)
            flat_y = flat_x*0.+1.e-17
            spectrum_1d = [flat_x, flat_y]

        tab = self.conf.sens[self.beam]

        # Milky Way extinction applied to the sensitivity curve
        if self.MW_F99 is not None:
            MWext = 10**(-0.4*(self.MW_F99(tab['WAVELENGTH']*u.AA)))
        else:
            MWext = 1.

        sens_i = interp.interp_conserve_c(spectrum_1d[0], tab['WAVELENGTH'], tab['SENSITIVITY']*MWext, integrate=1, left=0, right=0)
        total_sens = np.trapz(spectrum_1d[1]*sens_i/np.gradient(spectrum_1d[0]), spectrum_1d[0])

        m = self.compute_model_psf(spectrum_1d=spectrum_1d, is_cgs=True, in_place=False).reshape(self.sh_beam)
        #m2 = self.compute_model(spectrum_1d=[flat_x, flat_y], is_cgs=True, in_place=False).reshape(self.sh_beam)

        renorm = total_sens / m.sum()
        self.psf_renorm = renorm

        # Scale model to data, depends on Pixel Area Map and PSF normalization
        scale_to_data = self.PAM_value  # * (self.psf_params[0]/0.975)
        self.psf_scale_to_data = scale_to_data
        renorm /= scale_to_data  # renorm PSF

        if verbose:
            print('Renorm ePSF model: {0:0.3f}'.format(renorm))

        self.A_psf *= renorm
def get_PAM_value(self, verbose=False):
"""
Apply Pixel Area Map correction to WFC3 effective PSF model
http://www.stsci.edu/hst/wfc3/pam/pixel_area_maps
"""
confp = self.conf.conf
if ('INSTRUMENT' in confp) & ('CAMERA' in confp):
instr = '{0}-{1}'.format(confp['INSTRUMENT'], confp['CAMERA'])
if instr != 'WFC3-IR':
return 1
else:
return 1
try:
with pyfits.open(os.getenv('iref')+'ir_wfc3_map.fits') as pam:
pam_data = pam[1].data
pam_value = pam_data[int(self.yc-self.pad), int(self.xc-self.pad)]
pam.close()
except:
pam_value = 1
if verbose:
msg = 'PAM correction at x={0}, y={1}: {2:.3f}'
print(msg.format(self.xc-self.pad, self.yc-self.pad, pam_value))
return pam_value
    def init_extended_epsf(self):
        """
        Hacky code for adding extended component of the EPSFs

        Reads spline models of the extended PSF wings from
        ``GRIZLI_PATH/CONF/ePSF_extended_splines.npy`` and evaluates them
        column-by-column along the trace into `self.ext_psf_data`.
        Returns False (doing nothing) if the spline file is not found.
        """
        ext_file = os.path.join(GRIZLI_PATH, 'CONF',
                                'ePSF_extended_splines.npy')
        if not os.path.exists(ext_file):
            return False

        # Dict of {wavelength: spline} models of the extended wings
        bg_splines = np.load(ext_file, allow_pickle=True)[0]
        spline_waves = np.array(list(bg_splines.keys()))
        spline_waves.sort()
        spl_ix = np.arange(len(spline_waves))

        # Cross-dispersion pixel grid centered on the beam
        yarr = np.arange(self.sh_beam[0]) - self.sh_beam[0]/2.+1
        # Cross-dispersion offset of the PSF center (assumes 3-element
        # psf_params -- TODO confirm against x_init_epsf)
        dy = self.psf_params[2]

        spl_data = self.model * 0.

        for i in range(self.sh_beam[1]):
            dy_i = dy + self.ytrace[i]
            # Fractional index into the spline wavelength grid
            x_i = np.interp(self.lam[i], spline_waves, spl_ix)
            if (x_i == 0) | (x_i == len(bg_splines)-1):
                # Off either end: use the nearest spline directly
                spl_data[:, i] = bg_splines[spline_waves[int(x_i)]](yarr-dy_i)
            else:
                # Linear interpolation between the bracketing splines
                f = x_i-int(x_i)
                sp = bg_splines[spline_waves[int(x_i)]](yarr-dy_i)*(1-f)
                sp += bg_splines[spline_waves[int(x_i)+1]](yarr-dy_i)*f

                spl_data[:, i] = sp

        # Clip negative spline artifacts
        self.ext_psf_data = np.maximum(spl_data, 0)
    def compute_model_psf(self, id=None, spectrum_1d=None, in_place=True, is_cgs=False, apply_sensitivity=True):
        """
        Compute model 2D spectrum with the ePSF morphology template

        Parameters
        ----------
        id : None
            Unused here; present for signature compatibility with the
            segmentation-based `compute_model`.
        spectrum_1d : [wave, flux] list or None
            Input 1D spectrum.  If None, unit coefficients (a flat
            spectrum) are used.
        in_place : bool
            If True, store the result in `self.model` / `self.modelf` and
            return True; otherwise return the flattened model array.
        is_cgs : bool
            If False, coefficients are scaled by `self.total_flux`.
        apply_sensitivity : bool
            Unused here; `self.psf_sensitivity` is always applied.
            NOTE(review): parameter kept for API compatibility -- confirm.

        Returns
        -------
        True if `in_place`, else the flattened model
        (`~numpy.ndarray` of float32).
        """
        from .utils_c import interp

        if spectrum_1d is None:
            #modelf = np.array(self.A_psf.sum(axis=1)).flatten()
            #model = model.reshape(self.sh_beam)
            coeffs = np.ones(self.A_psf.shape[1])
            if not is_cgs:
                coeffs *= self.total_flux
        else:
            dx = np.diff(self.lam_psf)[0]
            if dx < 0:
                # Descending wavelength grid: reverse for interpolation,
                # then reverse the result back
                coeffs = interp.interp_conserve_c(self.lam_psf[::-1],
                                                  spectrum_1d[0],
                                                  spectrum_1d[1])[::-1]
            else:
                coeffs = interp.interp_conserve_c(self.lam_psf,
                                                  spectrum_1d[0],
                                                  spectrum_1d[1])

            if not is_cgs:
                coeffs *= self.total_flux

        # Sparse matrix multiply: PSF profiles x (spectrum * sensitivity)
        modelf = self.A_psf.dot(coeffs*self.psf_sensitivity).astype(np.float32)
        model = modelf.reshape(self.sh_beam)

        # if hasattr(self, 'ext_psf_data'):
        #     model += self.ext_psf_data*model.sum(axis=0)
        #     modelf = model.flatten()
        #     model = modelf.reshape(self.sh_beam)

        if in_place:
            self.spectrum_1d = spectrum_1d
            self.is_cgs = is_cgs

            self.modelf = modelf  # .flatten()
            self.model = model
            #self.modelf = self.model.flatten()
            return True
        else:
            return modelf  # .flatten()
class ImageData(object):
"""Container for image data with WCS, etc."""
    def __init__(self, sci=None, err=None, dq=None,
                 header=None, wcs=None, photflam=1., photplam=1.,
                 origin=[0, 0], pad=0, process_jwst_header=True,
                 instrument='WFC3', filter='G141', pupil=None, module=None,
                 hdulist=None,
                 sci_extn=1, fwcpos=None):
        """
        Parameters
        ----------
        sci : `~numpy.ndarray`
            Science data

        err, dq : `~numpy.ndarray` or None
            Uncertainty and DQ data.  Defaults to zero if None

        header : `~astropy.io.fits.Header`
            Associated header with `data` that contains WCS information

        wcs : `~astropy.wcs.WCS` or None
            WCS solution to use.  If `None` will derive from the `header`.

        photflam : float
            Multiplicative conversion factor to scale `data` to set units
            to f_lambda flux density.  If data is grism spectra, then use
            photflam=1

        origin : [int, int]
            Origin of lower left pixel in detector coordinates

        pad : int
            Padding to apply to the image dimensions

        process_jwst_header : bool
            If the image is detected as coming from JWST NIRISS or NIRCAM,
            generate the necessary header WCS keywords

        instrument : str
            Instrument where the image came from

        filter : str
            Filter from the image header.  For WFC3 and NIRISS this is the
            dispersing element

        pupil : str
            Pupil from the image header (JWST instruments).  For NIRISS this
            is the blocking filter and for NIRCAM this is the dispersing
            element

        module : str
            Instrument module for NIRCAM ('A' or 'B')

        hdulist : `~astropy.io.fits.HDUList`, optional
            If specified, read `sci`, `err`, `dq` from the HDU list from a
            FITS file, e.g., WFC3 FLT.

        sci_extn : int
            Science EXTNAME to read from the HDUList, for example,
            `sci` = hdulist['SCI', `sci_extn`].

        fwcpos : float
            Filter wheel encoder position (NIRISS)

        Attributes
        ----------
        parent_file : str
            Filename of the parent from which the data were extracted

        data : dict
            Dictionary to store pixel data, with keys 'SCI', 'DQ', and 'ERR'.
            If a reference image has been supplied and processed, will also
            have an entry 'REF'.  The data arrays can also be addressed with
            the `__getitem__` method, i.e.,

            >>> self = ImageData(...)
            >>> print(np.median(self['SCI']))

        pad : int
            Additional padding around the nominal image dimensions

        wcs : `~astropy.wcs.WCS`
            WCS of the data array

        header : `~astropy.io.fits.Header`
            FITS header

        filter, instrument, photflam, photplam, APZP : str, float
            Parameters taken from the header

        ref_file, ref_photlam, ref_photplam, ref_filter : str, float
            Corresponding parameters for the reference image, if necessary.
        """
        import copy

        # Easy way, get everything from an image HDU list
        if isinstance(hdulist, pyfits.HDUList):
            # Optional reference-image extension, converted out of its
            # own PHOTFLAM units
            if ('REF', sci_extn) in hdulist:
                ref_h = hdulist['REF', sci_extn].header
                ref_data = hdulist['REF', sci_extn].data/ref_h['PHOTFLAM']
                # NOTE(review): np.cast was removed in NumPy 2.0
                ref_data = np.cast[np.float32](ref_data)
                ref_file = ref_h['REF_FILE']
                ref_photflam = 1.
                ref_photplam = ref_h['PHOTPLAM']
                ref_filter = ref_h['FILTER']
            else:
                ref_data = None

            if ('SCI', sci_extn) in hdulist:
                sci = np.cast[np.float32](hdulist['SCI', sci_extn].data)
                err = np.cast[np.float32](hdulist['ERR', sci_extn].data)
                dq = np.cast[np.int16](hdulist['DQ', sci_extn].data)
                base_extn = ('SCI', sci_extn)
            else:
                if ref_data is None:
                    raise KeyError('No SCI or REF extensions found')

                # Doesn't have SCI, get from ref
                sci = err = ref_data*0.+1
                dq = np.zeros(sci.shape, dtype=np.int16)
                base_extn = ('REF', sci_extn)

            # Origin of a previously-extracted cutout, if present
            if 'ORIGINX' in hdulist[base_extn].header:
                h0 = hdulist[base_extn].header
                origin = [h0['ORIGINY'], h0['ORIGINX']]
            else:
                origin = [0, 0]

            self.sci_extn = sci_extn
            header = hdulist[base_extn].header.copy()

            if 'PARENT' in header:
                self.parent_file = header['PARENT']
            else:
                self.parent_file = hdulist.filename()

            # Lookup-table distortion (e.g., ACS) needs special WCS handling
            if 'CPDIS1' in header:
                if 'Lookup' in header['CPDIS1']:
                    self.wcs_is_lookup = True
                else:
                    self.wcs_is_lookup = False
            else:
                self.wcs_is_lookup = False

            # Find a header that has the INSTRUME keyword
            status = False
            for ext in [base_extn, 0]:
                h = hdulist[ext].header
                if 'INSTRUME' in h:
                    status = True
                    break

            if not status:
                msg = ('Couldn\'t find \'INSTRUME\' keyword in the headers' +
                       ' of extensions 0 or (SCI,{0:d})'.format(sci_extn))
                raise KeyError(msg)

            instrument = h['INSTRUME']
            filter = utils.get_hst_filter(h)

            if 'PUPIL' in h:
                pupil = h['PUPIL']

            if 'MODULE' in h:
                module = h['MODULE']
            else:
                module = None

            if 'PHOTPLAM' in h:
                photplam = h['PHOTPLAM']
            elif filter in photplam_list:
                photplam = photplam_list[filter]
            else:
                photplam = 1

            if 'PHOTFLAM' in h:
                photflam = h['PHOTFLAM']
            elif filter in photflam_list:
                photflam = photflam_list[filter]
            elif 'PHOTUJA2' in header:
                # JWST calibrated products: derive photflam from the
                # surface-brightness calibration
                per_pix = header['PIXAR_SR']
                if header['BUNIT'].strip() == 'MJy/sr':
                    photfnu = per_pix*1e6
                else:
                    photfnu = 1./(header['PHOTMJSR']*1.e6)*per_pix

                photflam = photfnu/1.e23*3.e18/photplam**2
            else:
                photflam = 1.

            # For NIRISS
            if 'FWCPOS' in h:
                fwcpos = h['FWCPOS']

            self.mdrizsky = 0.
            if 'MDRIZSKY' in header:
                #sci -= header['MDRIZSKY']
                self.mdrizsky = header['MDRIZSKY']

            # ACS bunit
            #self.exptime = 1.
            if 'EXPTIME' in hdulist[0].header:
                self.exptime = hdulist[0].header['EXPTIME']
            else:
                # JWST products use EFFEXPTM
                self.exptime = hdulist[0].header['EFFEXPTM']

            # if 'BUNIT' in header:
            #     if header['BUNIT'] == 'ELECTRONS':
            #         self.exptime = hdulist[0].header['EXPTIME']
            #         # sci /= self.exptime
            #         # err /= self.exptime

            # Remove the drizzle sky value before scaling
            sci = (sci-self.mdrizsky)

            if 'BUNIT' in header:
                if header['BUNIT'] == 'ELECTRONS':
                    # Convert counts to count rates
                    sci /= self.exptime
                    err /= self.exptime

            # Grism exposures are left in native (rate) units
            if filter.startswith('G'):
                photflam = 1

            if (instrument == 'NIRCAM') & (pupil is not None):
                if pupil.startswith('G'):
                    photflam = 1

            if 'PAD' in header:
                pad = header['PAD']

            self.grow = 1
            if 'GROW' in header:
                self.grow = header['GROW']

        else:
            # Build from the arrays / keyword arguments directly.
            # NOTE(review): this branch indexes `header` without checking
            # for None, so passing `sci` with no `header` raises -- confirm
            # intended.
            if sci is None:
                sci = np.zeros((1014, 1014))

            self.parent_file = 'Unknown'
            self.sci_extn = None
            self.grow = 1
            ref_data = None

            if 'EXPTIME' in header:
                self.exptime = header['EXPTIME']
            else:
                self.exptime = 1.

            if 'MDRIZSKY' in header:
                self.mdrizsky = header['MDRIZSKY']
            else:
                self.mdrizsky = 0.

            if 'CPDIS1' in header:
                if 'Lookup' in header['CPDIS1']:
                    self.wcs_is_lookup = True
                else:
                    self.wcs_is_lookup = False
            else:
                self.wcs_is_lookup = False

        self.is_slice = False

        # Array parameters
        self.pad = pad
        self.origin = origin
        self.fwcpos = fwcpos  # NIRISS
        self.MW_EBV = 0.

        self.data = OrderedDict()
        # Science data scaled to f-lambda
        self.data['SCI'] = sci*photflam

        self.sh = np.array(self.data['SCI'].shape)

        # Header-like parameters
        self.filter = filter
        self.pupil = pupil

        if (instrument == 'NIRCAM'):
            # Fallback if module not specified
            if module is None:
                self.module = 'A'
            else:
                self.module = module
        else:
            self.module = module

        self.instrument = instrument
        self.header = header

        if 'ISCUTOUT' in self.header:
            self.is_slice = self.header['ISCUTOUT']

        self.header['EXPTIME'] = self.exptime

        self.photflam = photflam
        self.photplam = photplam
        # AB zeropoint; NOTE(review): the leading `0*np.log10(photflam)`
        # term is deliberately zeroed (SCI already scaled by photflam) --
        # confirm against the photometry conventions used elsewhere
        self.ABZP = (0*np.log10(self.photflam) - 21.10 -
                     5*np.log10(self.photplam) + 18.6921)
        self.thumb_extension = 'SCI'

        if err is None:
            self.data['ERR'] = np.zeros_like(self.data['SCI'])
        else:
            self.data['ERR'] = err*photflam

        if self.data['ERR'].shape != tuple(self.sh):
            raise ValueError('err and sci arrays have different shapes!')

        if dq is None:
            self.data['DQ'] = np.zeros_like(self.data['SCI'], dtype=np.int16)
        else:
            self.data['DQ'] = dq

        if self.data['DQ'].shape != tuple(self.sh):
            raise ValueError('err and dq arrays have different shapes!')

        if ref_data is None:
            self.data['REF'] = None
            self.ref_file = None
            self.ref_photflam = None
            self.ref_photplam = None
            self.ref_filter = None
        else:
            self.data['REF'] = ref_data
            self.ref_file = ref_file
            self.ref_photflam = ref_photflam
            self.ref_photplam = ref_photplam
            self.ref_filter = ref_filter

        self.wcs = None
        # NOTE(review): `~self.is_slice` on a Python bool is -2/-1, both
        # truthy, so this test may not actually exclude slices -- verify
        if (instrument in ['NIRISS', 'NIRCAM']) & (~self.is_slice):
            if process_jwst_header:
                self.update_jwst_wcsheader(hdulist)

        if self.header is not None:
            if wcs is None:
                self.get_wcs()
            else:
                self.wcs = wcs.copy()
                if not hasattr(self.wcs, 'pixel_shape'):
                    self.wcs.pixel_shape = self.wcs._naxis1, self.wcs._naxis2
        else:
            self.header = pyfits.Header()

        # Detector chip
        if 'CCDCHIP' in self.header:
            self.ccdchip = self.header['CCDCHIP']
        else:
            self.ccdchip = 1

        # Galactic extinction
        if 'MW_EBV' in self.header:
            self.MW_EBV = self.header['MW_EBV']
        else:
            self.MW_EBV = 0.
def unset_dq(self):
"""Flip OK data quality bits using utils.unset_dq_bits
OK bits are defined as
>>> okbits_instrument = {'WFC3': 32+64+512, # blob OK
'NIRISS': 0,
'WFIRST': 0,
'WFI': 0}
"""
okbits_instrument = {'WFC3': 32+64+512, # blob OK
'NIRISS': 0,
'NIRCAM': 0,
'WFIRST': 0,
'WFI': 0}
if self.instrument not in okbits_instrument:
okbits = 1
else:
okbits = okbits_instrument[self.instrument]
self.data['DQ'] = utils.unset_dq_bits(self.data['DQ'], okbits=okbits)
def flag_negative(self, sigma=-3):
"""Flag negative data values with dq=4
Parameters
----------
sigma : float
Threshold for setting bad data
Returns
-------
n_negative : int
Number of flagged negative pixels
If `self.data['ERR']` is zeros, do nothing.
"""
if self.data['ERR'].max() == 0:
return 0
bad = self.data['SCI'] < sigma*self.data['ERR']
self.data['DQ'][bad] |= 4
return bad.sum()
    def update_jwst_wcsheader(self, hdulist, force=False):
        """
        For now generate an approximate SIP header for NIRISS/NIRCam

        The SIP/WCS keywords derived from the `jwst` datamodel are copied
        into `self.header`, and any PC matrix keywords are removed.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            FITS HDU list

        force : bool
            Recompute the SIP approximation even for `jwst` >= 1.3.2.
        """
        import jwst
        from . import jwst as _jwst

        datamodel = _jwst.img_with_wcs(hdulist)

        # NOTE(review): lexicographic version comparison is fragile
        # (e.g. '1.10.0' < '1.3.2') -- consider packaging.version
        if (jwst.__version__ < '1.3.2') | force:
            # Need to compute own transformed header
            sip_header = _jwst.model_wcs_header(datamodel, get_sip=True)
        else:
            sip_header = utils.to_header(datamodel.wcs)

        for k in sip_header:
            self.header[k] = sip_header[k]

        # Remove PC
        for i in [1, 2]:
            for j in [1, 2]:
                k = 'PC{0}_{1}'.format(i, j)
                if k in self.header:
                    self.header.remove(k)
    def get_wcs(self, pc2cd=False):
        """Get WCS from header

        Sets `self.wcs`.  For lookup-table distortion (CPDIS) solutions
        the WCS is read with `stwcs` from the parent FITS file, or from a
        stripped ``*.wcs.fits`` companion file when the parent is not
        available; otherwise a plain `astropy.wcs.WCS` is constructed
        from `self.header`.

        Parameters
        ----------
        pc2cd : bool
            Unused; kept for backwards compatibility.
        """
        import numpy.linalg
        import stwcs

        if self.wcs_is_lookup:
            # ACS-like chips: CCDCHIP 1/2 corresponds to SCI extension 2/1
            if 'CCDCHIP' in self.header:
                ext = {1: 2, 2: 1}[self.header['CCDCHIP']]
            else:
                ext = self.header['EXTVER']

            if os.path.exists(self.parent_file):
                with pyfits.open(self.parent_file) as fobj:
                    wcs = stwcs.wcsutil.hstwcs.HSTWCS(fobj=fobj,
                                                      ext=('SCI', ext))

                    if self.pad > 0:
                        wcs = self.add_padding_to_wcs(wcs, pad=self.pad)
            else:
                # Get WCS from a stripped wcs.fits file (from self.save_wcs)
                # already padded.
                wcsfile = self.parent_file.replace('.fits',
                                                   '.{0:02d}.wcs.fits'.format(ext))
                with pyfits.open(wcsfile) as fobj:
                    fh = fobj[0].header
                    if fh['NAXIS'] == 0:
                        # Restore image dimensions stripped from the header
                        fh['NAXIS'] = 2
                        fh['NAXIS1'] = int(fh['CRPIX1']*2)
                        fh['NAXIS2'] = int(fh['CRPIX2']*2)

                    wcs = stwcs.wcsutil.hstwcs.HSTWCS(fobj=fobj, ext=0)

            #print('XXX WCS',wcs)

            # Object is a cutout
            if self.is_slice:
                slx = slice(self.origin[1], self.origin[1]+self.sh[1])
                sly = slice(self.origin[0], self.origin[0]+self.sh[0])

                wcs = self.get_slice_wcs(wcs, slx=slx, sly=sly)

        else:
            fobj = None
            wcs = pywcs.WCS(self.header, relax=True, fobj=fobj)

        if not hasattr(wcs, 'pscale'):
            wcs.pscale = utils.get_wcs_pscale(wcs)

        self.wcs = wcs

        # Compatibility shim for older astropy WCS objects
        if not hasattr(self.wcs, 'pixel_shape'):
            self.wcs.pixel_shape = self.wcs._naxis1, self.wcs._naxis2
@staticmethod
def add_padding_to_wcs(wcs_in, pad=200):
"""Pad the appropriate WCS keywords"""
wcs = wcs_in.deepcopy()
is_new = True
for attr in ['naxis1', '_naxis1', 'naxis2', '_naxis2']:
if hasattr(wcs, attr):
is_new = False
value = wcs.__getattribute__(attr)
if value is not None:
wcs.__setattr__(attr, value+2*pad)
# Handle changing astropy.wcs.WCS attributes
if is_new:
for i in range(len(wcs._naxis)):
wcs._naxis[i] += 2*pad
wcs.naxis1, wcs.naxis2 = wcs._naxis
else:
wcs.naxis1 = wcs._naxis1
wcs.naxis2 = wcs._naxis2
wcs.wcs.crpix[0] += pad
wcs.wcs.crpix[1] += pad
# Pad CRPIX for SIP
for wcs_ext in [wcs.sip]:
if wcs_ext is not None:
wcs_ext.crpix[0] += pad
wcs_ext.crpix[1] += pad
# Pad CRVAL for Lookup Table, if necessary (e.g., ACS)
for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:
if wcs_ext is not None:
wcs_ext.crval[0] += pad
wcs_ext.crval[1] += pad
return wcs
def add_padding(self, pad=200):
"""Pad the data array and update WCS keywords"""
# Update data array
new_sh = self.sh + 2*pad
for key in ['SCI', 'ERR', 'DQ', 'REF']:
if key not in self.data:
continue
else:
if self.data[key] is None:
continue
data = self.data[key]
new_data = np.zeros(new_sh, dtype=data.dtype)
new_data[pad:-pad, pad:-pad] += data
self.data[key] = new_data
self.sh = new_sh
self.pad += pad
# Padded image dimensions
self.header['NAXIS1'] += 2*pad
self.header['NAXIS2'] += 2*pad
self.header['CRPIX1'] += pad
self.header['CRPIX2'] += pad
# Add padding to WCS
self.wcs = self.add_padding_to_wcs(self.wcs, pad=pad)
if not hasattr(self.wcs, 'pixel_shape'):
self.wcs.pixel_shape = self.wcs._naxis1, self.wcs._naxis2
    def shrink_large_hdu(self, hdu=None, extra=100, verbose=False):
        """Shrink large image mosaic to speed up blotting

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            Input reference HDU

        extra : int
            Extra border to put around `self.data` WCS to ensure the reference
            image is large enough to encompass the distorted image

        verbose : bool
            Print the computed cutout slices.

        Returns
        -------
        new_hdu : `~astropy.io.fits.ImageHDU`
            Image clipped to encompass `self.data['SCI']` + margin of `extra`
            pixels.  Returns `hdu` unchanged when the image footprint
            extends past the reference edges.

        Make a cutout of the larger reference image around the desired FLT
        image to make blotting faster for large reference images.
        """
        ref_wcs = pywcs.WCS(hdu.header)

        # Borders of the flt frame, with the extra margin
        naxis = [self.header['NAXIS1'], self.header['NAXIS2']]
        xflt = [-extra, naxis[0]+extra, naxis[0]+extra, -extra]
        yflt = [-extra, -extra, naxis[1]+extra, naxis[1]+extra]

        raflt, deflt = self.wcs.all_pix2world(xflt, yflt, 0)
        # Corner positions in reference-image pixels.
        # NOTE(review): np.cast was removed in NumPy 2.0
        xref, yref = np.cast[int](ref_wcs.all_world2pix(raflt, deflt, 0))
        ref_naxis = [hdu.header['NAXIS1'], hdu.header['NAXIS2']]

        # Slices of the reference image
        xmi = np.maximum(0, xref.min())
        xma = np.minimum(ref_naxis[0], xref.max())
        slx = slice(xmi, xma)

        ymi = np.maximum(0, yref.min())
        yma = np.minimum(ref_naxis[1], yref.max())
        sly = slice(ymi, yma)

        if ((xref.min() < 0) | (yref.min() < 0) |
            (xref.max() > ref_naxis[0]) | (yref.max() > ref_naxis[1])):
            # Footprint not fully contained: return the input unchanged
            if verbose:
                msg = 'Image cutout: x={0}, y={1} [Out of range]'
                print(msg.format(slx, sly))

            return hdu
        else:
            if verbose:
                print('Image cutout: x={0}, y={1}'.format(slx, sly))

        # Sliced subimage
        slice_wcs = ref_wcs.slice((sly, slx))
        slice_header = hdu.header.copy()
        hwcs = slice_wcs.to_header(relax=True)

        # Copy the sliced WCS keywords, dropping PC matrix terms
        for k in hwcs.keys():
            if not k.startswith('PC'):
                slice_header[k] = hwcs[k]

        slice_data = hdu.data[sly, slx]*1
        new_hdu = pyfits.ImageHDU(data=slice_data, header=slice_header)

        return new_hdu
    def expand_hdu(self, hdu=None, verbose=True):
        """Pad a reference HDU so it fully covers the (padded) image frame

        If the footprint of `self` (including `self.pad`) extends past
        the edges of `hdu`, return a new `~astropy.io.fits.ImageHDU` with
        the data zero-padded and CRPIX shifted accordingly; otherwise
        return `hdu` unchanged.

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            Reference image HDU to (possibly) pad.

        verbose : bool
            Print the applied padding.
        """
        ref_wcs = pywcs.WCS(hdu.header)

        # Borders of the flt frame, including the image padding
        naxis = [self.header['NAXIS1'], self.header['NAXIS2']]
        xflt = [-self.pad, naxis[0]+self.pad, naxis[0]+self.pad, -self.pad]
        yflt = [-self.pad, -self.pad, naxis[1]+self.pad, naxis[1]+self.pad]

        raflt, deflt = self.wcs.all_pix2world(xflt, yflt, 0)
        # NOTE(review): np.cast was removed in NumPy 2.0
        xref, yref = np.cast[int](ref_wcs.all_world2pix(raflt, deflt, 0))
        ref_naxis = [hdu.header['NAXIS1'], hdu.header['NAXIS2']]

        # Most negative / most positive overhang of the footprint
        pad_min = np.minimum(xref.min(), yref.min())
        pad_max = np.maximum((xref-ref_naxis[0]).max(), (yref-ref_naxis[1]).max())

        if (pad_min > 0) & (pad_max < 0):
            # do nothing
            return hdu

        # Extra 50-pixel margin beyond the strict overlap
        pad = np.maximum(np.abs(pad_min), pad_max) + 50

        if verbose:
            print('{0} / Pad ref HDU with {1:d} pixels'.format(self.parent_file, pad))

        # Update data array
        sh = hdu.data.shape
        new_sh = np.array(sh) + 2*pad

        new_data = np.zeros(new_sh, dtype=hdu.data.dtype)
        new_data[pad:-pad, pad:-pad] += hdu.data

        header = hdu.header.copy()

        # Padded image dimensions
        header['NAXIS1'] += 2*pad
        header['NAXIS2'] += 2*pad

        # Add padding to WCS
        header['CRPIX1'] += pad
        header['CRPIX2'] += pad

        new_hdu = pyfits.ImageHDU(data=new_data, header=header)
        return new_hdu
    def blot_from_hdu(self, hdu=None, segmentation=False, grow=3,
                      interp='nearest'):
        """Blot a rectified reference image to detector frame

        Parameters
        ----------
        hdu : `~astropy.io.fits.ImageHDU`
            HDU of the reference image

        segmentation : bool, False
            If True, treat the reference image as a segmentation image and
            preserve the integer values in the blotting.

            If specified as number > 1, then use `~grizli.utils.blot_nearest_exact`
            rather than a hacky pixel area ratio method to blot integer
            segmentation maps.

        grow : int, default=3
            Number of pixels to dilate the segmentation regions

        interp : str,
            Form of interpolation to use when blotting float image pixels.
            Valid options: {'nearest', 'linear', 'poly3', 'poly5' (default), 'spline3', 'sinc'}

        Returns
        -------
        blotted : `np.ndarray`
            Blotted array with the same shape and WCS as `self.data['SCI']`.
        """
        import astropy.wcs
        from drizzlepac import astrodrizzle

        #ref = pyfits.open(refimage)
        # Blotting requires float32 input
        if hdu.data.dtype.type != np.float32:
            #hdu.data = np.cast[np.float32](hdu.data)
            refdata = np.cast[np.float32](hdu.data)
        else:
            refdata = hdu.data

        if 'ORIENTAT' in hdu.header.keys():
            hdu.header.remove('ORIENTAT')

        if segmentation:
            # Binary map of nonzero segmentation pixels, shifted by -1
            # (used below for the pixel-area-ratio normalization)
            seg_ones = np.cast[np.float32](refdata > 0)-1

        ref_wcs = pywcs.WCS(hdu.header, relax=True)
        flt_wcs = self.wcs.copy()

        # Fix some wcs attributes that might not be set correctly
        for wcs in [ref_wcs, flt_wcs]:
            if hasattr(wcs, '_naxis1'):
                wcs.naxis1 = wcs._naxis1
                wcs.naxis2 = wcs._naxis2
            else:
                wcs._naxis1, wcs._naxis2 = wcs._naxis

            if (not hasattr(wcs.wcs, 'cd')) & hasattr(wcs.wcs, 'pc'):
                wcs.wcs.cd = wcs.wcs.pc

            if hasattr(wcs, 'idcscale'):
                if wcs.idcscale is None:
                    wcs.idcscale = np.mean(np.sqrt(np.sum(wcs.wcs.cd**2, axis=0))*3600.)  # np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.
            else:
                #wcs.idcscale = np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.
                wcs.idcscale = np.mean(np.sqrt(np.sum(wcs.wcs.cd**2, axis=0))*3600.)  # np.sqrt(np.sum(wcs.wcs.cd[0,:]**2))*3600.

            wcs.pscale = utils.get_wcs_pscale(wcs)

        if segmentation:
            # Handle segmentation images a bit differently to preserve
            # integers.
            # +1 here is a hack for some memory issues
            if segmentation*1 == 1:
                # Blot a "ones" image and the segmentation values, then
                # take their rounded ratio to recover the integer IDs
                seg_interp = 'nearest'
                blotted_ones = astrodrizzle.ablot.do_blot(seg_ones+1, ref_wcs,
                                  flt_wcs, 1, coeffs=True,
                                  interp=seg_interp,
                                  sinscl=1.0, stepsize=10, wcsmap=None)

                blotted_seg = astrodrizzle.ablot.do_blot(refdata*1., ref_wcs,
                                  flt_wcs, 1, coeffs=True,
                                  interp=seg_interp,
                                  sinscl=1.0, stepsize=10, wcsmap=None)

                blotted_ones[blotted_ones == 0] = 1

                #pixel_ratio = (flt_wcs.idcscale / ref_wcs.idcscale)**2
                #in_seg = np.abs(blotted_ones - pixel_ratio) < 1.e-2
                ratio = np.round(blotted_seg/blotted_ones)
                # Dilate the segments by `grow` pixels
                seg = nd.maximum_filter(ratio, size=grow,
                                        mode='constant', cval=0)
                ratio[ratio == 0] = seg[ratio == 0]
                blotted = ratio

            else:
                # Exact nearest-neighbor blotting of the integer map
                blotted = utils.blot_nearest_exact(refdata, ref_wcs, flt_wcs,
                                                   verbose=True, stepsize=-1,
                                                   scale_by_pixel_area=False,
                                                   wcs_mask=True,
                                                   fill_value=0)

        else:
            # Floating point data
            blotted = astrodrizzle.ablot.do_blot(refdata, ref_wcs, flt_wcs, 1,
                          coeffs=True, interp=interp, sinscl=1.0,
                          stepsize=10, wcsmap=None)

        return blotted
    @staticmethod
    def get_slice_wcs(wcs, slx=slice(480, 520), sly=slice(480, 520)):
        """Get slice of a WCS including higher orders like SIP and DET2IM

        The normal `~astropy.wcs.wcs.WCS` `slice` method doesn't apply the
        slice to all of the necessary keywords.  For example, SIP WCS also
        has a `CRPIX` reference pixel that needs to be offset along with
        the main `CRPIX`.

        Parameters
        ----------
        wcs : `~astropy.wcs.WCS`
            WCS to slice.

        slx, sly : slice
            Slices in x and y dimensions to extract

        Returns
        -------
        slice_wcs : `~astropy.wcs.WCS`
            Sliced WCS with SIP and lookup-table reference pixels updated.
        """
        NX = slx.stop - slx.start
        NY = sly.stop - sly.start

        slice_wcs = wcs.slice((sly, slx))

        # Keep both old- and new-style astropy dimension attributes in sync
        if hasattr(slice_wcs, '_naxis1'):
            slice_wcs.naxis1 = slice_wcs._naxis1 = NX
            slice_wcs.naxis2 = slice_wcs._naxis2 = NY
        else:
            slice_wcs._naxis = [NX, NY]
            slice_wcs._naxis1, slice_wcs._naxis2 = NX, NY

        # SIP keeps its own CRPIX that `slice` doesn't move
        if hasattr(slice_wcs, 'sip'):
            if slice_wcs.sip is not None:
                for c in [0, 1]:
                    slice_wcs.sip.crpix[c] = slice_wcs.wcs.crpix[c]

        # NOTE(review): hard-coded ACS chip center used as the lookup-table
        # reference -- presumably only correct for ACS/WFC CPDIS solutions
        ACS_CRPIX = [4096/2, 2048/2]  # ACS
        dx_crpix = slice_wcs.wcs.crpix[0] - ACS_CRPIX[0]
        dy_crpix = slice_wcs.wcs.crpix[1] - ACS_CRPIX[1]
        for ext in ['cpdis1', 'cpdis2', 'det2im1', 'det2im2']:
            if hasattr(slice_wcs, ext):
                wcs_ext = slice_wcs.__getattribute__(ext)
                if wcs_ext is not None:
                    wcs_ext.crval[0] += dx_crpix
                    wcs_ext.crval[1] += dy_crpix
                    slice_wcs.__setattr__(ext, wcs_ext)

        return slice_wcs
    def get_slice(self, slx=slice(480, 520), sly=slice(480, 520),
                  get_slice_header=True):
        """Return cutout version of the `ImageData` object

        Parameters
        ----------
        slx, sly : slice
            Slices in x and y dimensions to extract

        get_slice_header : bool
            Compute the full header of the slice.  This takes a bit of time
            and isn't necessary in all cases so can be omitted if only the
            sliced data are of interest and the header isn't needed.

        Returns
        -------
        slice_obj : `ImageData`
            New `ImageData` object of the sliced subregion

        Raises
        ------
        ValueError
            If the requested slices extend beyond the parent array shape
            `self.sh`.
        """
        # (y, x) origin of the cutout within this object's arrays
        origin = [sly.start, slx.start]
        NX = slx.stop - slx.start
        NY = sly.stop - sly.start

        # Test dimensions
        if (origin[0] < 0) | (origin[0]+NY > self.sh[0]):
            raise ValueError('Out of range in y')

        if (origin[1] < 0) | (origin[1]+NX > self.sh[1]):
            raise ValueError('Out of range in x')

        # Sliced subimage
        # sly = slice(origin[0], origin[0]+N)
        # slx = slice(origin[1], origin[1]+N)

        # Origin relative to the ultimate parent frame: this object may
        # itself already be a slice with a nonzero origin
        slice_origin = [self.origin[i] + origin[i] for i in range(2)]

        # WCS of the subregion, including SIP/lookup-table distortions
        slice_wcs = self.get_slice_wcs(self.wcs, slx=slx, sly=sly)

        # slice_wcs = self.wcs.slice((sly, slx))
        # slice_wcs.naxis1 = slice_wcs._naxis1 = NX
        # slice_wcs.naxis2 = slice_wcs._naxis2 = NY

        # Getting the full header can be slow as there appears to
        # be substantial overhead with header.copy() and wcs.to_header()
        if get_slice_header:
            slice_header = self.header.copy()
            slice_header['NAXIS1'] = NX
            slice_header['NAXIS2'] = NY

            # Sliced WCS keywords; rewrite PC matrix keywords as CD
            hwcs = slice_wcs.to_header(relax=True)
            for k in hwcs:
                if not k.startswith('PC'):
                    slice_header[k] = hwcs[k]
                else:
                    cd = k.replace('PC', 'CD')
                    slice_header[cd] = hwcs[k]
        else:
            slice_header = pyfits.Header()

        # Generate new object.  SCI/ERR are divided by photflam here,
        # presumably undone inside the `ImageData` constructor (not visible
        # in this file chunk) — TODO confirm
        slice_obj = ImageData(sci=self.data['SCI'][sly, slx]/self.photflam,
                              err=self.data['ERR'][sly, slx]/self.photflam,
                              dq=self.data['DQ'][sly, slx]*1,
                              header=slice_header, wcs=slice_wcs,
                              photflam=self.photflam, photplam=self.photplam,
                              origin=slice_origin, instrument=self.instrument,
                              filter=self.filter, pupil=self.pupil,
                              process_jwst_header=False)

        # Carry over calibration/bookkeeping attributes from the parent
        slice_obj.ref_photflam = self.ref_photflam
        slice_obj.ref_photplam = self.ref_photplam
        slice_obj.ref_filter = self.ref_filter

        slice_obj.mdrizsky = self.mdrizsky
        slice_obj.exptime = self.exptime
        slice_obj.ABZP = self.ABZP
        slice_obj.thumb_extension = self.thumb_extension

        # The REF extension, if present, is copied (`*1` forces a copy)
        if self.data['REF'] is not None:
            slice_obj.data['REF'] = self.data['REF'][sly, slx]*1
        else:
            slice_obj.data['REF'] = None

        slice_obj.grow = self.grow
        slice_obj.pad = self.pad
        slice_obj.parent_file = self.parent_file
        slice_obj.ref_file = self.ref_file
        slice_obj.sci_extn = self.sci_extn
        slice_obj.is_slice = True

        # if hasattr(slice_obj.wcs, 'sip'):
        #     if slice_obj.wcs.sip is not None:
        #         for c in [0,1]:
        #             slice_obj.wcs.sip.crpix[c] = slice_obj.wcs.wcs.crpix[c]
        #
        # ACS_CRPIX = [4096/2,2048/2] # ACS
        # dx_crpix = slice_obj.wcs.wcs.crpix[0] - ACS_CRPIX[0]
        # dy_crpix = slice_obj.wcs.wcs.crpix[1] - ACS_CRPIX[1]
        # for ext in ['cpdis1','cpdis2','det2im1','det2im2']:
        #     if hasattr(slice_obj.wcs, ext):
        #         wcs_ext = slice_obj.wcs.__getattribute__(ext)
        #         if wcs_ext is not None:
        #             wcs_ext.crval[0] += dx_crpix
        #             wcs_ext.crval[1] += dy_crpix
        #             slice_obj.wcs.__setattr__(ext, wcs_ext)

        return slice_obj  # , slx, sly
def get_HDUList(self, extver=1):
"""Convert attributes and data arrays to a `~astropy.io.fits.HDUList`
Parameters
----------
extver : int, float, str
value to use for the 'EXTVER' header keyword. For example, with
extver=1, the science extension can be addressed with the index
`HDU['SCI',1]`.
returns : `~astropy.io.fits.HDUList`
HDUList with header keywords copied from `self.header` along with
keywords for additional attributes. Will have `ImageHDU`
extensions 'SCI', 'ERR', and 'DQ', as well as 'REF' if a reference
file had been supplied.
"""
h = self.header.copy()
h['EXTVER'] = extver # self.filter #extver
h['FILTER'] = self.filter, 'element selected from filter wheel'
h['PUPIL'] = self.pupil, 'element selected from pupil wheel'
h['INSTRUME'] = (self.instrument,
'identifier for instrument used to acquire data')
h['PHOTFLAM'] = (self.photflam,
'inverse sensitivity, ergs/cm2/Ang/electron')
h['PHOTPLAM'] = self.photplam, 'Pivot wavelength (Angstroms)'
h['PARENT'] = self.parent_file, 'Parent filename'
h['SCI_EXTN'] = self.sci_extn, 'EXTNAME of the science data'
h['ISCUTOUT'] = self.is_slice, 'Arrays are sliced from larger image'
h['ORIGINX'] = self.origin[1], 'Origin from parent image, x'
h['ORIGINY'] = self.origin[0], 'Origin from parent image, y'
h['PAD'] = (self.pad, 'Image padding used')
hdu = []
exptime_corr = 1.
if 'BUNIT' in self.header:
if self.header['BUNIT'] == 'ELECTRONS':
exptime_corr = self.exptime
# Put back into original units
sci_data = self['SCI']*exptime_corr + self.mdrizsky
err_data = self['ERR']*exptime_corr
hdu.append(pyfits.ImageHDU(data=sci_data, header=h,
name='SCI'))
hdu.append(pyfits.ImageHDU(data=err_data, header=h,
name='ERR'))
hdu.append(pyfits.ImageHDU(data=self.data['DQ'], header=h, name='DQ'))
if self.data['REF'] is not None:
h['PHOTFLAM'] = self.ref_photflam
h['PHOTPLAM'] = self.ref_photplam
h['FILTER'] = self.ref_filter
h['REF_FILE'] = self.ref_file
hdu.append(pyfits.ImageHDU(data=self.data['REF'], header=h,
name='REF'))
hdul = pyfits.HDUList(hdu)
return hdul
def __getitem__(self, ext):
if self.data[ext] is None:
return None
if ext == 'REF':
return self.data['REF']/self.ref_photflam
elif ext == 'DQ':
return self.data['DQ']
else:
return self.data[ext]/self.photflam
def get_common_slices(self, other, verify_parent=True):
"""
Get slices of overlaps between two `ImageData` objects
"""
if verify_parent:
if self.parent_file != other.parent_file:
msg = ('Parent expodures don\'t match!\n' +
' self: {0}\n'.format(self.parent_file) +
' other: {0}\n'.format(other.parent_file))
raise IOError(msg)
ll = np.min([self.origin, other.origin], axis=0)
ur = np.max([self.origin+self.sh, other.origin+other.sh], axis=0)
# other in self
lls = np.minimum(other.origin - ll, self.sh)
urs = np.clip(other.origin + self.sh - self.origin, [0, 0], self.sh)
# self in other
llo = np.minimum(self.origin - ll, other.sh)
uro = np.clip(self.origin + other.sh - other.origin, [0, 0], other.sh)
self_slice = (slice(lls[0], urs[0]), slice(lls[1], urs[1]))
other_slice = (slice(llo[0], uro[0]), slice(llo[1], uro[1]))
return self_slice, other_slice
class GrismFLT(object):
"""Scripts for modeling of individual grism FLT images"""
    def __init__(self, grism_file='', sci_extn=1, direct_file='',
                 pad=200, ref_file=None, ref_ext=0, seg_file=None,
                 shrink_segimage=True, force_grism='G141', verbose=True,
                 process_jwst_header=True):
        """Read FLT files and, optionally, reference/segmentation images.

        Parameters
        ----------
        grism_file : str
            Grism image (optional).
            Empty string or filename of a FITS file that must contain
            extensions ('SCI', `sci_extn`), ('ERR', `sci_extn`), and
            ('DQ', `sci_extn`).  For example, a WFC3/IR "FLT" FITS file.

        sci_extn : int
            EXTNAME of the file to consider.  For WFC3/IR this can only be
            1.  For ACS and WFC3/UVIS, this can be 1 or 2 to specify the
            two chips.

        direct_file : str
            Direct image (optional).
            Empty string or filename of a FITS file that must contain
            extensions ('SCI', `sci_extn`), ('ERR', `sci_extn`), and
            ('DQ', `sci_extn`).  For example, a WFC3/IR "FLT" FITS file.

        pad : int
            Padding to add around the periphery of the images to allow
            modeling of dispersed spectra for objects that could otherwise
            fall off of the direct image itself.  Modeling them requires an
            external reference image (`ref_file`) that covers an area larger
            than the individual direct image itself (e.g., a mosaic of a
            survey field).

            For WFC3/IR spectra, the first order spectra reach 248 and 195
            pixels for G102 and G141, respectively, and `pad` could be set
            accordingly if the reference image is large enough.

        ref_file : str or `~astropy.io.fits.ImageHDU`/`~astropy.io.fits.PrimaryHDU`
            Image mosaic to use as the reference image in place of the
            direct image itself.  For example, this could be the deeper
            image drizzled from all direct images taken within a single
            visit or it could be a much deeper/wider image taken separately
            in perhaps even a different filter.

            .. note::
                Assumes that the WCS are aligned between `grism_file`,
                `direct_file` and `ref_file`!

        ref_ext : int
            FITS extension to use if `ref_file` is a filename string.

        seg_file : str or `~astropy.io.fits.ImageHDU`/`~astropy.io.fits.PrimaryHDU`
            Segmentation image mosaic to associate pixels with discrete
            objects.  This would typically be generated from a rectified
            image like `ref_file`, though here it is not required that
            `ref_file` and `seg_file` have the same image dimensions but
            rather just that the WCS are aligned between them.

        shrink_segimage : bool
            Try to make a smaller cutout of the reference images to speed
            up blotting and array copying.  This is most helpful for very
            large input mosaics.

        force_grism : str
            Use this grism in "simulation mode" where only `direct_file` is
            specified.

        verbose : bool
            Print status messages to the terminal.

        process_jwst_header : bool
            Passed through to `ImageData` for the grism and direct images
            (JWST-specific header handling in `ImageData` — see there).

        Attributes
        ----------
        grism, direct : `ImageData`
            Grism and direct image data and parameters

        conf : `~grizli.grismconf.aXeConf`
            Grism configuration object.

        seg : array-like
            Segmentation image array.

        model : array-like
            Model of the grism exposure with the same dimensions as the
            full detector array.

        object_dispersers : dict
            Container for storing information about what objects have been
            added to the model of the grism exposure

        catalog : `~astropy.table.Table`
            Associated photometric catalog.  Not required.
        """
        import stwcs.wcsutil

        # Read files
        self.grism_file = grism_file
        _GRISM_OPEN = False
        if os.path.exists(grism_file):
            grism_im = pyfits.open(grism_file)
            _GRISM_OPEN = True

            # ACS needs the stwcs HSTWCS to pick up the full distortion model
            if grism_im[0].header['INSTRUME'] == 'ACS':
                wcs = stwcs.wcsutil.HSTWCS(grism_im, ext=('SCI', sci_extn))
            else:
                wcs = None

            self.grism = ImageData(hdulist=grism_im, sci_extn=sci_extn,
                                   wcs=wcs,
                                   process_jwst_header=process_jwst_header)
        else:
            if (grism_file is None) | (grism_file == ''):
                self.grism = None
            else:
                print('\nFile not found: {0}!\n'.format(grism_file))
                raise IOError

        self.direct_file = direct_file
        _DIRECT_OPEN = False
        if os.path.exists(direct_file):
            direct_im = pyfits.open(direct_file)
            _DIRECT_OPEN = True

            if direct_im[0].header['INSTRUME'] == 'ACS':
                wcs = stwcs.wcsutil.HSTWCS(direct_im, ext=('SCI', sci_extn))
            else:
                wcs = None

            self.direct = ImageData(hdulist=direct_im, sci_extn=sci_extn,
                                    wcs=wcs,
                                    process_jwst_header=process_jwst_header)
        else:
            if (direct_file is None) | (direct_file == ''):
                self.direct = None
            else:
                print('\nFile not found: {0}!\n'.format(direct_file))
                raise IOError

        # ### Simulation mode, no grism exposure
        if self.grism is not None:
            self.pad = self.grism.pad
        else:
            self.pad = pad

        # Simulation mode: reuse the direct image as the "grism" exposure
        # and force the grism element name
        if (self.grism is None) & (self.direct is not None):
            self.grism = ImageData(hdulist=direct_im, sci_extn=sci_extn)
            self.grism_file = self.direct_file
            self.grism.filter = force_grism

        # Grism exposure only, assumes will get reference from ref_file
        if (self.direct is None) & (self.grism is not None):
            self.direct = ImageData(hdulist=grism_im, sci_extn=sci_extn)
            self.direct_file = self.grism_file

        # Add padding
        if self.direct is not None:
            if pad > 0:
                self.direct.add_padding(pad)

            self.direct.unset_dq()
            # Flag strongly negative pixels, then zero out everything flagged
            nbad = self.direct.flag_negative(sigma=-3)
            self.direct.data['SCI'] *= (self.direct.data['DQ'] == 0)

        if self.grism is not None:
            if pad > 0:
                self.grism.add_padding(pad)
                self.pad = self.grism.pad

            self.grism.unset_dq()
            nbad = self.grism.flag_negative(sigma=-3)
            self.grism.data['SCI'] *= (self.grism.data['DQ'] == 0)

        # Load data from saved model files, if available
        # if os.path.exists('%s_model.fits' %(self.grism_file)):
        #     pass

        # Holder for the full grism model array
        self.model = np.zeros_like(self.direct.data['SCI'])

        # Grism configuration: a DFILTER keyword in the grism header
        # overrides the direct-image filter; NIRCAM/NIRISS use the pupil
        if 'DFILTER' in self.grism.header:
            direct_filter = self.grism.header['DFILTER']
        elif self.grism.instrument in ['NIRCAM', 'NIRISS']:
            direct_filter = self.grism.pupil
        else:
            direct_filter = self.direct.filter

        conf_args = dict(instrume=self.grism.instrument,
                         filter=direct_filter,
                         grism=self.grism.filter,
                         module=self.grism.module,
                         chip=self.grism.ccdchip)

        self.conf_file = grismconf.get_config_filename(**conf_args)
        self.conf = grismconf.load_grism_config(self.conf_file)

        self.object_dispersers = OrderedDict()

        # Blot reference image
        self.process_ref_file(ref_file, ref_ext=ref_ext,
                              shrink_segimage=shrink_segimage,
                              verbose=verbose)

        # Blot segmentation image
        self.process_seg_file(seg_file, shrink_segimage=shrink_segimage,
                              verbose=verbose)

        # End things
        self.get_dispersion_PA()

        self.catalog = None
        self.catalog_file = None

        self.is_rotated = False
        self.has_edge_mask = False

        # Cleanup: close any files opened above
        if _GRISM_OPEN:
            grism_im.close()

        if _DIRECT_OPEN:
            direct_im.close()
    def process_ref_file(self, ref_file, ref_ext=0, shrink_segimage=True,
                         verbose=True):
        """Read and blot a reference image

        Parameters
        ----------
        ref_file : str or `~astropy.fits.io.ImageHDU` / `~astropy.fits.io.PrimaryHDU`
            Filename or `astropy.io.fits` Image HDU of the reference image.

        ref_ext : int
            FITS extension to use when `ref_file` is a filename string.

        shrink_segimage : bool
            Try to make a smaller cutout of the reference image to speed
            up blotting and array copying.  This is most helpful for very
            large input mosaics.

        verbose : bool
            Print some status information to the terminal

        Returns
        -------
        status : bool
            False if `ref_file` is None.  True if completes successfully.

        The blotted reference image is stored in the array attribute
        `self.direct.data['REF']`.

        The `ref_filter` attribute is determined from the image header and the
        `ref_photflam` scaling is taken either from the header if possible, or
        the global `photflam` variable defined at the top of this file.

        Raises
        ------
        TypeError
            If a PHOTFLAM/PHOTPLAM header value can't be scaled to a float.

        IndexError
            If the keywords are missing and the reference filter isn't found
            in the module-level fallback tables.
        """
        if ref_file is None:
            return False

        # Accept either an already-open image HDU or a filename
        if (isinstance(ref_file, pyfits.ImageHDU) |
                isinstance(ref_file, pyfits.PrimaryHDU)):
            self.ref_file = ref_file.fileinfo()['file'].name
            ref_str = ''
            ref_hdu = ref_file
            _IS_OPEN = False
        else:
            self.ref_file = ref_file
            ref_str = '{0}[0]'.format(self.ref_file)
            _IS_OPEN = True
            ref_im = pyfits.open(ref_file, load_lazy_hdus=False)
            ref_hdu = ref_im[ref_ext]

        refh = ref_hdu.header

        if shrink_segimage:
            # Cut the mosaic down to the exposure footprint (+ padding)
            ref_hdu = self.direct.shrink_large_hdu(ref_hdu, extra=self.pad,
                                                   verbose=True)

        if verbose:
            msg = '{0} / blot reference {1}'
            print(msg.format(self.direct_file, ref_str))

        # Resample (blot) the rectified mosaic to the distorted grism frame
        blotted_ref = self.grism.blot_from_hdu(hdu=ref_hdu,
                                               segmentation=False,
                                               interp='poly5')

        header_values = {}
        self.direct.ref_filter = utils.get_hst_filter(refh)
        self.direct.ref_file = ref_str

        # PHOTFLAM/PHOTPLAM from the header if present, otherwise from the
        # module-level per-filter fallback tables
        key_list = {'PHOTFLAM': photflam_list, 'PHOTPLAM': photplam_list}
        for key in ['PHOTFLAM', 'PHOTPLAM']:
            if key in refh:
                try:
                    header_values[key] = ref_hdu.header[key]*1.
                except TypeError:
                    msg = 'Problem processing header keyword {0}: ** {1} **'
                    print(msg.format(key, ref_hdu.header[key]))
                    raise TypeError
            else:
                filt = self.direct.ref_filter
                if filt in key_list[key]:
                    header_values[key] = key_list[key][filt]
                else:
                    msg = 'Filter "{0}" not found in {1} tabulated list'
                    print(msg.format(filt, key))
                    raise IndexError

        # Found keywords
        self.direct.ref_photflam = header_values['PHOTFLAM']
        self.direct.ref_photplam = header_values['PHOTPLAM']

        # TBD: compute something like a cross-correlation offset
        # between blotted reference and the direct image itself
        self.direct.data['REF'] = np.cast[np.float32](blotted_ref)
        # print self.direct.data['REF'].shape, self.direct.ref_photflam

        # Scale by PHOTFLAM; `ImageData.__getitem__` divides it back out
        self.direct.data['REF'] *= self.direct.ref_photflam

        # Fill empty pixels in the reference image from the SCI image,
        # but don't do it if direct['SCI'] is just a copy from the grism
        if not self.direct.filter.startswith('G'):
            empty = self.direct.data['REF'] == 0
            self.direct.data['REF'][empty] += self.direct['SCI'][empty]

        # self.direct.data['ERR'] *= 0.
        # self.direct.data['DQ'] *= 0

        # AB zeropoint for the scaled REF array.  NOTE(review): the `0*`
        # factor zeroes the PHOTFLAM term, presumably because REF has
        # already been multiplied by ref_photflam above — TODO confirm
        self.direct.ABZP = (0*np.log10(self.direct.ref_photflam) - 21.10 -
                            5*np.log10(self.direct.ref_photplam) + 18.6921)

        self.direct.thumb_extension = 'REF'

        if _IS_OPEN:
            ref_im.close()

        # refh['FILTER'].upper()
        return True
def process_seg_file(self, seg_file, shrink_segimage=True, verbose=True):
"""Read and blot a rectified segmentation image
Parameters
----------
seg_file : str or `~astropy.fits.io.ImageHDU` / `~astropy.fits.io.PrimaryHDU`
Filename or `astropy.io.fits` Image HDU of the segmentation image.
shrink_segimage : bool
Try to make a smaller cutout of the segmentation image to speed
up blotting and array copying. This is most helpful for very
large input mosaics.
verbose : bool
Print some status information to the terminal
Returns
-------
The blotted segmentation image is stored in the attribute `GrismFLT.seg`.
"""
if seg_file is not None:
if (isinstance(seg_file, pyfits.ImageHDU) |
isinstance(seg_file, pyfits.PrimaryHDU)):
self.seg_file = ''
seg_str = ''
seg_hdu = seg_file
segh = seg_hdu.header
_IS_OPEN = False
else:
self.seg_file = seg_file
seg_str = '{0}[0]'.format(self.seg_file)
seg_im = pyfits.open(seg_file)
seg_hdu = seg_im[0]
_IS_OPEN = True
if shrink_segimage:
seg_hdu = self.direct.shrink_large_hdu(seg_hdu,
extra=self.pad,
verbose=True)
# Make sure image big enough
seg_hdu = self.direct.expand_hdu(seg_hdu)
if verbose:
msg = '{0} / blot segmentation {1}'
print(msg.format(self.direct_file, seg_str))
blotted_seg = self.grism.blot_from_hdu(hdu=seg_hdu,
segmentation=True, grow=3,
interp='poly5')
self.seg = blotted_seg
if _IS_OPEN:
seg_im.close()
else:
self.seg = np.zeros(self.direct.sh, dtype=np.float32)
def get_dispersion_PA(self, decimals=0):
"""Compute exact PA of the dispersion axis, including tilt of the
trace and the FLT WCS
Parameters
----------
decimals : int or None
Number of decimal places to round to, passed to `~numpy.round`.
If None, then don't round.
Returns
-------
dispersion_PA : float
PA (angle East of North) of the dispersion axis.
"""
from astropy.coordinates import Angle
import astropy.units as u
# extra tilt of the 1st order grism spectra
x0 = self.conf.conf['BEAMA']
dy_trace, lam_trace = self.conf.get_beam_trace(x=507, y=507, dx=x0,
beam='A')
extra = np.arctan2(dy_trace[1]-dy_trace[0], x0[1]-x0[0])/np.pi*180
# Distorted WCS
crpix = self.direct.wcs.wcs.crpix
xref = [crpix[0], crpix[0]+1]
yref = [crpix[1], crpix[1]]
r, d = self.direct.wcs.all_pix2world(xref, yref, 1)
pa = Angle((extra +
np.arctan2(np.diff(r)*np.cos(d[0]/180*np.pi),
np.diff(d))[0]/np.pi*180)*u.deg)
dispersion_PA = pa.wrap_at(360*u.deg).value
if decimals is not None:
dispersion_PA = np.round(dispersion_PA, decimals=decimals)
self.dispersion_PA = dispersion_PA
return float(dispersion_PA)
    def compute_model_orders(self, id=0, x=None, y=None, size=10, mag=-1,
                             spectrum_1d=None, is_cgs=False,
                             compute_size=False, max_size=None, store=True,
                             in_place=True, get_beams=None,
                             psf_params=None,
                             verbose=True):
        """Compute dispersed spectrum for a given object id

        Parameters
        ----------
        id : int
            Object ID number to match in the segmentation image

        x, y : float
            Center of the cutout to extract

        size : int
            Radius of the cutout to extract.  The cutout is equivalent to

            >>> xc, yc = int(x), int(y)
            >>> thumb = self.direct.data['SCI'][yc-size:yc+size, xc-size:xc+size]

        mag : float
            Specified object magnitude, which will be compared to the
            "MMAG_EXTRACT_[BEAM]" parameters in `self.conf` to decide if the
            object is bright enough to compute the higher spectral orders.
            Default of -1 means compute all orders listed in
            `self.conf.beams`

        spectrum_1d : None or [`~numpy.array`, `~numpy.array`]
            Template 1D spectrum to convolve with the grism disperser.  If
            None, assumes trivial spectrum flat in f_lambda flux densities.
            Otherwise, the template is taken to be

            >>> wavelength, flux = spectrum_1d

        is_cgs : bool
            Flux units of `spectrum_1d[1]` are cgs f_lambda flux densities,
            rather than normalized in the detection band.

        compute_size : bool
            Ignore `x`, `y`, and `size` and compute the extent of the
            segmentation polygon directly using
            `utils_c.disperse.compute_segmentation_limits`.

        max_size : int or None
            Enforce a maximum size of the cutout when using `compute_size`.

        store : bool
            If True, then store the computed beams in the OrderedDict
            `self.object_dispersers[id]`.

            If many objects are computed, this can be memory intensive.  To
            save memory, set to False and then the function just stores the
            input template spectrum (`spectrum_1d`) and the beams will have
            to be recomputed if necessary.

        in_place : bool
            If True, add the computed spectral orders into `self.model`.
            Otherwise, make a clean array with only the orders of the given
            object.

        get_beams : list or None
            Spectral orders to retrieve with names as defined in the
            configuration files, e.g., ['A'] generally for the +1st order of
            HST grisms.  If `None`, then get all orders listed in the
            `beams` attribute of the `~grizli.grismconf.aXeConf`
            configuration object.

        psf_params : list
            Optional parameters for generating an
            `~grizli.utils.EffectivePSF` object for the spatial morphology.

        verbose : bool
            Print messages when an object is not found.

        Returns
        -------
        output : bool or `numpy.array`
            If `in_place` is True, return status of True if everything goes
            OK.  The computed spectral orders are stored in place in
            `self.model`.

            Returns False if the specified `id` is not found in the
            segmentation array independent of `in_place`.

            If `in_place` is False, return a full array including the model
            for the single object.
        """
        from .utils_c import disperse

        # debug
        # x=None; y=None; size=10; mag=-1; spectrum_1d=None; compute_size=True; store=False; in_place=False; add=True; get_beams=['A']; verbose=True

        # Pull out any previously computed dispersers for this object so
        # the old contribution can be subtracted from the model later
        if id in self.object_dispersers:
            object_in_model = True
            beams = self.object_dispersers[id]

            out = self.object_dispersers[id]

            # Handle pre 0.3.0-7 formats
            if len(out) == 3:
                old_cgs, old_spectrum_1d, beams = out
            else:
                old_cgs, old_spectrum_1d = out
                beams = None
        else:
            object_in_model = False
            beams = None

        # Thumbnail extension: the blotted reference image if available,
        # otherwise the direct science image
        if self.direct.data['REF'] is None:
            ext = 'SCI'
        else:
            ext = 'REF'

        # set up the beams to extract
        if get_beams is None:
            beam_names = self.conf.beams
        else:
            beam_names = get_beams

        # Did we initialize the PSF model this call?
        INIT_PSF_NOW = False

        # Do we need to compute the dispersed beams?
        if beams is None:
            # Use catalog
            xcat = ycat = None
            if self.catalog is not None:
                ix = self.catalog['id'] == id
                if ix.sum() == 0:
                    if verbose:
                        print('ID {0:d} not found in segmentation image'.format(id))
                    return False

                # Catalog positions may carry astropy units; strip them.
                # (-1 converts from 1-indexed to 0-indexed coordinates)
                if hasattr(self.catalog['x_flt'][ix][0], 'unit'):
                    xcat = self.catalog['x_flt'][ix][0].value - 1
                    ycat = self.catalog['y_flt'][ix][0].value - 1
                else:
                    xcat = self.catalog['x_flt'][ix][0] - 1
                    ycat = self.catalog['y_flt'][ix][0] - 1

                # print '!!! X, Y: ', xcat, ycat, self.direct.origin, size

            # use x, y if defined
            if x is not None:
                xcat = x
            if y is not None:
                ycat = y

            if (compute_size) | (x is None) | (y is None) | (size is None):
                # Get the array indices of the segmentation region
                out = disperse.compute_segmentation_limits(self.seg, id,
                                             self.direct.data[ext],
                                             self.direct.sh)

                ymin, ymax, y, xmin, xmax, x, area, segm_flux = out
                if (area == 0) | ~np.isfinite(x) | ~np.isfinite(y):
                    if verbose:
                        print('ID {0:d} not found in segmentation image'.format(id))
                    return False

                # Object won't disperse spectrum onto the grism image
                if ((ymax < self.pad-5) |
                        (ymin > self.direct.sh[0]-self.pad+5) |
                        (ymin == 0) |
                        (ymax == self.direct.sh[0]) |
                        (xmin == 0) |
                        (xmax == self.direct.sh[1])):
                    return True

                if compute_size:
                    try:
                        size = int(np.ceil(np.max([x-xmin, xmax-x,
                                                   y-ymin, ymax-y])))
                    except ValueError:
                        return False

                    size += 4

                    # Enforce minimum size
                    # size = np.maximum(size, 16)
                    size = np.maximum(size, 26)

                    # To do: enforce a larger minimum cutout size for grisms
                    # that need it, e.g., UVIS/G280L

                    # maximum size
                    if max_size is not None:
                        size = np.min([size, max_size])

                    # Avoid problems at the array edges
                    size = np.min([size, int(x)-2, int(y)-2])

                    if (size < 4):
                        return True

            # Thumbnails
            # print '!! X, Y: ', x, y, self.direct.origin, size

            # Integer center of the cutout and the sub-pixel offsets
            # relative to it (xcenter/ycenter)
            if xcat is not None:
                xc, yc = int(np.round(xcat))+1, int(np.round(ycat))+1
                xcenter = -(xcat-(xc-1))
                ycenter = -(ycat-(yc-1))
            else:
                xc, yc = int(np.round(x))+1, int(np.round(y))+1
                xcenter = -(x-(xc-1))
                ycenter = -(y-(yc-1))

            origin = [yc-size + self.direct.origin[0],
                      xc-size + self.direct.origin[1]]

            thumb = self.direct.data[ext][yc-size:yc+size, xc-size:xc+size]
            seg_thumb = self.seg[yc-size:yc+size, xc-size:xc+size]

            # Test that the id is actually in the thumbnail
            test = disperse.compute_segmentation_limits(seg_thumb, id, thumb,
                                                np.array(thumb.shape))
            if test[-2] == 0:
                if verbose:
                    print(f'ID {id} not found in segmentation image')

                return False

            # # Get precomputed dispersers
            # beams, old_spectrum_1d, old_cgs = None, None, False
            # if object_in_model:
            #     out = self.object_dispersers[id]
            #
            #     # Handle pre 0.3.0-7 formats
            #     if len(out) == 3:
            #         old_cgs, old_spectrum_1d, old_beams = out
            #     else:
            #         old_cgs, old_spectrum_1d = out
            #         old_beams = None
            #
            #     # Pull out just the requested beams
            #     if old_beams is not None:
            #         beams = OrderedDict()
            #         for b in beam_names:
            #             beams[b] = old_beams[b]
            #
            # if beams is None:

            # Compute spectral orders ("beams")
            beams = OrderedDict()
            for b in beam_names:
                # Only compute order if bright enough
                if mag > self.conf.conf['MMAG_EXTRACT_{0}'.format(b)]:
                    continue

                # NOTE(review): bare except silently skips beams for which
                # the disperser can't be built — intentional best-effort?
                try:
                    beam = GrismDisperser(id=id,
                                          direct=thumb,
                                          segmentation=seg_thumb,
                                          xcenter=xcenter,
                                          ycenter=ycenter,
                                          origin=origin,
                                          pad=self.pad,
                                          grow=self.grism.grow,
                                          beam=b,
                                          conf=self.conf,
                                          fwcpos=self.grism.fwcpos,
                                          MW_EBV=self.grism.MW_EBV)
                except:
                    continue

                # Set PSF model if necessary
                if psf_params is not None:
                    store = True
                    INIT_PSF_NOW = True
                    # print('xxx Init PSF', b)
                    if self.direct.ref_filter is None:
                        psf_filter = self.direct.filter
                    else:
                        psf_filter = self.direct.ref_filter

                    beam.x_init_epsf(flat_sensitivity=False,
                                     psf_params=psf_params,
                                     psf_filter=psf_filter, yoff=0.)

                beams[b] = beam

            # Compute old model so it can be subtracted before adding the new
            if object_in_model:
                for b in beams:
                    beam = beams[b]
                    if hasattr(beam, 'psf') & (not INIT_PSF_NOW):
                        store = True
                        # print('xxx OLD PSF')
                        beam.compute_model_psf(spectrum_1d=old_spectrum_1d,
                                               is_cgs=old_cgs)
                    else:
                        beam.compute_model(spectrum_1d=old_spectrum_1d,
                                           is_cgs=old_cgs)

        # Return just the requested beam cutouts without touching the model
        if get_beams:
            out_beams = OrderedDict()
            for b in beam_names:
                out_beams[b] = beams[b]

            return out_beams

        if in_place:
            # Update the internal model attribute
            output = self.model

            if store:
                # Save the computed beams
                self.object_dispersers[id] = is_cgs, spectrum_1d, beams
            else:
                # Just save the model spectrum (or empty spectrum)
                self.object_dispersers[id] = is_cgs, spectrum_1d, None
        else:
            # Create a fresh array
            output = np.zeros_like(self.model)

        # if in_place:
        #     ### Update the internal model attribute
        #     output = self.model
        # else:
        #     ### Create a fresh array
        #     output = np.zeros_like(self.model)

        # Set PSF model if necessary
        if psf_params is not None:
            if self.direct.ref_filter is None:
                psf_filter = self.direct.filter
            else:
                psf_filter = self.direct.ref_filter

        # Loop through orders and add to the full model array, in-place or
        # a separate image
        for b in beams:
            beam = beams[b]

            # Subtract previously-added model
            if object_in_model & in_place:
                beam.add_to_full_image(-beam.model, output)

            # Update PSF params
            # if psf_params is not None:
            #     skip_init_psf = False
            #     if hasattr(beam, 'psf_params'):
            #         skip_init_psf |= np.product(np.isclose(beam.psf_params, psf_params)) > 0
            #
            #     if not skip_init_psf:
            #         beam.x_init_epsf(flat_sensitivity=False, psf_params=psf_params, psf_filter=psf_filter, yoff=0.06)

            # Compute model
            if hasattr(beam, 'psf'):
                beam.compute_model_psf(spectrum_1d=spectrum_1d,
                                       is_cgs=is_cgs)
            else:
                beam.compute_model(spectrum_1d=spectrum_1d,
                                   is_cgs=is_cgs)

            # Add in new model
            beam.add_to_full_image(beam.model, output)

        if in_place:
            return True
        else:
            return beams, output
def compute_full_model(self, ids=None, mags=None, mag_limit=22, store=True, verbose=False, size=10, compute_size=True):
"""Compute flat-spectrum model for multiple objects.
Parameters
----------
ids : None, list, or `~numpy.array`
id numbers to compute in the model. If None then take all ids
from unique values in `self.seg`.
mags : None, float, or list / `~numpy.array`
magnitudes corresponding to list if `ids`. If None, then compute
magnitudes based on the flux in segmentation regions and
zeropoints determined from PHOTFLAM and PHOTPLAM.
size, compute_size : int, bool
Sizes of individual cutouts, see
`~grizli.model.GrismFLT.compute_model_orders`.
Returns
-------
Updated model stored in `self.model` attribute.
"""
try:
from tqdm import tqdm
has_tqdm = True
except:
has_tqdm = False
print('(`pip install tqdm` for a better verbose iterator)')
from .utils_c import disperse
if ids is None:
ids = np.unique(self.seg)[1:]
# If `mags` array not specified, compute magnitudes within
# segmentation regions.
if mags is None:
if verbose:
print('Compute IDs/mags')
mags = np.zeros(len(ids))
for i, id in enumerate(ids):
out = disperse.compute_segmentation_limits(self.seg, id,
self.direct.data[self.direct.thumb_extension],
self.direct.sh)
ymin, ymax, y, xmin, xmax, x, area, segm_flux = out
mags[i] = self.direct.ABZP - 2.5*np.log10(segm_flux)
ix = mags < mag_limit
ids = ids[ix]
mags = mags[ix]
else:
if np.isscalar(mags):
mags = [mags for i in range(len(ids))]
else:
if len(ids) != len(mags):
raise ValueError('`ids` and `mags` lists different sizes')
# Now compute the full model
if verbose & has_tqdm:
iterator = tqdm(zip(ids, mags))
else:
iterator = zip(ids, mags)
for id_i, mag_i in iterator:
self.compute_model_orders(id=id_i, compute_size=compute_size,
mag=mag_i, size=size,
in_place=True, store=store)
def smooth_mask(self, gaussian_width=4, threshold=2.5):
"""Compute a mask where smoothed residuals greater than some value
Perhaps useful for flagging contaminated pixels that aren't in the
model, such as high orders dispersed from objects that fall off of the
direct image, but this hasn't yet been extensively tested.
Parameters
----------
gaussian_width : float
Width of the Gaussian filter used with
`~scipy.ndimage.gaussian_filter`.
threshold : float
Threshold, in sigma, above which to flag residuals.
Returns
-------
Nothing, but pixels are masked in `self.grism.data['SCI']`.
"""
import scipy.ndimage as nd
mask = self.grism['SCI'] != 0
resid = (self.grism['SCI'] - self.model)*mask
sm = nd.gaussian_filter(np.abs(resid), gaussian_width)
resid_mask = (np.abs(sm) > threshold*self.grism['ERR'])
self.grism.data['SCI'][resid_mask] = 0
def blot_catalog(self, input_catalog, columns=['id', 'ra', 'dec'],
sextractor=False, ds9=None):
"""Compute detector-frame coordinates of sky positions in a catalog.
Parameters
----------
input_catalog : `~astropy.table.Table`
Full catalog with sky coordinates. Can be SExtractor or other.
columns : [str,str,str]
List of columns that specify the object id, R.A. and Decl. For
catalogs created with SExtractor this might be
['NUMBER', 'X_WORLD', 'Y_WORLD'].
Detector coordinates will be computed with
`self.direct.wcs.all_world2pix` with `origin=1`.
ds9 : `~grizli.ds9.DS9`, optional
If provided, load circular regions at the derived detector
coordinates.
Returns
-------
catalog : `~astropy.table.Table`
New catalog with columns 'x_flt' and 'y_flt' of the detector
coordinates. Also will copy the `columns` names to columns with
names 'id','ra', and 'dec' if necessary, e.g., for SExtractor
catalogs.
"""
from astropy.table import Column
if sextractor:
columns = ['NUMBER', 'X_WORLD', 'Y_WORLD']
# Detector coordinates. N.B.: 1 indexed!
xy = self.direct.wcs.all_world2pix(input_catalog[columns[1]],
input_catalog[columns[2]], 1,
tolerance=-4,
quiet=True)
# Objects with positions within the image
sh = self.direct.sh
keep = ((xy[0] > 0) & (xy[0] < sh[1]) &
(xy[1] > (self.pad-5)) & (xy[1] < (sh[0]-self.pad+5)))
catalog = input_catalog[keep]
# Remove columns if they exist
for col in ['x_flt', 'y_flt']:
if col in catalog.colnames:
catalog.remove_column(col)
# Columns with detector coordinates
catalog.add_column(Column(name='x_flt', data=xy[0][keep]))
catalog.add_column(Column(name='y_flt', data=xy[1][keep]))
# Copy standardized column names if necessary
if ('id' not in catalog.colnames):
catalog.add_column(Column(name='id', data=catalog[columns[0]]))
if ('ra' not in catalog.colnames):
catalog.add_column(Column(name='ra', data=catalog[columns[1]]))
if ('dec' not in catalog.colnames):
catalog.add_column(Column(name='dec', data=catalog[columns[2]]))
# Show positions in ds9
if ds9:
for i in range(len(catalog)):
x_flt, y_flt = catalog['x_flt'][i], catalog['y_flt'][i]
reg = 'circle {0:f} {1:f} 5\n'.format(x_flt, y_flt)
ds9.set('regions', reg)
return catalog
    def photutils_detection(self, use_seg=False, data_ext='SCI',
                            detect_thresh=2., grow_seg=5, gauss_fwhm=2.,
                            verbose=True, save_detection=False, ZP=None):
        """Use photutils to detect objects and make segmentation map

        Parameters
        ----------
        use_seg : bool
            Pass the current `self.seg` as the starting segmentation image
            to the detection function.

        data_ext : str
            Extension of `self.direct.data` used as the detection image
            ('SCI' or 'REF').

        detect_thresh : float
            Detection threshold, in sigma

        grow_seg : int
            Number of pixels to grow around the perimeter of detected objects
            with a maximum filter

        gauss_fwhm : float
            FWHM of Gaussian convolution kernel that smoothes the detection
            image.

        verbose : bool
            Print logging information to the terminal

        save_detection : bool
            Save the detection images and catalogs

        ZP : float or None
            AB magnitude zeropoint of the science array. If `None` then, try
            to compute based on PHOTFLAM and PHOTPLAM values and use zero if
            that fails.

        Returns
        ---------
        status : bool
            True if completed successfully. False if `data_ext=='REF'` but
            no reference image found.

            Stores an astropy.table.Table object to `self.catalog` and a
            segmentation array to `self.seg`.
        """
        if ZP is None:
            if ((self.direct.filter in photflam_list.keys()) &
                (self.direct.filter in photplam_list.keys())):
                # ABMAG_ZEROPOINT from
                # http://www.stsci.edu/hst/wfc3/phot_zp_lbn
                ZP = (-2.5*np.log10(photflam_list[self.direct.filter]) -
                      21.10 - 5*np.log10(photplam_list[self.direct.filter]) +
                      18.6921)
            else:
                ZP = 0.

        if use_seg:
            seg = self.seg
        else:
            seg = None

        # Use the uncertainty array only if it's been populated; convert to
        # the same f-lambda units as the detection image
        if self.direct.data['ERR'].max() != 0.:
            err = self.direct.data['ERR']/self.direct.photflam
        else:
            err = None

        if (data_ext == 'REF'):
            if (self.direct.data['REF'] is not None):
                err = None
            else:
                print('No reference data found for `self.direct.data[\'REF\']`')
                return False

        go_detect = utils.detect_with_photutils
        cat, seg = go_detect(self.direct.data[data_ext]/self.direct.photflam,
                             err=err, dq=self.direct.data['DQ'], seg=seg,
                             detect_thresh=detect_thresh, npixels=8,
                             grow_seg=grow_seg, gauss_fwhm=gauss_fwhm,
                             gsize=3, wcs=self.direct.wcs,
                             save_detection=save_detection,
                             root=self.direct_file.split('.fits')[0],
                             background=None, gain=None, AB_zeropoint=ZP,
                             overwrite=True, verbose=verbose)

        self.catalog = cat
        self.catalog_file = '<photutils>'

        self.seg = seg

        return True
def load_photutils_detection(self, seg_file=None, seg_cat=None,
catalog_format='ascii.commented_header'):
"""
Load segmentation image and catalog, either from photutils
or SExtractor.
If SExtractor, use `catalog_format='ascii.sextractor'`.
"""
root = self.direct_file.split('.fits')[0]
if seg_file is None:
seg_file = root + '.detect_seg.fits'
if not os.path.exists(seg_file):
print('Segmentation image {0} not found'.format(seg_file))
return False
with pyfits.open(seg_file) as seg_im:
self.seg = seg_im[0].data.astype(np.float32)
if seg_cat is None:
seg_cat = root + '.detect.cat'
if not os.path.exists(seg_cat):
print('Segmentation catalog {0} not found'.format(seg_cat))
return False
self.catalog = Table.read(seg_cat, format=catalog_format)
self.catalog_file = seg_cat
def save_model(self, overwrite=True, verbose=True):
"""Save model properties to FITS file
"""
try:
import cPickle as pickle
except:
# Python 3
import pickle
root = self.grism_file.split('_flt.fits')[0].split('_rate.fits')[0]
root = root.split('_elec.fits')[0]
h = pyfits.Header()
h['GFILE'] = (self.grism_file, 'Grism exposure name')
h['GFILTER'] = (self.grism.filter, 'Grism spectral element')
h['INSTRUME'] = (self.grism.instrument, 'Instrument of grism file')
h['PAD'] = (self.pad, 'Image padding used')
h['DFILE'] = (self.direct_file, 'Direct exposure name')
h['DFILTER'] = (self.direct.filter, 'Grism spectral element')
h['REF_FILE'] = (self.ref_file, 'Reference image')
h['SEG_FILE'] = (self.seg_file, 'Segmentation image')
h['CONFFILE'] = (self.conf_file, 'Configuration file')
h['DISP_PA'] = (self.dispersion_PA, 'Dispersion position angle')
h0 = pyfits.PrimaryHDU(header=h)
model = pyfits.ImageHDU(data=self.model, header=self.grism.header,
name='MODEL')
seg = pyfits.ImageHDU(data=self.seg, header=self.grism.header,
name='SEG')
hdu = pyfits.HDUList([h0, model, seg])
if 'REF' in self.direct.data:
ref_header = self.grism.header.copy()
ref_header['FILTER'] = self.direct.ref_filter
ref_header['PARENT'] = self.ref_file
ref_header['PHOTFLAM'] = self.direct.ref_photflam
ref_header['PHOTPLAM'] = self.direct.ref_photplam
ref = pyfits.ImageHDU(data=self.direct['REF'],
header=ref_header, name='REFERENCE')
hdu.append(ref)
hdu.writeto('{0}_model.fits'.format(root), overwrite=overwrite,
output_verify='fix')
fp = open('{0}_model.pkl'.format(root), 'wb')
pickle.dump(self.object_dispersers, fp)
fp.close()
if verbose:
print('Saved {0}_model.fits and {0}_model.pkl'.format(root))
def save_full_pickle(self, verbose=True):
"""Save entire `GrismFLT` object to a pickle
"""
try:
import cPickle as pickle
except:
# Python 3
import pickle
root = self.grism_file.split('_flt.fits')[0].split('_cmb.fits')[0]
root = root.split('_flc.fits')[0].split('_rate.fits')[0]
root = root.split('_elec.fits')[0]
if root == self.grism_file:
# unexpected extension, so just insert before '.fits'
root = self.grism_file.split('.fits')[0]
hdu = pyfits.HDUList([pyfits.PrimaryHDU()])
for key in self.direct.data.keys():
hdu.append(pyfits.ImageHDU(data=self.direct.data[key],
header=self.direct.header,
name='D'+key))
for key in self.grism.data.keys():
hdu.append(pyfits.ImageHDU(data=self.grism.data[key],
header=self.grism.header,
name='G'+key))
hdu.append(pyfits.ImageHDU(data=self.seg,
header=self.grism.header,
name='SEG'))
hdu.append(pyfits.ImageHDU(data=self.model,
header=self.grism.header,
name='MODEL'))
hdu.writeto('{0}.{1:02d}.GrismFLT.fits'.format(root, self.grism.sci_extn), overwrite=True, output_verify='fix')
# zero out large data objects
self.direct.data = self.grism.data = self.seg = self.model = None
fp = open('{0}.{1:02d}.GrismFLT.pkl'.format(root, self.grism.sci_extn), 'wb')
pickle.dump(self, fp)
fp.close()
self.save_wcs(overwrite=True, verbose=False)
    def save_wcs(self, overwrite=True, verbose=True):
        """Write '*.wcs.fits' files for the (padded) exposure WCS

        One file '{parent}.{ext:02d}.wcs.fits' is written per unique parent
        exposure (grism, plus direct if it comes from a different file),
        storing the WCS as FITS headers along with the 'PAD' keyword.

        Parameters
        ----------
        overwrite : bool
            Overwrite existing output files.

        verbose : bool
            Print the name of each file that is written.
        """
        # Only one file needed if direct and grism come from the same parent
        if self.direct.parent_file == self.grism.parent_file:
            base_list = [self.grism]
        else:
            base_list = [self.direct, self.grism]

        for base in base_list:
            hwcs = base.wcs.to_fits(relax=True)
            hwcs[0].header['PAD'] = base.pad

            # Chip-dependent extension numbering when CCDCHIP is present,
            # otherwise use the extension version directly
            if 'CCDCHIP' in base.header:
                ext = {1: 2, 2: 1}[base.header['CCDCHIP']]
            else:
                ext = base.header['EXTVER']

            wcsfile = base.parent_file.replace('.fits', '.{0:02d}.wcs.fits'.format(ext))

            # `clobber` fallback for backwards compatibility with old astropy
            try:
                hwcs.writeto(wcsfile, overwrite=overwrite)
            except:
                hwcs.writeto(wcsfile, clobber=overwrite)

            if verbose:
                print(wcsfile)
def load_from_fits(self, save_file):
"""Load saved data from a FITS file
Parameters
----------
save_file : str
Filename of the saved output
Returns
-------
True if completed successfully
"""
fits = pyfits.open(save_file)
self.seg = fits['SEG'].data*1
self.model = fits['MODEL'].data*1
self.direct.data = OrderedDict()
self.grism.data = OrderedDict()
for ext in range(1, len(fits)):
key = fits[ext].header['EXTNAME'][1:]
if fits[ext].header['EXTNAME'].startswith('D'):
if fits[ext].data is None:
self.direct.data[key] = None
else:
self.direct.data[key] = fits[ext].data*1
elif fits[ext].header['EXTNAME'].startswith('G'):
if fits[ext].data is None:
self.grism.data[key] = None
else:
self.grism.data[key] = fits[ext].data*1
else:
pass
fits.close()
del(fits)
return True
    def transform_NIRISS(self, verbose=True):
        """
        Rotate data & wcs so that spectra are increasing to +x

        The rotation depends on the instrument and grism element; calling
        the method a second time applies the inverse rotation
        (`self.is_rotated` toggles the state).

        Parameters
        ----------
        verbose : bool
            Print the rotation state after the transform.
        """
        if self.grism.instrument not in ['NIRCAM', 'NIRISS']:
            return True

        # Number of 90-degree rotations to apply with `np.rot90`
        if self.grism.instrument == 'NIRISS':
            if self.grism.filter == 'GR150C':
                rot = 2
            else:
                rot = -1
        elif self.grism.instrument in ['NIRCAM', 'NIRCAMA']:
            # Module A
            if self.grism.pupil == 'GRISMC':
                rot = 1
            else:
                # Do nothing, A+GRISMR disperses to +x
                return True
        elif self.grism.instrument == 'NIRCAMB':
            if self.grism.pupil == 'GRISMC':
                rot = 1
            else:
                rot = 2

        # If already rotated, apply the inverse rotation to undo it
        if self.is_rotated:
            rot *= -1

        self.is_rotated = not self.is_rotated
        if verbose:
            print('Transform NIRISS: flip={0}'.format(self.is_rotated))

        # Compute new CRPIX coordinates
        center = np.array(self.grism.sh)/2.+0.5
        crpix = self.grism.wcs.wcs.crpix

        # 2D rotation matrix applied to the CRPIX offset from image center
        rad = np.deg2rad(-90*rot)
        mat = np.zeros((2, 2))
        mat[0, :] = np.array([np.cos(rad), -np.sin(rad)])
        mat[1, :] = np.array([np.sin(rad), np.cos(rad)])

        crpix_new = np.dot(mat, crpix-center)+center

        for obj in [self.grism, self.direct]:
            obj.header['CRPIX1'] = crpix_new[0]
            obj.header['CRPIX2'] = crpix_new[1]

            # Get rotated CD
            out_wcs = utils.transform_wcs(obj.wcs, translation=[0., 0.], rotation=rad, scale=1.)
            new_cd = out_wcs.wcs.cd

            for i in range(2):
                for j in range(2):
                    obj.header['CD{0}_{1}'.format(i+1, j+1)] = new_cd[i, j]

            # Update wcs
            obj.get_wcs()
            if obj.wcs.wcs.has_pc():
                obj.get_wcs()

            # Rotate data
            for k in obj.data.keys():
                if obj.data[k] is not None:
                    obj.data[k] = np.rot90(obj.data[k], rot)

        # Rotate segmentation image
        self.seg = np.rot90(self.seg, rot)
        self.model = np.rot90(self.model, rot)

        #print('xx Rotate images {0}'.format(rot))

        # Recompute detector-frame catalog coordinates in the rotated frame
        if self.catalog is not None:
            #print('xx Rotate catalog {0}'.format(rot))
            self.catalog = self.blot_catalog(self.catalog,
                                             sextractor=('X_WORLD' in self.catalog.colnames))
    def apply_POM(self, warn_if_too_small=True, verbose=True):
        """
        Apply pickoff mask to segmentation map to control sources that are dispersed onto the detector

        Parameters
        ----------
        warn_if_too_small : bool
            Warn if `self.pad` is too small for the full POM coverage.

        verbose : bool
            Print the name of the POM reference file that is applied.

        Returns
        -------
        status : bool
            False if the POM reference file wasn't found, True otherwise.
        """
        if not self.grism.instrument.startswith('NIRCAM'):
            print('POM only defined for NIRCam')
            return True

        pom_file = os.path.join(GRIZLI_PATH,
                                f'CONF/GRISM_NIRCAM/V2/NIRCAM_LW_POM_Mod{self.grism.module}.fits')

        if not os.path.exists(pom_file):
            print(f'Couldn\'t find POM reference file {pom_file}')
            return False

        if verbose:
            print(f'NIRCam: apply POM geometry from {pom_file}')

        pom = pyfits.open(pom_file)[-1]
        pomh = pom.header

        if (self.pad < 790) & warn_if_too_small:
            print('Warning: `pad` should be > 790 for NIRCam to catch '
                  'all out-of-field sources within the POM coverage.')

        # Slice geometry
        a_origin = np.array([-self.pad, -self.pad])
        a_shape = np.array(self.grism.sh)

        b_origin = np.array([-pomh['NOMYSTRT'], -pomh['NOMXSTRT']])
        b_shape = np.array(pom.data.shape)

        self_sl, pom_sl = utils.get_common_slices(a_origin, a_shape,
                                                  b_origin, b_shape)

        # Zero out segmentation pixels that fall outside the POM footprint
        pom_data = self.seg*0
        pom_data[self_sl] += pom.data[pom_sl]

        self.pom_data = pom_data
        self.seg *= (pom_data > 0)

        return True
    def mask_mosaic_edges(self, sky_poly=None, verbose=True, force=False, err_scale=10, dq_mask=False, dq_value=1024, resid_sn=7):
        """
        Mask edges of exposures that might not have modeled spectra

        Parameters
        ----------
        sky_poly : polygon object with a `boundary.xy` attribute, or None
            Sky footprint of the region where spectra have been modeled.
            If None, do nothing.

        verbose : bool
            Print a summary of the applied mask.

        force : bool
            Apply the mask even if `self.has_edge_mask` is already set.

        err_scale : float
            Factor by which the `ERR` array is inflated in the masked
            region (used when `dq_mask` is False).

        dq_mask : bool
            If True, set `dq_value` in the `DQ` extension instead of
            scaling the uncertainties.

        dq_value : int
            DQ value OR'ed into the `DQ` array when `dq_mask` is True.

        resid_sn : float
            If > 0, only mask pixels where the (dilated) data-minus-model
            residual exceeds `resid_sn` times the uncertainty.
        """
        import pyregion
        import scipy.ndimage as nd

        if (self.has_edge_mask) & (force is False):
            return True

        if sky_poly is None:
            return True

        # Footprint polygon in detector coordinates
        xy_image = self.grism.wcs.all_world2pix(np.array(sky_poly.boundary.xy).T, 0)

        # Calculate edge for mask
        #xedge = 100
        x0 = 0
        y0 = (self.grism.sh[0]-2*self.pad)/2
        dx = np.arange(500)
        tr_y, tr_lam = self.conf.get_beam_trace(x0, y0, dx=dx, beam='A')
        tr_sens = np.interp(tr_lam, self.conf.sens['A']['WAVELENGTH'],
                            self.conf.sens['A']['SENSITIVITY'],
                            left=0, right=0)

        # Maximum extent of the first-order trace with >5% peak sensitivity
        xedge = dx[tr_sens > tr_sens.max()*0.05].max()

        xy_image[:, 0] += xedge

        xy_str = 'image;polygon('+','.join(['{0:.1f}'.format(p) for p in xy_image.flatten()])+')'

        reg = pyregion.parse(xy_str)
        mask = reg.get_mask(shape=tuple(self.grism.sh))*1 == 0

        # Only mask large residuals
        if resid_sn > 0:
            resid_mask = (self.grism['SCI'] - self.model) > resid_sn*self.grism['ERR']
            resid_mask = nd.binary_dilation(resid_mask, iterations=3)
            mask &= resid_mask

        if dq_mask:
            self.grism.data['DQ'] |= dq_value*mask
            if verbose:
                print('# mask mosaic edges: {0} ({1}, {2} pix) DQ={3:.0f}'.format(self.grism.parent_file, self.grism.filter, xedge, dq_value))
        else:
            self.grism.data['ERR'][mask] *= err_scale
            if verbose:
                print('# mask mosaic edges: {0} ({1}, {2} pix) err_scale={3:.1f}'.format(self.grism.parent_file, self.grism.filter, xedge, err_scale))

        self.has_edge_mask = True
    def old_make_edge_mask(self, scale=3, force=False):
        """Make a mask for the edge of the grism FoV that isn't covered by the direct image

        Parameters
        ----------
        scale : float
            Scale factor to multiply to the mask before it's applied to the
            `self.grism.data['ERR']` array.

        force : bool
            Force apply the mask even if `self.has_edge_mask` is set
            indicating that the function has already been run.

        Returns
        -------
        Nothing, updates `self.grism.data['ERR']` in place.
        Sets `self.has_edge_mask = True`.
        """
        import scipy.ndimage as nd

        if (self.has_edge_mask) & (force is False):
            return True

        # Flat kernel covering the extent of the first-order ('A') beam
        kern = (np.arange(self.conf.conf['BEAMA'][1]) > self.conf.conf['BEAMA'][0])*1.
        kern /= kern.sum()

        # Pixels with no direct-image (or reference-image) coverage
        if self.direct['REF'] is not None:
            mask = self.direct['REF'] == 0
        else:
            mask = self.direct['SCI'] == 0

        # Smear the mask along the dispersion axis and inflate ERR there
        full_mask = nd.convolve(mask*1., kern.reshape((1, -1)),
                                origin=(0, -kern.size//2+20))

        self.grism.data['ERR'] *= np.exp(full_mask*scale)

        self.has_edge_mask = True
class BeamCutout(object):
    def __init__(self, flt=None, beam=None, conf=None,
                 get_slice_header=True, fits_file=None, scale=1.,
                 contam_sn_mask=[10, 3], min_mask=0.01, min_sens=0.08,
                 mask_resid=True):
        """Cutout spectral object from the full frame.

        Parameters
        ----------
        flt : `GrismFLT`
            Parent FLT frame.

        beam : `GrismDisperser`
            Object and spectral order to consider

        conf : `.grismconf.aXeConf`
            Pre-computed configuration file. If not specified will regenerate
            based on header parameters, which might be necessary for
            multiprocessing parallelization and pickling.

        get_slice_header : bool
            TBD

        fits_file : None or str
            Optional FITS file containing the beam information, rather than
            reading directly from a `GrismFLT` object with the `flt` and
            `beam` paremters. Load with `load_fits`.

        scale : float
            Scale factor assigned to `self.beam.scale`.

        contam_sn_mask : TBD

        min_mask : float
            Minimum factor relative to the maximum pixel value of the flat
            f-lambda model where the 2D cutout data are considered good.

        min_sens : float
            Minimum sensitivity relative to the maximum for a given grism
            above which pixels are included in the fit.

        mask_resid : bool
            Mask pixels with large residuals w.r.t. the flat f-lambda model
            where the model flux is low (applied in `_parse_from_data`).

        Attributes
        ----------
        grism, direct : `ImageData` (sliced)
            Cutouts of the grism and direct images.

        beam : `GrismDisperser`
            High-level tools for computing dispersed models of the object

        mask : array-like (bool)
            Basic mask where `grism` DQ > 0 | ERR == 0 | SCI == 0.

        fit_mask, DoF : array-like, int
            Additional mask, DoF is `fit_mask.sum()` representing the
            effective degrees of freedom for chi-squared.

        ivar : array-like
            Inverse variance array, taken from `grism` 1/ERR^2

        model, modelf : array-like
            2D and flattened versions of the object model array

        contam : array-like
            Contamination model

        scif : array_like
            Flattened version of `grism['SCI'] - contam`.

        flat_flam : array-like
            Flattened version of the flat-flambda object model

        poly_order : int
            Order of the polynomial model
        """
        self.background = 0.
        self.module = None

        # Initialize either from a saved FITS file or from the parent
        # FLT + disperser objects
        if fits_file is not None:
            self.load_fits(fits_file, conf)
        else:
            self.init_from_input(flt, beam, conf, get_slice_header)

        self.beam.scale = scale

        # Keep the masking parameters so masks can be regenerated later
        self._parse_params = {'contam_sn_mask':contam_sn_mask,
                              'min_mask':min_mask,
                              'min_sens':min_sens,
                              'mask_resid':mask_resid}

        # self.contam_sn_mask = contam_sn_mask
        # self.min_mask = min_mask
        # self.min_sens = min_sens
        # self.mask_resid = mask_resid

        self._parse_from_data(**self._parse_params)
    def _parse_from_data(self, contam_sn_mask=[10, 3], min_mask=0.01,
                         seg_ids=None, min_sens=0.08, mask_resid=True):
        """
        Build the masks and flattened fitting arrays from the cutout data.

        See parameter description for `~grizli.model.BeamCutout`.

        Parameters
        ----------
        seg_ids : list or None
            If provided, the flat f-lambda model is the sum over these
            segmentation IDs rather than just `self.beam.id`.
        """
        # bad pixels or problems with uncertainties
        self.mask = ((self.grism.data['DQ'] > 0) |
                     (self.grism.data['ERR'] == 0) |
                     (self.grism.data['SCI'] == 0))

        # Variance with masked pixels pushed to a huge value so that the
        # inverse variance is effectively zero there
        self.var = self.grism.data['ERR']**2
        self.var[self.mask] = 1.e30

        self.ivar = 1/self.var
        self.ivar[self.mask] = 0

        self.thumbs = {}

        #self.compute_model = self.beam.compute_model
        #self.model = self.beam.model
        self.modelf = self.beam.modelf  # .flatten()
        self.model = self.beam.modelf.reshape(self.beam.sh_beam)

        # Attributes
        self.size = self.modelf.size
        self.wave = self.beam.lam
        self.sh = self.beam.sh_beam

        # Initialize for fits
        if seg_ids is None:
            self.flat_flam = self.compute_model(in_place=False, is_cgs=True)
        else:
            # Accumulate flat models over the requested segmentation IDs
            for i, sid in enumerate(seg_ids):
                flat_i = self.compute_model(id=sid, in_place=False,
                                            is_cgs=True)
                if i == 0:
                    self.flat_flam = flat_i
                else:
                    self.flat_flam += flat_i

        # OK data where the 2D model has non-zero flux
        self.fit_mask = (~self.mask.flatten()) & (self.ivar.flatten() != 0)
        self.fit_mask &= (self.flat_flam > min_mask*self.flat_flam.max())
        #self.fit_mask &= (self.flat_flam > 3*self.contam.flatten())

        # Apply minimum sensitivity mask
        self.sens_mask = 1.

        if min_sens > 0:
            # Wavelengths where the sensitivity is below the threshold
            flux_min_sens = (self.beam.sensitivity <
                             min_sens*self.beam.sensitivity.max())*1.

            if flux_min_sens.sum() > 0:
                # Disperse a unit spectrum over the low-sensitivity range to
                # find the affected pixels of the 2D cutout
                test_spec = [self.beam.lam, flux_min_sens]
                if seg_ids is None:
                    flat_sens = self.compute_model(in_place=False,
                                                   is_cgs=True,
                                                   spectrum_1d=test_spec)
                else:
                    for i, sid in enumerate(seg_ids):
                        f_i = self.compute_model(id=sid, in_place=False,
                                                 is_cgs=True, spectrum_1d=test_spec)
                        if i == 0:
                            flat_sens = f_i
                        else:
                            flat_sens += f_i

                # self.sens_mask = flat_sens == 0
                # Make mask along columns
                is_masked = (flat_sens.reshape(self.sh) > 0).sum(axis=0)
                self.sens_mask = (np.dot(np.ones((self.sh[0], 1)), is_masked[None, :]) == 0).flatten()
                self.fit_mask &= self.sens_mask

        # Flat versions of sci/ivar arrays
        self.scif = (self.grism.data['SCI'] - self.contam).flatten()
        self.ivarf = self.ivar.flatten()
        self.wavef = np.dot(np.ones((self.sh[0], 1)), self.wave[None, :]).flatten()

        # Mask large residuals where throughput is low
        if mask_resid:
            resid = np.abs(self.scif - self.flat_flam)*np.sqrt(self.ivarf)
            bad_resid = (self.flat_flam < 0.05*self.flat_flam.max())
            bad_resid &= (resid > 5)
            self.bad_resid = bad_resid
            self.fit_mask *= ~bad_resid
        else:
            self.bad_resid = np.zeros_like(self.fit_mask)

        # Mask very contaminated
        contam_mask = ((self.contam*np.sqrt(self.ivar) > contam_sn_mask[0]) &
                       (self.model*np.sqrt(self.ivar) < contam_sn_mask[1]))
        #self.fit_mask *= ~contam_mask.flatten()
        self.contam_mask = ~nd.maximum_filter(contam_mask, size=5).flatten()
        self.poly_order = None
        # self.init_poly_coeffs(poly_order=1)
    def init_from_input(self, flt, beam, conf=None, get_slice_header=True):
        """Initialize from data objects

        Parameters
        ----------
        flt : `GrismFLT`
            Parent FLT frame.

        beam : `GrismDisperser`
            Object and spectral order to consider

        conf : `.grismconf.aXeConf`
            Pre-computed configuration file. If not specified will regenerate
            based on header parameters, which might be necessary for
            multiprocessing parallelization and pickling.

        get_slice_header : bool
            Get full header of the sliced data. Costs some overhead so can
            be skipped if full header information isn't required.

        Returns
        -------
        Loads attributes to `self`.
        """
        self.id = beam.id
        if conf is None:
            conf = grismconf.load_grism_config(flt.conf_file)

        # New disperser with copies (`*1`) of the input arrays so the
        # cutout doesn't share memory with the parent beam
        self.beam = GrismDisperser(id=beam.id, direct=beam.direct*1,
                                   segmentation=beam.seg*1, origin=beam.origin,
                                   pad=beam.pad, grow=beam.grow,
                                   beam=beam.beam, conf=conf, xcenter=beam.xcenter,
                                   ycenter=beam.ycenter, fwcpos=flt.grism.fwcpos,
                                   MW_EBV=flt.grism.MW_EBV)

        # Propagate ePSF parameters if the input beam has them
        if hasattr(beam, 'psf_params'):
            self.beam.x_init_epsf(psf_params=beam.psf_params, psf_filter=beam.psf_filter, yoff=beam.psf_yoff)

        if beam.spectrum_1d is None:
            self.compute_model() # spectrum_1d=beam.spectrum_1d)
        else:
            self.compute_model(spectrum_1d=beam.spectrum_1d,
                               is_cgs=beam.is_cgs)

        # Slices of the parent direct and grism frames for this object
        slx_thumb = slice(self.beam.origin[1],
                          self.beam.origin[1]+self.beam.sh[1])

        sly_thumb = slice(self.beam.origin[0],
                          self.beam.origin[0]+self.beam.sh[0])

        self.direct = flt.direct.get_slice(slx_thumb, sly_thumb,
                                           get_slice_header=get_slice_header)
        self.grism = flt.grism.get_slice(self.beam.slx_parent,
                                         self.beam.sly_parent,
                                         get_slice_header=get_slice_header)

        # Contamination is the full-frame model minus this object's model
        self.contam = flt.model[self.beam.sly_parent, self.beam.slx_parent]*1
        if self.beam.id in flt.object_dispersers:
            self.contam -= self.beam.model
    def load_fits(self, file, conf=None, direct_extn=1, grism_extn=2):
        """Initialize from FITS file

        Parameters
        ----------
        file : str or `~astropy.io.fits.HDUList`
            FITS file to read (as output from `write_fits`).  An already
            open HDUList is used as-is and not closed here.

        conf : `.grismconf.aXeConf`, optional
            Pre-computed configuration.  If None, locate and load a
            configuration based on the header parameters of the file.

        direct_extn, grism_extn : int
            `sci_extn` values of the direct and grism cutouts in `file`.

        Returns
        -------
        Loads attributes to `self`.
        """
        if isinstance(file, str):
            hdu = pyfits.open(file)
            file_is_open = True
        else:
            # Assume an already-open HDUList; caller retains ownership
            file_is_open = False
            hdu = file

        self.direct = ImageData(hdulist=hdu, sci_extn=direct_extn)
        self.grism = ImageData(hdulist=hdu, sci_extn=grism_extn)

        self.contam = hdu['CONTAM'].data*1
        try:
            self.modelf = hdu['MODEL'].data.flatten().astype(np.float32)*1
        except:
            # No MODEL extension saved (e.g., stripped file): empty model
            self.modelf = self.grism['SCI'].flatten().astype(np.float32)*0.

        # Direct thumbnail: prefer the blotted reference if present
        if ('REF', 1) in hdu:
            direct = hdu['REF', 1].data*1
        else:
            direct = hdu['SCI', 1].data*1

        h0 = hdu[0].header

        # if 'DFILTER' in self.grism.header:
        #     direct_filter = self.grism.header['DFILTER']
        # else:
        #     direct_filter = self.direct.filter
        # #
        # Determine the direct-image filter for the configuration lookup
        if 'DFILTER' in self.grism.header:
            direct_filter = self.grism.header['DFILTER']
        elif self.grism.instrument in ['NIRCAM', 'NIRISS']:
            direct_filter = self.grism.pupil
        else:
            direct_filter = self.direct.filter

        if conf is None:
            conf_args = dict(instrume=self.grism.instrument,
                             filter=direct_filter,
                             grism=self.grism.filter,
                             module=self.grism.module,
                             chip=self.grism.ccdchip)

            self.conf_file = grismconf.get_config_filename(**conf_args)
            conf = grismconf.load_grism_config(self.conf_file)

        if 'GROW' in self.grism.header:
            grow = self.grism.header['GROW']
        else:
            grow = 1

        if 'MW_EBV' in h0:
            self.grism.MW_EBV = h0['MW_EBV']
        else:
            self.grism.MW_EBV = 0

        # Filter wheel position (NIRISS); 0 or '' means undefined
        self.grism.fwcpos = h0['FWCPOS']
        if (self.grism.fwcpos == 0) | (self.grism.fwcpos == ''):
            self.grism.fwcpos = None

        if 'TYOFFSET' in h0:
            yoffset = h0['TYOFFSET']
        else:
            yoffset = 0.

        self.beam = GrismDisperser(id=h0['ID'], direct=direct,
                                   segmentation=hdu['SEG'].data*1,
                                   origin=self.direct.origin,
                                   pad=h0['PAD'],
                                   grow=grow, beam=h0['BEAM'],
                                   xcenter=h0['XCENTER'],
                                   ycenter=h0['YCENTER'],
                                   conf=conf, fwcpos=self.grism.fwcpos,
                                   MW_EBV=self.grism.MW_EBV,
                                   yoffset=yoffset)

        self.grism.parent_file = h0['GPARENT']
        self.direct.parent_file = h0['DPARENT']

        self.id = h0['ID']
        self.modelf = self.beam.modelf

        # Cleanup
        if file_is_open:
            hdu.close()
@property
def trace_table(self):
"""
Table of trace parameters. Trace is unit-indexed.
"""
dtype = np.float32
tab = utils.GTable()
tab.meta['CONFFILE'] = os.path.basename(self.beam.conf.conf_file)
tab['wavelength'] = np.cast[dtype](self.beam.lam*u.Angstrom)
tab['trace'] = np.cast[dtype](self.beam.ytrace + self.beam.sh_beam[0]/2 - self.beam.ycenter)
sens_units = u.erg/u.second/u.cm**2/u.Angstrom/(u.electron/u.second)
tab['sensitivity'] = np.cast[dtype](self.beam.sensitivity*sens_units)
return tab
def write_fits(self, root='beam_', overwrite=True, strip=False, include_model=True, get_hdu=False, get_trace_table=True):
"""Write attributes and data to FITS file
Parameters
----------
root : str
Output filename will be
'{root}_{self.id}.{self.grism.filter}.{self.beam}.fits'
with `self.id` zero-padded with 5 digits.
overwrite : bool
Overwrite existing file.
strip : bool
Strip out extensions that aren't totally necessary for
regenerating the `ImageData` object. That is, strip out the
direct image `SCI`, `ERR`, and `DQ` extensions if `REF` is
defined. Also strip out `MODEL`.
get_hdu : bool
Return `~astropy.io.fits.HDUList` rather than writing a file.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
If `get_hdu` is True
outfile : str
If `get_hdu` is False, return the output filename.
"""
h0 = pyfits.Header()
h0['ID'] = self.beam.id, 'Object ID'
h0['PAD'] = self.beam.pad, 'Padding of input image'
h0['BEAM'] = self.beam.beam, 'Grism order ("beam")'
h0['XCENTER'] = (self.beam.xcenter,
'Offset of centroid wrt thumb center')
h0['YCENTER'] = (self.beam.ycenter,
'Offset of centroid wrt thumb center')
if hasattr(self.beam, 'yoffset'):
h0['TYOFFSET'] = (self.beam.yoffset,
'Cross dispersion offset of the trace')
h0['GPARENT'] = (self.grism.parent_file,
'Parent grism file')
h0['DPARENT'] = (self.direct.parent_file,
'Parent direct file')
h0['FWCPOS'] = (self.grism.fwcpos,
'Filter wheel position (NIRISS)')
h0['MW_EBV'] = (self.grism.MW_EBV,
'Milky Way exctinction E(B-V)')
hdu = pyfits.HDUList([pyfits.PrimaryHDU(header=h0)])
hdu.extend(self.direct.get_HDUList(extver=1))
hdu.append(pyfits.ImageHDU(data=np.cast[np.int32](self.beam.seg),
header=hdu[-1].header, name='SEG'))
# 2D grism spectra
grism_hdu = self.grism.get_HDUList(extver=2)
#######
# 2D Spectroscopic WCS
hdu2d, wcs2d = self.get_2d_wcs()
# Get available 'WCSNAME'+key
for key in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
if 'WCSNAME{0}'.format(key) not in self.grism.header:
break
else:
wcsname = self.grism.header['WCSNAME{0}'.format(key)]
if wcsname == 'BeamLinear2D':
break
h2d = wcs2d.to_header(key=key)
for ext in grism_hdu:
for k in h2d:
ext.header[k] = h2d[k], h2d.comments[k]
####
hdu.extend(grism_hdu)
hdu.append(pyfits.ImageHDU(data=self.contam, header=hdu[-1].header,
name='CONTAM'))
if include_model:
hdu.append(pyfits.ImageHDU(data=np.cast[np.float32](self.model),
header=hdu[-1].header, name='MODEL'))
if get_trace_table:
trace_hdu = pyfits.table_to_hdu(self.trace_table)
trace_hdu.header['EXTNAME'] = 'TRACE'
trace_hdu.header['EXTVER'] = 2
hdu.append(trace_hdu)
if strip:
# Blotted reference is attached, don't need individual direct
# arrays.
if self.direct['REF'] is not None:
for ext in [('SCI', 1), ('ERR', 1), ('DQ', 1)]:
if ext in hdu:
ix = hdu.index_of(ext)
p = hdu.pop(ix)
# This can be regenerated
# if strip & 2:
# ix = hdu.index_of('MODEL')
# p = hdu.pop(ix)
# Put Primary keywords in first extension
SKIP_KEYS = ['EXTEND', 'SIMPLE']
for key in h0:
if key not in SKIP_KEYS:
hdu[1].header[key] = (h0[key], h0.comments[key])
hdu['SCI', 2].header[key] = (h0[key], h0.comments[key])
if get_hdu:
return hdu
outfile = '{0}_{1:05d}.{2}.{3}.fits'.format(root, self.beam.id,
self.grism.filter.lower(),
self.beam.beam)
hdu.writeto(outfile, overwrite=overwrite)
return outfile
def compute_model(self, use_psf=True, **kwargs):
"""Link to `self.beam.compute_model`
`self.beam` is a `GrismDisperser` object.
"""
if use_psf & hasattr(self.beam, 'psf'):
result = self.beam.compute_model_psf(**kwargs)
else:
result = self.beam.compute_model(**kwargs)
reset_inplace = True
if 'in_place' in kwargs:
reset_inplace = kwargs['in_place']
if reset_inplace:
self.modelf = self.beam.modelf # .flatten()
self.model = self.beam.modelf.reshape(self.beam.sh_beam)
return result
    def get_wavelength_wcs(self, wavelength=1.3e4):
        """Compute *celestial* WCS of the 2D spectrum array for a specified central wavelength

        This essentially recenters the celestial SIP WCS such that the
        desired wavelength was at the object position as observed in the
        direct image (which has associated geometric distortions etc).

        Parameters
        ----------
        wavelength : float
            Central wavelength to use for derived WCS.

        Returns
        -------
        header : `~astropy.io.fits.Header`
            FITS header

        wcs : `~astropy.wcs.WCS`
            Derived celestial WCS
        """
        wcs = self.grism.wcs.deepcopy()

        xarr = np.arange(self.beam.lam_beam.shape[0])

        # Trace properties at desired wavelength
        dx = np.interp(wavelength, self.beam.lam_beam, xarr)
        dy = np.interp(wavelength, self.beam.lam_beam, self.beam.ytrace_beam)

        # Local dispersion (delta-lambda per pixel) at `wavelength`
        dl = np.interp(wavelength, self.beam.lam_beam[1:],
                       np.diff(self.beam.lam_beam))

        ysens = np.interp(wavelength, self.beam.lam_beam,
                          self.beam.sensitivity_beam)

        # Update CRPIX
        dc = 0  # python array center to WCS pixel center

        # Shift the reference pixel of both the SIP and core WCS
        for wcs_ext in [wcs.sip, wcs.wcs]:
            if wcs_ext is None:
                continue
            else:
                cr = wcs_ext.crpix

            cr[0] += dx + self.beam.x0[1] + self.beam.dxfull[0] + dc
            cr[1] += dy + dc

        # Distortion lookup tables reference `crval` rather than `crpix`
        for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:
            if wcs_ext is None:
                continue
            else:
                cr = wcs_ext.crval

            cr[0] += dx + self.beam.sh[0]/2 + self.beam.dxfull[0] + dc
            cr[1] += dy + dc

        # Make SIP CRPIX match CRPIX
        # if wcs.sip is not None:
        #     for i in [0,1]:
        #         wcs.sip.crpix[i] = wcs.wcs.crpix[i]

        for wcs_ext in [wcs.sip]:
            if wcs_ext is not None:
                for i in [0, 1]:
                    wcs_ext.crpix[i] = wcs.wcs.crpix[i]

        # WCS header
        header = wcs.to_header(relax=True)
        for key in header:
            if key.startswith('PC'):
                header.rename_keyword(key, key.replace('PC', 'CD'))

        header['LONPOLE'] = 180.
        header['RADESYS'] = 'ICRS'
        header['LTV1'] = (0.0, 'offset in X to subsection start')
        header['LTV2'] = (0.0, 'offset in Y to subsection start')
        header['LTM1_1'] = (1.0, 'reciprocal of sampling rate in X')
        header['LTM2_2'] = (1.0, 'reciprocal of sampling rate in X')
        header['INVSENS'] = (ysens, 'inverse sensitivity, 10**-17 erg/s/cm2')
        header['DLDP'] = (dl, 'delta wavelength per pixel')

        return header, wcs
    def get_2d_wcs(self, data=None, key=None):
        """Get simplified WCS of the 2D spectrum

        Parameters
        ----------
        data : array-like
            Put this data in the output HDU rather than empty zeros

        key : None
            Key for WCS extension, passed to `~astropy.wcs.WCS.to_header`.
            NOTE(review): `key` is accepted but not used in this
            implementation — confirm whether it should be forwarded.

        Returns
        -------
        hdu : `~astropy.io.fits.ImageHDU`
            Image HDU with header and data properties.

        wcs : `~astropy.wcs.WCS`
            WCS appropriate for the 2D spectrum with spatial (y) and spectral
            (x) axes.

            .. note::
                Assumes linear dispersion and trace functions!
        """
        h = pyfits.Header()
        h['WCSNAME'] = 'BeamLinear2D'

        # Reference pixel at the object position within the cutout
        h['CRPIX1'] = self.beam.sh_beam[0]/2 - self.beam.xcenter
        h['CRPIX2'] = self.beam.sh_beam[0]/2 - self.beam.ycenter

        # Wavelength, A
        h['CNAME1'] = 'Wave-Angstrom'
        h['CTYPE1'] = 'WAVE'
        #h['CUNIT1'] = 'Angstrom'

        # Linear dispersion from the first two wavelength samples
        h['CRVAL1'] = self.beam.lam_beam[0]
        h['CD1_1'] = self.beam.lam_beam[1] - self.beam.lam_beam[0]
        h['CD1_2'] = 0.

        # Linear trace
        h['CNAME2'] = 'Trace'
        h['CTYPE2'] = 'LINEAR'
        h['CRVAL2'] = -1*self.beam.ytrace_beam[0]
        h['CD2_2'] = 1.
        h['CD2_1'] = -(self.beam.ytrace_beam[1] - self.beam.ytrace_beam[0])

        if data is None:
            data = np.zeros(self.beam.sh_beam, dtype=np.float32)

        hdu = pyfits.ImageHDU(data=data, header=h)
        wcs = pywcs.WCS(hdu.header)

        #wcs.pscale = np.sqrt(wcs.wcs.cd[0,0]**2 + wcs.wcs.cd[1,0]**2)*3600.
        wcs.pscale = utils.get_wcs_pscale(wcs)

        return hdu, wcs
    def full_2d_wcs(self, data=None):
        """Get trace WCS of the 2D spectrum

        Parameters
        ----------
        data : array-like
            Put this data in the output HDU rather than empty zeros

        Returns
        -------
        hdu : `~astropy.io.fits.ImageHDU`
            Image HDU with header and data properties.

        wcs : `~astropy.wcs.WCS`
            WCS appropriate for the 2D spectrum with spatial (y) and spectral
            (x) axes.

            .. note::
                Assumes linear dispersion and trace functions!
        """
        h = pyfits.Header()
        h['CRPIX1'] = self.beam.sh_beam[0]/2 - self.beam.xcenter
        h['CRPIX2'] = self.beam.sh_beam[0]/2 - self.beam.ycenter

        # Wavelength axis in microns (lam_beam is in Angstroms)
        h['CRVAL1'] = self.beam.lam_beam[0]/1.e4
        h['CD1_1'] = (self.beam.lam_beam[1] - self.beam.lam_beam[0])/1.e4
        h['CD1_2'] = 0.

        h['CRVAL2'] = -1*self.beam.ytrace_beam[0]
        h['CD2_2'] = 1.
        h['CD2_1'] = -(self.beam.ytrace_beam[1] - self.beam.ytrace_beam[0])

        # TAN-SIP celestial axis types so the quadratic dispersion / trace
        # terms can be carried in the SIP coefficients below
        h['CTYPE1'] = 'RA---TAN-SIP'
        h['CUNIT1'] = 'mas'
        h['CTYPE2'] = 'DEC--TAN-SIP'
        h['CUNIT2'] = 'mas'

        #wcs_header = grizli.utils.to_header(self.grism.wcs)

        # Quadratic fits of wavelength and trace position vs. pixel
        x = np.arange(len(self.beam.lam_beam))
        c = np.polyfit(x, self.beam.lam_beam/1.e4, 2)
        #c = np.polyfit((self.beam.lam_beam-self.beam.lam_beam[0])/1.e4, x/h['CD1_1'], 2)

        ct = np.polyfit(x, self.beam.ytrace_beam, 2)

        h['A_ORDER'] = 2
        h['B_ORDER'] = 2

        h['A_0_2'] = 0.
        h['A_1_2'] = 0.
        h['A_2_2'] = 0.
        h['A_2_1'] = 0.
        h['A_2_0'] = c[0]  # /c[1]

        h['CD1_1'] = c[1]

        h['B_0_2'] = 0.
        h['B_1_2'] = 0.
        h['B_2_2'] = 0.
        h['B_2_1'] = 0.
        if ct[1] != 0:
            h['B_2_0'] = ct[0]  # /ct[1]
        else:
            h['B_2_0'] = 0

        #h['B_2_0'] = 0

        if data is None:
            data = np.zeros(self.beam.sh_beam, dtype=np.float32)

        hdu = pyfits.ImageHDU(data=data, header=h)
        wcs = pywcs.WCS(hdu.header)

        # xf = x + h['CRPIX1']-1
        # coo = np.array([xf, xf*0])
        # tr = wcs.all_pix2world(coo.T, 0)

        #wcs.pscale = np.sqrt(wcs.wcs.cd[0,0]**2 + wcs.wcs.cd[1,0]**2)*3600.
        wcs.pscale = utils.get_wcs_pscale(wcs)

        return hdu, wcs
def get_sky_coords(self):
    """Get WCS coordinates of the center of the direct image

    Returns
    -------
    ra, dec : float
        Center coordinates of the beam thumbnail in decimal degrees
    """
    # `sh` is the (ny, nx) thumbnail shape; reverse the *inner* sequence
    # so the pixel coordinate is in (x, y) order for `all_pix2world`.
    # (The previous code reversed the outer single-element list,
    # `[self.beam.sh][::-1]`, which is a no-op and swapped x/y for
    # non-square thumbnails.)
    pix_center = np.array([self.beam.sh[::-1]])/2.
    pix_center -= np.array([self.beam.xcenter, self.beam.ycenter])
    if self.direct.wcs.sip is not None:
        # Keep the SIP reference pixel in sync with the core WCS CRPIX
        for i in range(2):
            self.direct.wcs.sip.crpix[i] = self.direct.wcs.wcs.crpix[i]
    ra, dec = self.direct.wcs.all_pix2world(pix_center, 1)[0]
    return ra, dec
def get_dispersion_PA(self, decimals=0):
    """Compute exact PA of the dispersion axis, including tilt of the
    trace and the FLT WCS

    Parameters
    ----------
    decimals : int or None
        Number of decimal places to round to, passed to `~numpy.round`.
        If None, then don't round.

    Returns
    -------
    dispersion_PA : float
        PA (angle East of North) of the dispersion axis.
    """
    import astropy.units as u
    from astropy.coordinates import Angle

    # Tilt of the first-order ("A") trace relative to detector rows,
    # evaluated near the center of the frame
    beam_x = self.beam.conf.conf['BEAMA']
    trace_dy, trace_lam = self.beam.conf.get_beam_trace(x=507, y=507,
                                                        dx=beam_x,
                                                        beam='A')
    tilt_deg = np.arctan2(trace_dy[1]-trace_dy[0],
                          beam_x[1]-beam_x[0])/np.pi*180

    # Orientation of the detector +x axis on the sky, from the
    # (distorted) WCS evaluated at the reference pixel and one pixel over
    crpix = self.direct.wcs.wcs.crpix
    ra_pair, dec_pair = self.direct.wcs.all_pix2world(
        [crpix[0], crpix[0]+1], [crpix[1], crpix[1]], 1)
    wcs_deg = np.arctan2(np.diff(ra_pair)*np.cos(dec_pair[0]/180*np.pi),
                         np.diff(dec_pair))[0]/np.pi*180

    # Total PA, wrapped into [0, 360)
    pa = Angle((tilt_deg + wcs_deg)*u.deg).wrap_at(360*u.deg)
    dispersion_PA = pa.value
    if decimals is not None:
        dispersion_PA = np.round(dispersion_PA, decimals=decimals)

    return float(dispersion_PA)
def init_epsf(self, center=None, tol=1.e-3, yoff=0., skip=1., flat_sensitivity=False, psf_params=None, N=4, get_extended=False, only_centering=True):
    """Initialize ePSF fitting for point sources

    Builds an inverse-variance weight map from the direct image, fits
    (or adopts) effective-PSF parameters, and initializes the PSF model
    on the beam before re-parsing the cutout data.

    Parameters
    ----------
    center : tuple or None
        Initial center guess passed to `EffectivePSF.fit_ePSF`.
    tol : float
        Fit tolerance passed to `EffectivePSF.fit_ePSF`.
    yoff, skip : float
        Trace offset and sampling passed to `beam.x_init_epsf`.
    flat_sensitivity : bool
        NOTE(review): accepted but not forwarded -- `x_init_epsf` is
        called with `flat_sensitivity=False` regardless of this value;
        confirm whether that is intentional.
    psf_params : array-like or None
        If provided, adopt these PSF parameters instead of fitting.
    N : int
        Fit region size passed to `EffectivePSF.fit_ePSF`.
    get_extended, only_centering : bool
        Forwarded to the ePSF fit / initialization routines.
    """
    import scipy.sparse  # used by the commented-out A_psf construction below
    EPSF = utils.EffectivePSF()
    # Inverse variance, zeroing non-finite values, DQ-flagged pixels and
    # pixels outside this object's segmentation region
    ivar = 1/self.direct['ERR']**2
    ivar[~np.isfinite(ivar)] = 0
    ivar[self.direct['DQ'] > 0] = 0
    ivar[self.beam.seg != self.id] = 0
    # Fall back to uniform weights if everything was masked
    if ivar.max() == 0:
        ivar = ivar+1.
    origin = np.array(self.direct.origin) - np.array(self.direct.pad)
    if psf_params is None:
        # Fit the ePSF parameters directly from the science image
        self.beam.psf_ivar = ivar*1
        self.beam.psf_sci = self.direct['SCI']*1
        self.psf_params = EPSF.fit_ePSF(self.direct['SCI'],
                                        ivar=ivar,
                                        center=center, tol=tol,
                                        N=N, origin=origin,
                                        filter=self.direct.filter,
                                        get_extended=get_extended,
                                        only_centering=only_centering)
    else:
        # Adopt externally-supplied PSF parameters
        self.beam.psf_ivar = ivar*1
        self.beam.psf_sci = self.direct['SCI']*1
        self.psf_params = psf_params
    # Initialize the PSF-based model on the beam and re-parse the data
    self.beam.x_init_epsf(flat_sensitivity=False, psf_params=self.psf_params, psf_filter=self.direct.filter, yoff=yoff, skip=skip, get_extended=get_extended)
    self._parse_from_data(**self._parse_params)
    return None
# self.psf = EPSF.get_ePSF(self.psf_params, origin=origin, shape=self.beam.sh, filter=self.direct.filter)
#
# self.psf_resid = self.direct['SCI'] - self.psf
#
# y0, x0 = np.array(self.beam.sh)/2.-1
#
# # Center in detector coords
# xd = self.psf_params[1] + self.direct.origin[1] - self.direct.pad + x0
# yd = self.psf_params[2] + self.direct.origin[0] - self.direct.pad + y0
#
# # Get wavelength array
# psf_xy_lam = []
# for i, filter in enumerate(['F105W', 'F125W', 'F160W']):
# psf_xy_lam.append(EPSF.get_at_position(x=xd, y=yd, filter=filter))
#
# filt_ix = np.arange(3)
# filt_lam = np.array([1.0551, 1.2486, 1.5369])*1.e4
#
# yp_beam, xp_beam = np.indices(self.beam.sh_beam)
# #skip = 1
# xarr = np.arange(0,self.beam.lam_beam.shape[0], skip)
# xarr = xarr[xarr <= self.beam.lam_beam.shape[0]-1]
# xbeam = np.arange(self.beam.lam_beam.shape[0])*1.
#
# #yoff = 0 #-0.15
# psf_model = self.model*0.
# A_psf = []
# lam_psf = []
#
# lam_offset = self.beam.sh[1]/2 - self.psf_params[1] - 1
# self.lam_offset = lam_offset
#
# for xi in xarr:
# yi = np.interp(xi, xbeam, self.beam.ytrace_beam)
# li = np.interp(xi, xbeam, self.beam.lam_beam)
# dx = xp_beam-self.psf_params[1]-xi-x0
# dy = yp_beam-self.psf_params[2]-yi+yoff-y0
#
# # wavelength-dependent
# ii = np.interp(li, filt_lam, filt_ix, left=-1, right=10)
# if ii == -1:
# psf_xy_i = psf_xy_lam[0]*1
# elif ii == 10:
# psf_xy_i = psf_xy_lam[2]*1
# else:
# ni = int(ii)
# f = 1-(li-filt_lam[ni])/(filt_lam[ni+1]-filt_lam[ni])
# psf_xy_i = f*psf_xy_lam[ni] + (1-f)*psf_xy_lam[ni+1]
#
# psf = EPSF.eval_ePSF(psf_xy_i, dx, dy)*self.psf_params[0]
#
# A_psf.append(psf.flatten())
# lam_psf.append(li)
#
# # Sensitivity
# self.lam_psf = np.array(lam_psf)
# if flat_sensitivity:
# s_i_scale = np.abs(np.gradient(self.lam_psf))*self.direct.photflam
# else:
# sens = self.beam.conf.sens[self.beam.beam]
# so = np.argsort(self.lam_psf)
# s_i = interp.interp_conserve_c(self.lam_psf[so], sens['WAVELENGTH'], sens['SENSITIVITY'])*np.gradient(self.lam_psf[so])*self.direct.photflam
# s_i_scale = s_i*0.
# s_i_scale[so] = s_i
#
# self.A_psf = scipy.sparse.csr_matrix(np.array(A_psf).T*s_i_scale)
# def xcompute_model_psf(self, id=None, spectrum_1d=None, in_place=True, is_cgs=True):
# if spectrum_1d is None:
# model = np.array(self.A_psf.sum(axis=1))
# model = model.reshape(self.beam.sh_beam)
# else:
# dx = np.diff(self.lam_psf)[0]
# if dx < 0:
# coeffs = interp.interp_conserve_c(self.lam_psf[::-1],
# spectrum_1d[0],
# spectrum_1d[1])[::-1]
# else:
# coeffs = interp.interp_conserve_c(self.lam_psf,
# spectrum_1d[0],
# spectrum_1d[1])
#
#
# model = self.A_psf.dot(coeffs).reshape(self.beam.sh_beam)
#
# if in_place:
# self.model = model
# self.beam.model = self.model
# return True
# else:
# return model.flatten()
# Below here will be cut out after verifying that the demos
# can be run with the new fitting tools
def init_poly_coeffs(self, poly_order=1, fit_background=True):
    """Initialize arrays for polynomial fits to the spectrum

    Provides capabilities of fitting n-order polynomials to observed
    spectra rather than galaxy/stellar templates.

    Parameters
    ----------
    poly_order : int
        Order of the polynomial
    fit_background : bool
        Compute additional arrays for allowing the background to be fit
        along with the polynomial coefficients.

    Returns
    -------
    Polynomial parameters stored in attributes `y_poly`, `n_poly`, ...
    """
    # Nothing to do if the arrays were already built for this order
    if poly_order == self.poly_order:
        return None
    self.poly_order = poly_order

    # Normalized x pixel coordinate in [-1, 1) across the flattened 2D beam
    _ypix, xpix = np.indices(self.beam.sh_beam)
    nx = self.beam.sh_beam[1]
    half = nx/2.
    self.xpf = (xpix.flatten() - half)
    self.xpf /= half

    # Polynomial continuum terms: x**k times the flat-spectrum model,
    # optionally preceded by a constant background term
    poly_terms = [self.xpf**order*self.flat_flam
                  for order in range(poly_order+1)]
    if fit_background:
        self.n_bg = 1
        self.A_poly = [self.flat_flam*0+1] + poly_terms
    else:
        self.n_bg = 0
        self.A_poly = poly_terms

    # 1D polynomial basis on the wavelength (x) grid for generating
    # the polynomial "template"
    xnorm = (np.arange(nx) - half) / half
    self.y_poly = np.array([xnorm**order
                            for order in range(poly_order+1)])
    self.n_poly = self.y_poly.shape[0]
    self.n_simp = self.n_poly + self.n_bg
    self.DoF = self.fit_mask.sum()
# def load_templates(self, fwhm=400, line_complexes=True):
# """TBD
#
# ***
# These below will probably be cut since they're all now implemented
# in more detail in multifit.py. Need to update demos before
# taking them out completely.
# ***
#
# """
# # templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed1_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed2_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed4_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed5_nolines.dat',
# # 'templates/EAZY_v1.0_lines/eazy_v1.0_sed6_nolines.dat',
# # 'templates/cvd12_t11_solar_Chabrier.extend.dat',
# # 'templates/dobos11/bc03_pr_ch_z02_ltau07.0_age09.2_av2.5.dat']
#
# templates = ['templates/EAZY_v1.0_lines/eazy_v1.0_sed3_nolines.dat',
# 'templates/cvd12_t11_solar_Chabrier.extend.dat']
#
# temp_list = OrderedDict()
# for temp in templates:
# data = np.loadtxt(GRIZLI_PATH + '/' + temp, unpack=True)
# scl = np.interp(5500., data[0], data[1])
# name = os.path.basename(temp)
# temp_list[name] = utils.SpectrumTemplate(wave=data[0],
# flux=data[1]/scl)
# #plt.plot(temp_list[-1].wave, temp_list[-1].flux, label=temp, alpha=0.5)
#
# line_wavelengths = {} ; line_ratios = {}
# line_wavelengths['Ha'] = [6564.61]; line_ratios['Ha'] = [1.]
# line_wavelengths['Hb'] = [4862.68]; line_ratios['Hb'] = [1.]
# line_wavelengths['Hg'] = [4341.68]; line_ratios['Hg'] = [1.]
# line_wavelengths['Hd'] = [4102.892]; line_ratios['Hd'] = [1.]
# line_wavelengths['OIIIx'] = [4364.436]; line_ratios['OIIIx'] = [1.]
# line_wavelengths['OIII'] = [5008.240, 4960.295]; line_ratios['OIII'] = [2.98, 1]
# line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]; line_ratios['OIII+Hb'] = [2.98, 1, 3.98/8.]
#
# line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]; line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]
#
# line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]
# line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]
#
# line_wavelengths['OII'] = [3729.875]; line_ratios['OII'] = [1]
# line_wavelengths['OI'] = [6302.046]; line_ratios['OI'] = [1]
#
# line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]; line_ratios['Ha+SII'] = [1., 1./10, 1./10]
# line_wavelengths['SII'] = [6718.29, 6732.67]; line_ratios['SII'] = [1., 1.]
#
# if line_complexes:
# #line_list = ['Ha+SII', 'OIII+Hb+Ha', 'OII']
# line_list = ['Ha+SII', 'OIII+Hb', 'OII']
# else:
# line_list = ['Ha', 'SII', 'OIII', 'Hb', 'OII']
# #line_list = ['Ha', 'SII']
#
# for line in line_list:
# scl = line_ratios[line]/np.sum(line_ratios[line])
# for i in range(len(scl)):
# line_i = utils.SpectrumTemplate(wave=line_wavelengths[line][i],
# flux=None, fwhm=fwhm, velocity=True)
#
# if i == 0:
# line_temp = line_i*scl[i]
# else:
# line_temp = line_temp + line_i*scl[i]
#
# temp_list['line {0}'.format(line)] = line_temp
#
# return temp_list
#
# def fit_at_z(self, z=0., templates={}, fitter='lstsq', poly_order=3):
# """TBD
# """
# import copy
#
# import sklearn.linear_model
# import numpy.linalg
#
# self.init_poly_coeffs(poly_order=poly_order)
#
# NTEMP = len(self.A_poly)
# A_list = copy.copy(self.A_poly)
# ok_temp = np.ones(NTEMP+len(templates), dtype=bool)
#
# for i, key in enumerate(templates.keys()):
# NTEMP += 1
# temp = templates[key].zscale(z, 1.)
# spectrum_1d = [temp.wave, temp.flux]
#
# if ((temp.wave[0] > self.beam.lam_beam[-1]) |
# (temp.wave[-1] < self.beam.lam_beam[0])):
#
# A_list.append(self.flat_flam*1)
# ok_temp[NTEMP-1] = False
# #print 'skip TEMP: %d, %s' %(i, key)
# continue
# else:
# pass
# #print 'TEMP: %d' %(i)
#
# temp_model = self.compute_model(spectrum_1d=spectrum_1d,
# in_place=False)
#
# ### Test that model spectrum has non-zero pixel values
# #print 'TEMP: %d, %.3f' %(i, temp_model[self.fit_mask].max()/temp_model.max())
# if temp_model[self.fit_mask].max()/temp_model.max() < 0.2:
# #print 'skipx TEMP: %d, %s' %(i, key)
# ok_temp[NTEMP-1] = False
#
# A_list.append(temp_model)
#
# A = np.vstack(A_list).T
# out_coeffs = np.zeros(NTEMP)
#
# ### LSTSQ coefficients
# if fitter == 'lstsq':
# out = numpy.linalg.lstsq(A[self.fit_mask, :][:, ok_temp],
# self.scif[self.fit_mask])
# lstsq_coeff, residuals, rank, s = out
# coeffs = lstsq_coeff
# else:
# clf = sklearn.linear_model.LinearRegression()
# status = clf.fit(A[self.fit_mask, :][:, ok_temp],
# self.scif[self.fit_mask])
# coeffs = clf.coef_
#
# out_coeffs[ok_temp] = coeffs
# model = np.dot(A, out_coeffs)
# model_2d = model.reshape(self.beam.sh_beam)
#
# chi2 = np.sum(((self.scif - model)**2*self.ivarf)[self.fit_mask])
#
# return A, out_coeffs, chi2, model_2d
#
# def fit_redshift(self, prior=None, poly_order=1, fwhm=500,
# make_figure=True, zr=None, dz=None, verbose=True):
# """TBD
# """
# # if False:
# # reload(grizlidev.utils); utils = grizlidev.utils
# # reload(grizlidev.utils_c); reload(grizlidev.model);
# # reload(grizlidev.grismconf); reload(grizlidev.utils); reload(grizlidev.multifit); reload(grizlidev); reload(grizli)
# #
# # beams = []
# # if id in flt.object_dispersers:
# # b = flt.object_dispersers[id]['A']
# # beam = grizli.model.BeamCutout(flt, b, conf=flt.conf)
# # #print beam.grism.pad, beam.beam.grow
# # beams.append(beam)
# # else:
# # print flt.grism.parent_file, 'ID %d not found' %(id)
# #
# # #plt.imshow(beam.beam.direct*(beam.beam.seg == id), interpolation='Nearest', origin='lower', cmap='viridis_r')
# # self = beam
# #
# # #poly_order = 3
#
# if self.grism.filter == 'G102':
# if zr is None:
# zr = [0.78e4/6563.-1, 1.2e4/5007.-1]
# if dz is None:
# dz = [0.001, 0.0005]
#
# if self.grism.filter == 'G141':
# if zr is None:
# zr = [1.1e4/6563.-1, 1.65e4/5007.-1]
# if dz is None:
# dz = [0.003, 0.0005]
#
# zgrid = utils.log_zgrid(zr, dz=dz[0])
# NZ = len(zgrid)
#
# templates = self.load_templates(fwhm=fwhm)
# NTEMP = len(templates)
#
# out = self.fit_at_z(z=0., templates=templates, fitter='lstsq',
# poly_order=poly_order)
#
# A, coeffs, chi2, model_2d = out
#
# chi2 = np.zeros(NZ)
# coeffs = np.zeros((NZ, coeffs.shape[0]))
#
# for i in range(NZ):
# out = self.fit_at_z(z=zgrid[i], templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs[i,:], chi2[i], model_2d = out
# if verbose:
# print(utils.NO_NEWLINE + '{0:.4f} {1:9.1f}'.format(zgrid[i], chi2[i]))
#
# # peaks
# import peakutils
# chi2nu = (chi2.min()-chi2)/self.DoF
# indexes = peakutils.indexes((chi2nu+0.01)*(chi2nu > -0.004), thres=0.003, min_dist=20)
# num_peaks = len(indexes)
# # plt.plot(zgrid, (chi2-chi2.min())/ self.DoF)
# # plt.scatter(zgrid[indexes], (chi2-chi2.min())[indexes]/ self.DoF, color='r')
#
#
# ### zoom
# if ((chi2.max()-chi2.min())/self.DoF > 0.01) & (num_peaks < 5):
# threshold = 0.01
# else:
# threshold = 0.001
#
# zgrid_zoom = utils.zoom_zgrid(zgrid, chi2/self.DoF, threshold=threshold, factor=10)
# NZOOM = len(zgrid_zoom)
#
# chi2_zoom = np.zeros(NZOOM)
# coeffs_zoom = np.zeros((NZOOM, coeffs.shape[1]))
#
# for i in range(NZOOM):
# out = self.fit_at_z(z=zgrid_zoom[i], templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs_zoom[i,:], chi2_zoom[i], model_2d = out
# if verbose:
# print(utils.NO_NEWLINE + '- {0:.4f} {1:9.1f}'.format(zgrid_zoom[i], chi2_zoom[i]))
#
# zgrid = np.append(zgrid, zgrid_zoom)
# chi2 = np.append(chi2, chi2_zoom)
# coeffs = np.append(coeffs, coeffs_zoom, axis=0)
#
# so = np.argsort(zgrid)
# zgrid = zgrid[so]
# chi2 = chi2[so]
# coeffs=coeffs[so,:]
#
# ### Best redshift
# templates = self.load_templates(line_complexes=False, fwhm=fwhm)
# zbest = zgrid[np.argmin(chi2)]
# out = self.fit_at_z(z=zbest, templates=templates,
# fitter='lstsq', poly_order=poly_order)
#
# A, coeffs_full, chi2_best, model_full = out
#
# ## Continuum fit
# mask = np.isfinite(coeffs_full)
# for i, key in enumerate(templates.keys()):
# if key.startswith('line'):
# mask[self.n_simp+i] = False
#
# model_continuum = np.dot(A, coeffs_full*mask)
# model_continuum = model_continuum.reshape(self.beam.sh_beam)
#
# ### 1D spectrum
# model1d = utils.SpectrumTemplate(wave=self.beam.lam,
# flux=np.dot(self.y_poly.T,
# coeffs_full[self.n_bg:self.n_poly+self.n_bg]))
#
# cont1d = model1d*1
#
# line_flux = OrderedDict()
# for i, key in enumerate(templates.keys()):
# temp_i = templates[key].zscale(zbest, coeffs_full[self.n_simp+i])
# model1d += temp_i
# if not key.startswith('line'):
# cont1d += temp_i
# else:
# line_flux[key.split()[1]] = (coeffs_full[self.n_simp+i] * 1.)
# #self.beam.total_flux/1.e-17)
#
#
# fit_data = OrderedDict()
# fit_data['poly_order'] = poly_order
# fit_data['fwhm'] = fwhm
# fit_data['zbest'] = zbest
# fit_data['zgrid'] = zgrid
# fit_data['A'] = A
# fit_data['coeffs'] = coeffs
# fit_data['chi2'] = chi2
# fit_data['model_full'] = model_full
# fit_data['coeffs_full'] = coeffs_full
# fit_data['line_flux'] = line_flux
# #fit_data['templates_full'] = templates
# fit_data['model_cont'] = model_continuum
# fit_data['model1d'] = model1d
# fit_data['cont1d'] = cont1d
#
# fig = None
# if make_figure:
# fig = self.show_redshift_fit(fit_data)
# #fig.savefig('fit.pdf')
#
# return fit_data, fig
def show_redshift_fit(self, fit_data):
    """Make a diagnostic figure based on results from `fit_redshift`.

    Parameters
    ----------
    fit_data : dict
        Fit results, e.g., as returned by `fit_redshift`.  Uses the
        'zgrid', 'chi2' and 'model_full' entries.
        (Previous docstring incorrectly referenced `simple_line_fit`.)

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Figure object that can be optionally written to a hardcopy file.
    """
    import matplotlib.gridspec

    # Full figure
    fig = plt.figure(figsize=(12, 5))

    # 1D plots: data, flat-spectrum model and best-fit template
    gsb = matplotlib.gridspec.GridSpec(3, 1)

    xspec, yspec, yerr = self.beam.optimal_extract(self.grism.data['SCI']
                                                   - self.contam,
                                                   ivar=self.ivar)

    flat_model = self.flat_flam.reshape(self.beam.sh_beam)
    xspecm, yspecm, yerrm = self.beam.optimal_extract(flat_model)

    out = self.beam.optimal_extract(fit_data['model_full'])
    xspecl, yspecl, yerrl = out

    ax = fig.add_subplot(gsb[-2:, :])
    ax.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o',
                markersize=3, color='black', alpha=0.5,
                label='Data (id={0:d})'.format(self.beam.id))

    ax.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
            label=r'Flat $f_\lambda$ ({0})'.format(self.direct.filter))

    zbest = fit_data['zgrid'][np.argmin(fit_data['chi2'])]
    ax.plot(xspecl/1.e4, yspecl, color='orange', linewidth=2, alpha=0.8,
            label='Template (z={0:.4f})'.format(zbest))

    ax.legend(fontsize=8, loc='lower center', scatterpoints=1)

    ax.set_xlabel(r'$\lambda$')
    ax.set_ylabel('flux (e-/s)')

    # Default wavelength limits (microns) for the known grisms; fall back
    # to the observed range so an unexpected grism doesn't raise a
    # NameError on an undefined `xlim` below
    if self.grism.filter == 'G102':
        xlim = [0.7, 1.25]
    elif self.grism.filter == 'G141':
        xlim = [1., 1.8]
    else:
        xlim = [xspec.min()/1.e4, xspec.max()/1.e4]

    xt = np.arange(xlim[0], xlim[1], 0.1)
    ax.set_xlim(xlim[0], xlim[1])
    ax.set_xticks(xt)

    # Chi-squared vs. redshift panel
    ax = fig.add_subplot(gsb[-3, :])
    ax.plot(fit_data['zgrid'], fit_data['chi2']/self.DoF)
    for d in [1, 4, 9]:
        # Delta-chi2 = 1, 4, 9 reference levels, progressively lighter gray
        ax.plot(fit_data['zgrid'],
                fit_data['chi2']*0+(fit_data['chi2'].min()+d)/self.DoF,
                color='{0:.1f}'.format(d/20.))

    # ax.set_xticklabels([])
    ax.set_ylabel(r'$\chi^2/(\nu={0:d})$'.format(self.DoF))
    ax.set_xlabel('z')
    ax.set_xlim(fit_data['zgrid'][0], fit_data['zgrid'][-1])

    # axt = ax.twiny()
    # axt.set_xlim(np.array(ax.get_xlim())*1.e4/6563.-1)
    # axt.set_xlabel(r'$z_\mathrm{H\alpha}$')

    # 2D spectra: observed / masked / model / residual panels
    gst = matplotlib.gridspec.GridSpec(4, 1)
    if 'viridis_r' in plt.colormaps():
        cmap = 'viridis_r'
    else:
        cmap = 'cubehelix_r'

    ax = fig.add_subplot(gst[0, :])
    ax.imshow(self.grism.data['SCI'], vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Observed')

    ax = fig.add_subplot(gst[1, :])
    mask2d = self.fit_mask.reshape(self.beam.sh_beam)
    ax.imshow((self.grism.data['SCI'] - self.contam)*mask2d,
              vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Masked')

    ax = fig.add_subplot(gst[2, :])
    ax.imshow(fit_data['model_full']+self.contam, vmin=-0.05, vmax=0.2,
              cmap=cmap, interpolation='Nearest', origin='lower',
              aspect='auto')
    ax.set_ylabel('Model')

    ax = fig.add_subplot(gst[3, :])
    ax.imshow(self.grism.data['SCI']-fit_data['model_full']-self.contam,
              vmin=-0.05, vmax=0.2, cmap=cmap, interpolation='Nearest',
              origin='lower', aspect='auto')
    ax.set_ylabel('Resid.')

    # Shared wavelength axis formatting on the 2D panels
    for ax in fig.axes[-4:]:
        self.beam.twod_axis_labels(wscale=1.e4,
                                   limits=[xlim[0], xlim[1], 0.1],
                                   mpl_axis=ax)
        self.beam.twod_xlim(xlim, wscale=1.e4, mpl_axis=ax)
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\lambda$')

    for ax in fig.axes[-4:-1]:
        ax.set_xticklabels([])

    gsb.tight_layout(fig, pad=0.1, h_pad=0.01, rect=(0, 0, 0.5, 1))
    gst.tight_layout(fig, pad=0.1, h_pad=0.01, rect=(0.5, 0.01, 1, 0.98))
    return fig
def simple_line_fit(self, fwhm=48., grid=(1.12e4, 1.65e4, 1, 4),
                    fitter='lstsq', poly_order=3):
    """Fit a Gaussian emission line and a polynomial continuum.

    Parameters
    ----------
    fwhm : float
        FWHM of the emission line.

    grid : sequence `[l0, l1, dl, skip]`
        The base wavelength array will be generated like

        >>> wave = np.arange(l0, l1, dl)

        and trial lines are placed every `skip` wavelength grid points,
        starting at index `skip // 2`:

        >>> line_centers = wave[skip // 2::skip]

        (A tuple default avoids the shared mutable-default pitfall;
        callers may still pass a list.)

    fitter : str, 'lstsq' or 'sklearn'
        Least-squares fitting function for determining template
        normalization coefficients.

    poly_order : int (>= 0)
        Polynomial order to use for the continuum.
        (Previously documented under the wrong name `order`.)

    Returns
    -------
    line_centers : length N `~numpy.array`
        emission line center positions

    coeffs : (N, M) `~numpy.ndarray` where `M = (poly_order+1+1)`
        Normalization coefficients for the continuum and emission line
        templates.

    chi2 : `~numpy.array`
        Chi-squared evaluated at each line_centers[i]

    ok_data : `~numpy.ndarray`
        Boolean mask of pixels used for the Chi-squared calculation.
        Consists of non-masked DQ pixels, non-zero ERR pixels and pixels
        where `self.model > 0.03*self.model.max()` for the flat-spectrum
        model.

    best_model : `~numpy.ndarray`
        2D array with best-fit continuum + line model

    best_model_cont : `~numpy.ndarray`
        2D array with Best-fit continuum-only model.

    best_line_center : float
        wavelength where chi2 is minimized.

    best_line_flux : float
        Emission line flux where chi2 is minimized
    """
    # Test fit
    import sklearn.linear_model
    import numpy.linalg
    clf = sklearn.linear_model.LinearRegression()

    # Continuum: flat-spectrum model of the object
    self.compute_model()
    self.model = self.modelf.reshape(self.beam.sh_beam)

    # OK data where the 2D model has non-zero flux
    ok_data = (~self.mask.flatten()) & (self.ivar.flatten() != 0)
    ok_data &= (self.modelf > 0.03*self.modelf.max())

    # Flat versions of sci/ivar arrays
    scif = (self.grism.data['SCI'] - self.contam).flatten()
    ivarf = self.ivar.flatten()

    # Model: (a_0 x**0 + ... a_i x**i)*continuum + line
    yp, xp = np.indices(self.beam.sh_beam)
    xpf = (xp.flatten() - self.beam.sh_beam[1]/2.)
    xpf /= (self.beam.sh_beam[1]/2)

    # Polynomial continuum arrays
    A_list = [xpf**order*self.modelf for order in range(poly_order+1)]

    # Extra element for the computed line model
    A_list.append(self.modelf*1)
    A = np.vstack(A_list).T

    # Normalized Gaussians on a grid
    waves = np.arange(grid[0], grid[1], grid[2])
    line_centers = waves[grid[3] // 2::grid[3]]

    rms = fwhm/2.35
    gaussian_lines = np.exp(-(line_centers[:, None]-waves)**2/2/rms**2)
    gaussian_lines /= np.sqrt(2*np.pi*rms**2)

    N = len(line_centers)
    coeffs = np.zeros((N, A.shape[1]))
    chi2 = np.zeros(N)

    # Loop through line models, fit template coefficients and compute
    # chi-squared for each trial line center.
    # (Removed a redundant running `chi2min` tracker: the minimum is
    # found with argmin below.)
    for i in range(N):
        self.compute_model(spectrum_1d=[waves, gaussian_lines[i, :]])
        A[:, -1] = self.model.flatten()
        if fitter == 'lstsq':
            out = np.linalg.lstsq(A[ok_data, :], scif[ok_data],
                                  rcond=utils.LSTSQ_RCOND)
            lstsq_coeff, residuals, rank, s = out
            coeffs[i, :] += lstsq_coeff
            model = np.dot(A, lstsq_coeff)
        else:
            status = clf.fit(A[ok_data, :], scif[ok_data])
            coeffs[i, :] = clf.coef_
            model = np.dot(A, clf.coef_)

        chi2[i] = np.sum(((scif-model)**2*ivarf)[ok_data])

    # Best-fit line center minimizes chi-squared; recompute its model
    ix = np.argmin(chi2)
    self.compute_model(spectrum_1d=[waves, gaussian_lines[ix, :]])
    A[:, -1] = self.model.flatten()
    best_coeffs = coeffs[ix, :]*1
    best_model = np.dot(A, best_coeffs).reshape(self.beam.sh_beam)

    # Continuum-only model: zero out the line coefficient
    best_coeffs_cont = best_coeffs*1
    best_coeffs_cont[-1] = 0.
    best_model_cont = np.dot(A, best_coeffs_cont)
    best_model_cont = best_model_cont.reshape(self.beam.sh_beam)

    best_line_center = line_centers[ix]
    best_line_flux = coeffs[ix, -1]*self.beam.total_flux/1.e-17

    return (line_centers, coeffs, chi2, ok_data,
            best_model, best_model_cont,
            best_line_center, best_line_flux)
def show_simple_fit_results(self, fit_outputs):
    """Make a plot based on results from `simple_line_fit`.

    Parameters
    ----------
    fit_outputs : tuple
        returned data from `simple_line_fit`. I.e.,

        >>> fit_outputs = BeamCutout.simple_line_fit()
        >>> fig = BeamCutout.show_simple_fit_results(fit_outputs)

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Figure object that can be optionally written to a hardcopy file.
    """
    import matplotlib.gridspec

    line_centers, coeffs, chi2, ok_data, best_model, best_model_cont, best_line_center, best_line_flux = fit_outputs

    # Full figure
    fig = plt.figure(figsize=(10, 5))

    # 1D plots: data, flat-spectrum model and continuum+line model
    gsb = matplotlib.gridspec.GridSpec(3, 1)

    xspec, yspec, yerr = self.beam.optimal_extract(self.grism.data['SCI']
                                                   - self.contam,
                                                   ivar=self.ivar)

    flat_model = self.compute_model(in_place=False)
    flat_model = flat_model.reshape(self.beam.sh_beam)
    xspecm, yspecm, yerrm = self.beam.optimal_extract(flat_model)

    xspecl, yspecl, yerrl = self.beam.optimal_extract(best_model)

    ax = fig.add_subplot(gsb[-2:, :])
    ax.errorbar(xspec/1.e4, yspec, yerr, linestyle='None', marker='o',
                markersize=3, color='black', alpha=0.5,
                label='Data (id={0:d})'.format(self.beam.id))

    ax.plot(xspecm/1.e4, yspecm, color='red', linewidth=2, alpha=0.8,
            label=r'Flat $f_\lambda$ ({0})'.format(self.direct.filter))

    ax.plot(xspecl/1.e4, yspecl, color='orange', linewidth=2, alpha=0.8,
            label='Cont+line ({0:.4f}, {1:.2e})'.format(best_line_center/1.e4, best_line_flux*1.e-17))

    ax.legend(fontsize=8, loc='lower center', scatterpoints=1)

    ax.set_xlabel(r'$\lambda$')
    ax.set_ylabel('flux (e-/s)')

    # Chi-squared vs. trial line center panel
    ax = fig.add_subplot(gsb[-3, :])
    ax.plot(line_centers/1.e4, chi2/ok_data.sum())
    ax.set_xticklabels([])
    ax.set_ylabel(r'$\chi^2/(\nu={0:d})$'.format(ok_data.sum()))

    # Default wavelength limits (microns) for the known grisms; fall back
    # to the observed range so an unexpected grism doesn't raise a
    # NameError on an undefined `xlim` below
    if self.grism.filter == 'G102':
        xlim = [0.7, 1.25]
    elif self.grism.filter == 'G141':
        xlim = [1., 1.8]
    else:
        xlim = [xspec.min()/1.e4, xspec.max()/1.e4]

    xt = np.arange(xlim[0], xlim[1], 0.1)

    for ax in fig.axes:
        ax.set_xlim(xlim[0], xlim[1])
        ax.set_xticks(xt)

    # Secondary axis: redshift assuming the line is H-alpha
    axt = ax.twiny()
    axt.set_xlim(np.array(ax.get_xlim())*1.e4/6563.-1)
    axt.set_xlabel(r'$z_\mathrm{H\alpha}$')

    # 2D spectra: observed / model / residual panels
    gst = matplotlib.gridspec.GridSpec(3, 1)
    if 'viridis_r' in plt.colormaps():
        cmap = 'viridis_r'
    else:
        cmap = 'cubehelix_r'

    ax = fig.add_subplot(gst[0, :])
    ax.imshow(self.grism.data['SCI'], vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Observed')

    ax = fig.add_subplot(gst[1, :])
    ax.imshow(best_model+self.contam, vmin=-0.05, vmax=0.2, cmap=cmap,
              interpolation='Nearest', origin='lower', aspect='auto')
    ax.set_ylabel('Model')

    ax = fig.add_subplot(gst[2, :])
    ax.imshow(self.grism.data['SCI']-best_model-self.contam, vmin=-0.05,
              vmax=0.2, cmap=cmap, interpolation='Nearest',
              origin='lower', aspect='auto')
    ax.set_ylabel('Resid.')

    # Shared wavelength axis formatting on the 2D panels
    for ax in fig.axes[-3:]:
        self.beam.twod_axis_labels(wscale=1.e4,
                                   limits=[xlim[0], xlim[1], 0.1],
                                   mpl_axis=ax)
        self.beam.twod_xlim(xlim, wscale=1.e4, mpl_axis=ax)
        ax.set_yticklabels([])
        ax.set_xlabel(r'$\lambda$')

    for ax in fig.axes[-3:-1]:
        ax.set_xticklabels([])

    gsb.tight_layout(fig, pad=0.1, h_pad=0.01, rect=(0, 0, 0.5, 1))
    gst.tight_layout(fig, pad=0.1, h_pad=0.01, rect=(0.5, 0.1, 1, 0.9))
    return fig
| gbrammer/grizli | grizli/model.py | Python | mit | 195,537 | [
"Galaxy",
"Gaussian",
"VisIt"
] | a48c4132840b1ff2b3ee6999e2cb8fe98605b646fc3f0492448806c7602edb22 |
# Regression tests for gpaw.cluster.Cluster (Python 2 syntax):
# rotation/translation geometry, minimal_box sizing, find_connected
# and xyz/pdb I/O.
import numpy as np
from ase import Atoms, Atom
from ase.parallel import barrier, rank, size
from gpaw.cluster import Cluster
from gpaw.test import equal
from ase.structure import molecule
from math import pi, sqrt

# C-O bond length used throughout
R = 2.0
CO = Atoms([Atom('C', (1, 0, 0)), Atom('O', (1, 0, R))])

# Rotating about y by pi/2 moves the bond from the z axis onto the x axis
CO.rotate('y', pi/2)
equal(CO.positions[1, 0], R, 1e-10)

# translate
CO.translate(-CO.get_center_of_mass())
p = CO.positions.copy()
for i in range(2):
    equal(p[i, 1], 0, 1e-10)
    equal(p[i, 2], 0, 1e-10)

# rotate the nuclear axis to the direction (1,1,1)
CO.rotate(p[1] - p[0], (1, 1, 1))
q = CO.positions.copy()
for c in range(3):
    # All three components are the original x coordinate / sqrt(3)
    equal(q[0, c], p[0, 0] / sqrt(3), 1e-10)
    equal(q[1, c], p[1, 0] / sqrt(3), 1e-10)

# minimal box: scalar border of b on all sides
b=4.0
CO = Cluster([Atom('C', (1, 0, 0)), Atom('O', (1, 0, R))])
CO.minimal_box(b)
cc = CO.get_cell()
for c in range(3):
    # Cell width is 2*b plus the molecular extent (R along z only)
    width = 2*b
    if c==2:
        width += R
    equal(cc[c, c], width, 1e-10)

# minimal box, ensure multiple of 4
h = .13
b = [2, 3, 4]
CO.minimal_box(b, h=h)
cc = CO.get_cell()
for c in range(3):
    # print "cc[c,c], cc[c,c] / h % 4 =", cc[c, c], cc[c, c] / h % 4
    for a in CO:
        print a.symbol, b[c], a.position[c], cc[c, c] - a.position[c]
        # Every atom keeps at least the requested border distance
        assert(a.position[c] > b[c])
    # Cell length must be a multiple of 4 grid spacings
    equal(cc[c, c] / h % 4, 0.0, 1e-10)

# .............................................
# connected atoms
assert(len(CO.find_connected(0, 1.1 * R)) == 2)
assert(len(CO.find_connected(0, 0.9 * R)) == 1)

H2O = Cluster(molecule('H2O'))
assert (len(H2O.find_connected(0)) == 3)
assert (len(H2O.find_connected(0, scale=0.9)) == 1)

# .............................................
# I/O
fxyz='CO.xyz'
fpdb='CO.pdb'

cell = [2.,3.,R+2.]
CO.set_cell(cell, scale_atoms=True)
barrier()
CO.write(fxyz)
barrier()
CO_b = Cluster(filename=fxyz)
assert(len(CO) == len(CO_b))
#for a, b in zip(cell, CO_b.get_cell().diagonal()):
#    assert(a == b)
# xyz round-trip keeps the cell diagonal (off-diagonal terms are zero)
offdiagonal = CO_b.get_cell().sum() - CO_b.get_cell().diagonal().sum()
assert(offdiagonal == 0.0)

# Writing with repeat=[1,1,1] doubles each direction -> 8x the atoms
barrier()
CO.write(fxyz, repeat=[1,1,1])
barrier()
CO_b = Cluster(filename=fxyz)
assert(8*len(CO) == len(CO_b))
barrier()
CO.write(fpdb)

# read xyz files with additional info (extra per-atom columns)
read_with_additional = True
if read_with_additional:
    if rank == 0:
        f = open(fxyz, 'w')
        print >> f, """2
C 0 0 0. 1 2 3
O 0 0 1. 6. 7. 8."""
        f.close()
    barrier()
    CO = Cluster(filename=fxyz)
"ASE",
"GPAW"
] | 6b168ea51f043e9e71917983e5ec3bd82707e7d073ce346e0afbce8031b6a4ad |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from zoo.orca.automl.metrics import Evaluator
from zoo.orca import init_orca_context, stop_orca_context
from zoo.chronos.forecaster.seq2seq_forecaster import Seq2SeqForecaster
from zoo.chronos.data.repo_dataset import get_public_dataset
def get_tsdata():
    """Fetch and preprocess the 'network_traffic' public dataset.

    Downloads the dataset (or reuses the local cache), then for both the
    train and the test split: generates one-hot datetime features, imputes
    missing values with last-observation-carried-forward, min-max scales
    (the scaler is fitted on the training split only) and rolls the series
    into samples with lookback=100 and horizon=10.

    :return: a (train, test) pair of preprocessed TSDataset objects.
    """
    splits = get_public_dataset('network_traffic', '~/.chronos/dataset',
                                redownload=False,
                                with_split=True,
                                test_ratio=0.1)
    train_data, _val_unused, test_data = splits

    scaler = MinMaxScaler()
    for split in (train_data, test_data):
        stage = split.gen_dt_feature(one_hot_features=['HOUR', 'WEEK'])
        stage = stage.impute("last")
        # Fit the scaler on train only; the test split reuses it.
        stage = stage.scale(scaler, fit=split is train_data)
        stage.roll(lookback=100, horizon=10)

    return train_data, test_data
if __name__ == '__main__':
    # Command-line options mirror common Orca cluster settings.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cores', type=int, default=4,
                        help="The number of cpu cores you want to use on each node."
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--memory', type=str, default="32g",
                        help="The memory you want to use on each node."
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--cluster_mode', type=str, default='local',
                        help="The mode for the Spark cluster.")
    parser.add_argument('--num_workers', type=int, default=1,
                        help="The number of nodes to be used in the cluster"
                        "You can change it depending on your own cluster setting.")
    parser.add_argument('--epochs', type=int, default=2,
                        help="Max number of epochs to train in each trial.")
    parser.add_argument('--workers_per_node', type=int, default=1,
                        help="the number of worker you want to use."
                        "The value defaults to 1. The param is only effective"
                        "when distributed is set to True.")

    args = parser.parse_args()
    # 'local' mode ignores --num_workers and always runs on one node.
    num_nodes = 1 if args.cluster_mode == 'local' else args.num_workers
    # The Orca context must exist before the distributed forecaster is built.
    init_orca_context(cluster_mode=args.cluster_mode, cores=args.cores,
                      memory=args.memory, num_nodes=num_nodes)

    tsdata_train, tsdata_test = get_tsdata()

    x_train, y_train = tsdata_train.to_numpy()
    x_test, y_test = tsdata_test.to_numpy()

    # Seq2Seq forecaster trained distributedly across the workers.
    forecaster = Seq2SeqForecaster(past_seq_len=100,
                                   future_seq_len=10,
                                   input_feature_num=x_train.shape[-1],
                                   output_feature_num=2,
                                   metrics=['mse'],
                                   distributed=True,
                                   workers_per_node=args.workers_per_node,
                                   seed=0)

    # Keep the global batch size at 512 by dividing it among the workers.
    forecaster.fit((x_train, y_train), epochs=args.epochs,
                   batch_size=512//(1 if not forecaster.distributed else args.workers_per_node))

    # Evaluate in the original (unscaled) units of the dataset.
    yhat = forecaster.predict(x_test)
    unscale_yhat = tsdata_test.unscale_numpy(yhat)
    unscale_y_test = tsdata_test.unscale_numpy(y_test)
    rmse, smape = [Evaluator.evaluate(m, y_true=unscale_y_test,
                                      y_pred=unscale_yhat,
                                      multioutput='raw_values') for m in ['rmse', 'smape']]
    print(f'rmse is: {np.mean(rmse)}')
    print(f'smape is: {np.mean(smape):.4f}')
    stop_orca_context()
| intel-analytics/analytics-zoo | pyzoo/zoo/chronos/examples/distributed/distributed_training_network_traffic.py | Python | apache-2.0 | 4,292 | [
"ORCA"
] | 65e84ae939ffb21338fc068755cca460dcbcad1d69cd193202523d65130b823f |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
""" Bibauthorid Web Interface Logic and URL handler. """
# pylint: disable=W0105
# pylint: disable=C0301
# pylint: disable=W0613
from cgi import escape
from pprint import pformat
from operator import itemgetter
import re
try:
from invenio.jsonutils import json, json_unicode_to_utf8, CFG_JSON_AVAILABLE
except ImportError:
CFG_JSON_AVAILABLE = False
json = None
from invenio.bibauthorid_webapi import add_cname_to_hepname_record
from invenio.config import CFG_SITE_URL, CFG_BASE_URL
from invenio.bibauthorid_config import AID_ENABLED, PERSON_SEARCH_RESULTS_SHOW_PAPERS_PERSON_LIMIT, \
BIBAUTHORID_UI_SKIP_ARXIV_STUB_PAGE, VALID_EXPORT_FILTERS, PERSONS_PER_PAGE, \
MAX_NUM_SHOW_PAPERS
from invenio.config import CFG_SITE_LANG, CFG_SITE_URL, CFG_SITE_NAME, CFG_INSPIRE_SITE, CFG_SITE_SECURE_URL
from invenio.bibauthorid_name_utils import most_relevant_name
from invenio.webpage import page, pageheaderonly, pagefooteronly
from invenio.messages import gettext_set_language # , wash_language
from invenio.template import load
from invenio.webinterface_handler import wash_urlargd, WebInterfaceDirectory
from invenio.session import get_session
from invenio.urlutils import redirect_to_url, get_canonical_and_alternates_urls
from invenio.webuser import (getUid,
page_not_authorized,
collect_user_info,
set_user_preferences,
get_user_preferences,
email_valid_p,
emailUnique,
get_email_from_username,
get_uid_from_email,
isGuestUser)
from invenio.access_control_admin import acc_get_user_roles
from invenio.search_engine import perform_request_search
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibauthorid_config import CREATE_NEW_PERSON
import invenio.webinterface_handler_config as apache
import invenio.webauthorprofile_interface as webauthorapi
import invenio.bibauthorid_webapi as webapi
from invenio.bibauthorid_general_utils import get_title_of_doi, get_title_of_arxiv_pubid, is_valid_orcid
from invenio.bibauthorid_backinterface import update_external_ids_of_authors, get_orcid_id_of_author, \
get_validated_request_tickets_for_author, get_title_of_paper, get_claimed_papers_of_author
from invenio.bibauthorid_dbinterface import defaultdict, remove_arxiv_papers_of_author
from invenio.webauthorprofile_orcidutils import get_dois_from_orcid
from invenio.bibauthorid_webauthorprofileinterface import is_valid_canonical_id, get_person_id_from_canonical_id, \
get_person_redirect_link, author_has_papers
from invenio.bibauthorid_templates import WebProfileMenu, WebProfilePage
# Imports related to hepnames update form
from invenio.bibedit_utils import get_bibrecord
from invenio.bibrecord import record_get_field_value, record_get_field_values, \
record_get_field_instances, field_get_subfield_values
# Template engine instance for the bibauthorid module; all HTML rendered
# below goes through its tmpl_* methods.
TEMPLATE = load('bibauthorid')
class WebInterfaceBibAuthorIDClaimPages(WebInterfaceDirectory):
'''
Handles /author/claim pages and AJAX requests.
Supplies the methods:
/author/claim/<string>
/author/claim/action
/author/claim/claimstub
/author/claim/export
/author/claim/generate_autoclaim_data
/author/claim/merge_profiles_ajax
/author/claim/search_box_ajax
/author/claim/tickets_admin
/author/claim/search
'''
_exports = ['',
'action',
'claimstub',
'export',
'generate_autoclaim_data',
'merge_profiles_ajax',
'search_box_ajax',
'tickets_admin'
]
def _lookup(self, component, path):
'''
This handler parses dynamic URLs:
- /author/profile/1332 shows the page of author with id: 1332
- /author/profile/100:5522,1431 shows the page of the author
identified by the bibrefrec: '100:5522,1431'
'''
if not component in self._exports:
return WebInterfaceBibAuthorIDClaimPages(component), path
def _is_profile_owner(self, pid):
return self.person_id == int(pid)
def _is_admin(self, pinfo):
return pinfo['ulevel'] == 'admin'
def __init__(self, identifier=None):
'''
Constructor of the web interface.
@param identifier: identifier of an author. Can be one of:
- an author id: e.g. "14"
- a canonical id: e.g. "J.R.Ellis.1"
- a bibrefrec: e.g. "100:1442,155"
@type identifier: str
'''
self.person_id = -1 # -1 is a non valid author identifier
if identifier is None or not isinstance(identifier, str):
return
# check if it's a canonical id: e.g. "J.R.Ellis.1"
pid = int(webapi.get_person_id_from_canonical_id(identifier))
if pid >= 0:
self.person_id = pid
return
# check if it's an author id: e.g. "14"
try:
pid = int(identifier)
if webapi.author_has_papers(pid):
self.person_id = pid
return
except ValueError:
pass
# check if it's a bibrefrec: e.g. "100:1442,155"
if webapi.is_valid_bibref(identifier):
pid = int(webapi.get_person_id_from_paper(identifier))
if pid >= 0:
self.person_id = pid
return
    def __call__(self, req, form):
        '''
        Serve the main person page.
        Will use the object's person id (resolved in __init__) to get a
        person's information and render the claim interface.

        @param req: apache request object
        @type req: apache request object
        @param form: POST/GET variables of the request
        @type form: dict

        @return: a full page formatted in HTML
        @rtype: str
        '''
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        ulevel = pinfo['ulevel']

        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                                   'open_claim': (str, None),
                                   'ticketid': (int, -1),
                                   'verbose': (int, 0)})

        debug = "verbose" in argd and argd["verbose"] > 0

        ln = argd['ln']
        # perform_req_search reads the washed arguments off the request object.
        req.argd = argd # needed for perform_req_search

        # No valid author could be resolved from the URL: go to the search page.
        if self.person_id < 0:
            return redirect_to_url(req, '%s/author/search' % (CFG_SITE_URL))

        no_access = self._page_access_permission_wall(req, [self.person_id])
        if no_access:
            return no_access

        pinfo['claim_in_process'] = True

        user_info = collect_user_info(req)
        user_info['precached_viewclaimlink'] = pinfo['claim_in_process']
        session.dirty = True

        # Remember this author as the last one viewed in a claim context.
        # (person_id is always != -1 here because of the redirect above.)
        if self.person_id != -1:
            pinfo['claimpaper_admin_last_viewed_pid'] = self.person_id

        # An admin may arrive here asking for a specific RT ticket.
        rt_ticket_id = argd['ticketid']

        if rt_ticket_id != -1:
            pinfo["admin_requested_ticket_id"] = rt_ticket_id

        session.dirty = True

        ## Create menu and page using templates
        cname = webapi.get_canonical_id_from_person_id(self.person_id)
        menu = WebProfileMenu(str(cname), "claim", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))

        profile_page = WebProfilePage("claim", webapi.get_longest_name_from_pid(self.person_id))
        profile_page.add_profile_menu(menu)

        # Parameters handed to the client-side (JavaScript) ticket box.
        gboxstatus = self.person_id
        gpid = self.person_id
        gNumOfWorkers = 3 # to do: read it from conf file
        gReqTimeout = 3000
        gPageTimeout = 12000

        # NOTE(review): json may be None when CFG_JSON_AVAILABLE is False
        # (see the guarded import at the top of the file) -- confirm json is
        # guaranteed available on this code path.
        profile_page.add_bootstrapped_data(json.dumps({
            "other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
            "backbone": """
            (function(ticketbox) {
                var app = ticketbox.app;
                app.userops.set(%s);
                app.bodyModel.set({userLevel: "%s", guestPrompt: true});
            })(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
        }))

        if debug:
            profile_page.add_debug_info(pinfo)

        # content += self._generate_person_info_box(ulevel, ln) #### Name variants
        # metaheaderadd = self._scripts() + '\n  <meta name="robots" content="nofollow" />'
        # body = self._generate_optional_menu(ulevel, req, form)

        content = self._generate_tabs(ulevel, req)
        content += self._generate_footer(ulevel)
        content = content.decode('utf-8', 'strict')

        webapi.history_log_visit(req, 'claim', pid=self.person_id)

        return page(title=self._generate_title(ulevel),
                    metaheaderadd=profile_page.get_head().encode('utf-8'),
                    body=profile_page.get_wrapped_body(content).encode('utf-8'),
                    req=req,
                    language=ln,
                    show_title_p=False)
    def _page_access_permission_wall(self, req, req_pid=None, req_level=None):
        '''
        Display an error page if user not authorized to use the interface.

        @param req: Apache Request Object for session management
        @type req: Apache Request Object
        @param req_pid: Requested person id (or list of person ids)
        @type req_pid: int
        @param req_level: Request level required for the page
        @type req_level: string

        @return: empty string when access is granted, otherwise the
            rendered "not authorized" error page
        @rtype: str
        '''
        session = get_session(req)
        uid = getUid(req)
        pinfo = session["personinfo"]
        uinfo = collect_user_info(req)

        if 'ln' in pinfo:
            ln = pinfo["ln"]
        else:
            ln = CFG_SITE_LANG
        _ = gettext_set_language(ln)

        is_authorized = True
        pids_to_check = []

        # Feature switch: the whole author-ID interface can be disabled.
        if not AID_ENABLED:
            return page_not_authorized(req, text=_("Fatal: Author ID capabilities are disabled on this system."))

        # A page may require a specific user level (e.g. 'admin').
        if req_level and 'ulevel' in pinfo and pinfo["ulevel"] != req_level:
            return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))

        # Normalize the requested person id(s) into a list.
        if req_pid and not isinstance(req_pid, list):
            pids_to_check = [req_pid]
        elif req_pid and isinstance(req_pid, list):
            pids_to_check = req_pid

        # Non-admins need at least one of the claim/attribution privileges.
        if (not (uinfo['precached_usepaperclaim']
                 or uinfo['precached_usepaperattribution'])
            and 'ulevel' in pinfo
            and not pinfo["ulevel"] == "admin"):
            is_authorized = False

        if is_authorized and not webapi.user_can_view_CMP(uid):
            is_authorized = False

        # Tickets already in the session extend the set of person ids to check.
        if is_authorized and 'ticket' in pinfo:
            for tic in pinfo["ticket"]:
                if 'pid' in tic:
                    pids_to_check.append(tic['pid'])

        if pids_to_check and is_authorized:
            user_pid = webapi.get_pid_from_uid(uid)

            if not uinfo['precached_usepaperattribution']:
                # Without the attribution privilege, a non-admin may only
                # operate on their own person id.
                if (not user_pid in pids_to_check
                    and 'ulevel' in pinfo
                    and not pinfo["ulevel"] == "admin"):
                    is_authorized = False

            elif (user_pid in pids_to_check
                  and 'ulevel' in pinfo
                  and not pinfo["ulevel"] == "admin"):
                # With the attribution privilege, drop the tickets that do
                # not concern the user's own person id (iterate over a copy
                # because the list is mutated).
                for tic in list(pinfo["ticket"]):
                    if not tic["pid"] == user_pid:
                        pinfo['ticket'].remove(tic)

        if not is_authorized:
            return page_not_authorized(req, text=_("Fatal: You are not allowed to access this functionality."))
        else:
            return ""
def _generate_title(self, ulevel):
'''
Generates the title for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: title
@rtype: str
'''
def generate_title_guest():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_user():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (user interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
def generate_title_admin():
title = 'Assign papers'
if self.person_id:
title = 'Assign papers (administrator interface) for: ' + str(webapi.get_person_redirect_link(self.person_id))
return title
generate_title = {'guest': generate_title_guest,
'user': generate_title_user,
'admin': generate_title_admin}
return generate_title[ulevel]()
def _generate_optional_menu(self, ulevel, req, form):
'''
Generates the menu for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@param form: POST/GET variables of the request
@type form: dict
@return: menu
@rtype: str
'''
def generate_optional_menu_guest(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_user(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
def generate_optional_menu_admin(req, form):
argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0)})
menu = TEMPLATE.tmpl_person_menu_admin(self.person_id, argd['ln'])
if "verbose" in argd and argd["verbose"] > 0:
session = get_session(req)
pinfo = session['personinfo']
menu += "\n<pre>" + pformat(pinfo) + "</pre>\n"
return menu
generate_optional_menu = {'guest': generate_optional_menu_guest,
'user': generate_optional_menu_user,
'admin': generate_optional_menu_admin}
return "<div class=\"clearfix\">" + generate_optional_menu[ulevel](req, form) + "</div>"
def _generate_ticket_box(self, ulevel, req):
'''
Generates the semi-permanent info box for the specified user permission
level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: info box
@rtype: str
'''
def generate_ticket_box_guest(req):
session = get_session(req)
pinfo = session['personinfo']
ticket = pinfo['ticket']
results = list()
pendingt = list()
for t in ticket:
if 'execution_result' in t:
for res in t['execution_result']:
results.append(res)
else:
pendingt.append(t)
box = ""
if pendingt:
box += TEMPLATE.tmpl_ticket_box('in_process', 'transaction', len(pendingt))
if results:
failed = [messages for status, messages in results if not status]
if failed:
box += TEMPLATE.tmpl_transaction_box('failure', failed)
successfull = [messages for status, messages in results if status]
if successfull:
box += TEMPLATE.tmpl_transaction_box('success', successfull)
return box
def generate_ticket_box_user(req):
return generate_ticket_box_guest(req)
def generate_ticket_box_admin(req):
return generate_ticket_box_guest(req)
generate_ticket_box = {'guest': generate_ticket_box_guest,
'user': generate_ticket_box_user,
'admin': generate_ticket_box_admin}
return generate_ticket_box[ulevel](req)
def _generate_person_info_box(self, ulevel, ln):
'''
Generates the name info box for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param ln: page display language
@type ln: str
@return: name info box
@rtype: str
'''
def generate_person_info_box_guest(ln):
names = webapi.get_person_names_from_id(self.person_id)
box = TEMPLATE.tmpl_admin_person_info_box(ln, person_id=self.person_id,
names=names)
return box
def generate_person_info_box_user(ln):
return generate_person_info_box_guest(ln)
def generate_person_info_box_admin(ln):
return generate_person_info_box_guest(ln)
generate_person_info_box = {'guest': generate_person_info_box_guest,
'user': generate_person_info_box_user,
'admin': generate_person_info_box_admin}
return generate_person_info_box[ulevel](ln)
def _generate_tabs(self, ulevel, req):
'''
Generates the tabs content for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@param req: apache request object
@type req: apache request object
@return: tabs content
@rtype: str
'''
from invenio.bibauthorid_templates import verbiage_dict as tmpl_verbiage_dict
from invenio.bibauthorid_templates import buttons_verbiage_dict as tmpl_buttons_verbiage_dict
def generate_tabs_guest(req):
links = list() # ['delete', 'commit','del_entry','commit_entry']
tabs = ['records', 'repealed', 'review']
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=list(),
verbiage_dict=tmpl_verbiage_dict['guest'],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['guest'],
show_reset_button=False)
def generate_tabs_user(req):
links = ['delete', 'del_entry']
tabs = ['records', 'repealed', 'review', 'tickets']
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
user_is_owner = 'not_owner'
if pinfo["claimpaper_admin_last_viewed_pid"] == webapi.get_pid_from_uid(uid):
user_is_owner = 'owner'
open_tickets = webapi.get_person_request_ticket(self.person_id)
tickets = list()
for t in open_tickets:
owns = False
for row in t[0]:
if row[0] == 'uid-ip' and row[1].split('||')[0] == str(uid):
owns = True
if owns:
tickets.append(t)
return generate_tabs_admin(req, show_tabs=tabs, ticket_links=links,
open_tickets=tickets,
verbiage_dict=tmpl_verbiage_dict['user'][user_is_owner],
buttons_verbiage_dict=tmpl_buttons_verbiage_dict['user'][user_is_owner])
def generate_tabs_admin(req, show_tabs=['records', 'repealed', 'review', 'comments', 'tickets', 'data'],
ticket_links=['delete', 'commit', 'del_entry', 'commit_entry'], open_tickets=None,
verbiage_dict=None, buttons_verbiage_dict=None, show_reset_button=True):
session = get_session(req)
personinfo = dict()
try:
personinfo = session["personinfo"]
except KeyError:
return ""
if 'ln' in personinfo:
ln = personinfo["ln"]
else:
ln = CFG_SITE_LANG
all_papers = webapi.get_papers_by_person_id(self.person_id, ext_out=True)
records = [{'recid': paper[0],
'bibref': paper[1],
'flag': paper[2],
'authorname': paper[3],
'authoraffiliation': paper[4],
'paperdate': paper[5],
'rt_status': paper[6],
'paperexperiment': paper[7]} for paper in all_papers]
rejected_papers = [row for row in records if row['flag'] < -1]
rest_of_papers = [row for row in records if row['flag'] >= -1]
review_needed = webapi.get_review_needing_records(self.person_id)
if len(review_needed) < 1:
if 'review' in show_tabs:
show_tabs.remove('review')
if open_tickets == None:
open_tickets = webapi.get_person_request_ticket(self.person_id)
else:
if len(open_tickets) < 1 and 'tickets' in show_tabs:
show_tabs.remove('tickets')
rt_tickets = None
if "admin_requested_ticket_id" in personinfo:
rt_tickets = personinfo["admin_requested_ticket_id"]
if verbiage_dict is None:
verbiage_dict = translate_dict_values(tmpl_verbiage_dict['admin'], ln)
if buttons_verbiage_dict is None:
buttons_verbiage_dict = translate_dict_values(tmpl_buttons_verbiage_dict['admin'], ln)
# send data to the template function
tabs = TEMPLATE.tmpl_admin_tabs(ln, person_id=self.person_id,
rejected_papers=rejected_papers,
rest_of_papers=rest_of_papers,
review_needed=review_needed,
rt_tickets=rt_tickets,
open_rt_tickets=open_tickets,
show_tabs=show_tabs,
ticket_links=ticket_links,
verbiage_dict=verbiage_dict,
buttons_verbiage_dict=buttons_verbiage_dict,
show_reset_button=show_reset_button)
return tabs
def translate_dict_values(dictionary, ln):
def translate_str_values(dictionary, f=lambda x: x):
translated_dict = dict()
for key, value in dictionary.iteritems():
if isinstance(value, str):
translated_dict[key] = f(value)
elif isinstance(value, dict):
translated_dict[key] = translate_str_values(value, f)
else:
raise TypeError("Value should be either string or dictionary.")
return translated_dict
return translate_str_values(dictionary, f=gettext_set_language(ln))
generate_tabs = {'guest': generate_tabs_guest,
'user': generate_tabs_user,
'admin': generate_tabs_admin}
return generate_tabs[ulevel](req)
def _generate_footer(self, ulevel):
'''
Generates the footer for the specified user permission level.
@param ulevel: user permission level
@type ulevel: str
@return: footer
@rtype: str
'''
def generate_footer_guest():
return TEMPLATE.tmpl_invenio_search_box()
def generate_footer_user():
return generate_footer_guest()
def generate_footer_admin():
return generate_footer_guest()
generate_footer = {'guest': generate_footer_guest,
'user': generate_footer_user,
'admin': generate_footer_admin}
return generate_footer[ulevel]()
def _ticket_dispatch_end(self, req):
'''
The ticket dispatch is finished, redirect to the original page of
origin or to the last_viewed_pid or return to the papers autoassigned box to populate its data
'''
session = get_session(req)
pinfo = session["personinfo"]
webapi.session_bareinit(req)
if 'claim_in_process' in pinfo:
pinfo['claim_in_process'] = False
if "merge_ticket" in pinfo and pinfo['merge_ticket']:
pinfo['merge_ticket'] = []
user_info = collect_user_info(req)
user_info['precached_viewclaimlink'] = True
session.dirty = True
if "referer" in pinfo and pinfo["referer"]:
referer = pinfo["referer"]
del(pinfo["referer"])
session.dirty = True
return redirect_to_url(req, referer)
# if we are coming fromt he autoclaim box we should not redirect and just return to the caller function
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == False and pinfo['autoclaim']['begin_autoclaim'] == True:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] = False
session.dirty = True
else:
redirect_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], limit_to_page=['manage_profile', 'claim'])
if not redirect_page:
redirect_page = webapi.get_fallback_redirect_link(req)
if 'autoclaim' in pinfo and pinfo['autoclaim']['review_failed'] == True and pinfo['autoclaim']['checkout'] == True:
redirect_page = '%s/author/claim/action?checkout=True' % (CFG_SITE_URL,)
pinfo['autoclaim']['checkout'] = False
session.dirty = True
elif not 'manage_profile' in redirect_page:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] == False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
redirect_page = '%s/author/claim/%s?open_claim=True' % (CFG_SITE_URL, webapi.get_person_redirect_link(pinfo["claimpaper_admin_last_viewed_pid"]))
else:
pinfo['autoclaim']['review_failed'] = False
pinfo['autoclaim']['begin_autoclaim'] == False
pinfo['autoclaim']['checkout'] = True
session.dirty = True
return redirect_to_url(req, redirect_page)
# redirect_link = diary('get_redirect_link', caller='_ticket_dispatch_end', parameters=[('open_claim','True')])
# return redirect_to_url(req, redirect_link)
    # need review if should be deleted
    def __user_is_authorized(self, req, action):
        '''
        Determines if a given user is authorized to perform a specified action.

        NOTE(review): this method calls acc_find_user_role_actions(), a name
        that is never imported in this file (only acc_get_user_roles is) --
        calling it would raise a NameError.  Together with the comment above,
        this strongly suggests dead code; confirm before relying on it.

        @param req: Apache Request Object
        @type req: Apache Request Object
        @param action: the action the user wants to perform
        @type action: string

        @return: True if user is allowed to perform the action, False if not
        @rtype: boolean
        '''
        if not req:
            return False
        if not action:
            return False
        else:
            action = escape(action)

        uid = getUid(req)

        # Only a real, logged-in user may be authorized (uid 0 is the guest).
        if not isinstance(uid, int):
            return False
        if uid == 0:
            return False

        allowance = [i[1] for i in acc_find_user_role_actions({'uid': uid})
                     if i[1] == action]

        if allowance:
            return True

        return False
@staticmethod
def _scripts(kill_browser_cache=False):
'''
Returns html code to be included in the meta header of the html page.
The actual code is stored in the template.
@return: html formatted Javascript and CSS inclusions for the <head>
@rtype: string
'''
return TEMPLATE.tmpl_meta_includes(kill_browser_cache)
    def _check_user_fields(self, req, form):
        '''
        Validate and store the user-supplied contact fields (first name,
        last name, e-mail, comments) in the session, collecting the names
        of faulty fields in pinfo["checkout_faulty_fields"].

        Fields already provided by the system (the *_sys flags in pinfo)
        are left untouched.

        @param req: apache request object
        @type req: apache request object
        @param form: POST/GET variables of the request
        @type form: dict
        '''
        argd = wash_urlargd(
            form,
            {'ln': (str, CFG_SITE_LANG),
             'user_first_name': (str, None),
             'user_last_name': (str, None),
             'user_email': (str, None),
             'user_comments': (str, None)})
        session = get_session(req)
        pinfo = session["personinfo"]
        ulevel = pinfo["ulevel"]

        # Logged-in users and admins are trusted: skip faulty-field checks.
        skip_checkout_faulty_fields = False

        if ulevel in ['user', 'admin']:
            skip_checkout_faulty_fields = True

        if not ("user_first_name_sys" in pinfo and pinfo["user_first_name_sys"]):
            if "user_first_name" in argd and argd['user_first_name']:
                # NOTE(review): this inner branch is dead code -- the outer
                # condition already guarantees argd["user_first_name"] is
                # truthy, so "not argd[...]" can never hold.  The outer
                # check was probably meant to test presence only; confirm
                # before changing.
                if not argd["user_first_name"] and not skip_checkout_faulty_fields:
                    pinfo["checkout_faulty_fields"].append("user_first_name")
                else:
                    pinfo["user_first_name"] = escape(argd["user_first_name"])

        if not ("user_last_name_sys" in pinfo and pinfo["user_last_name_sys"]):
            if "user_last_name" in argd and argd['user_last_name']:
                # NOTE(review): same dead-branch pattern as above.
                if not argd["user_last_name"] and not skip_checkout_faulty_fields:
                    pinfo["checkout_faulty_fields"].append("user_last_name")
                else:
                    pinfo["user_last_name"] = escape(argd["user_last_name"])

        if not ("user_email_sys" in pinfo and pinfo["user_email_sys"]):
            if "user_email" in argd and argd['user_email']:
                if not email_valid_p(argd["user_email"]):
                    pinfo["checkout_faulty_fields"].append("user_email")
                else:
                    pinfo["user_email"] = escape(argd["user_email"])

                    # A guest may not use an e-mail already registered to
                    # an account.
                    if (ulevel == "guest"
                        and emailUnique(argd["user_email"]) > 0):
                        pinfo["checkout_faulty_fields"].append("user_email_taken")
            else:
                pinfo["checkout_faulty_fields"].append("user_email")

        if "user_comments" in argd:
            if argd["user_comments"]:
                pinfo["user_ticket_comments"] = escape(argd["user_comments"])
            else:
                pinfo["user_ticket_comments"] = ""

        session.dirty = True
def action(self, req, form):
'''
Initial step in processing of requests: ticket generation/update.
Also acts as action dispatcher for interface mass action requests.
Valid mass actions are:
- add_external_id: add an external identifier to an author
- add_missing_external_ids: add missing external identifiers of an author
- bibref_check_submit:
- cancel: clean the session (erase tickets and so on)
- cancel_rt_ticket:
- cancel_search_ticket:
- cancel_stage:
- checkout:
- checkout_continue_claiming:
- checkout_remove_transaction:
- checkout_submit:
- claim: claim papers for an author
- commit_rt_ticket:
- confirm: confirm assignments to an author
- delete_external_ids: delete external identifiers of an author
- repeal: repeal assignments from an author
- reset: reset assignments of an author
- set_canonical_name: set/swap the canonical name of an author
- to_other_person: assign a document from an author to another author
@param req: apache request object
@type req: apache request object
@param form: parameters sent via GET or POST request
@type form: dict
@return: a full page formatted in HTML
@return: str
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session["personinfo"]
argd = wash_urlargd(form,
{'autoclaim_show_review':(str, None),
'canonical_name': (str, None),
'existing_ext_ids': (list, None),
'ext_id': (str, None),
'uid': (int, None),
'ext_system': (str, None),
'ln': (str, CFG_SITE_LANG),
'pid': (int, -1),
'primary_profile':(str, None),
'search_param': (str, None),
'rt_action': (str, None),
'rt_id': (int, None),
'selection': (list, None),
# permitted actions
'add_external_id': (str, None),
'set_uid': (str, None),
'add_missing_external_ids': (str, None),
'associate_profile': (str, None),
'bibref_check_submit': (str, None),
'cancel': (str, None),
'cancel_merging': (str, None),
'cancel_rt_ticket': (str, None),
'cancel_search_ticket': (str, None),
'cancel_stage': (str, None),
'checkout': (str, None),
'checkout_continue_claiming': (str, None),
'checkout_remove_transaction': (str, None),
'checkout_submit': (str, None),
'assign': (str, None),
'commit_rt_ticket': (str, None),
'confirm': (str, None),
'delete_external_ids': (str, None),
'merge': (str, None),
'reject': (str, None),
'repeal': (str, None),
'reset': (str, None),
'send_message': (str, None),
'set_canonical_name': (str, None),
'to_other_person': (str, None)})
ulevel = pinfo["ulevel"]
ticket = pinfo["ticket"]
uid = getUid(req)
ln = argd['ln']
action = None
permitted_actions = ['add_external_id',
'set_uid',
'add_missing_external_ids',
'associate_profile',
'bibref_check_submit',
'cancel',
'cancel_merging',
'cancel_rt_ticket',
'cancel_search_ticket',
'cancel_stage',
'checkout',
'checkout_continue_claiming',
'checkout_remove_transaction',
'checkout_submit',
'assign',
'commit_rt_ticket',
'confirm',
'delete_external_ids',
'merge',
'reject',
'repeal',
'reset',
'send_message',
'set_canonical_name',
'to_other_person']
for act in permitted_actions:
# one action (the most) is enabled in the form
if argd[act] is not None:
action = act
no_access = self._page_access_permission_wall(req, None)
if no_access and action not in ["assign"]:
return no_access
# incomplete papers (incomplete paper info or other problems) trigger action function without user's interference
# in order to fix those problems and claim papers or remove them from the ticket
if (action is None
and "bibref_check_required" in pinfo
and pinfo["bibref_check_required"]):
if "bibref_check_reviewed_bibrefs" in pinfo:
del(pinfo["bibref_check_reviewed_bibrefs"])
session.dirty = True
def add_external_id():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot add external id to unknown person")
if argd['ext_system'] is not None:
ext_sys = argd['ext_system']
else:
return self._error_page(req, ln,
"Fatal: cannot add an external id without specifying the system")
if argd['ext_id'] is not None:
ext_id = argd['ext_id']
else:
return self._error_page(req, ln,
"Fatal: cannot add a custom external id without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.add_person_external_id(pid, ext_sys, ext_id, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def set_uid():
'''
associates the user with pid to the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: current user is unknown")
if argd['uid'] is not None:
dest_uid = int(argd['uid'])
else:
return self._error_page(req, ln,
"Fatal: user id is not valid")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.set_person_uid(pid, dest_uid, userinfo)
# remove arxiv pubs of current pid
remove_arxiv_papers_of_author(pid)
dest_uid_pid = webapi.get_pid_from_uid(dest_uid)
if dest_uid_pid > -1:
# move the arxiv pubs of the dest_uid to the current pid
dest_uid_arxiv_papers = webapi.get_arxiv_papers_of_author(dest_uid_pid)
webapi.add_arxiv_papers_to_author(dest_uid_arxiv_papers, pid)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def add_missing_external_ids():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot recompute external ids for an unknown person")
update_external_ids_of_authors([pid], overwrite=False)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def associate_profile():
'''
associates the user with user id to the person profile with pid
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot associate profile without a person id.")
uid = getUid(req)
pid, profile_claimed = webapi.claim_profile(uid, pid)
redirect_pid = pid
if profile_claimed:
pinfo['pid'] = pid
pinfo['should_check_to_autoclaim'] = True
pinfo["login_info_message"] = "confirm_success"
session.dirty = True
redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, redirect_pid))
# if someone have already claimed this profile it redirects to choose_profile with an error message
else:
param=''
if 'search_param' in argd and argd['search_param']:
param = '&search_param=' + argd['search_param']
redirect_to_url(req, '%s/author/choose_profile?failed=%s%s' % (CFG_SITE_URL, True, param))
def bibref_check_submit():
pinfo["bibref_check_reviewed_bibrefs"] = list()
add_rev = pinfo["bibref_check_reviewed_bibrefs"].append
if ("bibrefs_auto_assigned" in pinfo
or "bibrefs_to_confirm" in pinfo):
person_reviews = list()
if ("bibrefs_auto_assigned" in pinfo
and pinfo["bibrefs_auto_assigned"]):
person_reviews.append(pinfo["bibrefs_auto_assigned"])
if ("bibrefs_to_confirm" in pinfo
and pinfo["bibrefs_to_confirm"]):
person_reviews.append(pinfo["bibrefs_to_confirm"])
for ref_review in person_reviews:
for person_id in ref_review:
for bibrec in ref_review[person_id]["bibrecs"]:
rec_grp = "bibrecgroup%s" % bibrec
elements = list()
if rec_grp in form:
if isinstance(form[rec_grp], str):
elements.append(form[rec_grp])
elif isinstance(form[rec_grp], list):
elements += form[rec_grp]
else:
continue
for element in elements:
test = element.split("||")
if test and len(test) > 1 and test[1]:
tref = test[1] + "," + str(bibrec)
tpid = webapi.wash_integer_id(test[0])
if (webapi.is_valid_bibref(tref)
and tpid > -1):
add_rev(element + "," + str(bibrec))
session.dirty = True
def cancel():
self.__session_cleanup(req)
return self._ticket_dispatch_end(req)
def cancel_merging():
'''
empties the session out of merge content and redirects to the manage profile page
that the user was viewing before the merge
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: Couldn't redirect to the previous page")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
if pinfo['merge_profiles']:
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def cancel_rt_ticket():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln, "Fatal: cannot cancel unknown ticket")
if argd['rt_id'] is not None and argd['rt_action'] is not None:
rt_id = int(argd['rt_id'])
rt_action = argd['rt_action']
for bibrefrec in bibrefrecs:
webapi.delete_transaction_from_request_ticket(pid, rt_id, rt_action, bibrefrec)
else:
rt_id = int(bibrefrecs[0])
webapi.delete_request_ticket(pid, rt_id)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def cancel_search_ticket(without_return=False):
if 'search_ticket' in pinfo:
del(pinfo['search_ticket'])
session.dirty = True
if "claimpaper_admin_last_viewed_pid" in pinfo:
pid = pinfo["claimpaper_admin_last_viewed_pid"]
if not without_return:
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
if not without_return:
return self.search(req, form)
def cancel_stage():
if 'bibref_check_required' in pinfo:
del(pinfo['bibref_check_required'])
if 'bibrefs_auto_assigned' in pinfo:
del(pinfo['bibrefs_auto_assigned'])
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
for tt in [row for row in ticket if 'incomplete' in row]:
ticket.remove(tt)
session.dirty = True
return self._ticket_dispatch_end(req)
def checkout():
pass
# return self._ticket_final_review(req)
def checkout_continue_claiming():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
return self._ticket_dispatch_end(req)
def checkout_remove_transaction():
bibref = argd['checkout_remove_transaction']
if webapi.is_valid_bibref(bibref):
for rmt in [row for row in ticket if row["bibref"] == bibref]:
ticket.remove(rmt)
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def checkout_submit():
pinfo["checkout_faulty_fields"] = list()
self._check_user_fields(req, form)
if not ticket:
pinfo["checkout_faulty_fields"].append("tickets")
pinfo["checkout_confirmed"] = True
if pinfo["checkout_faulty_fields"]:
pinfo["checkout_confirmed"] = False
session.dirty = True
# return self._ticket_final_review(req)
def claim():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot claim papers to an unknown person")
if action == 'assign':
claimed_recs = [paper[2] for paper in get_claimed_papers_of_author(pid)]
for bibrefrec in list(bibrefrecs):
_, rec = webapi.split_bibrefrec(bibrefrec)
if rec in claimed_recs:
bibrefrecs.remove(bibrefrec)
for bibrefrec in bibrefrecs:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
if operation_to_be_added is None:
continue
ticket = pinfo['ticket']
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def claim_to_other_person():
if argd['selection'] is not None:
bibrefrecs = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot create ticket without any bibrefrec")
return self._ticket_open_assign_to_other_person(req, bibrefrecs, form)
def commit_rt_ticket():
if argd['selection'] is not None:
tid = argd['selection'][0]
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot cancel unknown ticket")
return self._commit_rt_ticket(req, tid, pid)
def confirm_repeal_reset():
if argd['pid'] > -1 or int(argd['pid']) == CREATE_NEW_PERSON:
pid = argd['pid']
cancel_search_ticket(without_return = True)
else:
return self._ticket_open_assign_to_other_person(req, argd['selection'], form)
#return self._error_page(req, ln, "Fatal: cannot create ticket without a person id! (crr %s)" %repr(argd))
bibrefrecs = argd['selection']
if argd['confirm']:
action = 'assign'
elif argd['repeal']:
action = 'reject'
elif argd['reset']:
action = 'reset'
else:
return self._error_page(req, ln, "Fatal: not existent action!")
for bibrefrec in bibrefrecs:
form['jsondata'] = json.dumps({'pid': str(pid),
'action': action,
'bibrefrec': bibrefrec,
'on': 'user'})
t = WebInterfaceAuthorTicketHandling()
t.add_operation(req, form)
return redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def delete_external_ids():
'''
deletes association between the user with pid and the external id ext_id
'''
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot delete external ids from an unknown person")
if argd['existing_ext_ids'] is not None:
existing_ext_ids = argd['existing_ext_ids']
else:
return self._error_page(req, ln,
"Fatal: you must select at least one external id in order to delete it")
userinfo = "%s||%s" % (uid, req.remote_ip)
webapi.delete_person_external_ids(pid, existing_ext_ids, userinfo)
return redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid)))
def none_action():
return self._error_page(req, ln,
"Fatal: cannot create ticket if no action selected.")
def merge():
'''
performs a merge if allowed on the profiles that the user chose
'''
if argd['primary_profile']:
primary_cname = argd['primary_profile']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without a primary profile!")
if argd['selection']:
profiles_to_merge = argd['selection']
else:
return self._error_page(req, ln,
"Fatal: cannot perform a merge without any profiles selected!")
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
uid = getUid(req)
primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
pids_to_merge = [webapi.get_person_id_from_canonical_id(cname) for cname in profiles_to_merge]
is_admin = False
if pinfo['ulevel'] == 'admin':
is_admin = True
# checking if there are restrictions regarding this merge
can_perform_merge, preventing_pid = webapi.merge_is_allowed(primary_pid, pids_to_merge, is_admin)
if not can_perform_merge:
# when redirected back to the merge profiles page display an error message about the currently attempted merge
pinfo['merge_info_message'] = ("failure", "confirm_failure")
session.dirty = True
redirect_url = "%s/author/merge_profiles?primary_profile=%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
if is_admin:
webapi.merge_profiles(primary_pid, pids_to_merge)
# when redirected back to the manage profile page display a message about the currently attempted merge
pinfo['merge_info_message'] = ("success", "confirm_success")
else:
name = ''
if 'user_last_name' in pinfo:
name = pinfo['user_last_name']
if 'user_first_name' in pinfo:
name += pinfo['user_first_name']
email = ''
if 'user_email' in pinfo:
email = pinfo['user_email']
selection_str = "&selection=".join(profiles_to_merge)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'merge link': "%s/author/merge_profiles?primary_profile=%s&selection=%s" % (CFG_SITE_URL, primary_cname, selection_str)}
# a message is sent to the admin with info regarding the currently attempted merge
webapi.create_request_message(userinfo, subj='Merge profiles request')
# when redirected back to the manage profile page display a message about the merge
pinfo['merge_info_message'] = ("success", "confirm_operation")
pinfo['merge_profiles'] = list()
session.dirty = True
redirect_url = "%s/author/manage_profile/%s" % (CFG_SITE_URL, primary_cname)
return redirect_to_url(req, redirect_url)
def send_message():
'''
sends a message from the user to the admin
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
#pp = pprint.PrettyPrinter(indent=4)
#session_dump = pp.pprint(pinfo)
session_dump = str(pinfo)
name = ''
name_changed = False
name_given = ''
email = ''
email_changed = False
email_given = ''
comment = ''
last_page_visited = ''
if "user_last_name" in pinfo:
name = pinfo["user_last_name"]
if "user_first_name" in pinfo:
name += pinfo["user_first_name"]
name = name.rstrip()
if "user_email" in pinfo:
email = pinfo["user_email"]
email = email.rstrip()
if 'Name' in form:
if not name:
name = form['Name']
elif name != form['Name']:
name_given = form['Name']
name_changed = True
name = name.rstrip()
if 'E-mail'in form:
if not email:
email = form['E-mail']
elif name != form['E-mail']:
email_given = form['E-mail']
email_changed = True
email = email.rstrip()
if 'Comment' in form:
comment = form['Comment']
comment = comment.rstrip()
if not name or not comment or not email:
redirect_to_url(req, '%s/author/help?incomplete_params=%s' % (CFG_SITE_URL, True))
if 'last_page_visited' in form:
last_page_visited = form['last_page_visited']
uid = getUid(req)
userinfo = {'uid-ip': "userid: %s (from %s)" % (uid, req.remote_ip),
'name': name,
'email': email,
'comment': comment,
'last_page_visited': last_page_visited,
'session_dump': session_dump,
'name_given': name_given,
'email_given': email_given,
'name_changed': name_changed,
'email_changed': email_changed}
webapi.create_request_message(userinfo)
def set_canonical_name():
if argd['pid'] > -1:
pid = argd['pid']
else:
return self._error_page(req, ln,
"Fatal: cannot set canonical name to unknown person")
if argd['canonical_name'] is not None:
cname = argd['canonical_name']
else:
return self._error_page(req, ln,
"Fatal: cannot set a custom canonical name without a suggestion")
userinfo = "%s||%s" % (uid, req.remote_ip)
if webapi.is_valid_canonical_id(cname):
webapi.swap_person_canonical_name(pid, cname, userinfo)
else:
webapi.update_person_canonical_name(pid, cname, userinfo)
return redirect_to_url(req, "%s/author/claim/%s%s" % (CFG_SITE_URL, webapi.get_person_redirect_link(pid), '#tabData'))
action_functions = {'add_external_id': add_external_id,
'set_uid': set_uid,
'add_missing_external_ids': add_missing_external_ids,
'associate_profile': associate_profile,
'bibref_check_submit': bibref_check_submit,
'cancel': cancel,
'cancel_merging': cancel_merging,
'cancel_rt_ticket': cancel_rt_ticket,
'cancel_search_ticket': cancel_search_ticket,
'cancel_stage': cancel_stage,
'checkout': checkout,
'checkout_continue_claiming': checkout_continue_claiming,
'checkout_remove_transaction': checkout_remove_transaction,
'checkout_submit': checkout_submit,
'assign': claim,
'commit_rt_ticket': commit_rt_ticket,
'confirm': confirm_repeal_reset,
'delete_external_ids': delete_external_ids,
'merge': merge,
'reject': claim,
'repeal': confirm_repeal_reset,
'reset': confirm_repeal_reset,
'send_message': send_message,
'set_canonical_name': set_canonical_name,
'to_other_person': claim_to_other_person,
None: none_action}
return action_functions[action]()
def _ticket_open_claim(self, req, bibrefs, ln):
'''
Generate page to let user choose how to proceed
@param req: Apache Request Object
@type req: Apache Request Object
@param bibrefs: list of record IDs to perform an action on
@type bibrefs: list of int
@param ln: language to display the page in
@type ln: string
'''
session = get_session(req)
uid = getUid(req)
uinfo = collect_user_info(req)
pinfo = session["personinfo"]
if 'ln' in pinfo:
ln = pinfo["ln"]
else:
ln = CFG_SITE_LANG
_ = gettext_set_language(ln)
no_access = self._page_access_permission_wall(req)
session.dirty = True
pid = -1
search_enabled = True
if not no_access and uinfo["precached_usepaperclaim"]:
tpid = webapi.get_pid_from_uid(uid)
if tpid > -1:
pid = tpid
last_viewed_pid = False
if (not no_access
and "claimpaper_admin_last_viewed_pid" in pinfo
and pinfo["claimpaper_admin_last_viewed_pid"]):
names = webapi.get_person_names_from_id(pinfo["claimpaper_admin_last_viewed_pid"])
names = sorted([i for i in names], key=lambda k: k[1], reverse=True)
if len(names) > 0:
if len(names[0]) > 0:
last_viewed_pid = [pinfo["claimpaper_admin_last_viewed_pid"], names[0][0]]
if no_access:
search_enabled = False
pinfo["referer"] = uinfo["referer"]
session.dirty = True
body = TEMPLATE.tmpl_open_claim(bibrefs, pid, last_viewed_pid,
search_enabled=search_enabled)
body = TEMPLATE.tmpl_person_detail_layout(body)
title = _('Claim this paper')
metaheaderadd = WebInterfaceBibAuthorIDClaimPages._scripts(kill_browser_cache=True)
return page(title=title,
metaheaderadd=metaheaderadd,
body=body,
req=req,
language=ln)
def _ticket_open_assign_to_other_person(self, req, bibrefs, form):
'''
Initializes search to find a person to attach the selected records to
@param req: Apache request object
@type req: Apache request object
@param bibrefs: list of record IDs to consider
@type bibrefs: list of int
@param form: GET/POST request parameters
@type form: dict
'''
session = get_session(req)
pinfo = session["personinfo"]
pinfo["search_ticket"] = dict()
search_ticket = pinfo["search_ticket"]
search_ticket['action'] = 'assign'
search_ticket['bibrefs'] = bibrefs
session.dirty = True
return self.search(req, form)
def _cancel_rt_ticket(self, req, tid, pid):
'''
deletes an RT ticket
'''
webapi.delete_request_ticket(pid, tid)
return redirect_to_url(req, "%s/author/claim/%s" %
(CFG_SITE_URL, webapi.get_person_redirect_link(str(pid))))
    def _cancel_transaction_from_rt_ticket(self, tid, pid, action, bibref):
        '''
        deletes a transaction from an rt ticket

        Thin delegation to the webapi layer; note the argument order is
        swapped (pid first) in the underlying call.
        @param tid: request-ticket id
        @param pid: person id the ticket belongs to
        @param action: transaction action name (e.g. assign/reject)
        @param bibref: bibref of the transaction to remove
        '''
        webapi.delete_transaction_from_request_ticket(pid, tid, action, bibref)
def _commit_rt_ticket(self, req, tid, pid):
'''
Commit of an rt ticket: creates a real ticket and commits.
'''
session = get_session(req)
pinfo = session["personinfo"]
ticket = pinfo["ticket"]
uid = getUid(req)
tid = int(tid)
rt_ticket = get_validated_request_tickets_for_author(pid, tid)[0]
for action, bibrefrec in rt_ticket['operations']:
operation_parts = {'pid': pid,
'action': action,
'bibrefrec': bibrefrec}
operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
webapi.add_operation_to_ticket(operation_to_be_added, ticket)
session.dirty = True
webapi.delete_request_ticket(pid, tid)
redirect_to_url(req, "%s/author/claim/%s" % (CFG_SITE_URL, pid))
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
'''
Create a page that contains a message explaining the error.
@param req: Apache Request Object
@type req: Apache Request Object
@param ln: language
@type ln: string
@param message: message to be displayed
@type message: string
'''
body = []
_ = gettext_set_language(ln)
if not message:
message = "No further explanation available. Sorry."
if intro:
body.append(_("<p>We're sorry. An error occurred while "
"handling your request. Please find more information "
"below:</p>"))
body.append("<p><strong>%s</strong></p>" % message)
return page(title=_("Notice"),
body="\n".join(body),
description="%s - Internal Error" % CFG_SITE_NAME,
keywords="%s, Internal Error" % CFG_SITE_NAME,
language=ln,
req=req)
def __session_cleanup(self, req):
'''
Cleans the session from all bibauthorid specific settings and
with that cancels any transaction currently in progress.
@param req: Apache Request Object
@type req: Apache Request Object
'''
session = get_session(req)
try:
pinfo = session["personinfo"]
except KeyError:
return
if "ticket" in pinfo:
pinfo['ticket'] = []
if "search_ticket" in pinfo:
pinfo['search_ticket'] = dict()
# clear up bibref checker if it's done.
if ("bibref_check_required" in pinfo
and not pinfo["bibref_check_required"]):
if 'bibrefs_to_confirm' in pinfo:
del(pinfo['bibrefs_to_confirm'])
if "bibrefs_auto_assigned" in pinfo:
del(pinfo["bibrefs_auto_assigned"])
del(pinfo["bibref_check_required"])
if "checkout_confirmed" in pinfo:
del(pinfo["checkout_confirmed"])
if "checkout_faulty_fields" in pinfo:
del(pinfo["checkout_faulty_fields"])
# pinfo['ulevel'] = ulevel
# pinfo["claimpaper_admin_last_viewed_pid"] = -1
pinfo["admin_requested_ticket_id"] = -1
session.dirty = True
def _generate_search_ticket_box(self, req):
'''
Generate the search ticket to remember a pending search for Person
entities in an attribution process
@param req: Apache request object
@type req: Apache request object
'''
session = get_session(req)
pinfo = session["personinfo"]
search_ticket = None
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
if not search_ticket:
return ''
else:
return TEMPLATE.tmpl_search_ticket_box('person_search', 'assign_papers', search_ticket['bibrefs'])
def search_box(self, query, shown_element_functions):
'''
collecting the persons' data that the search function returned
@param req: Apache request object
@type req: Apache request object
@param query: the query string
@type query: string
@param shown_element_functions: contains the functions that will tell to the template which columns to show and what buttons to print
@type shown_element_functions: dict
@return: html body
@rtype: string
'''
pid_list = self._perform_search(query)
search_results = []
for pid in pid_list:
result = defaultdict(list)
result['pid'] = pid
result['canonical_id'] = webapi.get_canonical_id_from_person_id(pid)
result['name_variants'] = webapi.get_person_names_from_id(pid)
result['external_ids'] = webapi.get_external_ids_from_person_id(pid)
# this variable shows if we want to use the following data in the search template
if 'pass_status' in shown_element_functions and shown_element_functions['pass_status']:
result['status'] = webapi.is_profile_available(pid)
search_results.append(result)
body = TEMPLATE.tmpl_author_search(query, search_results, shown_element_functions)
body = TEMPLATE.tmpl_person_detail_layout(body)
return body
def search(self, req, form):
'''
Function used for searching a person based on a name with which the
function is queried.
@param req: Apache Request Object
@type form: dict
@return: a full page formatted in HTML
@rtype: string
'''
webapi.session_bareinit(req)
session = get_session(req)
pinfo = session['personinfo']
ulevel = pinfo['ulevel']
person_id = self.person_id
uid = getUid(req)
argd = wash_urlargd(
form,
{'ln': (str, CFG_SITE_LANG),
'verbose': (int, 0),
'q': (str, None)})
debug = "verbose" in argd and argd["verbose"] > 0
ln = argd['ln']
cname = ''
is_owner = False
last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
if last_visited_pid is not None:
cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
is_owner = self._is_profile_owner(last_visited_pid)
menu = WebProfileMenu(str(cname), "search", ln, is_owner, self._is_admin(pinfo))
title = "Person search"
# Create Wrapper Page Markup
profile_page = WebProfilePage("search", title, no_cache=True)
profile_page.add_profile_menu(menu)
profile_page.add_bootstrapped_data(json.dumps({
"other": "var gBOX_STATUS = '10';var gPID = '10'; var gNumOfWorkers= '10'; var gReqTimeout= '10'; var gPageTimeout= '10';",
"backbone": """
(function(ticketbox) {
var app = ticketbox.app;
app.userops.set(%s);
app.bodyModel.set({userLevel: "%s"});
})(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
}))
if debug:
profile_page.add_debug_info(pinfo)
no_access = self._page_access_permission_wall(req)
shown_element_functions = dict()
shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_general_search_bar()
if no_access:
return no_access
search_ticket = None
bibrefs = []
if 'search_ticket' in pinfo:
search_ticket = pinfo['search_ticket']
for r in search_ticket['bibrefs']:
bibrefs.append(r)
if search_ticket and "ulevel" in pinfo:
if pinfo["ulevel"] == "admin":
shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_assigning_search_new_person_generator(bibrefs)
content = ""
if search_ticket:
shown_element_functions['button_gen'] = TEMPLATE.tmpl_assigning_search_button_generator(bibrefs)
content = content + self._generate_search_ticket_box(req)
query = None
if 'q' in argd:
if argd['q']:
query = escape(argd['q'])
content += self.search_box(query, shown_element_functions)
body = profile_page.get_wrapped_body(content)
parameter = None
if query:
parameter = '?search_param=%s' + query
webapi.history_log_visit(req, 'search', params = parameter)
return page(title=title,
metaheaderadd=profile_page.get_head().encode('utf-8'),
body=body.encode('utf-8'),
req=req,
language=ln,
show_title_p=False)
    def merge_profiles(self, req, form):
        '''
        beginning of the process that performs the merge over multiple person profiles
        @param req: Apache Request Object
        @type req: Apache Request Object
        @param form: GET/POST request parameters
        @type form: dict
        @return: a full page formatted in HTML
        @rtype: string
        '''
        argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                                   'primary_profile': (str, None),
                                   'search_param': (str, ''),
                                   'selection': (list, None),
                                   'verbose': (int, 0)})
        ln = argd['ln']
        primary_cname = argd['primary_profile']
        search_param = argd['search_param']
        selection = argd['selection']
        debug = 'verbose' in argd and argd['verbose'] > 0
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        profiles_to_merge = pinfo['merge_profiles']
        _ = gettext_set_language(ln)
        # a primary profile is mandatory; the page cannot be opened directly
        if not primary_cname:
            return page_not_authorized(req, text=_('This page is not accessible directly.'))
        no_access = self._page_access_permission_wall(req)
        if no_access:
            return no_access
        # add any newly selected profiles (with their availability flag)
        # to the merge list kept in the session, skipping duplicates
        if selection is not None:
            profiles_to_merge_session = [cname for cname, is_available in profiles_to_merge]
            for profile in selection:
                if profile not in profiles_to_merge_session:
                    pid = webapi.get_person_id_from_canonical_id(profile)
                    is_available = webapi.is_profile_available(pid)
                    pinfo['merge_profiles'].append([profile, '1' if is_available else '0'])
            session.dirty = True
        primary_pid = webapi.get_person_id_from_canonical_id(primary_cname)
        is_available = webapi.is_profile_available(primary_pid)
        # remember the primary profile only the first time it is set
        if not session['personinfo']['merge_primary_profile']:
            session['personinfo']['merge_primary_profile'] = [primary_cname, '1' if is_available else '0']
        session.dirty = True
        body = ''
        cname = ''
        is_owner = False
        # base the profile menu on the profile the user visited last, if any
        last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
        if last_visited_pid is not None:
            cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
            is_owner = self._is_profile_owner(last_visited_pid)
        title = 'Merge Profiles'
        menu = WebProfileMenu(str(cname), "manage_profile", ln, is_owner, self._is_admin(pinfo))
        merge_page = WebProfilePage("merge_profile", title, no_cache=True)
        merge_page.add_profile_menu(menu)
        if debug:
            merge_page.add_debug_info(pinfo)
        # display status for any previously attempted merge (consumed once)
        if pinfo['merge_info_message']:
            teaser_key, message = pinfo['merge_info_message']
            body += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
            pinfo['merge_info_message'] = None
            session.dirty = True
        body += TEMPLATE.tmpl_merge_ticket_box('person_search', 'merge_profiles', primary_cname)
        shown_element_functions = dict()
        shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_merge_profiles_search_bar(primary_cname)
        shown_element_functions['button_gen'] = TEMPLATE.merge_profiles_button_generator()
        # truthy flag: search_box will also look up profile availability
        shown_element_functions['pass_status'] = 'True'
        merge_page.add_bootstrapped_data(json.dumps({
            "other": "var gMergeProfile = %s; var gMergeList = %s;" % ([primary_cname, '1' if is_available else '0'], profiles_to_merge)
        }))
        body += self.search_box(search_param, shown_element_functions)
        body = merge_page.get_wrapped_body(body)
        return page(title=title,
                    metaheaderadd=merge_page.get_head().encode('utf-8'),
                    body=body.encode('utf-8'),
                    req=req,
                    language=ln,
                    show_title_p=False)
def _perform_search(self, search_param):
'''
calls the search function on the search_param and returns the results
@param search_param: query string
@type search_param: String
@return: list of pids that the search found they match with the search query
@return: list
'''
pid_canditates_list = []
nquery = None
if search_param:
if search_param.count(":"):
try:
left, right = search_param.split(":")
try:
nsearch_param = str(right)
except (ValueError, TypeError):
try:
nsearch_param = str(left)
except (ValueError, TypeError):
nsearch_param = search_param
except ValueError:
nsearch_param = search_param
else:
nsearch_param = search_param
sorted_results = webapi.search_person_ids_by_name(nsearch_param)
for result in sorted_results:
pid_canditates_list.append(result[0])
return pid_canditates_list
def merge_profiles_ajax(self, req, form):
'''
Function used for handling Ajax requests used in order to add/remove profiles
in/from the merging profiles list, which is saved in the session.
@param req: Apache Request Object
@type req: Apache Request Object
@param form: Parameters sent via Ajax request
@type form: dict
@return: json data
'''
# Abort if the simplejson module isn't available
if not CFG_JSON_AVAILABLE:
print "Json not configurable"
# If it is an Ajax request, extract any JSON data.
ajax_request = False
# REcent papers request
if form.has_key('jsondata'):
json_data = json.loads(str(form['jsondata']))
# Deunicode all strings (Invenio doesn't have unicode
# support).
json_data = json_unicode_to_utf8(json_data)
ajax_request = True
json_response = {'resultCode': 0}
# Handle request.
if ajax_request:
req_type = json_data['requestType']
if req_type == 'addProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
person_id = webapi.get_person_id_from_canonical_id(profile)
if person_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
profile_availability = webapi.is_profile_available(person_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
if profile not in [el[0] for el in profiles_to_merge]:
profiles_to_merge.append([profile, profile_availability])
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'addedPofile': profile})
json_response.update({'addedPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'removeProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
if webapi.get_person_id_from_canonical_id(profile) != -1:
webapi.session_bareinit(req)
session = get_session(req)
profiles_to_merge = session["personinfo"]["merge_profiles"]
# print (str(profiles_to_merge))
if profile in [el[0] for el in profiles_to_merge]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
session.dirty = True
# TODO check access rights and get profile from db
json_response.update({'resultCode': 1})
json_response.update({'removedProfile': profile})
else:
json_response.update({'result': 'Error: Profile was missing already from the list'})
else:
json_response.update({'result': 'Error: Profile does not exist'})
else:
json_response.update({'result': 'Error: Missing profile'})
elif req_type == 'setPrimaryProfile':
if json_data.has_key('profile'):
profile = json_data['profile']
profile_id = webapi.get_person_id_from_canonical_id(profile)
if profile_id != -1:
webapi.session_bareinit(req)
session = get_session(req)
profile_availability = webapi.is_profile_available(profile_id)
if profile_availability:
profile_availability = "1"
else:
profile_availability = "0"
profiles_to_merge = session["personinfo"]["merge_profiles"]
if profile in [el[0] for el in profiles_to_merge if el and el[0]]:
for prof in list(profiles_to_merge):
if prof[0] == profile:
profiles_to_merge.remove(prof)
primary_profile = session["personinfo"]["merge_primary_profile"]
if primary_profile and primary_profile not in profiles_to_merge:
profiles_to_merge.append(primary_profile)
session["personinfo"]["merge_primary_profile"] = [profile, profile_availability]
session.dirty = True
json_response.update({'resultCode': 1})
json_response.update({'primaryProfile': profile})
json_response.update({'primaryPofileAvailability': profile_availability})
else:
json_response.update({'result': 'Error: Profile was already in the list'})
else:
json_response.update({'result': 'Error: Missing profile'})
else:
json_response.update({'result': 'Error: Wrong request type'})
return json.dumps(json_response)
def search_box_ajax(self, req, form):
    '''
    Function used for handling Ajax requests used in the search box.

    Dispatches on the 'requestType' field of the posted JSON and returns
    a JSON document whose 'resultCode' is 1 on success, 0 otherwise.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param form: Parameters sent via Ajax request
    @type form: dict
    @return: json data
    '''
    # Abort if the simplejson module isn't available
    if not CFG_JSON_AVAILABLE:
        # NOTE(review): this only prints a warning and falls through; json
        # is still used below, so this path would fail later -- confirm intent.
        print "Json not configurable"
    # If it is an Ajax request, extract any JSON data.
    ajax_request = False
    # REcent papers request
    if form.has_key('jsondata'):
        json_data = json.loads(str(form['jsondata']))
        # Deunicode all strings (Invenio doesn't have unicode
        # support).
        json_data = json_unicode_to_utf8(json_data)
        ajax_request = True
        json_response = {'resultCode': 0}
    # Handle request.
    if ajax_request:
        req_type = json_data['requestType']
        if req_type == 'getPapers':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                # papers arrive as [[paper_id], ...]; sort for stable display
                papers = sorted([[p[0]] for p in webapi.get_papers_by_person_id(int(pId), -1)],
                                key=itemgetter(0))
                # only the first MAX_NUM_SHOW_PAPERS entries are rendered
                papers_html = TEMPLATE.tmpl_gen_papers(papers[0:MAX_NUM_SHOW_PAPERS])
                json_response.update({'result': "\n".join(papers_html)})
                json_response.update({'totalPapers': len(papers)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
            else:
                json_response.update({'result': 'Error: Missing person id'})
        elif req_type == 'getNames':
            # NOTE(review): no else branch here -- a missing personId
            # silently returns resultCode 0.
            if json_data.has_key('personId'):
                pId = json_data['personId']
                names = webapi.get_person_names_from_id(int(pId))
                names_html = TEMPLATE.tmpl_gen_names(names)
                json_response.update({'result': "\n".join(names_html)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
        elif req_type == 'getIDs':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                ids = webapi.get_external_ids_from_person_id(int(pId))
                ids_html = TEMPLATE.tmpl_gen_ext_ids(ids)
                json_response.update({'result': "\n".join(ids_html)})
                json_response.update({'resultCode': 1})
                json_response.update({'pid': str(pId)})
        elif req_type == 'isProfileClaimed':
            if json_data.has_key('personId'):
                pId = json_data['personId']
                # -1 means no user has claimed this profile
                isClaimed = webapi.get_uid_from_personid(pId)
                if isClaimed != -1:
                    json_response.update({'resultCode': 1})
                    json_response.update({'pid': str(pId)})
        else:
            json_response.update({'result': 'Error: Wrong request type'})
        return json.dumps(json_response)
def choose_profile(self, req, form):
    '''
    Generate SSO landing/choose_profile page.

    Reachable only through an arXiv SSO login; lets the user pick (or
    search for) the author profile to attach to his account.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG),
                               'search_param': (str, None),
                               'failed': (str, None),
                               'verbose': (int, 0)})
    ln = argd['ln']
    debug = "verbose" in argd and argd["verbose"] > 0
    req.argd = argd  # needed for perform_req_search
    search_param = argd['search_param']
    webapi.session_bareinit(req)
    session = get_session(req)
    uid = getUid(req)
    pinfo = session['personinfo']
    # 'failed' flags a previous, unsuccessful attempt to claim a profile
    failed = True
    if not argd['failed']:
        failed = False
    _ = gettext_set_language(ln)
    if not CFG_INSPIRE_SITE:
        return page_not_authorized(req, text=_("This page is not accessible directly."))
    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_info = webapi.get_login_info(uid, params)
    # only users coming from an arXiv SSO login may see this page
    if 'arXiv' not in login_info['logged_in_to_remote_systems']:
        return page_not_authorized(req, text=_("This page is not accessible directly."))
    pid = webapi.get_user_pid(login_info['uid'])
    # Create Wrapper Page Markup
    is_owner = False
    menu = WebProfileMenu('', "choose_profile", ln, is_owner, self._is_admin(pinfo))
    choose_page = WebProfilePage("choose_profile", "Choose your profile", no_cache=True)
    choose_page.add_profile_menu(menu)
    if debug:
        choose_page.add_debug_info(pinfo)
    content = TEMPLATE.tmpl_choose_profile(failed)
    body = choose_page.get_wrapped_body(content)
    # In any case, when we step by here, an autoclaim should be performed right after!
    pinfo = session["personinfo"]
    pinfo['should_check_to_autoclaim'] = True
    session.dirty = True
    last_visited_pid = webapi.history_get_last_visited_pid(session['personinfo']['visit_diary'])
    # if already logged in then redirect the user to the page he was viewing
    if pid != -1:
        redirect_pid = pid
        if last_visited_pid:
            redirect_pid = last_visited_pid
        redirect_to_url(req, '%s/author/manage_profile/%s' % (CFG_SITE_URL, str(redirect_pid)))
    else:
        # get name strings and email addresses from SSO/Oauth logins:
        # {'system':{'name':[variant1,...,variantn], 'email':'blabla@bla.bla', 'pants_size':20}}
        remote_login_systems_info = webapi.get_remote_login_systems_info(req, login_info['logged_in_to_remote_systems'])
        # get union of recids that are associated to the ids from all the external systems: set(inspire_recids_list)
        recids = webapi.get_remote_login_systems_recids(req, login_info['logged_in_to_remote_systems'])
        # this is the profile with the biggest intersection of papers so it's more probable that this is the profile the user seeks
        probable_pid = webapi.match_profile(req, recids, remote_login_systems_info)
        # if not search_param and probable_pid > -1 and probable_pid == last_visited_pid:
        #     # try to assign the user to the profile he chose. If for some reason the profile is not available we assign him to an empty profile
        #     redirect_pid, profile_claimed = webapi.claim_profile(login_info['uid'], probable_pid)
        #     if profile_claimed:
        #         redirect_to_url(req, '%s/author/claim/action?associate_profile=True&redirect_pid=%s' % (CFG_SITE_URL, str(redirect_pid)))
        probable_profile_suggestion_info = None
        last_viewed_profile_suggestion_info = None
        if last_visited_pid > -1 and webapi.is_profile_available(last_visited_pid):
            # get information about the last-viewed profile and show it to the user
            last_viewed_profile_suggestion_info = webapi.get_profile_suggestion_info(req, last_visited_pid, recids)
        if probable_pid > -1 and webapi.is_profile_available(probable_pid):
            # get information about the most probable profile and show it to the user
            probable_profile_suggestion_info = webapi.get_profile_suggestion_info(req, probable_pid, recids )
        if not search_param:
            # we prefill the search with the most relevant among the names that we get from external systems
            name_variants = webapi.get_name_variants_list_from_remote_systems_names(remote_login_systems_info)
            search_param = most_relevant_name(name_variants)
        body = body + TEMPLATE.tmpl_probable_profile_suggestion(probable_profile_suggestion_info, last_viewed_profile_suggestion_info, search_param)
        shown_element_functions = dict()
        shown_element_functions['button_gen'] = TEMPLATE.tmpl_choose_profile_search_button_generator()
        shown_element_functions['new_person_gen'] = TEMPLATE.tmpl_choose_profile_search_new_person_generator()
        shown_element_functions['show_search_bar'] = TEMPLATE.tmpl_choose_profile_search_bar()
        # show in the templates the column status (if profile is bound to a user or not)
        shown_element_functions['show_status'] = True
        # pass in the templates the data of the column status (if profile is bound to a user or not)
        # we might need the data without having to show them in the column (fi merge_profiles)
        shown_element_functions['pass_status'] = True
        # show search results to the user
        body = body + self.search_box(search_param, shown_element_functions)
        body = body + TEMPLATE.tmpl_choose_profile_footer()
        title = _(' ')
        return page(title=title,
                    metaheaderadd=choose_page.get_head().encode('utf-8'),
                    body=body,
                    req=req,
                    language=ln)
@staticmethod
def _arxiv_box(req, login_info, person_id, user_pid):
    '''
    Process and collect data for the arXiv box.

    @param req: Apache request object
    @type req: Apache request object
    @param login_info: status of login in the following format:
        {'logged_in': True, 'uid': 2, 'logged_in_to_remote_systems': ['Arxiv', ...]}
    @type login_info: dict
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the logged-in user
    @type user_pid: int
    @return: data required to build the arXiv box
    @rtype: dict
    '''
    # NOTE(review): session/pinfo are not read below; the call is kept in
    # case get_session() lazily initialises the session as a side effect.
    session = get_session(req)
    pinfo = session["personinfo"]
    arxiv_data = dict()
    # if the user is not a guest and he is connected through arXiv
    arxiv_data['login'] = login_info['logged_in']
    arxiv_data['user_pid'] = user_pid
    arxiv_data['user_has_pid'] = user_pid != -1
    # True when the profile the user is logged in as is the same as the
    # profile of the page he views (the original assigned this twice with
    # identical values; the duplicate was removed).
    arxiv_data['view_own_profile'] = user_pid == person_id
    return arxiv_data
@staticmethod
def _orcid_box(arxiv_logged_in, person_id, user_pid, ulevel):
    '''
    Process and collect data for the ORCID box.

    @param arxiv_logged_in: whether the user is logged in through arXiv
    @type arxiv_logged_in: boolean
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the logged-in user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the ORCID box
    @rtype: dict
    '''
    box = {'arxiv_login': arxiv_logged_in,
           'orcids': None,
           # only admins may attach an existing ORCID to the profile
           'add_power': ulevel == "admin",
           # True when the user views his own profile page
           'own_profile': person_id == user_pid,
           'pid': person_id}
    known_orcids = webapi.get_orcids_by_pid(person_id)
    if known_orcids:
        box['orcids'] = known_orcids
    return box
@staticmethod
def _autoclaim_papers_box(req, person_id, user_pid, remote_logged_in_systems):
    '''
    Process and collect data for the autoclaim box.

    @param req: Apache request object
    @type req: Apache request object
    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the logged-in user
    @type user_pid: int
    @param remote_logged_in_systems: the remote logged in systems
    @type remote_logged_in_systems: list
    @return: data required to build the autoclaim box
    @rtype: dict
    '''
    # hidden unless the user is looking at his own profile
    box = {'hidden': True, 'person_id': person_id}
    viewing_own_profile = (person_id == user_pid)
    if viewing_own_profile:
        candidate_recids = webapi.get_remote_login_systems_recids(req, remote_logged_in_systems)
        box['hidden'] = False
        box['num_of_claims'] = len(candidate_recids)
    return box
############################################
# New autoclaim functions #
############################################
def generate_autoclaim_data(self, req, form):
    '''
    Autoclaim, for the logged-in user, the papers found in the remote
    systems he is connected to, and write back the rendered autoclaim
    box as a json response.

    @param req: Apache request object
    @type req: Apache request object
    @param form: parameters sent via Ajax request (must carry 'jsondata'
        with a 'personId' field)
    @type form: dict
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if not form.has_key('jsondata'):
        return self._fail(req, apache.HTTP_NOT_FOUND)
    json_data = json.loads(str(form['jsondata']))
    json_data = json_unicode_to_utf8(json_data)
    try:
        pid = int(json_data['personId'])
    except (KeyError, TypeError, ValueError):
        # narrowed from a bare except: only parameter problems are expected here
        raise NotImplementedError("Some error with the parameter from the Ajax request occured.")
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    # If autoclaim was done already and no new remote systems exist
    # in order to autoclaim new papers send the cached result
    if not pinfo['orcid']['import_pubs'] and pinfo['autoclaim']['res'] is not None:
        autoclaim_data = pinfo['autoclaim']['res']
        json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
        return json.dumps(json_response)
    external_pubs_association = pinfo['autoclaim']['external_pubs_association']
    autoclaim_ticket = pinfo['autoclaim']['ticket']
    ulevel = pinfo['ulevel']
    uid = getUid(req)
    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_status = webapi.get_login_info(uid, params)
    remote_systems = login_status['logged_in_to_remote_systems']
    papers_to_autoclaim = set(webapi.get_papers_from_remote_systems(remote_systems, params, external_pubs_association))
    # Fetch the author's claimed papers once (the original code issued the
    # same get_claimed_papers_of_author query twice).
    claimed_recids = set([rec for _, _, rec in get_claimed_papers_of_author(pid)])
    already_claimed_recids = claimed_recids & papers_to_autoclaim
    papers_to_autoclaim = papers_to_autoclaim - claimed_recids
    for paper in papers_to_autoclaim:
        operation_parts = {'pid': pid,
                           'action': 'assign',
                           'bibrefrec': str(paper)}
        operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_added is None:
            # In case the operation could not be created (because of an
            # erroneous bibrefrec) ignore it and continue with the rest
            continue
        webapi.add_operation_to_ticket(operation_to_be_added, autoclaim_ticket)
    additional_info = {'first_name': '', 'last_name': '', 'email': '',
                       'comments': 'Assigned automatically when autoclaim was triggered.'}
    userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=False)
    webapi.commit_operations_from_ticket(autoclaim_ticket, userinfo, uid, ulevel)
    autoclaim_data = dict()
    autoclaim_data['hidden'] = False
    autoclaim_data['person_id'] = pid
    # Operations carrying an 'execution_result' were committed successfully;
    # must be read BEFORE clean_ticket drops them.
    autoclaim_data['successfull_recids'] = set([op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket) if 'execution_result' in op]) | already_claimed_recids
    webapi.clean_ticket(autoclaim_ticket)
    # whatever survived the cleanup failed to commit
    autoclaim_data['unsuccessfull_recids'] = [op['rec'] for op in webapi.get_ticket_status(autoclaim_ticket)]
    autoclaim_data['num_of_unsuccessfull_recids'] = len(autoclaim_data['unsuccessfull_recids'])
    autoclaim_data['recids_to_external_ids'] = dict()
    for key, value in external_pubs_association.iteritems():
        ext_system, ext_id = key
        rec = value
        title = get_title_of_paper(rec)
        autoclaim_data['recids_to_external_ids'][rec] = title
    # cache the result in the session
    pinfo['autoclaim']['res'] = autoclaim_data
    if pinfo['orcid']['import_pubs']:
        pinfo['orcid']['import_pubs'] = False
    session.dirty = True
    json_response = {'resultCode': 1, 'result': TEMPLATE.tmpl_autoclaim_box(autoclaim_data, CFG_SITE_LANG, add_box=False, loading=False)}
    req.write(json.dumps(json_response))
@staticmethod
def get_params_to_check_login_info(session):
    '''
    Collect, per remote login system, the session parameters needed to
    check the current user's login status.

    @param session: the user's session
    @return: mapping of remote system name to its login-check parameters
    @rtype: dict
    '''
    def _arxiv_params(session):
        # arXiv login state lives directly in the session's user_info
        try:
            return session['user_info']
        except KeyError:
            return None

    def _orcid_params(session):
        pinfo = session['personinfo']
        try:
            pinfo['orcid']['has_orcid_id'] = bool(
                get_orcid_id_of_author(pinfo['pid'])[0][0] and
                pinfo['orcid']['import_pubs'])
        except:
            # any failure (no author row, missing keys, ...) means: no orcid id
            pinfo['orcid']['has_orcid_id'] = False
        session.dirty = True
        return pinfo['orcid']

    collectors = {'arXiv': _arxiv_params,
                  'orcid': _orcid_params}
    params = dict()
    for system_name, collect in collectors.items():
        params[system_name] = collect(session)
    return params
@staticmethod
def _claim_paper_box(person_id):
    '''
    Process and collect data for the claim paper box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @return: data required to build the claim paper box
    @rtype: dict
    '''
    canonical = webapi.get_canonical_id_from_person_id(person_id)
    return {'canonical_id': str(canonical)}
@staticmethod
def _support_box():
    '''
    Process and collect data for the support box.

    Currently the box carries no dynamic data, so an empty dict is
    returned (kept as a hook for future fields).

    @return: data required to build the support box
    @rtype: dict
    '''
    return dict()
@staticmethod
def _merge_box(person_id):
    '''
    Process and collect data for the merge box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @return: data required to build the merge box
    @rtype: dict
    '''
    merge_data = dict()
    # the canonical id lookup was performed twice in the original code;
    # it is a pure lookup, so fetch it once and reuse it
    canonical_id = webapi.get_canonical_id_from_person_id(person_id)
    # default search term is the canonical id; prefer the family name of
    # the most relevant known name variant when one exists
    search_param = canonical_id
    name_variants = [element[0] for element in webapi.get_person_names_from_id(person_id)]
    relevant_name = most_relevant_name(name_variants)
    if relevant_name:
        search_param = relevant_name.split(",")[0]
    merge_data['search_param'] = search_param
    merge_data['canonical_id'] = canonical_id
    return merge_data
@staticmethod
def _internal_ids_box(person_id, user_pid, ulevel):
    '''
    Process and collect data for the internal ids box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the logged-in user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the internal ids box
    @rtype: dict
    '''
    # the local dict was misleadingly named external_ids_data in the
    # original (copy-paste from the sibling box builder)
    internal_ids_data = dict()
    # current internal user id plus any internal ids previously attached
    internal_ids_data['uid'], internal_ids_data['old_uids'] = webapi.get_internal_user_id_from_person_id(person_id)
    internal_ids_data['person_id'] = person_id
    internal_ids_data['user_pid'] = user_pid
    internal_ids_data['ulevel'] = ulevel
    return internal_ids_data
@staticmethod
def _external_ids_box(person_id, user_pid, ulevel):
    '''
    Process and collect data for the external ids box.

    @param person_id: person id of the current page's profile
    @type person_id: int
    @param user_pid: person id of the logged-in user
    @type user_pid: int
    @param ulevel: user's level
    @type ulevel: string
    @return: data required to build the external ids box
    @rtype: dict
    '''
    # the local dict was misleadingly named internal_ids_data in the
    # original (copy-paste from the sibling box builder)
    external_ids_data = dict()
    external_ids_data['ext_ids'] = webapi.get_external_ids_from_person_id(person_id)
    external_ids_data['person_id'] = person_id
    external_ids_data['user_pid'] = user_pid
    external_ids_data['ulevel'] = ulevel
    return external_ids_data
@staticmethod
def _hepnames_box(person_id):
    # Thin wrapper: all HepNames data assembly is delegated to webapi.
    return webapi.get_hepnames(person_id)
def tickets_admin(self, req, form):
    '''
    Generate the open-RT-tickets overview page (admins only).

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    webapi.session_bareinit(req)
    # admin-level access wall
    no_access = self._page_access_permission_wall(req, req_level='admin')
    if no_access:
        return no_access
    session = get_session(req)
    pinfo = session['personinfo']
    cname = ''
    is_owner = False
    last_visited_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_visited_pid is not None:
        cname = webapi.get_canonical_id_from_person_id(last_visited_pid)
        is_owner = self._is_profile_owner(last_visited_pid)
    menu = WebProfileMenu(str(cname), "open_tickets", ln, is_owner, self._is_admin(pinfo))
    title = "Open RT tickets"
    profile_page = WebProfilePage("help", title, no_cache=True)
    profile_page.add_profile_menu(menu)
    tickets = webapi.get_persons_with_open_tickets_list()
    # The original removed and re-appended elements while iterating a copy;
    # a direct mapping is equivalent: each (pid, ticket_count) row becomes
    # [most frequent name, redirect link, pid, ticket_count].
    tickets = [[webapi.get_most_frequent_name_from_pid(int(t[0])),
                webapi.get_person_redirect_link(t[0]), t[0], t[1]]
               for t in tickets]
    content = TEMPLATE.tmpl_tickets_admin(tickets)
    content = TEMPLATE.tmpl_person_detail_layout(content)
    body = profile_page.get_wrapped_body(content)
    return page(title=title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def help(self, req, form):
    '''
    Render the static help page inside the author-profile page skin.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    argd = wash_urlargd(form, {'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    _ = gettext_set_language(ln)
    if not CFG_INSPIRE_SITE:
        return page_not_authorized(req, text=_("This page is not accessible directly."))
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    # breadcrumb/menu context: last profile the user looked at, if any
    canonical_name = ''
    owns_profile = False
    last_pid = webapi.history_get_last_visited_pid(pinfo['visit_diary'])
    if last_pid is not None:
        canonical_name = webapi.get_canonical_id_from_person_id(last_pid)
        owns_profile = self._is_profile_owner(last_pid)
    page_title = "Help page"
    help_menu = WebProfileMenu(str(canonical_name), "help", ln, owns_profile, self._is_admin(pinfo))
    profile_page = WebProfilePage("help", page_title, no_cache=True)
    profile_page.add_profile_menu(help_menu)
    body = profile_page.get_wrapped_body(TEMPLATE.tmpl_help_page())
    return page(title=page_title,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def export(self, req, form):
    '''
    Generate JSONized export of Person data.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params ('userid' and 'request' filter)
    @type form: dict
    @return: json string on success, an error token string otherwise
    '''
    argd = wash_urlargd(
        form,
        {'ln': (str, CFG_SITE_LANG),
         'request': (str, None),
         'userid': (str, None)})
    if not CFG_JSON_AVAILABLE:
        return "500_json_not_found__install_package"
    request = None
    userid = None
    if "userid" in argd and argd['userid']:
        userid = argd['userid']
    else:
        return "404_user_not_found"
    if "request" in argd and argd['request']:
        request = argd["request"]
    # find user from ID
    user_email = get_email_from_username(userid)
    # get_email_from_username echoes its argument back when no user matches
    if user_email == userid:
        return "404_user_not_found"
    uid = get_uid_from_email(user_email)
    uinfo = collect_user_info(uid)
    # find person by uid
    pid = webapi.get_pid_from_uid(uid)
    # find papers by pid that are confirmed through a human
    papers = webapi.get_papers_by_person_id(pid, 2)
    # filter by request param, e.g. arxiv
    if not request:
        return "404__no_filter_selected"
    # idiom fix: was `if not request in VALID_EXPORT_FILTERS`
    if request not in VALID_EXPORT_FILTERS:
        return "500_filter_invalid"
    if request == "arxiv":
        query = "(recid:"
        query += " OR recid:".join(papers)
        query += ") AND 037:arxiv"
        db_docs = perform_request_search(p=query, rg=0)
        nickmail = ""
        nickname = ""
        db_arxiv_ids = []
        try:
            nickname = uinfo["nickname"]
        except KeyError:
            pass
        if not nickname:
            # fall back to the email address as display name
            try:
                nickmail = uinfo["email"]
            except KeyError:
                nickmail = user_email
            nickname = nickmail
        db_arxiv_ids = get_fieldvalues(db_docs, "037__a")
        construct = {"nickname": nickname,
                     "claims": ";".join(db_arxiv_ids)}
        # sign the payload so arXiv can verify its origin
        jsondmp = json.dumps(construct)
        signature = webapi.sign_assertion("arXiv", jsondmp)
        construct["digest"] = signature
        return json.dumps(construct)
index = __call__
class WebInterfaceBibAuthorIDManageProfilePages(WebInterfaceDirectory):
_exports = ['',
'import_orcid_pubs',
'connect_author_with_hepname',
'connect_author_with_hepname_ajax',
'suggest_orcid',
'suggest_orcid_ajax']
def _lookup(self, component, path):
    '''
    This handler parses dynamic URLs:
    - /author/profile/1332 shows the page of author with id: 1332
    - /author/profile/100:5522,1431 shows the page of the author
      identified by the bibrefrec: '100:5522,1431'
    '''
    # Unknown components are treated as author identifiers and dispatched
    # to a fresh handler instance; for names listed in _exports we return
    # None (implicitly) so the default publisher resolves them normally.
    # idiom fix: was `if not component in self._exports`
    if component not in self._exports:
        return WebInterfaceBibAuthorIDManageProfilePages(component), path
def _is_profile_owner(self, pid):
    # The visited profile belongs to the handler's author iff the ids match.
    return int(pid) == self.person_id
def _is_admin(self, pinfo):
    # Admins are identified by the user level stored in the session's person info.
    return 'admin' == pinfo['ulevel']
def __init__(self, identifier=None):
    '''
    Constructor of the web interface.

    @param identifier: identifier of an author. Can be one of:
        - an author id: e.g. "14"
        - a canonical id: e.g. "J.R.Ellis.1"
        - a bibrefrec: e.g. "100:1442,155"
    @type identifier: str
    '''
    self.person_id = -1  # -1 is a non valid author identifier
    if identifier is None or not isinstance(identifier, str):
        self.original_identifier = " "
        return
    self.original_identifier = identifier
    # check if it's a canonical id: e.g. "J.R.Ellis.1"
    # (non-numeric identifiers raise ValueError and go through the
    # canonical-id lookup; numeric ones fall through to the author-id check)
    try:
        pid = int(identifier)
    except ValueError:
        pid = int(webapi.get_person_id_from_canonical_id(identifier))
        if pid >= 0:
            self.person_id = pid
            return
    # check if it's an author id: e.g. "14"
    # (only accepted when the author actually has papers)
    try:
        pid = int(identifier)
        if webapi.author_has_papers(pid):
            self.person_id = pid
            return
    except ValueError:
        pass
    # check if it's a bibrefrec: e.g. "100:1442,155"
    if webapi.is_valid_bibref(identifier):
        pid = int(webapi.get_person_id_from_paper(identifier))
        if pid >= 0:
            self.person_id = pid
            return
def __call__(self, req, form):
    '''
    Generate SSO landing/author management page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params
    @type form: dict
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    ulevel = pinfo['ulevel']
    person_id = self.person_id
    uid = getUid(req)
    pinfo['claim_in_process'] = True
    argd = wash_urlargd(form, {
        'ln': (str, CFG_SITE_LANG),
        'verbose': (int, 0)})
    debug = "verbose" in argd and argd["verbose"] > 0
    ln = argd['ln']
    _ = gettext_set_language(ln)
    if not CFG_INSPIRE_SITE or self.person_id is None:
        return page_not_authorized(req, text=_("This page is not accessible directly."))
    # -1 means the constructor could not resolve the identifier
    if person_id < 0:
        return self._error_page(req, message=("Identifier %s is not a valid person identifier or does not exist anymore!" % self.original_identifier))
    # log the visit
    webapi.history_log_visit(req, 'manage_profile', pid=person_id)
    # store the arxiv papers the user owns (only once per session)
    if uid > 0 and not pinfo['arxiv_status']:
        uinfo = collect_user_info(req)
        arxiv_papers = list()
        if 'external_arxivids' in uinfo and uinfo['external_arxivids']:
            arxiv_papers = uinfo['external_arxivids'].split(';')
        if arxiv_papers:
            webapi.add_arxiv_papers_to_author(arxiv_papers, person_id)
        pinfo['arxiv_status'] = True
    params = WebInterfaceBibAuthorIDClaimPages.get_params_to_check_login_info(session)
    login_info = webapi.get_login_info(uid, params)
    title_message = _('Profile management')
    # NOTE(review): ssl_param is computed but never used below -- confirm
    # whether it can be dropped.
    ssl_param = 0
    if req.is_https():
        ssl_param = 1
    # Create Wrapper Page Markup
    cname = webapi.get_canonical_id_from_person_id(self.person_id)
    # when no canonical id exists the raw pid is returned unchanged
    if cname == self.person_id:
        return page_not_authorized(req, text=_("This page is not accessible directly."))
    menu = WebProfileMenu(cname, "manage_profile", ln, self._is_profile_owner(pinfo['pid']), self._is_admin(pinfo))
    profile_page = WebProfilePage("manage_profile", webapi.get_longest_name_from_pid(self.person_id), no_cache=True)
    profile_page.add_profile_menu(menu)
    # client-side globals consumed by the profile page javascript
    gboxstatus = self.person_id
    gpid = self.person_id
    gNumOfWorkers = 3  # to do: read it from conf file
    gReqTimeout = 3000
    gPageTimeout = 12000
    profile_page.add_bootstrapped_data(json.dumps({
        "other": "var gBOX_STATUS = '%s';var gPID = '%s'; var gNumOfWorkers= '%s'; var gReqTimeout= '%s'; var gPageTimeout= '%s';" % (gboxstatus, gpid, gNumOfWorkers, gReqTimeout, gPageTimeout),
        "backbone": """
        (function(ticketbox) {
            var app = ticketbox.app;
            app.userops.set(%s);
            app.bodyModel.set({userLevel: "%s"});
        })(ticketbox);""" % (WebInterfaceAuthorTicketHandling.bootstrap_status(pinfo, "user"), ulevel)
    }))
    if debug:
        profile_page.add_debug_info(pinfo)
    user_pid = webapi.get_user_pid(login_info['uid'])
    person_data = webapi.get_person_info_by_pid(person_id)
    # proccess and collect data for every box [LEGACY]
    arxiv_data = WebInterfaceBibAuthorIDClaimPages._arxiv_box(req, login_info, person_id, user_pid)
    orcid_data = WebInterfaceBibAuthorIDClaimPages._orcid_box(arxiv_data['login'], person_id, user_pid, ulevel)
    claim_paper_data = WebInterfaceBibAuthorIDClaimPages._claim_paper_box(person_id)
    support_data = WebInterfaceBibAuthorIDClaimPages._support_box()
    # the ids boxes are admin-only
    ext_ids_data = None
    int_ids_data = None
    if ulevel == 'admin':
        ext_ids_data = WebInterfaceBibAuthorIDClaimPages._external_ids_box(person_id, user_pid, ulevel)
        int_ids_data = WebInterfaceBibAuthorIDClaimPages._internal_ids_box(person_id, user_pid, ulevel)
    autoclaim_data = WebInterfaceBibAuthorIDClaimPages._autoclaim_papers_box(req, person_id, user_pid, login_info['logged_in_to_remote_systems'])
    merge_data = WebInterfaceBibAuthorIDClaimPages._merge_box(person_id)
    hepnames_data = WebInterfaceBibAuthorIDClaimPages._hepnames_box(person_id)
    content = ''
    # display status for any previously attempted merge
    if pinfo['merge_info_message']:
        teaser_key, message = pinfo['merge_info_message']
        content += TEMPLATE.tmpl_merge_transaction_box(teaser_key, [message])
        # consume the one-shot message
        pinfo['merge_info_message'] = None
        session.dirty = True
    content += TEMPLATE.tmpl_profile_management(ln, person_data, arxiv_data,
                                                orcid_data, claim_paper_data,
                                                int_ids_data, ext_ids_data,
                                                autoclaim_data, support_data,
                                                merge_data, hepnames_data)
    body = profile_page.get_wrapped_body(content)
    return page(title=title_message,
                metaheaderadd=profile_page.get_head().encode('utf-8'),
                body=body.encode('utf-8'),
                req=req,
                language=ln,
                show_title_p=False)
def import_orcid_pubs(self, req, form):
    '''
    Import the DOIs attached to the author's ORCID profile, flag them for
    autoclaim, and return to the profile management page.

    @param req: Apache request object
    @type req: Apache request object
    @param form: GET/POST request params (unused)
    @type form: dict
    '''
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    orcid_info = pinfo['orcid']
    # author should have already an orcid if this method was triggered
    try:
        orcid_id = get_orcid_id_of_author(pinfo['pid'])[0][0]
    except IndexError:
        # weird, no orcid id in the database? Let's not do anything...
        orcid_id = None
    # Bug fix: do not query the ORCID service with a missing id (the
    # original called get_dois_from_orcid(None) before the guard below).
    orcid_dois = get_dois_from_orcid(orcid_id) if orcid_id is not None else None
    # TODO: what to do in case some ORCID server error occurs?
    if orcid_id is None or orcid_dois is None:
        redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
    # TODO: it would be smarter if:
    # 1. we save in the db the orcid_dois
    # 2. to expire only the external pubs box in the profile page
    webauthorapi.expire_all_cache_for_personid(pinfo['pid'])
    orcid_info['imported_pubs'] = orcid_dois
    orcid_info['import_pubs'] = True
    session.dirty = True
    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_SECURE_URL, pinfo['pid']))
def connect_author_with_hepname(self, req, form):
    '''
    Associate the given HepNames record with an author (by canonical name)
    and send the user back to the page he came from.
    '''
    argd = wash_urlargd(form, {'cname': (str, None),
                               'hepname': (str, None),
                               'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    # guard clauses: both identifiers are mandatory
    cname = argd['cname']
    if cname is None:
        return self._error_page(req, ln, "Fatal: cannot associate a hepname without a person id.")
    hepname = argd['hepname']
    if hepname is None:
        return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid hepname.")
    webapi.connect_author_with_hepname(cname, hepname)
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    last_visited_page = webapi.history_get_last_visited_url(pinfo['visit_diary'], just_page=True)
    redirect_to_url(req, "%s/author/%s/%s" % (CFG_SITE_URL, last_visited_page, cname))
def connect_author_with_hepname_ajax(self, req, form):
    '''
    Ajax endpoint associating an author (canonical name) with a HepNames record.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if not form.has_key('jsondata'):
        return self._fail(req, apache.HTTP_NOT_FOUND)
    payload = json_unicode_to_utf8(json.loads(str(form['jsondata'])))
    try:
        cname = payload['cname']
        hepname = payload['hepname']
    except:
        return self._fail(req, apache.HTTP_NOT_FOUND)
    webapi.session_bareinit(req)
    session = get_session(req)
    pinfo = session['personinfo']
    # NOTE(review): non-admins go through webapi, admins write straight to
    # the record via add_cname_to_hepname_record -- confirm this branch
    # orientation is intended.
    if self._is_admin(pinfo):
        uid = getUid(req)
        add_cname_to_hepname_record(cname, hepname, uid)
    else:
        webapi.connect_author_with_hepname(cname, hepname)
def suggest_orcid(self, req, form):
    '''
    Attach a suggested ORCID to the given author profile, then return to
    the profile management page.
    '''
    argd = wash_urlargd(form, {'orcid': (str, None),
                               'pid': (int, -1),
                               'ln': (str, CFG_SITE_LANG)})
    ln = argd['ln']
    # guard clauses: a valid person id and a well-formed ORCID are mandatory
    pid = argd['pid']
    if pid <= -1:
        return self._error_page(req, ln, "Fatal: cannot associate an orcid without a person id.")
    orcid = argd['orcid']
    if orcid is None or not is_valid_orcid(orcid):
        return self._error_page(req, ln, "Fatal: cannot associate an author with a non valid ORCiD.")
    webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
    redirect_to_url(req, "%s/author/manage_profile/%s" % (CFG_SITE_URL, pid))
def suggest_orcid_ajax(self, req, form):
    '''
    Ajax endpoint suggesting an ORCID for an author profile.

    @param req: apache request object
    @type req: apache request object
    @param form: parameters sent via Ajax request
    @type form: dict
    @return:
    @rtype: json data
    '''
    # Abort if the simplejson module isn't available
    assert CFG_JSON_AVAILABLE, "Json not available"
    # Fail if no json data exists in the Ajax request
    if not form.has_key('jsondata'):
        return self._fail(req, apache.HTTP_NOT_FOUND)
    payload = json_unicode_to_utf8(json.loads(str(form['jsondata'])))
    try:
        orcid = payload['orcid']
        pid = payload['pid']
    except:
        return self._fail(req, apache.HTTP_NOT_FOUND)
    # reject malformed ORCIDs outright
    if not is_valid_orcid(orcid):
        return self._fail(req, apache.HTTP_NOT_FOUND)
    webapi.connect_author_with_orcid(webapi.get_canonical_id_from_person_id(pid), orcid)
def _fail(self, req, code):
    # Record the HTTP error code on the request; callers use
    # `return self._fail(...)`, so the handler yields None after
    # setting the status.
    req.status = code
    return
def _error_page(self, req, ln=CFG_SITE_LANG, message=None, intro=True):
    '''
    Create a page that contains a message explaining the error.

    @param req: Apache Request Object
    @type req: Apache Request Object
    @param ln: language
    @type ln: string
    @param message: message to be displayed
    @type message: string
    @param intro: whether to prepend the generic apology paragraph
    @type intro: boolean
    '''
    _ = gettext_set_language(ln)
    if not message:
        message = "No further explanation available. Sorry."
    parts = []
    if intro:
        parts.append(_("<p>We're sorry. An error occurred while "
                       "handling your request. Please find more information "
                       "below:</p>"))
    parts.append("<p><strong>%s</strong></p>" % message)
    return page(title=_("Notice"),
                body="\n".join(parts),
                description="%s - Internal Error" % CFG_SITE_NAME,
                keywords="%s, Internal Error" % CFG_SITE_NAME,
                language=ln,
                req=req)
index = __call__
class WebInterfaceAuthorTicketHandling(WebInterfaceDirectory):
    '''
    Ajax endpoints for manipulating an author claim ticket: query or update
    its status, add/modify/remove operations, and finally commit or abort.

    Every handler expects its parameters in the 'jsondata' form field and
    answers HTTP 404 on any missing or invalid parameter.
    '''
    _exports = ['get_status',
                'update_status',
                'add_operation',
                'modify_operation',
                'remove_operation',
                'commit',
                'abort']

    @staticmethod
    def bootstrap_status(pinfo, on_ticket):
        '''
        Function used for generating get_status json bootstrapping.
        @param pinfo: person_info
        @type pinfo: dict
        @param on_ticket: ticket target
        @type on_ticket: str
        @return:
        @rtype: json data
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        author_ticketing = WebInterfaceAuthorTicketHandling()
        ticket = author_ticketing._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return "{}"
        ticket_status = webapi.get_ticket_status(ticket)
        return json.dumps(ticket_status)

    def _json_from_request(self, form):
        '''
        Decode the Ajax payload shared by every handler in this class.
        @param form: parameters sent via Ajax request
        @type form: dict
        @return: decoded json data, or None when no 'jsondata' field exists
        @rtype: dict or None
        '''
        # Abort if the simplejson module isn't available
        assert CFG_JSON_AVAILABLE, "Json not available"
        # Fail if no json data exists in the Ajax request
        if not form.has_key('jsondata'):
            return None
        json_data = json.loads(str(form['jsondata']))
        return json_unicode_to_utf8(json_data)

    def get_status(self, req, form):
        '''
        Ajax handler: write the status of the targeted ticket as json.
        @param req: apache request object
        @param form: parameters sent via Ajax request (needs 'on')
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        ticket_status = webapi.get_ticket_status(ticket)
        session.dirty = True
        req.content_type = 'application/json'
        req.write(json.dumps(ticket_status))

    def update_status(self, req, form):
        '''
        Ajax handler: refresh the status of the targeted ticket.
        @param req: apache request object
        @param form: parameters sent via Ajax request (needs 'on')
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.update_ticket_status(ticket)
        session.dirty = True

    def add_operation(self, req, form):
        '''
        Ajax handler: append an operation (pid/action/bibrefrec) to a ticket.
        @param req: apache request object
        @param form: parameters sent via Ajax request
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        uid = getUid(req)
        operation_to_be_added = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_added is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.add_operation_to_ticket(operation_to_be_added, ticket)
        session.dirty = True

    def modify_operation(self, req, form):
        '''
        Ajax handler: replace an existing operation of a ticket.
        @param req: apache request object
        @param form: parameters sent via Ajax request
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        uid = getUid(req)
        # The operation being modified need not carry a bibref yet.
        operation_to_be_modified = webapi.construct_operation(operation_parts, pinfo, uid, should_have_bibref=False)
        if operation_to_be_modified is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        operation_is_modified = webapi.modify_operation_from_ticket(operation_to_be_modified, ticket)
        if not operation_is_modified:
            # Operation couldn't be modified because it doesn't exist in the
            # ticket. Wrong parameters were given hence we should fail!
            return self._fail(req, apache.HTTP_NOT_FOUND)
        session.dirty = True

    def remove_operation(self, req, form):
        '''
        Ajax handler: delete an operation from a ticket.
        @param req: apache request object
        @param form: parameters sent via Ajax request
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            operation_parts = {'pid': int(json_data['pid']),
                               'action': json_data['action'],
                               'bibrefrec': json_data['bibrefrec']}
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        uid = getUid(req)
        operation_to_be_removed = webapi.construct_operation(operation_parts, pinfo, uid)
        if operation_to_be_removed is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        operation_is_removed = webapi.remove_operation_from_ticket(operation_to_be_removed, ticket)
        if not operation_is_removed:
            # Operation couldn't be removed because it doesn't exist in the
            # ticket. Wrong parameters were given hence we should fail!
            return self._fail(req, apache.HTTP_NOT_FOUND)
        session.dirty = True

    def commit(self, req, form):
        '''
        Ajax handler: commit all operations of a ticket on behalf of the user.
        @param req: apache request object
        @param form: parameters sent via Ajax request
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            additional_info = {'first_name': json_data.get('first_name',"Default"),
                               'last_name': json_data.get('last_name',"Default"),
                               'email': json_data.get('email',"Default"),
                               'comments': json_data['comments']}
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        ulevel = pinfo['ulevel']
        uid = getUid(req)
        user_is_guest = isGuestUser(uid)
        if not user_is_guest:
            # Prefer the authenticated user's account data over what was typed.
            try:
                additional_info['first_name'] = session['user_info']['external_firstname']
                additional_info['last_name'] = session['user_info']['external_familyname']
                additional_info['email'] = session['user_info']['email']
            except KeyError:
                additional_info['first_name'] = additional_info['last_name'] = additional_info['email'] = str(uid)
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        # When a guest is claiming we should not commit if he
        # doesn't provide us his full personal information
        strict_check = user_is_guest
        userinfo = webapi.fill_out_userinfo(additional_info, uid, req.remote_ip, ulevel, strict_check=strict_check)
        if userinfo is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.commit_operations_from_ticket(ticket, userinfo, uid, ulevel)
        session.dirty = True

    def abort(self, req, form):
        '''
        Ajax handler: abort (and for users, delete) the targeted ticket.
        @param req: apache request object
        @param form: parameters sent via Ajax request (needs 'on')
        '''
        json_data = self._json_from_request(form)
        if json_data is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        try:
            on_ticket = json_data['on']
        except:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        webapi.session_bareinit(req)
        session = get_session(req)
        pinfo = session['personinfo']
        ticket = self._get_according_ticket(on_ticket, pinfo)
        if ticket is None:
            return self._fail(req, apache.HTTP_NOT_FOUND)
        # When a user is claiming we should completely delete his ticket if he
        # aborts the claiming procedure
        delete_ticket = (on_ticket == 'user')
        webapi.abort_ticket(ticket, delete_ticket=delete_ticket)
        session.dirty = True

    def _get_according_ticket(self, on_ticket, pinfo):
        '''Map the 'on' selector ('user' or 'autoclaim') to the session ticket.'''
        ticket = None
        if on_ticket == 'user':
            ticket = pinfo['ticket']
        elif on_ticket == 'autoclaim':
            ticket = pinfo['autoclaim']['ticket']
        return ticket

    def _fail(self, req, code):
        '''Set the HTTP status code on the request and return no body.'''
        req.status = code
        return
class WebAuthorSearch(WebInterfaceDirectory):
    """
    Provides an interface to profile search using AJAX queries.
    """
    _exports = ['list',
                'details']

    # This class requires JSON libraries
    assert CFG_JSON_AVAILABLE, "[WebAuthorSearch] JSON must be enabled."

    class QueryPerson(WebInterfaceDirectory):
        # Handles /author/search_ajax/list/<query>: returns person ids as json.
        _exports = ['']

        MIN_QUERY_LENGTH = 2
        QUERY_REGEX = re.compile(r"[\w\s\.\-,@]+$", re.UNICODE)

        def __init__(self, query=None):
            # query: the name fragment taken from the URL path component.
            self.query = query

        def _lookup(self, component, path):
            # Each extra URL path component becomes the search query.
            # NOTE(review): returns None implicitly when component is exported.
            if component not in self._exports:
                return WebAuthorSearch.QueryPerson(component), path

        def __call__(self, req, form):
            # Reject too-short or non-matching queries with HTTP 400.
            if self.query is None or len(self.query) < self.MIN_QUERY_LENGTH:
                req.status = apache.HTTP_BAD_REQUEST
                return "Query too short"
            if not self.QUERY_REGEX.match(self.query):
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."
            pid_results = [{"pid": pid[0]} for pid in webapi.search_person_ids_by_name(self.query)]
            req.content_type = 'application/json'
            return json.dumps(pid_results)

        # Request for index handled by __call__
        index = __call__

    def _JSON_received(self, form):
        # True when the Ajax request carries a 'jsondata' field.
        try:
            return "jsondata" in form
        except TypeError:
            return False

    def _extract_JSON(self, form):
        # Decode the 'jsondata' payload; None when it is not valid json.
        try:
            json_data = json.loads(str(form['jsondata']))
            json_data = json_unicode_to_utf8(json_data)
            return json_data
        except ValueError:
            return None

    def _get_pid_details(self, pid):
        # Collect profile info, name variants and external ids for one pid.
        details = webapi.get_person_info_by_pid(pid)
        details.update({
            "names": [{"name": x, "paperCount": y} for x, y in webapi.get_person_names_from_id(pid)],
            "externalIds": [{x: y} for x, y in webapi.get_external_ids_from_person_id(pid).items()]
        })
        # Expose the canonical name under the shorter 'cname' key.
        details['cname'] = details.pop("canonical_name", None)
        return details

    def details(self, req, form):
        # Ajax handler: return details for every pid listed in the payload.
        if self._JSON_received(form):
            try:
                json_data = self._extract_JSON(form)
                pids = json_data['pids']
                req.content_type = 'application/json'
                details = [self._get_pid_details(pid) for pid in pids]
                return json.dumps(details)
            except (TypeError, KeyError):
                req.status = apache.HTTP_BAD_REQUEST
                return "Invalid query."
        else:
            req.status = apache.HTTP_BAD_REQUEST
            return "Incorrect query format."

    list = QueryPerson()
class WebInterfaceAuthor(WebInterfaceDirectory):
    '''
    Handles /author/* pages.
    Supplies the methods:
    /author/choose_profile
    /author/claim/
    /author/help
    /author/manage_profile
    /author/merge_profiles
    /author/profile/
    /author/search
    /author/ticket/
    '''
    _exports = ['',
                'choose_profile',
                'claim',
                'help',
                'manage_profile',
                'merge_profiles',
                'profile',
                'search',
                'search_ajax',
                'ticket']

    from invenio.webauthorprofile_webinterface import WebAuthorPages

    # Sub-directories: each attribute below serves one /author/<name> URL.
    claim = WebInterfaceBibAuthorIDClaimPages()
    profile = WebAuthorPages()
    choose_profile = claim.choose_profile
    help = claim.help
    manage_profile = WebInterfaceBibAuthorIDManageProfilePages()
    merge_profiles = claim.merge_profiles
    search = claim.search
    search_ajax = WebAuthorSearch()
    ticket = WebInterfaceAuthorTicketHandling()

    def _lookup(self, component, path):
        # Unknown path components are treated as an author identifier.
        if component not in self._exports:
            return WebInterfaceAuthor(component), path

    def __init__(self, component=None):
        # component: canonical id, numeric pid, or None for /author/ itself.
        self.path = component

    def __call__(self, req, form):
        # Resolve self.path to a profile URL, falling back to the search page.
        # NOTE(review): redirect_to_url is relied upon to end the request.
        if self.path is None or len(self.path) < 1:
            redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)

        # Check if canonical id: e.g. "J.R.Ellis.1"
        pid = get_person_id_from_canonical_id(self.path)
        if pid >= 0:
            url = "%s/author/profile/%s" % (CFG_BASE_URL, get_person_redirect_link(pid))
            redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
            return
        else:
            try:
                pid = int(self.path)
            except ValueError:
                # Not a pid either: treat the component as a search query.
                redirect_to_url(req, "%s/author/search?q=%s" % (CFG_BASE_URL, self.path))
                return
            else:
                if author_has_papers(pid):
                    # Prefer the canonical id in the redirect when it exists.
                    cid = get_person_redirect_link(pid)
                    if is_valid_canonical_id(cid):
                        redirect_id = cid
                    else:
                        redirect_id = pid
                    url = "%s/author/profile/%s" % (CFG_BASE_URL, redirect_id)
                    redirect_to_url(req, url, redirection_type=apache.HTTP_MOVED_PERMANENTLY)
                    return

        redirect_to_url(req, "%s/author/search" % CFG_BASE_URL)
        return

    index = __call__
class WebInterfacePerson(WebInterfaceDirectory):
    '''
    Handles /person/* pages.
    Supplies the methods:
    /person/welcome
    /person/update
    /person/you
    '''
    _exports = ['welcome','update', 'you']

    def welcome(self, req, form):
        # Legacy URL: forward to the profile chooser.
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def you(self, req, form):
        # Legacy URL: forward to the profile chooser.
        redirect_to_url(req, "%s/author/choose_profile" % CFG_SITE_SECURE_URL)

    def update(self, req, form):
        """
        Generate hepnames update form

        Looks up a HepNames record by email or IRN and pre-fills the update
        form with the record's fields; redirects to the HepNames collection
        when no record matches (or no selector was given).
        """
        argd = wash_urlargd(form,
                            {'ln': (str, CFG_SITE_LANG),
                             'email': (str, ''),
                             'IRN': (str, ''),
                             })
        # Retrieve info for HEP name based on email or IRN
        recids = []
        if argd['email']:
            recids = perform_request_search(p="371__m:%s" % argd['email'], cc="HepNames")
        elif argd['IRN']:
            recids = perform_request_search(p="001:%s" % argd['IRN'], cc="HepNames")
        else:
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        if not recids:
            redirect_to_url(req, "%s/collection/HepNames" % (CFG_SITE_URL))
        else:
            hepname_bibrec = get_bibrecord(recids[0])

        # Extract all info from recid that should be included in the form
        full_name = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="a")
        display_name = record_get_field_value(hepname_bibrec, tag="880", ind1="", ind2="", code="a")
        email = record_get_field_value(hepname_bibrec, tag="371", ind1="", ind2="", code="m")
        status = record_get_field_value(hepname_bibrec, tag="100", ind1="", ind2="", code="g")
        keynumber = record_get_field_value(hepname_bibrec, tag="970", ind1="", ind2="", code="a")
        # Keep only the numeric part after the dash, e.g. "INSPIRE-123" -> "123".
        try:
            keynumber = keynumber.split('-')[1]
        except IndexError:
            pass
        research_field_list = record_get_field_values(hepname_bibrec, tag="650", ind1="1", ind2="7", code="a")
        institution_list = []
        # 371 instances WITHOUT a $m subfield are institution affiliations.
        for instance in record_get_field_instances(hepname_bibrec, tag="371", ind1="", ind2=""):
            if not instance or field_get_subfield_values(instance, "m"):
                continue
            # [name, rank, start year, end year, current flag]
            institution_info = ["", "", "", "", ""]
            if field_get_subfield_values(instance, "a"):
                institution_info[0] = field_get_subfield_values(instance, "a")[0]
            if field_get_subfield_values(instance, "r"):
                institution_info[1] = field_get_subfield_values(instance, "r")[0]
            if field_get_subfield_values(instance, "s"):
                institution_info[2] = field_get_subfield_values(instance, "s")[0]
            if field_get_subfield_values(instance, "t"):
                institution_info[3] = field_get_subfield_values(instance, "t")[0]
            if field_get_subfield_values(instance, "z"):
                institution_info[4] = field_get_subfield_values(instance, "z")[0]
            institution_list.append(institution_info)
        phd_advisor_list = record_get_field_values(hepname_bibrec, tag="701", ind1="", ind2="", code="a")
        experiment_list = record_get_field_values(hepname_bibrec, tag="693", ind1="", ind2="", code="e")
        web_page = record_get_field_value(hepname_bibrec, tag="856", ind1="1", ind2="", code="u")

        # Create form and pass as parameters all the content from the record
        body = TEMPLATE.tmpl_update_hep_name(full_name, display_name, email,
                                             status, research_field_list,
                                             institution_list, phd_advisor_list,
                                             experiment_list, web_page)
        title = "HEPNames"
        return page(title=title,
                    metaheaderadd = TEMPLATE.tmpl_update_hep_name_headers(),
                    body=body,
                    req=req,
                    )
# pylint: enable=C0301
# pylint: enable=W0613
| kaplun/ops | modules/bibauthorid/lib/bibauthorid_webinterface.py | Python | gpl-2.0 | 148,959 | [
"VisIt"
] | 9bf077dfd1c2053898f17672848239df0a8eac223718bd79df28374f8b2d8efa |
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Provide classes for dealing with Training Neural Networks."""
# standard modules
import random
class TrainingExample(object):
    """Hold inputs and outputs of a training example."""

    def __init__(self, inputs, outputs, name=""):
        """Store the example's input vector, output vector and optional name."""
        self.inputs = inputs
        self.outputs = outputs
        self.name = name
class ExampleManager(object):
    """Manage a grouping of Training Examples.

    This is meant to make it easy to split a bunch of training examples
    into three types of data:

    o Training Data -- These are the data used to do the actual training
    of the network.

    o Validation Data -- These data are used to validate the network
    while training. They provide an independent method to evaluate how
    the network is doing, and make sure the network gets trained independent
    of noise in the training data set.

    o Testing Data -- The data which are used to verify how well a network
    works. They should not be used at all in the training process, so they
    provide a completely independent method of testing how well a network
    performs.
    """
    def __init__(self, training_percent=.4, validation_percent=.4):
        """Initialize the manager with the training examples.

        Arguments:

        o training_percent - The percentage of the training examples that
        should be used for training the network.

        o validation_percent - Percent of training examples for validating
        a network during training.

        Attributes:

        o train_examples - A randomly chosen set of examples for training
        purposes.

        o validation_examples - Randomly chosen set of examples for
        use in validation of a network during training.

        o test_examples - Examples for testing purposes.
        """
        assert training_percent + validation_percent <= 1.0, \
               "Training and validation percentages more than 100 percent"

        self.train_examples = []
        self.validation_examples = []
        self.test_examples = []

        self.training_percent = training_percent
        self.validation_percent = validation_percent

    def add_examples(self, training_examples):
        """Add a set of training examples to the manager.

        Arguments:

        o training_examples - A list of TrainingExamples to manage.
        """
        placement_rand = random.Random()

        # assign exact example randomly to the example types
        for example in training_examples:
            chance_num = placement_rand.random()
            # assign with the specified percentage
            if chance_num <= self.training_percent:
                self.train_examples.append(example)
            elif chance_num <= (self.training_percent +
                                self.validation_percent):
                self.validation_examples.append(example)
            else:
                self.test_examples.append(example)
| zjuchenyuan/BioWeb | Lib/Bio/NeuralNetwork/Training.py | Python | mit | 3,110 | [
"Biopython"
] | f88ef7dab2a1566c4bcc3cee1f4d688e91f4102171fe19bb027d3ad4d77c5831 |
from galaxy.util import topsort
def test_topsort_level_stability():
    """Verify topsort preserves the input order of same-level nodes."""
    data = [
        (0, 2),
        (1, 2),
        (2, 3),
        (2, 4),
        (3, 4),
        (3, 5),
        (6, 2),
    ]
    # 0, 1 and 6 have no predecessors, so they form the first level in
    # the order their edges appear in `data`.
    assert topsort.topsort_levels( data )[ 0 ] == [ 0, 1, 6 ]
    assert topsort.topsort( data ) == [ 0, 1, 6, 2, 3, 4, 5 ]
    # Swap first two edges - so 1 appears first
    swap( data, 0, 1 )
    assert topsort.topsort_levels( data )[ 0 ] == [ 1, 0, 6 ]
    assert topsort.topsort( data ) == [ 1, 0, 6, 2, 3, 4, 5 ]
    # Shouldn't really affect sorting of 1 0 6
    swap( data, 3, 4 )
    assert topsort.topsort_levels( data )[ 0 ] == [ 1, 0, 6 ]
    assert topsort.topsort( data ) == [ 1, 0, 6, 2, 3, 4, 5 ]
    # Place 0 before 6 in original list
    swap( data, 1, 6 )
    assert topsort.topsort_levels( data )[ 0 ] == [ 1, 6, 0 ]
    assert topsort.topsort( data ) == [ 1, 6, 0, 2, 3, 4, 5 ]
def test_topsort_doc():
    """Sanity check: self-edge (3, 3) keeps 3 in the ordering."""
    assert topsort.topsort([(1, 2), (3, 3)]) == [1, 3, 2]
def swap(lst, i, j):
    """Exchange the elements of ``lst`` at positions ``i`` and ``j`` in place."""
    # Tuple assignment replaces the original three-statement temp-variable swap.
    lst[i], lst[j] = lst[j], lst[i]
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/test/unit/test_topsort.py | Python | gpl-3.0 | 1,064 | [
"Galaxy"
] | 6d3292da64e22356caa152ad9612ef9e758ea5e01f3a9426b73dfb938b2dc9b6 |
#!/usr/bin/env python
from vtk import *
# Configure a reader for a comma-delimited file whose first row holds
# column names; numeric-looking columns become numeric arrays.
csv_source = vtkDelimitedTextReader()
csv_source.SetFieldDelimiterCharacters(",")
csv_source.SetHaveHeaders(True)
csv_source.SetDetectNumericColumns(True)
csv_source.SetFileName("authors.csv")
csv_source.Update()
# Fetch the resulting vtkTable and print its first 10 rows.
T = csv_source.GetOutput()
print "Table loaded from CSV file:"
T.Dump(10)
| timkrentz/SunTracker | IMU/VTK-6.2.0/Examples/Infovis/Python/delimited_text_reader1.py | Python | mit | 344 | [
"VTK"
] | f53e7fe8a161dcdb89c000285b116c1dda420324aafca85daacd20836e1dfd04 |
"""
exodus.py v 1.03 (beta) is a python wrapper of some of the exodus II library
Copyright (c) 2013, 2014, 2015, 2016 Sandia Corporation.
Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
the U.S. Government retains certain rights in this software.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
3. Neither the name of the Sandia Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from __future__ import print_function
EXODUS_PY_COPYRIGHT_AND_LICENSE = __doc__
EXODUS_PY_VERSION = "1.03 (beta-cmake)"
EXODUS_PY_COPYRIGHT = """
You are using exodus.py v 1.03 (beta-cmake), a python wrapper of some of the
exodus II library. Copyright (c) 2013,2014,2015,2016 Sandia Corporation. Under
the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S.
Government retains certain rights in this software.
"""
EXODUS_PY_CONTACTS = """
Authors: Timothy Shelton (trshelt@sandia.gov)
Michael Veilleux (mgveill@sandia.gov)
David Littlewood (djlittl@sandia.gov)
Greg Sjaardema (gdsjaar@sandia.gov)
"""
import sys
sys.dont_write_bytecode = True
oneline = "Gather from or export to Exodus II files using the Exodus II library"
# Imports and external programs
import itertools
from ctypes import *
import ctypes.util
import os
import re
def find_shared_library(name):
    """
    Does its very best and attempts to find the given library.

    Fairly wild and brute-force but it should get the job done. If it does not
    work, just set any environment variable to the folder containing the
    libraries.

    Raises Exception when the library cannot be located.
    """
    # Ctypes internal functionality. The exact actions performed depend on the
    # platform but it should be able to find libraries in the default system
    # paths.
    lib = ctypes.util.find_library(name)
    if lib:
        return lib

    folders = [
        # Prefix of the Python executable - might work for Anaconda, Homebrew,
        # and other package managers. Should be covered by the environment
        # variables below but who knows.
        os.path.dirname(os.path.dirname(sys.executable)),
        # Other paths where stuff might be located.
        "/opt/petsc"]

    # This is a bit brute force - just check all environment variables...
    for value in os.environ.values():
        folders.extend(value.split(":"))

    # Make sure its a folder. This assures that directly passing the library
    # name in some way also works.
    folders = [_i if os.path.isdir(_i) else os.path.dirname(_i)
               for _i in folders]
    # Strip trailing /bin and to get to the prefix location.
    folders = [_i if os.path.split(_i)[1] != "bin" else os.path.split(_i)[0]
               for _i in folders]
    # Search in each folder and its `/lib` subdirectory.
    folders = itertools.chain.from_iterable(
        (_i, os.path.join(_i, "lib")) for _i in folders)
    folders = [os.path.abspath(_i) for _i in folders
               if os.path.exists(_i) and os.path.isdir(_i)]

    # Remove duplicates but preserve order.
    _t = set()
    folders = [i for i in folders if not (i in _t or _t.add(i))]

    # Now try to find the library.
    # Fix: raw string (the original non-raw pattern relied on undefined
    # escape handling) and re.escape() so the library name is matched
    # literally even if it contains regex metacharacters.
    pattern = re.compile(
        r"^lib{name}\.?\d*\.(dylib|so)$".format(name=re.escape(name)))
    for folder in folders:
        _res = [i for i in os.listdir(folder) if pattern.match(i)]
        if not _res:
            continue
        return os.path.join(folder, _res[0])

    raise Exception("Could not find {name} library.".format(name=name))
def basename(file):
    """
    Extract base name from file.

    basename("test.e") -> "test"

    Returns everything before the last dot; an empty string when the
    name contains no dot.
    """
    head, _sep, _ext = file.rpartition(".")
    return head
# Find the libraries.
NETCDF_SO = find_shared_library("netcdf")
# Might have different names.
# The Exodus II shared library was historically named libexoIIv2; newer
# builds ship it as libexodus, so fall back when the first lookup fails.
try:
    EXODUS_SO = find_shared_library("exoIIv2")
except:
    EXODUS_SO = find_shared_library("exodus")

NETCDF_LIB = cdll.LoadLibrary(NETCDF_SO)
EXODUS_LIB = cdll.LoadLibrary(EXODUS_SO)
def getExodusVersion():
    """
    Parse the exodusII.h header file and return the version number or 0 if not
    found.

    Raises Exception when the header file does not exist next to the library.
    """
    # Find the include path. Assume it is located at ../include/exodusII.h
    # relative to the libexodus shared library.
    exodus_inc = os.path.join(os.path.dirname(EXODUS_SO), os.path.pardir,
                              "include", "exodusII.h")
    if not os.path.exists(exodus_inc):
        raise Exception("File '%s' does not exist." % exodus_inc)
    # Use a context manager so the header file handle is closed
    # (the original `for line in open(...)` leaked it).
    with open(exodus_inc) as header:
        for line in header:
            fields = line.split()
            if (len(fields) == 3 and
                    fields[0] == '#define' and
                    fields[1] == 'EX_API_VERS_NODOT'):
                return int(fields[2])
    return 0
EX_API_VERSION_NODOT = getExodusVersion()

EX_VERBOSE = 1 #verbose mode message flag
# The EX_READ flag value changed in Exodus API 6.08.
if (EX_API_VERSION_NODOT >= 608):
    EX_READ = 0x0002 # ex_open(): open file for reading (default)
else:
    EX_READ = 0x0000 # ex_open(): open file for reading (default)

EX_WRITE = 0x0001 # ex_open(): open existing file for appending.
EX_NOCLOBBER = 0x0004 # does not overwrite existing exodus file
EX_CLOBBER = 0x0008 # overwrites existing exodus file

MAX_STR_LENGTH = 32 # match exodus default
MAX_LINE_LENGTH = 80 # match exodus default

EX_MAPS_INT64_API = 0x2000 # all maps (id, order, ...) store int64_t values
EX_IDS_INT64_API = 0x4000 # all entity ids (sets, blocks, maps) are int64_t
EX_BULK_INT64_API = 0x8000 # all integer bulk data (not ids) are int64_t
EX_INQ_INT64_API = 0x10000 # integers passed to/from ex_inquire are int64_t

# set exodus error output option
exErrPrintMode = c_int(EX_VERBOSE)
EXODUS_LIB.ex_opts(exErrPrintMode)
def ex_inquiry(inquiry):
    """
    Map an EX_INQ_* name to its exodus II inquiry code.

    Returns -1 (EX_INQ_INVALID) for unknown names.
    """
    # create dictionary for return types
    inquiry_dictionary = {
        'EX_INQ_FILE_TYPE': 1,  # inquire EXODUS II file type
        'EX_INQ_API_VERS': 2,  # inquire API version number
        'EX_INQ_DB_VERS': 3,  # inquire database version number
        'EX_INQ_TITLE': 4,  # inquire database title
        'EX_INQ_DIM': 5,  # inquire number of dimensions
        'EX_INQ_NODES': 6,  # inquire number of nodes
        'EX_INQ_ELEM': 7,  # inquire number of elements
        'EX_INQ_ELEM_BLK': 8,  # inquire number of element blocks
        'EX_INQ_NODE_SETS': 9,  # inquire number of node sets
        'EX_INQ_NS_NODE_LEN': 10,  # inquire length of node set node list
        'EX_INQ_SIDE_SETS': 11,  # inquire number of side sets
        'EX_INQ_SS_NODE_LEN': 12,  # inquire length of side set node list
        'EX_INQ_SS_ELEM_LEN': 13,  # inquire length of side set element list
        'EX_INQ_QA': 14,  # inquire number of QA records
        'EX_INQ_INFO': 15,  # inquire number of info records
        'EX_INQ_TIME': 16,  # inquire number of time steps in the database
        'EX_INQ_EB_PROP': 17,  # inquire number of element block properties
        'EX_INQ_NS_PROP': 18,  # inquire number of node set properties
        'EX_INQ_SS_PROP': 19,  # inquire number of side set properties
        'EX_INQ_NS_DF_LEN': 20,  # inquire length of node set distribution factor list
        'EX_INQ_SS_DF_LEN': 21,  # inquire length of side set distribution factor list
        'EX_INQ_LIB_VERS': 22,  # inquire API Lib vers number
        'EX_INQ_EM_PROP': 23,  # inquire number of element map properties
        'EX_INQ_NM_PROP': 24,  # inquire number of node map properties
        'EX_INQ_ELEM_MAP': 25,  # inquire number of element maps
        'EX_INQ_NODE_MAP': 26,  # inquire number of node maps
        'EX_INQ_EDGE': 27,  # inquire number of edges
        'EX_INQ_EDGE_BLK': 28,  # inquire number of edge blocks
        'EX_INQ_EDGE_SETS': 29,  # inquire number of edge sets
        'EX_INQ_ES_LEN': 30,  # inquire length of concat edge set edge list
        'EX_INQ_ES_DF_LEN': 31,  # inquire length of concat edge set dist factor list
        'EX_INQ_EDGE_PROP': 32,  # inquire number of properties stored per edge block
        'EX_INQ_ES_PROP': 33,  # inquire number of properties stored per edge set
        'EX_INQ_FACE': 34,  # inquire number of faces
        'EX_INQ_FACE_BLK': 35,  # inquire number of face blocks
        'EX_INQ_FACE_SETS': 36,  # inquire number of face sets
        'EX_INQ_FS_LEN': 37,  # inquire length of concat face set face list
        'EX_INQ_FS_DF_LEN': 38,  # inquire length of concat face set dist factor list
        'EX_INQ_FACE_PROP': 39,  # inquire number of properties stored per face block
        'EX_INQ_FS_PROP': 40,  # inquire number of properties stored per face set
        'EX_INQ_ELEM_SETS': 41,  # inquire number of element sets
        'EX_INQ_ELS_LEN': 42,  # inquire length of concat element set element list
        'EX_INQ_ELS_DF_LEN': 43,  # inquire length of concat element set dist factor list
        'EX_INQ_ELS_PROP': 44,  # inquire number of properties stored per elem set
        'EX_INQ_EDGE_MAP': 45,  # inquire number of edge maps
        'EX_INQ_FACE_MAP': 46,  # inquire number of face maps
        'EX_INQ_COORD_FRAMES': 47,  # inquire number of coordinate frames
        'EX_INQ_DB_MAX_ALLOWED_NAME_LENGTH': 48,  # inquire size of MAX_NAME_LENGTH dimension on database
        'EX_INQ_DB_MAX_USED_NAME_LENGTH': 49,  # inquire size of MAX_NAME_LENGTH dimension on database
        'EX_INQ_READ_NAME_LENGTH': 50,  # inquire client-specified max size of returned names
        'EX_INQ_DB_FLOAT_SIZE': 51  # inquire size of floating-point values stored on database
    }
    # Single lookup with a default replaces the membership-test-then-index
    # pattern of the original.
    return inquiry_dictionary.get(inquiry, -1)  # -1 == EX_INQ_INVALID
def ex_entity_type(varType):
    """Map an exodus entity-type name (e.g. 'EX_ELEM_BLOCK') to its
    integer property code; returns -1 (EX_INVALID) for unknown names."""
    codes = {
        'EX_ELEM_BLOCK':  1,  # element block property code
        'EX_NODE_SET':    2,  # node set property code
        'EX_SIDE_SET':    3,  # side set property code
        'EX_ELEM_MAP':    4,  # element map property code
        'EX_NODE_MAP':    5,  # node map property code
        'EX_EDGE_BLOCK':  6,  # edge block property code
        'EX_EDGE_SET':    7,  # edge set property code
        'EX_FACE_BLOCK':  8,  # face block property code
        'EX_FACE_SET':    9,  # face set property code
        'EX_ELEM_SET':   10,  # element set property code
        'EX_EDGE_MAP':   11,  # edge map property code
        'EX_FACE_MAP':   12,  # face map property code
        'EX_GLOBAL':     13,  # global 'block' for variables
        'EX_NODAL':      14,  # nodal 'block' for variables
        'EX_NODE_BLOCK': 14,  # alias for EX_NODAL
        'EX_COORDINATE': 15,  # kluge so some internal wrapper functions work
    }
    # unknown names map to the invalid code
    return codes.get(varType, -1)  # -1 == EX_INVALID
#
# ----------------------------------------------------------------------
#
class exodus:
"""
exo = exodus(file_name, \\
mode=mode, \\
title=title, \\
array_type=array_type, \\
numDims=num_dims, \\
numNodes=num_nodes, \\
numElems=num_elems, \\
numBlocks=num_blocks, \\
numNodeSets=num_ns, \\
numSideSets=num_ss)
-> open exodus database for data insertion/extraction
input value(s):
<string> file_name name of exodus file to open
<string> mode 'r' for read,
'a' for append,
'w' for write
<string> title database title
<string> array_type 'ctype' for c-type arrays,
'numpy' for numpy arrays
<int> num_dims number of model dimensions ('w' mode only)
<int> num_nodes number of model nodes ('w' mode only)
<int> num_elems number of model elements ('w' mode only)
<int> num_blocks number of model element blocks ('w' mode only)
<int> num_ns number of model node sets ('w' mode only)
<int> num_ss number of model side sets ('w' mode only)
return value(s):
<exodus> exo the open exodus database
"""
#
# construction of a new exodus object
#
# --------------------------------------------------------------------
def __init__(self,file,mode=None,array_type='ctype',title=None,
             numDims=None,numNodes=None,numElems=None,numBlocks=None,
             numNodeSets=None,numSideSets=None,io_size=0):
    """Open an exodus database.
    file       : path of the exodus file
    mode       : 'r' (read, default), 'a' (append), or 'w' (write)
    array_type : 'ctype' (default) or 'numpy' -- array flavor returned
                 by the accessor methods
    title, numDims, numNodes, numElems, numBlocks, numNodeSets,
    numSideSets : static metadata, required (non-None) in 'w' mode only
    io_size    : forwarded to the underlying library open call
    """
    print(EXODUS_PY_COPYRIGHT)
    if mode is None:  # fix: identity comparison with None (PEP 8)
        mode = 'r'
    if array_type == 'numpy':
        # numpy is imported lazily so the 'ctype' path carries no
        # hard dependency on it
        import numpy as np
        self.np = np
        self.use_numpy = True
        # warnings is needed to suppress the PEP-3118 warning emitted
        # when converting c-type arrays to numpy arrays, see
        # http://stackoverflow.com/questions/4964101/pep-3118-warning-when-using-ctypes-array-as-numpy-array
        import warnings
        self.warnings = warnings
    else:
        self.use_numpy = False
    self.EXODUS_LIB = EXODUS_LIB
    self.fileName = str(file)
    self.basename = basename(file)
    self.modeChar = mode
    self.__open(io_size=io_size)
    if mode.lower() == 'w':
        info = [title,numDims,numNodes,numElems,numBlocks,
                numNodeSets,numSideSets]
        # all static metadata must be supplied when creating a database
        assert None not in info
        self.__ex_put_info(info)
        self.numTimes = c_int(0)
    else:
        self.__ex_get_info()
        self.numTimes = c_int(self.__ex_inquire_int(ex_inquiry("EX_INQ_TIME")))
#
# copy to a new database
#
# --------------------------------------------------------------------
def copy(self,fileName):
    """Copy this database to fileName (opened in 'w' mode) and return
    the copy as a new exodus object."""
    dup = exodus(fileName,
                 mode="w",
                 title=self.title(),
                 numDims=self.num_dimensions(),
                 numNodes=self.num_nodes(),
                 numElems=self.num_elems(),
                 numBlocks=self.num_blks(),
                 numNodeSets=self.num_node_sets(),
                 numSideSets=self.num_side_sets())
    self.__copy_file(dup)
    return dup
#
# general info
#
# --------------------------------------------------------------------
def title(self):
    """Return the database title string."""
    return self.Title.value
# --------------------------------------------------------------------
def version_num(self):
    """Return the exodus version that created the database, formatted
    as a 'X.YZ' string."""
    return "%1.2f" % self.version.value
# --------------------------------------------------------------------
def put_info(self, Title, numDim, numNodes, numElem, numElemBlk,
             numNodeSets, numSideSets):
    """Initialize the static metadata of the database: title, number
    of dimensions, nodes, elements, element blocks, node sets and side
    sets. Returns True."""
    db_info = [Title, numDim, numNodes, numElem,
               numElemBlk, numNodeSets, numSideSets]
    self.__ex_put_info(db_info)
    return True
# --------------------------------------------------------------------
def get_qa_records(self):
    """Return the QA records as a list of length-4 string tuples:
    (software name, descriptor/version, additional data, time stamp)."""
    return self.__ex_get_qa()
# --------------------------------------------------------------------
def put_qa_records(self,records):
    """Store QA records; each record is a length-4 tuple of strings,
    every entry shorter than MAX_STR_LENGTH. Returns True on success."""
    for record in records:
        assert len(record) == 4
        for entry in record:
            assert len(str(entry)) < MAX_STR_LENGTH
    return bool(self.__ex_put_qa(records))
# --------------------------------------------------------------------
def num_info_records(self):
    """Return the number of info records stored on the database."""
    return int(self.__ex_inquire_int(ex_inquiry("EX_INQ_INFO")))
# --------------------------------------------------------------------
def get_info_records(self):
    """Return the info records as a list of strings, one per record
    (e.g. a line of an input deck)."""
    return self.__ex_get_info_recs()
# --------------------------------------------------------------------
def put_info_records(self,info):
    """Store a list of info-record strings. Warns once if any record
    exceeds MAX_LINE_LENGTH (such records are stored incomplete).
    Returns True on success."""
    for line in info:
        if len(str(line)) > MAX_LINE_LENGTH:
            print("WARNING: max line length reached for one or more info records;")
            print(" info stored to exodus file is incomplete for these records")
            break
    return bool(self.__ex_put_info_recs(info))
# --------------------------------------------------------------------
def get_sierra_input(self,inpFileName=None):
    """Extract the Sierra input deck embedded in the info records.
    inpFileName : optional path; when given the deck is written to
                  that file and an empty list is returned, otherwise
                  the deck lines are returned as a list of strings.
    The deck is the span from the 'begin sierra' record through the
    matching 'end sierra' record (inclusive)."""
    info_recs = self.__ex_get_info_recs()
    sierra_inp = []
    begin = False
    for rec in info_recs:
        vals = rec.split()
        if not begin:  # have not reached the Sierra block yet
            if len(vals) >= 2 and vals[0].lower() == 'begin' and vals[1].lower() == "sierra":
                begin = True
        if begin:  # inside the Sierra block
            sierra_inp.append(rec)
            if len(rec) > MAX_LINE_LENGTH:
                print("WARNING: max line length reached for one or more input lines;")
                print(" input data might be incomplete for these lines")
                break
            if len(vals) >= 2 and vals[0].lower() == "end" and vals[1].lower() == "sierra":
                break  # end of the Sierra block
    if inpFileName:
        # fix: context manager guarantees the file is closed even if a
        # write fails (original leaked the handle on error)
        with open(inpFileName,"w") as fd:
            for fileLine in sierra_inp:
                fd.write(fileLine + "\n")
        return []
    else:
        return sierra_inp
#
# time steps
#
# --------------------------------------------------------------------
def num_times(self):
    """Return the number of time steps on the database."""
    return self.numTimes.value
# --------------------------------------------------------------------
def get_times(self):
    """Return the time values of all steps.
    return value(s):
      if array_type == 'ctype': <list<c_double>>     time_vals
      if array_type == 'numpy': <np_array<double>>   time_vals
    """
    # Nothing to fetch from the library when no steps are stored;
    # expose an empty list instead of an empty c-array.
    if self.numTimes.value == 0:
        self.times = []
    else:
        # populates self.times with a c_double array of all step times
        self.__ex_get_all_times()
    if self.use_numpy:
        # convert the cached c-type array to a numpy array in place
        self.times = ctype_to_numpy(self, self.times)
    return self.times
# --------------------------------------------------------------------
def put_time(self,step,value):
    """Store time `value` at the 1-based step index `step`, then
    refresh the cached time-step count. Returns True."""
    self.__ex_put_time(step,value)
    self.numTimes = c_int(self.__ex_inquire_int(ex_inquiry("EX_INQ_TIME")))
    return True
#
# coordinate system
#
# --------------------------------------------------------------------
def num_dimensions(self):
    """Return the number of model dimensions."""
    return self.numDim.value
# --------------------------------------------------------------------
def get_coord_names(self):
    """Return the coordinate-direction names (length num_dimensions()),
    e.g. ['x', 'y', 'z']."""
    return self.__ex_get_coord_names()
# --------------------------------------------------------------------
def put_coord_names(self,names):
    """Store the coordinate-direction names, one per model dimension,
    e.g. ['x', 'y', 'z']."""
    self.__ex_put_coord_names(names)
#
# nodes
#
# --------------------------------------------------------------------
def num_nodes(self):
    """Return the number of nodes in the model."""
    return self.numNodes.value
# --------------------------------------------------------------------
def get_coords(self):
    """Return (x_coords, y_coords, z_coords) for all nodes; each is a
    length num_nodes() list of c_double, or a numpy array when
    array_type == 'numpy'."""
    # fills self.coordsX/Y/Z with c_double arrays from the library
    self.__ex_get_coord()
    if self.use_numpy:
        # convert each cached c-type array to a numpy array in place
        self.coordsX = ctype_to_numpy(self, self.coordsX)
        self.coordsY = ctype_to_numpy(self, self.coordsY)
        self.coordsZ = ctype_to_numpy(self, self.coordsZ)
    return (self.coordsX,self.coordsY,self.coordsZ)
# --------------------------------------------------------------------
def get_coord(self,i):
    """Return the (x, y, z) coordinates of the single node with
    1-based index i (1 .. num_nodes())."""
    xs,ys,zs = self.__ex_get_n_coord(i,1)  # fetch exactly one node
    return (xs[0],ys[0],zs[0])
# --------------------------------------------------------------------
def put_coords(self,xCoords,yCoords,zCoords):
    """Store the global coordinates of all nodes; one list of length
    num_nodes() per direction. Returns True."""
    self.__ex_put_coord(xCoords,yCoords,zCoords)
    return True
# --------------------------------------------------------------------
def get_node_num_map(self):
    """**DEPRECATED** -- use get_node_id_map(). Return the mapping of
    exodus node *INDEX* (1-based, same order as the coordinate arrays)
    to user node *ID*, as a list of c_int."""
    return self.__ex_get_node_num_map()
# --------------------------------------------------------------------
def get_node_id_map(self):
    """Return the mapping of exodus node *INDEX* (1-based, same order
    as the coordinate arrays) to user node *ID*; a numpy array when
    array_type == 'numpy'."""
    idMap = self.__ex_get_id_map(ex_entity_type("EX_NODE_MAP"),
                                 ex_inquiry("EX_INQ_NODES"))
    if self.use_numpy:
        idMap = self.np.array(idMap)
    return idMap
# --------------------------------------------------------------------
def put_node_id_map(self,map):
    """Store the mapping of exodus node *INDEX* to user node *ID*;
    returns the underlying library status (truthy on success)."""
    return self.__ex_put_id_map(ex_entity_type("EX_NODE_MAP"),
                                ex_inquiry("EX_INQ_NODES"),map)
# --------------------------------------------------------------------
def get_node_variable_names(self):
    """Return the list of nodal variable names ([] when none exist)."""
    if self.__ex_get_var_param('n').value == 0:
        return []
    return self.__ex_get_var_names("n")
# --------------------------------------------------------------------
def get_node_variable_number(self):
    """Return the number of nodal variables in the model."""
    return self.__ex_get_variable_param(ex_entity_type("EX_NODAL")).value
# --------------------------------------------------------------------
def set_node_variable_number(self,number):
    """Set the number of nodal variables in the model; returns True."""
    self.__ex_put_variable_param(ex_entity_type("EX_NODAL"),number)
    return True
# --------------------------------------------------------------------
def put_node_variable_name(self,name,index):
    """Register nodal variable `name` at 1-based `index`; the index
    must not exceed get_node_variable_number() (typically the count is
    bumped with set_node_variable_number() first). Warns when the name
    already exists; raises on an out-of-range index. Returns True."""
    existing = self.get_node_variable_names()
    if name in existing:
        print("WARNING:node variable \"", name, "\" already exists.")
    if index > len(existing):
        raise Exception("ERROR: variable index out of range.")
    self.__ex_put_variable_name(ex_entity_type("EX_NODAL"),index,name)
    return True
# --------------------------------------------------------------------
def get_node_variable_values(self,name,step):
    """Return the values of nodal variable `name` at 1-based time step
    `step`; a numpy array when array_type == 'numpy'."""
    varId = self.get_node_variable_names().index(name) + 1
    vals = self.__ex_get_var(step,ex_entity_type("EX_NODAL"),varId,
                             0,self.num_nodes())
    if self.use_numpy:
        vals = ctype_to_numpy(self, vals)
    return vals
# --------------------------------------------------------------------
def put_node_variable_values(self,name,step,values):
    """Store `values` (length num_nodes()) for nodal variable `name`
    at 1-based time step `step`; returns True."""
    varId = self.get_node_variable_names().index(name) + 1
    self.__ex_put_var(step,ex_entity_type("EX_NODAL"),varId,
                      0,self.num_nodes(),values)
    return True
#
# elements
#
# --------------------------------------------------------------------
def num_elems(self):
    """Return the number of elements in the model."""
    return self.numElem.value
# --------------------------------------------------------------------
def get_elem_num_map(self):
    """**DEPRECATED** -- use get_elem_id_map(). Return the mapping of
    exodus element *INDEX* (1-based) to user element *ID*, as a list
    of c_int."""
    return self.__ex_get_elem_num_map()
# --------------------------------------------------------------------
def get_elem_id_map(self):
    """Return the mapping of exodus element *INDEX* (1-based) to user
    element *ID*; a numpy array when array_type == 'numpy'."""
    idMap = self.__ex_get_id_map(ex_entity_type("EX_ELEM_MAP"),
                                 ex_inquiry("EX_INQ_ELEM"))
    if self.use_numpy:
        idMap = self.np.array(idMap)
    return idMap
# --------------------------------------------------------------------
def put_elem_id_map(self,map):
    """Store the mapping of exodus element *INDEX* to user element
    *ID*; returns the underlying library status (truthy on success)."""
    return self.__ex_put_id_map(ex_entity_type("EX_ELEM_MAP"),
                                ex_inquiry("EX_INQ_ELEM"),map)
# --------------------------------------------------------------------
def get_elem_order_map(self):
    """Return the mapping of exodus element index to the application's
    optimal ordering; a numpy array when array_type == 'numpy'."""
    orderMap = self.__ex_get_elem_order_map()
    if self.use_numpy:
        orderMap = ctype_to_numpy(self, orderMap)
    return orderMap
#
# element blocks
#
# --------------------------------------------------------------------
def num_blks(self):
    """Return the number of element blocks in the model."""
    return self.numElemBlk.value
# --------------------------------------------------------------------
def get_elem_blk_ids(self):
    """Return element-block *ID*s ordered by block *INDEX* (1-based);
    a numpy array when array_type == 'numpy'."""
    self.__ex_get_elem_blk_ids()  # populates self.elemBlkIds
    blkIds = self.elemBlkIds
    if self.use_numpy:
        blkIds = ctype_to_numpy(self, blkIds)
    return blkIds
# --------------------------------------------------------------------
def get_elem_blk_name(self,id):
    """Return the name of the element block with block *ID* `id`
    (not block *INDEX*)."""
    return self.__ex_get_name(ex_entity_type("EX_ELEM_BLOCK"),id)
# --------------------------------------------------------------------
def put_elem_blk_name(self,id,name):
    """Store `name` for the element block with block *ID* `id`
    (not block *INDEX*)."""
    self.__ex_put_name(ex_entity_type("EX_ELEM_BLOCK"),id,name)
# --------------------------------------------------------------------
def get_elem_blk_names(self):
    """Return all element-block names, ordered by block *INDEX*."""
    return self.__ex_get_names(ex_entity_type("EX_ELEM_BLOCK"),
                               ex_inquiry("EX_INQ_ELEM_BLK"))
# --------------------------------------------------------------------
def put_elem_blk_names(self,names):
    """Store all element-block names, ordered by block *INDEX*."""
    self.__ex_put_names(ex_entity_type("EX_ELEM_BLOCK"),
                        ex_inquiry("EX_INQ_ELEM_BLK"),names)
# --------------------------------------------------------------------
def elem_blk_info(self,id):
    """Return (elem_type, num_blk_elems, num_elem_nodes,
    num_elem_attrs) for the element block with *ID* `id`."""
    blk = self.__ex_get_elem_block(id)
    # unwrap the ctypes values: (type, elem count, nodes/elem, attrs/elem)
    return blk[0].value, blk[1].value, blk[2].value, blk[3].value
# --------------------------------------------------------------------
def put_elem_blk_info(self,id,elemType,numElems,
                      numNodesPerElem,numAttrsPerElem):
    """Store the element block *ID* and its info: element type string
    (all caps, e.g. 'HEX8'), element count, nodes per element, and
    attributes per element."""
    self.__ex_put_elem_block(id,elemType,numElems,
                             numNodesPerElem,numAttrsPerElem)
# --------------------------------------------------------------------
def put_concat_elem_blk(self,elemBlkIDs, elemType, numElemThisBlk,
                        numNodesPerElem,numAttr,defineMaps):
    """Store block info for every element block at once; per-block
    lists of block *ID*s, element types, element counts, nodes per
    element, and attributes per element (see put_elem_blk_info).
    Returns True."""
    self.__ex_put_concat_elem_blk(elemBlkIDs,elemType,numElemThisBlk,
                                  numNodesPerElem,numAttr,defineMaps)
    return True
# --------------------------------------------------------------------
def get_elem_connectivity(self,id):
    """Return (elem_conn, num_blk_elems, num_elem_nodes) for the block
    with *ID* `id`; elem_conn lists node *INDICES* cycling through all
    nodes of each element in turn (a numpy array when
    array_type == 'numpy')."""
    (conn,numElems,nodesPerElem) = self.__ex_get_elem_conn(id)
    if self.use_numpy:
        conn = ctype_to_numpy(self, conn)
    return (conn,numElems.value,nodesPerElem.value)
# --------------------------------------------------------------------
def put_elem_connectivity(self,id,connectivity):
    """Store the nodal connectivity for the element block with *ID*
    `id`; connectivity lists node *INDICES* element by element and its
    length must equal num_elems * nodes_per_elem for the block."""
    _,numElems,nodesPerElem,_ = self.elem_blk_info(id)
    assert len(connectivity) == (numElems * nodesPerElem)
    self.__ex_put_elem_conn(id,connectivity)
# --------------------------------------------------------------------
def get_elem_attr(self,elemBlkID):
    """Return the attribute values of all elements in block
    `elemBlkID`, cycling through all attributes of each element in
    turn (ordered per get_element_attribute_names())."""
    return self.__ex_get_elem_attr(elemBlkID)
# --------------------------------------------------------------------
def put_elem_attr(self,elemBlkID,Attr):
    """Store attribute values `Attr` for all elements in block
    `elemBlkID` (same ordering as get_elem_attr())."""
    self.__ex_put_elem_attr(elemBlkID,Attr)
# --------------------------------------------------------------------
def elem_type(self,id):
    """Return the element type string (e.g. 'HEX8') of the element
    block with *ID* `id`."""
    (elemType,_,_,_) = self.__ex_get_elem_block(id)
    return elemType.value
# --------------------------------------------------------------------
def num_attr(self,id):
    """Return the number of attributes per element for the element
    block with *ID* `id`."""
    (_,_,_,numAttr) = self.__ex_get_elem_block(id)
    return numAttr.value
# --------------------------------------------------------------------
def num_elems_in_blk(self,id):
"""
num_blk_elems = exo.num_elems_in_blk(elem_blk_id)
-> get the number of elements in an element block
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
return value(s):
<int> num_blk_elems
"""
(elemType,numElem,nodesPerElem,numAttr) = self.__ex_get_elem_block(id)
return numElem.value
# --------------------------------------------------------------------
def num_nodes_per_elem(self,id):
"""
num_elem_nodes = exo.num_nodes_per_elem(elem_blk_id)
-> get the number of nodes per element for an element block
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
return value(s):
<int> num_elem_nodes
"""
(elemType,numElem,nodesPerElem,numAttr) = self.__ex_get_elem_block(id)
return nodesPerElem.value
# --------------------------------------------------------------------
def get_element_variable_truth_table(self,blockId=None):
"""
evar_truth_tab = \\
exo.get_element_variable_truth_table(blockID=elem_blk_id)
-> gets a truth table indicating which variables are defined for
a block; if elem_blk_id is not passed, then a concatenated
truth table for all blocks is returned with variable index
cycling faster than block index
input value(s):
(optional) <int> elem_blk_id element block *ID* (not *INDEX*)
return value(s):
<list<bool>> evar_truth_tab True for variable defined in block,
False otherwise
"""
truthTable = self.__ex_get_elem_var_tab()
if blockId != None:
self.get_elem_blk_ids()
assert blockId in list(self.elemBlkIds)
indx = list(self.elemBlkIds).index(blockId)
numVars = self.__ex_get_var_param("e").value
start,stop = (indx * numVars, (indx + 1) * numVars)
return truthTable[start:stop]
return truthTable
# --------------------------------------------------------------------
def set_element_variable_truth_table(self,table):
"""
status = \\
exo.set_element_variable_truth_table(evar_truth_tab)
-> stores a truth table indicating which variables are defined for
all blocks and all element variables; variable index cycles
faster than block index
input value(s):
<list<bool>> evar_truth_tab True for variable defined in block,
False otherwise
return value(s):
<bool> status True = successful execution
"""
self.get_elem_blk_ids()
numBlks = len(self.elemBlkIds)
numVars = int(self.__ex_get_var_param("e").value)
assert len(table) == (numBlks * numVars)
return self.__ex_put_elem_var_tab(table)
# --------------------------------------------------------------------
def get_element_variable_values(self,blockId,name,step):
"""
evar_vals = \\
exo.get_element_variable_values(elem_blk_id, \\
evar_name, \\
time_step)
-> get list of element variable values for a specified element
block, element variable name, and time step
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
<string> evar_name name of element variable
<int> time_step 1-based index of time step
return value(s):
if array_type == 'ctype':
<list<c_double>> evar_vals
if array_type == 'numpy':
<np_array<double>> evar_vals
"""
names = self.get_element_variable_names()
var_id = names.index(name) + 1
ebType = ex_entity_type("EX_ELEM_BLOCK")
numVals = self.num_elems_in_blk(blockId)
values = self.__ex_get_var(step,ebType,var_id,blockId,numVals)
if self.use_numpy:
values = ctype_to_numpy(self, values)
return values
# --------------------------------------------------------------------
def put_element_variable_values(self,blockId,name,step,values):
"""
status = \\
exo.put_element_variable_values(elem_blk_id, \\
evar_name, \\
time_step, \\
evar_vals)
-> store a list of element variable values for a specified element
block, element variable name, and time step
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
<string> evar_name name of element variable
<int> time_step 1-based index of time step
<list<float>> evar_vals
return value(s):
<bool> status True = successful execution
"""
names = self.get_element_variable_names()
var_id = names.index(name) + 1
ebType = ex_entity_type("EX_ELEM_BLOCK")
numVals = self.num_elems_in_blk(blockId)
self.__ex_put_var(step,ebType,var_id,blockId,numVals,values)
return True
# --------------------------------------------------------------------
def get_element_variable_number(self):
"""
num_evars = exo.get_element_variable_number()
-> get the number of element variables in the model
return value(s):
<int> num_evars
"""
ebType = ex_entity_type("EX_ELEM_BLOCK")
num = self.__ex_get_variable_param(ebType)
return num.value
# --------------------------------------------------------------------
def set_element_variable_number(self,number):
"""
status = exo.set_element_variable_number(num_evars)
-> update the number of element variables in the model
input value(s):
<int> num_evars
return value(s):
<bool> status True = successful execution
"""
ebType = ex_entity_type("EX_ELEM_BLOCK")
self.__ex_put_variable_param(ebType,number)
return True
# --------------------------------------------------------------------
def get_element_variable_names(self):
"""
evar_names = exo.get_element_variable_names()
-> get the list of element variable names in the model
return value(s):
<list<string>> evar_names
"""
if self.__ex_get_var_param("e").value == 0:
return []
return self.__ex_get_var_names("e")
# --------------------------------------------------------------------
def put_element_variable_name(self,name,index):
"""
status = exo.put_element_variable_name(evar_name, evar_index)
-> add the name and index of a new element variable to the model;
element variable indexing goes from 1 to
exo.get_element_variable_number()
input value(s):
<string> evar_name name of new element variable
<int> evar_index 1-based index of new element variable
return value(s):
<bool> status True = successful execution
NOTE:
this method is often called within the following sequence:
>>> num_evars = exo.get_element_variable_number()
>>> new_evar_index = num_evars + 1
>>> num_evars += 1
>>> exo.set_element_variable_number(num_evars)
>>> exo.put_element_variable_name("new_evar", new_evar_index)
"""
ebType = ex_entity_type("EX_ELEM_BLOCK")
EBvarNames = self.get_element_variable_names()
if name in EBvarNames:
print("WARNING:element variable \"", name, "\" already exists.")
if index > len(EBvarNames):
print("index", index, "len", len(EBvarNames))
raise Exception("ERROR: variable index out of range.")
self.__ex_put_variable_name(ebType,index,name)
return True
# --------------------------------------------------------------------
def get_element_attribute_names(self,blkId):
"""
attr_names = exo.get_element_attribute_names(elem_blk_id)
-> get the list of element attribute names for a block
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
return value(s):
<list<string>> attr_names
"""
names = self.__ex_get_elem_attr_names(blkId)
return list(names)
# --------------------------------------------------------------------
def put_element_attribute_names(self,blkId,names):
"""
status = exo.put_element_attribute_names(elem_blk_id, attr_names)
-> store the list of element attribute names for a block
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
<list<string>> attr_names
return value(s):
<bool> status True = successful execution
"""
return self.__ex_put_elem_attr_names(blkId,names)
# --------------------------------------------------------------------
def get_element_property_names(self):
"""
eprop_names = exo.get_element_property_names()
-> get the list of element property names for all element blocks
in the model
return value(s):
<list<string>> eprop_names
"""
names = []
ebType = ex_entity_type("EX_ELEM_BLOCK")
inqType = "EX_INQ_EB_PROP"
names = self.__ex_get_prop_names(ebType,inqType)
return list(names)
# --------------------------------------------------------------------
def get_element_property_value(self,id,name):
"""
eprop_val = exo.get_element_property_value(elem_blk_id, eprop_name)
-> get element property value (an integer) for a specified element
block and element property name
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
<string> eprop_name
return value(s):
<int> eprop_val
"""
ebType = ex_entity_type("EX_ELEM_BLOCK")
propVal = self.__ex_get_prop(ebType,id,name)
return int(propVal)
# --------------------------------------------------------------------
def put_element_property_value(self,id,name,value):
"""
status = exo.put_element_property_value(elem_blk_id, \\
eprop_name, \\
eprop_val)
-> store an element property name and its integer value for an
element block
input value(s):
<int> elem_blk_id element block *ID* (not *INDEX*)
<string> eprop_name
<int> eprop_val
return value(s):
<bool> status True = successful execution
"""
ebType = ex_entity_type("EX_ELEM_BLOCK")
if self.__ex_put_prop(ebType,id,name,value):
return True
else:
return False
#
# nodesets
#
# --------------------------------------------------------------------
def num_node_sets(self):
"""
num_node_sets = exo.num_node_sets()
-> get the number of node sets in the model
return value(s):
<int> num_node_sets
"""
return self.numNodeSets.value
# --------------------------------------------------------------------
def get_node_set_ids(self):
"""
node_set_ids = exo.get_node_set_ids()
-> get mapping of exodus node set index to user- or application-
defined node set id; node_set_ids is ordered
by the *INDEX* ordering, a 1-based system going from
1 to exo.num_node_sets(), used by exodus for storage
and input/output of array data stored on the node sets; a
user or application can optionally use a separate node set
*ID* numbering system, so the node_set_ids array points to the
node set *ID* for each node set *INDEX*
return value(s):
if array_type == 'ctype':
<list<int>> node_set_ids
if array_type == 'numpy':
<np_array<int>> node_set_ids
"""
self.__ex_get_node_set_ids()
nodeSetIds = list(self.nodeSetIds)
if self.use_numpy:
nodeSetIds = self.np.array(nodeSetIds)
return nodeSetIds
# --------------------------------------------------------------------
def get_node_set_name(self,id):
"""
node_set_name = exo.get_node_set_name(node_set_id)
-> get the name of a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
return value(s):
<string> node_set_name
"""
objType = ex_entity_type("EX_NODE_SET")
nodeSetName = self.__ex_get_name(objType,id)
return nodeSetName
# --------------------------------------------------------------------
def put_node_set_name(self,id,name):
"""
exo.put_node_set_name(node_set_id, node_set_name)
-> store the name of a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<string> node_set_name
"""
objType = ex_entity_type("EX_NODE_SET")
self.__ex_put_name(objType,id,name)
# --------------------------------------------------------------------
def get_node_set_names(self):
"""
node_set_names = exo.get_node_set_names()
-> get a list of all node set names ordered by node set *INDEX*;
(see description of get_node_set_ids() for explanation of the
difference between node set *ID* and node set *INDEX*)
return value(s):
<list<string>> node_set_names
"""
objType = ex_entity_type("EX_NODE_SET")
inqType = ex_inquiry("EX_INQ_NODE_SETS")
nodeSetNames = self.__ex_get_names(objType,inqType)
return nodeSetNames
# --------------------------------------------------------------------
def put_node_set_names(self,names):
"""
exo.put_node_set_names(node_set_names)
-> store a list of all node set names ordered by node set *INDEX*;
(see description of get_node_set_ids() for explanation of the
difference between node set *ID* and node set *INDEX*)
input value(s):
<list<string>> node_set_names
"""
objType = ex_entity_type("EX_NODE_SET")
inqType = ex_inquiry("EX_INQ_NODE_SETS")
self.__ex_put_names(objType,inqType,names)
# --------------------------------------------------------------------
def num_nodes_in_node_set(self,id):
"""
num_ns_nodes = exo.num_nodes_in_node_set(node_set_id)
-> get the number of nodes in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
return value(s):
<int> num_ns_nodes
"""
node_set_nodes = self.get_node_set_nodes(id)
return len(node_set_nodes)
# --------------------------------------------------------------------
def get_node_set_nodes(self,id):
"""
ns_nodes = exo.get_node_set_nodes(node_set_id)
-> get the list of node *INDICES* in a node set
(see exo.get_node_id_map() for explanation of node *INDEX*
versus node *ID*)
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
return value(s):
if array_type == 'ctype':
<list<int>> ns_nodes
if array_type == 'numpy':
<np_array<int>> ns_nodes
"""
node_set_ids = self.get_node_set_ids()
assert id in node_set_ids
node_set_nodes = self.__ex_get_node_set(id)
node_set_nodes = list(node_set_nodes)
if self.use_numpy:
node_set_nodes = self.np.array(node_set_nodes)
return node_set_nodes
# --------------------------------------------------------------------
def put_node_set(self,id,nodeSetNodes):
"""
exo.put_node_set(node_set_id, ns_nodes)
-> store a node set by its id and the list of node *INDICES* in
the node set (see exo.get_node_id_map() for explanation of node
*INDEX* versus node *ID*)
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<list<int>> ns_nodes
"""
self.__ex_put_node_set(id,nodeSetNodes)
# --------------------------------------------------------------------
def get_node_set_dist_facts(self,id):
"""
ns_dist_facts = exo.get_node_set_dist_facts(node_set_id)
-> get the list of distribution factors for nodes in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
return value(s):
if array_type == 'ctype':
<list<float>> ns_dist_facts a list of distribution factors,
e.g. nodal 'weights'
if array_type == 'numpy':
<np_array<double>> ns_dist_facts a list of distribution
factors, e.g. nodal
'weights'
"""
node_set_dfs = self.__ex_get_node_set_dist_fact(id)
node_set_dfs = list(node_set_dfs)
if self.use_numpy:
node_set_dfs = self.np.array(node_set_dfs)
return node_set_dfs
# --------------------------------------------------------------------
def put_node_set_dist_fact(self,id,nodeSetDistFact):
"""
exo.put_node_set_dist_fact(node_set_id, ns_dist_facts)
-> store the list of distribution factors for nodes in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<list<float>> ns_dist_facts a list of distribution factors,
e.g. nodal 'weights'
"""
self.__ex_put_node_set_dist_fact(id,nodeSetDistFact)
# --------------------------------------------------------------------
def get_node_set_variable_number(self):
"""
num_nsvars = exo.get_node_set_variable_number()
-> get the number of node set variables in the model
return value(s):
<int> num_nsvars
"""
nsType = ex_entity_type("EX_NODE_SET")
num = self.__ex_get_variable_param(nsType)
return num.value
# --------------------------------------------------------------------
def set_node_set_variable_number(self,number):
"""
status = exo.set_node_set_variable_number(num_nsvars)
-> update the number of node set variables in the model
input value(s):
<int> num_nsvars
return value(s):
<bool> status True = successful execution
"""
nsType = ex_entity_type("EX_NODE_SET")
self.__ex_put_variable_param(nsType,number)
return True
# --------------------------------------------------------------------
def get_node_set_variable_truth_table(self,nodeSetId=None):
"""
nsvar_truth_tab = \\
exo.get_node_set_variable_truth_table(nodeSetID=node_set_id)
-> gets a truth table indicating which variables are defined for
a node set; if node_set_id is not passed, then a concatenated
truth table for all node sets is returned with variable index
cycling faster than node set index
input value(s):
(optional) <int> node_set_id node set *ID* (not *INDEX*)
return value(s):
<list<bool>> nsvar_truth_tab True if variable is defined in
a node set, False otherwise
"""
truthTable = self.__ex_get_nset_var_tab()
if nodeSetId != None:
self.get_node_set_ids()
assert nodeSetId in list(self.nodeSetIds)
indx = list(self.nodeSetIds).index(nodeSetId)
numVars = self.__ex_get_var_param("m").value
start,stop = (indx * numVars, (indx + 1) * numVars)
return truthTable[start:stop]
return truthTable
# --------------------------------------------------------------------
def set_node_set_variable_truth_table(self,table):
"""
status = \\
exo.set_node_set_variable_truth_table(nsvar_truth_tab)
-> stores a truth table indicating which variables are defined for
all node sets and all node set variables; variable index cycles
faster than node set index
input value(s):
<list<bool>> nsvar_truth_tab True if variable is defined in
a node set, False otherwise
return value(s):
<bool> status True = successful execution
"""
self.get_node_set_ids()
numBlks = len(self.nodeSetIds)
numVars = int(self.__ex_get_var_param("m").value)
assert len(table) == (numBlks * numVars)
return self.__ex_put_nset_var_tab(table)
# --------------------------------------------------------------------
def get_node_set_variable_names(self):
"""
nsvar_names = exo.get_node_set_variable_names()
-> get the list of node set variable names in the model
return value(s):
<list<string>> nsvar_names
"""
names = []
nsType = ex_entity_type("EX_NODE_SET")
num_vars = self.__ex_get_variable_param(nsType)
for varid in range(num_vars.value):
varid += 1
name = self.__ex_get_variable_name(nsType,varid)
names.append(name.value)
return names
# --------------------------------------------------------------------
def put_node_set_variable_name(self,name,index):
"""
status = exo.put_node_set_variable_name(nsvar_name, nsvar_index)
-> add the name and index of a new node set variable to the model;
node set variable indexing goes from 1 to
exo.get_node_set_variable_number()
input value(s):
<string> nsvar_name name of new node set variable
<int> nsvar_index 1-based index of new node set variable
return value(s):
<bool> status True = successful execution
NOTE:
this method is often called within the following sequence:
>>> num_nsvars = exo.get_node_set_variable_number()
>>> new_nsvar_index = num_nsvars + 1
>>> num_nsvars += 1
>>> exo.set_node_set_variable_number(num_nsvars)
>>> exo.put_node_set_variable_name("new_nsvar", new_nsvar_index)
"""
nsType = ex_entity_type("EX_NODE_SET")
NSvarNames = self.get_node_set_variable_names()
if name in NSvarNames:
print("WARNING: Node set variable \"", name, "\" already exists.")
if index > len(NSvarNames):
raise Exception("ERROR: variable index out of range.")
self.__ex_put_variable_name(nsType,index,name)
return True
# --------------------------------------------------------------------
def get_node_set_variable_values(self,id,name,step):
"""
nsvar_vals = \\
exo.get_node_set_variable_values(node_set_id, \\
nsvar_name, \\
time_step)
-> get list of node set variable values for a specified node
set, node set variable name, and time step; the list has
one variable value per node in the set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<string> nsvar_name name of node set variable
<int> time_step 1-based index of time step
return value(s):
if array_type == 'ctype':
<list<c_double>> nsvar_vals
if array_type == 'numpy':
<np_array<double>> nsvar_vals
"""
names = self.get_node_set_variable_names()
var_id = names.index(name) + 1
values = self.__ex_get_nset_var(step,var_id,id)
if self.use_numpy:
values = ctypes_to_numpy(self, values)
return values
# --------------------------------------------------------------------
def put_node_set_variable_values(self,id,name,step,values):
"""
status = \\
exo.put_node_set_variable_values(node_set_id, \\
nsvar_name, \\
time_step, \\
nsvar_vals)
-> store a list of node set variable values for a specified node
set, node set variable name, and time step; the list has one
variable value per node in the set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<string> nsvar_name name of node set variable
<int> time_step 1-based index of time step
<list<float>> nsvar_vals
return value(s):
<bool> status True = successful execution
"""
names = self.get_node_set_variable_names()
var_id = names.index(name) + 1
self.__ex_put_nset_var(step,var_id,id,values)
return True
# --------------------------------------------------------------------
def get_all_node_set_params(self):
"""
tot_num_ns_nodes, \\
tot_num_ns_dist_facts = exo.get_all_node_set_params()
-> get total number of nodes and distribution factors (e.g. nodal
'weights') combined among all node sets
return value(s):
<int> tot_num_ns_nodes
<int> tot_num_ns_dist_facts
"""
self.__ex_get_node_set_ids()
totNumSetNodes, totNumSetDistFacts = 0, 0
for nodeSetId in self.nodeSetIds:
(numSetNodes,numSetDistFacts) = self.__ex_get_node_set_param(int(nodeSetId))
totNumSetNodes += numSetNodes
totNumSetDistFacts += numSetDistFacts
return (totNumSetNodes, totNumSetDistFacts)
# --------------------------------------------------------------------
def get_node_set_params(self,id):
"""
num_ns_nodes, num_ns_dist_facts = \\
exo.get_node_set_params(node_set_id)
-> get number of nodes and distribution factors (e.g. nodal
'weights') in a node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
return value(s):
<int> num_ns_nodes
<int> num_ns_dist_facts
"""
(numSetNodes,numSetDistFacts) = self.__ex_get_node_set_param(int(id))
return (numSetNodes, numSetDistFacts)
# --------------------------------------------------------------------
def put_node_set_params(self,id,numSetNodes,numSetDistFacts=None):
"""
exo.put_node_set_params(node_set_id, \\
num_ns_nodes, \\
num_ns_dist_facts)
-> initialize a new node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<int> num_ns_nodes number of nodes to be added to set
<int> num_ns_dist_facts (optional) number of distribution
factors (e.g. nodal 'weights') --
must be equal to zero or
num_ns_nodes
"""
if numSetDistFacts == None: numSetDistFacts = numSetNodes
assert numSetDistFacts == 0 or numSetDistFacts == numSetNodes
self.__ex_put_node_set_param(id,numSetNodes,numSetDistFacts)
# --------------------------------------------------------------------
def get_node_set_property_names(self):
"""
nsprop_names = exo.get_node_set_property_names()
-> get the list of node set property names for all node sets in
the model
return value(s):
<list<string>> nsprop_names
"""
names = []
nsType = ex_entity_type("EX_NODE_SET")
inqType = "EX_INQ_NS_PROP"
names = self.__ex_get_prop_names(nsType,inqType)
return list(names)
# --------------------------------------------------------------------
def get_node_set_property_value(self,id,name):
"""
nsprop_val = \\
exo.get_node_set_property_value(node_set_id, nsprop_name)
-> get node set property value (an integer) for a specified node
set and node set property name
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<string> nsprop_name
return value(s):
<int> nsprop_val
"""
nsType = ex_entity_type("EX_NODE_SET")
propVal = self.__ex_get_prop(nsType,id,name)
return int(propVal)
# --------------------------------------------------------------------
def put_node_set_property_value(self,id,name,value):
"""
status = exo.put_node_set_property_value(node_set_id, \\
nsprop_name, \\
nsprop_val)
-> store a node set property name and its integer value for a
node set
input value(s):
<int> node_set_id node set *ID* (not *INDEX*)
<string> nsprop_name
<int> nsprop_val
return value(s):
<bool> status True = successful execution
"""
nsType = ex_entity_type("EX_NODE_SET")
if self.__ex_put_prop(nsType,id,name,value):
return True
else:
return False
#
# sidesets
#
# --------------------------------------------------------------------
def num_side_sets(self):
"""
num_side_sets = exo.num_side_sets()
-> get the number of side sets in the model
return value(s):
<int> num_side_sets
"""
return self.numSideSets.value
# --------------------------------------------------------------------
def get_side_set_ids(self):
"""
side_set_ids = exo.get_side_set_ids()
-> get mapping of exodus side set index to user- or application-
defined side set id; side_set_ids is ordered
by the *INDEX* ordering, a 1-based system going from
1 to exo.num_side_sets(), used by exodus for storage
and input/output of array data stored on the side sets; a
user or application can optionally use a separate side set
*ID* numbering system, so the side_set_ids array points to the
side set *ID* for each side set *INDEX*
return value(s):
if array_type == 'ctype':
<list<int>> side_set_ids
if array_type == 'numpy':
<np_array<int>> side_set_ids
"""
self.__ex_get_side_set_ids()
sideSetIds = list(self.sideSetIds)
if self.use_numpy:
sideSetIds = self.np.array(sideSetIds)
return sideSetIds
# --------------------------------------------------------------------
def get_side_set_name(self,id):
"""
side_set_name = exo.get_side_set_name(side_set_id)
-> get the name of a side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
<string> side_set_name
"""
objType = ex_entity_type("EX_SIDE_SET")
sideSetName = self.__ex_get_name(objType,id)
return sideSetName
# --------------------------------------------------------------------
def put_side_set_name(self,id,name):
"""
exo.put_side_set_name(side_set_id, side_set_name)
-> store the name of a side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<string> side_set_name
"""
objType = ex_entity_type("EX_SIDE_SET")
self.__ex_put_name(objType,id,name)
# --------------------------------------------------------------------
def get_side_set_names(self):
"""
side_set_names = exo.get_side_set_names()
-> get a list of all side set names ordered by side set *INDEX*;
(see description of get_side_set_ids() for explanation of the
difference between side set *ID* and side set *INDEX*)
return value(s):
<list<string>> side_set_names
"""
objType = ex_entity_type("EX_SIDE_SET")
inqType = ex_inquiry("EX_INQ_SIDE_SETS")
sideSetNames = self.__ex_get_names(objType,inqType)
return sideSetNames
# --------------------------------------------------------------------
def put_side_set_names(self,names):
"""
exo.put_side_set_names(side_set_names)
-> store a list of all side set names ordered by side set *INDEX*;
(see description of get_side_set_ids() for explanation of the
difference between side set *ID* and side set *INDEX*)
input value(s):
<list<string>> side_set_names
"""
objType = ex_entity_type("EX_SIDE_SET")
inqType = ex_inquiry("EX_INQ_SIDE_SETS")
self.__ex_put_names(objType,inqType,names)
# --------------------------------------------------------------------
def num_faces_in_side_set(self,id):
"""
num_ss_faces = exo.num_faces_in_side_set(side_set_id)
-> get the number of faces in a side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
<int> num_ss_faces
"""
ssids = self.get_side_set_ids()
if ( id not in ssids ):
print("WARNING: queried side set ID does not exist in database")
return 0
(num_side_in_set,num_dist_fact_in_set) = self.__ex_get_side_set_param(id)
return num_side_in_set
# --------------------------------------------------------------------
def get_all_side_set_params(self):
"""
tot_num_ss_sides, \\
tot_num_ss_nodes, \\
tot_num_ss_dist_facts = exo.get_all_side_set_params()
-> get total number of sides, nodes, and distribution factors
(e.g. nodal 'weights') combined among all side sets
return value(s):
<int> tot_num_ss_sides
<int> tot_num_ss_nodes
<int> tot_num_ss_dist_facts
NOTE:
The number of nodes (and distribution factors) in a side set is
the sum of all face nodes. A single node can be counted more
than once, i.e. once for each face it belongs to in the side set.
"""
self.__ex_get_side_set_ids()
totNumSetSides, totNumSetDistFacts = 0, 0 # totNumSetDistFacts = totNumSetNodes
for sideSetId in self.sideSetIds:
(numSetSides,numSetDistFacts) = self.__ex_get_side_set_param(int(sideSetId))
totNumSetSides += numSetSides
totNumSetDistFacts += numSetDistFacts
totNumSetNodes = totNumSetDistFacts
return (totNumSetSides, totNumSetNodes, totNumSetDistFacts)
# --------------------------------------------------------------------
def get_side_set_params(self,id):
"""
num_ss_sides, num_ss_dist_facts = \\
exo.get_side_set_params(side_set_id)
-> get number of sides and nodal distribution factors (e.g. nodal
'weights') in a side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
<int> num_ss_sides
<int> num_ss_dist_facts
NOTE:
The number of nodes (and distribution factors) in a side set is
the sum of all face nodes. A single node can be counted more
than once, i.e. once for each face it belongs to in the side set.
"""
(numSetSides,numSetDistFacts) = self.__ex_get_side_set_param(int(id))
return (numSetSides, numSetDistFacts)
# --------------------------------------------------------------------
def put_side_set_params(self,id,numSetSides,numSetDistFacts):
"""
exo.put_side_set_params(side_set_id, \\
num_ss_sides, \\
num_ss_dist_facts)
-> initialize a new side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<int> num_ss_sides number of sides to be added to set
<int> num_ss_dist_facts number of nodal distribution factors
(e.g. nodal 'weights')
NOTE:
The number of nodes (and distribution factors) in a side set is
the sum of all face nodes. A single node can be counted more
than once, i.e. once for each face it belongs to in the side set.
"""
self.__ex_put_side_set_param(id,numSetSides,numSetDistFacts)
# --------------------------------------------------------------------
def get_side_set(self,id):
"""
ss_elems, ss_sides = exo.get_side_set(side_set_id)
-> get the lists of element and side indices in a side set; the
two lists correspond: together, ss_elems[i] and ss_sides[i]
define the face of an element
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
if array_type == 'ctype':
<list<int>> ss_elems
<list<int>> ss_sides
if array_type == 'numpy':
<np_array<int>> ss_elems
<np_array<int>> ss_sides
"""
(side_set_elem_list,side_set_side_list) = self.__ex_get_side_set(id)
if self.use_numpy:
side_set_elem_list = ctype_to_numpy(self, side_set_elem_list)
side_set_side_list = ctype_to_numpy(self, side_set_side_list)
return (side_set_elem_list,side_set_side_list)
# --------------------------------------------------------------------
def put_side_set(self,id,sideSetElements,sideSetSides):
"""
exo.put_side_set(side_set_id, ss_elems, ss_sides)
-> store a side set by its id and the lists of element and side
indices in the side set; the two lists correspond: together,
ss_elems[i] and ss_sides[i] define the face of an element
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<list<int>> ss_elems
<list<int>> ss_sides
"""
self.__ex_put_side_set(id,sideSetElements,sideSetSides)
# --------------------------------------------------------------------
def get_side_set_dist_fact(self,id):
"""
ss_dist_facts = exo.get_side_set_dist_fact(side_set_id)
-> get the list of distribution factors for nodes in a side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
if array_type == 'ctype':
<list<float>> ss_dist_facts a list of distribution factors,
e.g. nodal 'weights'
if array_type == 'numpy':
<np_array<double>> ss_dist_facts a list of distribution
factors, e.g. nodal
'weights'
NOTE:
The number of nodes (and distribution factors) in a side set is
the sum of all face nodes. A single node can be counted more
than once, i.e. once for each face it belongs to in the side set.
"""
side_set_dfs = list(self.__ex_get_side_set_dist_fact(id))
if self.use_numpy:
side_set_dfs = self.np.array(side_set_dfs)
return side_set_dfs
# --------------------------------------------------------------------
    def put_side_set_dist_fact(self,id,sideSetDistFact):
        """
        exo.put_side_set_dist_fact(side_set_id, ss_dist_facts)
        -> store the list of distribution factors for nodes in a side set
        input value(s):
          <int>          side_set_id    side set *ID* (not *INDEX*)
          <list<float>>  ss_dist_facts  a list of distribution factors,
                                          e.g. nodal 'weights'
        NOTE:
          The number of nodes (and distribution factors) in a side set is
          the sum of all face nodes.  A single node can be counted more
          than once, i.e. once for each face it belongs to in the side set.
          (The previous docstring incorrectly referred to node_set_id /
          ns_dist_facts; this method operates on a side set.)
        """
        self.__ex_put_side_set_dist_fact(id,sideSetDistFact)
# --------------------------------------------------------------------
def get_side_set_node_list(self,id):
"""
ss_num_nodes_per_side, \\
ss_nodes = exo.get_side_set_node_list(side_set_id)
-> get two lists:
1. number of nodes for each side in the set
2. concatenation of the nodes for each side in the set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
return value(s):
if array_type == 'ctype':
<list<int>> ss_num_side_nodes
<list<int>> ss_nodes
if array_type == 'numpy':
<np_array<int>> ss_num_side_nodes
<np_array<int>> ss_nodes
NOTE:
The number of nodes (and distribution factors) in a side set is
the sum of the entries in ss_num_nodes_per_side. A single node
can be counted more than once, i.e. once for each face it
belongs to in the side set.
"""
(side_set_node_cnt_list,side_set_node_list) = self.__ex_get_side_set_node_list(id)
if self.use_numpy:
side_set_node_cnt_list = ctype_to_numpy(self, side_set_node_cnt_list)
side_set_node_list = ctype_to_numpy(self, side_set_node_list)
return (side_set_node_cnt_list,side_set_node_list)
# --------------------------------------------------------------------
def get_side_set_variable_truth_table(self,sideSetId=None):
"""
ssvar_truth_tab = \\
exo.get_side_set_variable_truth_table(sideSetID=side_set_id)
-> gets a truth table indicating which variables are defined for
a side set; if side_set_id is not passed, then a concatenated
truth table for all side sets is returned with variable index
cycling faster than side set index
input value(s):
(optional) <int> side_set_id side set *ID* (not *INDEX*)
return value(s):
<list<bool>> ssvar_truth_tab True if variable is defined in
a side set, False otherwise
"""
truthTable = self.__ex_get_sset_var_tab()
if sideSetId != None:
self.get_side_set_ids()
assert sideSetId in list(self.sideSetIds)
indx = list(self.sideSetIds).index(sideSetId)
numVars = self.__ex_get_var_param("s").value
start,stop = (indx * numVars, (indx + 1) * numVars)
return truthTable[start:stop]
return truthTable
# --------------------------------------------------------------------
def set_side_set_variable_truth_table(self,table):
"""
status = \\
exo.set_side_set_variable_truth_table(ssvar_truth_tab)
-> stores a truth table indicating which variables are defined for
all side sets and all side set variables; variable index cycles
faster than side set index
input value(s):
<list<bool>> ssvar_truth_tab True if variable is defined in
a side set, False otherwise
return value(s):
<bool> status True = successful execution
"""
self.get_side_set_ids()
numBlks = len(self.sideSetIds)
numVars = int(self.__ex_get_var_param("s").value)
assert len(table) == (numBlks * numVars)
return self.__ex_put_sset_var_tab(table)
# --------------------------------------------------------------------
def get_side_set_variable_number(self):
"""
num_ssvars = exo.get_side_set_variable_number()
-> get the number of side set variables in the model
return value(s):
<int> num_ssvars
"""
ssType = ex_entity_type("EX_SIDE_SET")
num = self.__ex_get_variable_param(ssType)
return num.value
# --------------------------------------------------------------------
def set_side_set_variable_number(self,number):
"""
status = exo.set_side_set_variable_number(num_ssvars)
-> update the number of side set variables in the model
input value(s):
<int> num_ssvars
return value(s):
<bool> status True = successful execution
"""
ssType = ex_entity_type("EX_SIDE_SET")
self.__ex_put_variable_param(ssType,number)
return True
# --------------------------------------------------------------------
def get_side_set_variable_names(self):
"""
ssvar_names = exo.get_side_set_variable_names()
-> get the list of side set variable names in the model
return value(s):
<list<string>> ssvar_names
"""
names = []
ssType = ex_entity_type("EX_SIDE_SET")
num_vars = self.__ex_get_variable_param(ssType)
for varid in range(num_vars.value):
varid += 1
name = self.__ex_get_variable_name(ssType,varid)
names.append(name.value)
return names
# --------------------------------------------------------------------
def put_side_set_variable_name(self,name,index):
"""
status = exo.put_side_set_variable_name(ssvar_name, ssvar_index)
-> add the name and index of a new side set variable to the model;
side set variable indexing goes from 1 to
exo.get_side_set_variable_number()
input value(s):
<string> ssvar_name name of new side set variable
<int> ssvar_index 1-based index of new side set variable
return value(s):
<bool> status True = successful execution
NOTE:
this method is often called within the following sequence:
>>> num_ssvars = exo.get_side_set_variable_number()
>>> new_ssvar_index = num_ssvars + 1
>>> num_ssvars += 1
>>> exo.set_side_set_variable_number(num_ssvars)
>>> exo.put_side_set_variable_name("new_ssvar", new_ssvar_index)
"""
ssType = ex_entity_type("EX_SIDE_SET")
SSvarNames = self.get_side_set_variable_names()
if name in SSvarNames:
print("WARNING:Side set variable \"", name, "\" already exists.")
if index > len(SSvarNames):
raise Exception("ERROR: variable index out of range.")
self.__ex_put_variable_name(ssType,index,name)
return True
# --------------------------------------------------------------------
def get_side_set_variable_values(self,id,name,step):
"""
ssvar_vals = \\
exo.get_side_set_variable_values(side_set_id, \\
ssvar_name, \\
time_step)
-> get list of side set variable values for a specified side
set, side set variable name, and time step; the list has
one variable value per side in the set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<string> ssvar_name name of side set variable
<int> time_step 1-based index of time step
return value(s):
if array_type == 'ctype':
<list<c_double>> ssvar_vals
if array_type == 'numpy':
<np_array<double>> ssvar_vals
"""
names = self.get_side_set_variable_names()
var_id = names.index(name) + 1
values = self.__ex_get_sset_var(step,var_id,id)
if self.use_numpy:
values = ctype_to_numpy(self,values)
return values
# --------------------------------------------------------------------
def put_side_set_variable_values(self,id,name,step,values):
"""
status = \\
exo.put_side_set_variable_values(side_set_id, \\
ssvar_name, \\
time_step, \\
ssvar_vals)
-> store a list of side set variable values for a specified side
set, side set variable name, and time step; the list has one
variable value per side in the set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<string> ssvar_name name of side set variable
<int> time_step 1-based index of time step
<list<float>> ssvar_vals
return value(s):
<bool> status True = successful execution
"""
names = self.get_side_set_variable_names()
var_id = names.index(name) + 1
self.__ex_put_sset_var(step,var_id,id,values)
return True
# --------------------------------------------------------------------
def get_side_set_property_names(self):
"""
ssprop_names = exo.get_side_set_property_names()
-> get the list of side set property names for all side sets in
the model
return value(s):
<list<string>> ssprop_names
"""
names = []
ssType = ex_entity_type("EX_SIDE_SET")
inqType = "EX_INQ_SS_PROP"
names = self.__ex_get_prop_names(ssType,inqType)
return list(names)
# --------------------------------------------------------------------
def get_side_set_property_value(self,id,name):
"""
ssprop_val = \\
exo.get_side_set_property_value(side_set_id, ssprop_name)
-> get side set property value (an integer) for a specified side
set and side set property name
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<string> ssprop_name
return value(s):
<int> ssprop_val
"""
ssType = ex_entity_type("EX_SIDE_SET")
propVal = self.__ex_get_prop(ssType,id,name)
return int(propVal)
# --------------------------------------------------------------------
def put_side_set_property_value(self,id,name,value):
"""
status = exo.put_side_set_property_value(side_set_id, \\
ssprop_name, \\
ssprop_val)
-> store a side set property name and its integer value for a
side set
input value(s):
<int> side_set_id side set *ID* (not *INDEX*)
<string> ssprop_name
<int> ssprop_val
return value(s):
<bool> status True = successful execution
"""
ssType = ex_entity_type("EX_SIDE_SET")
if self.__ex_put_prop(ssType,id,name,value):
return True
else:
return False
#
# global variables
#
# --------------------------------------------------------------------
def get_global_variable_number(self):
"""
num_gvars = exo.get_global_variable_number()
-> get the number of global variables in the model
return value(s):
<int> num_gvars
"""
gbType = ex_entity_type("EX_GLOBAL")
num = self.__ex_get_variable_param(gbType)
return num.value
# --------------------------------------------------------------------
def set_global_variable_number(self,number):
"""
status = exo.set_global_variable_number(num_gvars)
-> update the number of global variables in the model
input value(s):
<int> num_gvars
return value(s):
<bool> status True = successful execution
"""
gbType = ex_entity_type("EX_GLOBAL")
self.__ex_put_variable_param(gbType,number)
return True
# --------------------------------------------------------------------
def get_global_variable_names(self):
"""
gvar_names = exo.get_global_variable_names()
-> get the list of global variable names in the model
return value(s):
<list<string>> gvar_names
"""
if self.get_global_variable_number() == 0:
return []
return self.__ex_get_var_names("g")
# --------------------------------------------------------------------
def put_global_variable_name(self,name,index):
"""
status = exo.put_global_variable_name(gvar_name, gvar_index)
-> add the name and index of a new global variable to the model;
global variable indexing goes from 1 to
exo.get_global_variable_number()
input value(s):
<string> gvar_name name of new global variable
<int> gvar_index 1-based index of new global variable
return value(s):
<bool> status True = successful execution
NOTE:
this method is often called within the following sequence:
>>> num_gvars = exo.get_global_variable_number()
>>> new_gvar_index = num_gvars + 1
>>> num_gvars += 1
>>> exo.set_global_variable_number(num_gvars)
>>> exo.put_global_variable_name("new_gvar", new_gvar_index)
"""
gbType = ex_entity_type("EX_GLOBAL")
GlobVarNames = self.get_global_variable_names()
if name in GlobVarNames:
print("WARNING: global variable \"", name, "\" already exists.")
if index > len(GlobVarNames):
print("index", index, "len", len(GlobVarNames))
raise Exception("ERROR: variable index out of range.")
self.__ex_put_variable_name(gbType,index,name)
return True
# --------------------------------------------------------------------
def get_global_variable_value(self,name,step):
"""
gvar_val = exo.get_global_variable_value(gvar_name, time_step)
-> get a global variable value for a specified global variable
name and time step
input value(s):
<string> gvar_name name of global variable
<int> time_step 1-based index of time step
return value(s):
<float> gvar_val
"""
names = self.get_global_variable_names()
var_id = names.index(name)
gbType = ex_entity_type("EX_GLOBAL")
num = self.__ex_get_variable_param(gbType)
gvalues = self.__ex_get_var(step,gbType,0,1,num.value)
return gvalues[var_id]
# --------------------------------------------------------------------
def get_all_global_variable_values(self,step):
"""
gvar_vals = exo.get_all_global_variable_values(time_step)
-> get all global variable values (one for each global variable
name, and in the order given by exo.get_global_variable_names())
at a specified time step
input value(s):
<int> time_step 1-based index of time step
return value(s):
if array_type == 'ctype':
<list<float>> gvar_vals
if array_type == 'numpy':
<np_array<double>> gvar_vals
"""
gbType = ex_entity_type("EX_GLOBAL")
num = self.__ex_get_variable_param(gbType)
gvalues = self.__ex_get_var(step,gbType,0,1,num.value)
values = []
for i in xrange(num.value):
values.append(gvalues[i])
if self.use_numpy:
values = self.np.array(values)
return values
# --------------------------------------------------------------------
def put_global_variable_value(self,name,step,value):
"""
status = exo.put_global_variable_value(gvar_name, \\
time_step, \\
gvar_val)
-> store a global variable value for a specified global variable
name and time step
input value(s):
<string> gvar_name name of global variable
<int> time_step 1-based index of time step
<float> gvar_val
return value(s):
<bool> status True = successful execution
"""
# we must write all values at once, not individually
names = self.get_global_variable_names()
# get all values
numVals = self.get_global_variable_number()
values = (c_double * numVals)()
for i in xrange(numVals):
values[i] = c_double(self.get_global_variable_value(names[i], step))
# adjust one of them
values[names.index(name)] = c_double(value)
# write them all
EXODUS_LIB.ex_put_glob_vars(self.fileId,
c_int(step),
c_int(numVals),
values)
return True
# --------------------------------------------------------------------
def put_all_global_variable_values(self,step,values):
"""
status = exo.put_all_global_variable_values(time_step, gvar_vals)
-> store all global variable values (one for each global variable
name, and in the order given by exo.get_global_variable_names())
at a specified time step
input value(s):
<int> time_step 1-based index of time step
<list<float>> gvar_vals
return value(s):
<bool> status True = successful execution
"""
numVals = self.get_global_variable_number()
gvalues = (c_double * numVals)()
for i in xrange(numVals):
gvalues[i] = c_double(values[i])
EXODUS_LIB.ex_put_glob_vars(self.fileId,
c_int(step),
c_int(numVals),
gvalues)
return True
# --------------------------------------------------------------------
def get_global_variable_values(self,name):
"""
gvar_vals = exo.get_global_variable_values(gvar_name)
-> get global variable values over all time steps for one global
variable name
input value(s):
<string> gvar_name name of global variable
return value(s):
if array_type == 'ctype':
<list<float>> gvar_vals
if array_type == 'numpy':
<np_array<double>> gvar_vals
"""
names = self.get_global_variable_names()
var_id = names.index(name)
gbType = ex_entity_type("EX_GLOBAL")
num = self.__ex_get_variable_param(gbType)
values = []
for i in range(self.numTimes.value):
gvalues = self.__ex_get_var(i+1,gbType,0,1,num.value)
values.append( gvalues[var_id] )
if self.use_numpy:
values = self.np.array(values)
return values
# --------------------------------------------------------------------
def close(self):
"""
exo.close()
-> close the exodus file
NOTE:
Can only be called once for an exodus object, and once called
all methods for that object become inoperable
"""
print("Closing exodus file: " + self.fileName)
errorInt = EXODUS_LIB.ex_close(self.fileId)
if errorInt != 0:
raise Exception("ERROR: Closing file " + self.fileName + " had problems.")
# --------------------------------------------------------------------
#
# Private Exodus API calls
#
# --------------------------------------------------------------------
def __open(self, io_size=0):
print("Opening exodus file: " + self.fileName)
self.mode = EX_READ
if self.modeChar.lower() == "a": self.mode = EX_WRITE
if self.modeChar.lower() in ["a","r"] and not os.path.isfile(self.fileName):
raise Exception("ERROR: Cannot open " + self.fileName + " for read. Does not exist.")
elif self.modeChar.lower() == "w" and os.path.isfile(self.fileName):
raise Exception("ERROR: Cowardly not opening " + self.fileName + \
" for write. File already exists.")
elif self.modeChar.lower() not in ["a","r","w"]:
raise Exception("ERROR: File open mode " + self.modeChar + " unrecognized.")
self.comp_ws = c_int(8)
self.io_ws = c_int(io_size)
self.version = c_float(0.0)
if self.modeChar.lower() in ["a","r"]: # open existing file
self.fileId = EXODUS_LIB.ex_open_int(self.fileName,self.mode,
byref(self.comp_ws),
byref(self.io_ws),
byref(self.version),
EX_API_VERSION_NODOT)
else: # create file
if io_size == 0:
io_size = 8
self.io_ws = c_int(io_size)
self.__create()
# --------------------------------------------------------------------
def __create(self):
cMode = c_int(EX_NOCLOBBER)
self.fileId = EXODUS_LIB.ex_create_int(self.fileName,cMode,
byref(self.comp_ws),
byref(self.io_ws),
EX_API_VERSION_NODOT)
# --------------------------------------------------------------------
  def __copy_file(self,other):
    # Copy this file's contents into the (already open) exodus file
    # wrapped by `other`, via the native ex_copy call.
    EXODUS_LIB.ex_copy(self.fileId,other.fileId)
# --------------------------------------------------------------------
def __ex_get_info(self):
self.Title = create_string_buffer(MAX_LINE_LENGTH+1)
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
self.numDim = c_longlong(0)
self.numNodes = c_longlong(0)
self.numElem = c_longlong(0)
self.numElemBlk = c_longlong(0)
self.numNodeSets = c_longlong(0)
self.numSideSets = c_longlong(0)
else:
self.numDim = c_int(0)
self.numNodes = c_int(0)
self.numElem = c_int(0)
self.numElemBlk = c_int(0)
self.numNodeSets = c_int(0)
self.numSideSets = c_int(0)
EXODUS_LIB.ex_get_init(self.fileId,self.Title,byref(self.numDim),byref(self.numNodes),
byref(self.numElem),byref(self.numElemBlk),byref(self.numNodeSets),
byref(self.numSideSets))
# --------------------------------------------------------------------
def __ex_put_info(self,info):
self.Title = create_string_buffer(info[0],MAX_LINE_LENGTH+1)
self.numDim = c_longlong(info[1])
self.numNodes = c_longlong(info[2])
self.numElem = c_longlong(info[3])
self.numElemBlk = c_longlong(info[4])
self.numNodeSets = c_longlong(info[5])
self.numSideSets = c_longlong(info[6])
EXODUS_LIB.ex_put_init(self.fileId,self.Title,self.numDim,self.numNodes,self.numElem,
self.numElemBlk,self.numNodeSets,self.numSideSets)
self.version = self.__ex_inquire_float(ex_inquiry("EX_INQ_DB_VERS"))
# --------------------------------------------------------------------
def __ex_put_concat_elem_blk(self,elemBlkIDs, elemType, numElemThisBlk,\
numNodesPerElem,numAttr,defineMaps):
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
elem_blk_ids = (c_longlong * len(elemBlkIDs))()
elem_blk_ids[:] = elemBlkIDs
num_elem_this_blk = (c_longlong * len(elemBlkIDs))()
num_elem_this_blk[:] = numElemThisBlk
num_nodes_per_elem = (c_longlong * len(elemBlkIDs))()
num_nodes_per_elem[:] = numNodesPerElem
num_attr = (c_longlong * len(elemBlkIDs))()
num_attr[:] = numAttr
else:
elem_blk_ids = (c_int * len(elemBlkIDs))()
elem_blk_ids[:] = elemBlkIDs
num_elem_this_blk = (c_int * len(elemBlkIDs))()
num_elem_this_blk[:] = numElemThisBlk
num_nodes_per_elem = (c_int * len(elemBlkIDs))()
num_nodes_per_elem[:] = numNodesPerElem
num_attr = (c_int * len(elemBlkIDs))()
num_attr[:] = numAttr
elem_type = (c_char_p * len(elemBlkIDs))()
elem_type[:] = elemType
define_maps = c_int(defineMaps)
EXODUS_LIB.ex_put_concat_elem_block(self.fileId,elem_blk_ids,elem_type, \
num_elem_this_blk,num_nodes_per_elem,num_attr,define_maps)
# --------------------------------------------------------------------
  def __ex_get_qa(self):
    # Read all QA records from the file and return them as a list of
    # 4-tuples of strings (the Exodus QA record format is 4 strings
    # per record).
    num_qa_recs = c_int(self.__ex_inquire_int(ex_inquiry("EX_INQ_QA")))
    # allocate a num_qa_recs x 4 array of pointers to char buffers for
    # the C library to fill in
    qa_rec_ptrs = ((POINTER(c_char * (MAX_STR_LENGTH+1)) * 4) * num_qa_recs.value)()
    for i in range(num_qa_recs.value):
      for j in range(4):
        qa_rec_ptrs[i][j] = pointer(create_string_buffer(MAX_STR_LENGTH+1))
    # skip the native call entirely when there are no records
    if num_qa_recs.value:
      EXODUS_LIB.ex_get_qa(self.fileId,byref(qa_rec_ptrs))
    qa_recs = []
    for qara in qa_rec_ptrs:
      qa_rec_list = []
      for ptr in qara:
        qa_rec_list.append(ptr.contents.value)
      qa_rec_tuple = tuple(qa_rec_list)
      assert len(qa_rec_tuple) == 4
      qa_recs.append(qa_rec_tuple)
    return qa_recs
# --------------------------------------------------------------------
def __ex_put_qa(self,qaRecs):
num_qa_recs = c_int(len(qaRecs))
qa_rec_ptrs = ((POINTER(c_char * (MAX_STR_LENGTH+1)) * 4) * num_qa_recs.value)()
for i in range(num_qa_recs.value):
for j in range(4):
qa_rec_ptrs[i][j] = pointer(create_string_buffer(str(qaRecs[i][j]),MAX_STR_LENGTH+1))
EXODUS_LIB.ex_put_qa(self.fileId,num_qa_recs,byref(qa_rec_ptrs))
return True
# --------------------------------------------------------------------
def _ex_get_info_recs_quietly(self):
num_infos = c_int(self.__ex_inquire_int(ex_inquiry("EX_INQ_INFO")))
info_ptrs = (POINTER(c_char * (MAX_LINE_LENGTH+1)) * num_infos.value)()
for i in range(num_infos.value):
info_ptrs[i] = pointer(create_string_buffer(MAX_LINE_LENGTH+1))
if num_infos.value:
EXODUS_LIB.ex_get_info(self.fileId,byref(info_ptrs))
info_recs = []
for irp in info_ptrs:
info_recs.append(irp.contents.value)
return info_recs
# --------------------------------------------------------------------
def __ex_get_info_recs(self):
num_infos = c_int(self.__ex_inquire_int(ex_inquiry("EX_INQ_INFO")))
info_ptrs = (POINTER(c_char * (MAX_LINE_LENGTH+1)) * num_infos.value)()
for i in range(num_infos.value):
info_ptrs[i] = pointer(create_string_buffer(MAX_LINE_LENGTH+1))
EXODUS_LIB.ex_get_info(self.fileId,byref(info_ptrs))
info_recs = []
for irp in info_ptrs:
info_recs.append(irp.contents.value)
for rec in info_recs:
if len(rec) > MAX_LINE_LENGTH:
print("WARNING: max line length reached for one or more info records;")
print(" info might be incomplete for these records")
break
return info_recs
# --------------------------------------------------------------------
def __ex_put_info_recs(self,infoRecs):
num_infos = c_int(len(infoRecs))
info_ptrs = (POINTER(c_char * (MAX_LINE_LENGTH+1)) * num_infos.value)()
for i in range(num_infos.value):
info_ptrs[i] = pointer(create_string_buffer(str(infoRecs[i]),MAX_LINE_LENGTH+1))
EXODUS_LIB.ex_put_info(self.fileId,num_infos,byref(info_ptrs))
return True
# --------------------------------------------------------------------
def __ex_inquire_float(self,id):
val = c_int(0)
dummy_char = create_string_buffer(MAX_LINE_LENGTH+1)
ret_float = c_float(0.0)
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_INQ_INT64_API):
dummy_int = c_longlong(0)
else:
dummy_int = c_int(0)
val = EXODUS_LIB.ex_inquire(self.fileId,id,byref(dummy_int),byref(ret_float),dummy_char)
if val < 0:
raise Exception("ERROR: ex_inquire(" + str(id) + ") failed on " + self.fileName)
return ret_float
# --------------------------------------------------------------------
def __ex_inquire_int(self,id):
val = c_longlong(0)
val = EXODUS_LIB.ex_inquire_int(self.fileId,id)
if val < 0:
raise Exception("ERROR: ex_inquire_int(" + str(id) + ") failed on " + self.fileName)
return val
# --------------------------------------------------------------------
def __ex_get_coord_names(self):
coord_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * self.numDim.value)()
for i in range(self.numDim.value):
coord_name_ptrs[i] = pointer(create_string_buffer(MAX_STR_LENGTH+1))
EXODUS_LIB.ex_get_coord_names(self.fileId,byref(coord_name_ptrs))
coord_names = []
for cnp in coord_name_ptrs:
coord_names.append(cnp.contents.value)
return coord_names
# --------------------------------------------------------------------
def __ex_put_coord_names(self,names):
coord_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * self.numDim.value)()
assert len(names) == self.numDim.value
for i in range(self.numDim.value):
coord_name_ptrs[i] = pointer(create_string_buffer(names[i],MAX_STR_LENGTH+1))
EXODUS_LIB.ex_put_coord_names(self.fileId,byref(coord_name_ptrs))
# --------------------------------------------------------------------
  def __ex_get_all_times(self):
    # Fill self.times (a c_double array with one entry per time step)
    # with all time values stored in the file.
    self.times = (c_double * self.numTimes.value)()
    EXODUS_LIB.ex_get_all_times(self.fileId,byref(self.times))
# --------------------------------------------------------------------
def __ex_get_time(self,timeStep):
time_step = c_int(timeStep)
time_val = c_double(0.0)
EXODUS_LIB.ex_get_time(self.fileId,time_step,byref(time_val))
return time_val.value()
# --------------------------------------------------------------------
def __ex_put_time(self,timeStep,timeVal):
time_step = c_int(timeStep)
time_val = c_double(timeVal)
EXODUS_LIB.ex_put_time(self.fileId,time_step,byref(time_val))
return True
# --------------------------------------------------------------------
def __ex_get_name(self,objType,objId):
obj_type = c_int(objType)
obj_id = c_int(objId)
obj_name = create_string_buffer(MAX_STR_LENGTH+1)
EXODUS_LIB.ex_get_name(self.fileId,obj_type,obj_id,byref(obj_name))
return obj_name.value
# --------------------------------------------------------------------
def __ex_put_name(self,objType,objId,objName):
obj_type = c_int(objType)
obj_id = c_int(objId)
obj_name = create_string_buffer(objName,MAX_STR_LENGTH+1)
EXODUS_LIB.ex_put_name(self.fileId,obj_type,obj_id,obj_name)
# --------------------------------------------------------------------
def __ex_get_names(self,objType,inqType):
obj_type = c_int(objType)
num_objs = c_int(self.__ex_inquire_int(inqType))
numObjs = num_objs.value
obj_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * numObjs)()
for i in range(numObjs):
obj_name_ptrs[i] = pointer(create_string_buffer(MAX_STR_LENGTH+1))
EXODUS_LIB.ex_get_names(self.fileId,obj_type,byref(obj_name_ptrs))
obj_names = []
for onp in obj_name_ptrs:
obj_names.append(onp.contents.value)
return obj_names
# --------------------------------------------------------------------
def __ex_put_names(self,objType,inqType,objNames):
num_objs = c_int(self.__ex_inquire_int(inqType))
numObjs = num_objs.value
assert numObjs == len(objNames)
obj_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * numObjs)()
obj_type = c_int(objType)
for i in range(numObjs):
obj_name_ptrs[i] = pointer(create_string_buffer(objNames[i],MAX_STR_LENGTH+1))
EXODUS_LIB.ex_put_names(self.fileId,obj_type,byref(obj_name_ptrs))
# --------------------------------------------------------------------
def __ex_get_elem_blk_ids(self):
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
self.elemBlkIds = (c_longlong * self.numElemBlk.value)()
else:
self.elemBlkIds = (c_int * self.numElemBlk.value)()
if self.numElemBlk.value > 0:
EXODUS_LIB.ex_get_elem_blk_ids(self.fileId,byref(self.elemBlkIds))
# --------------------------------------------------------------------
def __ex_get_side_set_ids(self):
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
self.sideSetIds = (c_longlong * self.numSideSets.value)()
else:
self.sideSetIds = (c_int * self.numSideSets.value)()
if self.num_side_sets() > 0:
EXODUS_LIB.ex_get_side_set_ids(self.fileId,byref(self.sideSetIds))
# --------------------------------------------------------------------
def __ex_get_node_set_ids(self):
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
self.nodeSetIds = (c_longlong * self.numNodeSets.value)()
else:
self.nodeSetIds = (c_int * self.numNodeSets.value)()
if self.num_node_sets() > 0:
EXODUS_LIB.ex_get_node_set_ids(self.fileId,byref(self.nodeSetIds))
# --------------------------------------------------------------------
def __ex_get_node_set_param(self, nodeSetId):
node_set_id = c_longlong(nodeSetId)
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
num_set_nodes = c_longlong(0)
num_set_dist_facts = c_longlong(0)
else:
num_set_nodes = c_int(0)
num_set_dist_facts = c_int(0)
EXODUS_LIB.ex_get_node_set_param(self.fileId,node_set_id,byref(num_set_nodes),byref(num_set_dist_facts))
return (int(num_set_nodes.value),int(num_set_dist_facts.value))
# --------------------------------------------------------------------
def __ex_put_node_set_param(self,nodeSetId,numNodes,numDistFacts):
node_set_id = c_longlong(nodeSetId)
num_set_nodes = c_longlong(numNodes)
num_set_dist_facts = c_longlong(numDistFacts)
EXODUS_LIB.ex_put_node_set_param(self.fileId,node_set_id,num_set_nodes,num_set_dist_facts)
# --------------------------------------------------------------------
def __ex_get_node_set(self, nodeSetId):
node_set_id = c_longlong(nodeSetId)
num_node_set_nodes = self.__ex_get_node_set_param(nodeSetId)[0]
if num_node_set_nodes == 0:
return []
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
set_nodes = (c_longlong * num_node_set_nodes)()
else:
set_nodes = (c_int * num_node_set_nodes)()
EXODUS_LIB.ex_get_node_set(self.fileId,node_set_id,byref(set_nodes))
return set_nodes
# --------------------------------------------------------------------
def __ex_put_node_set(self,nodeSetId,nodeSetNodes):
node_set_id = c_longlong(nodeSetId)
if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
node_set_nodes = (c_longlong * len(nodeSetNodes))()
for i in range(len(nodeSetNodes)):
node_set_nodes[i] = c_longlong(nodeSetNodes[i])
else:
node_set_nodes = (c_int * len(nodeSetNodes))()
for i in range(len(nodeSetNodes)):
node_set_nodes[i] = c_int(nodeSetNodes[i])
EXODUS_LIB.ex_put_node_set(self.fileId,node_set_id,node_set_nodes)
# --------------------------------------------------------------------
def __ex_get_node_set_dist_fact(self, nodeSetId):
node_set_id = c_longlong(nodeSetId)
num_node_set_nodes = self.__ex_get_node_set_param(nodeSetId)[0]
set_dfs = (c_double * num_node_set_nodes)()
EXODUS_LIB.ex_get_node_set_dist_fact(self.fileId,node_set_id,byref(set_dfs))
return set_dfs
# --------------------------------------------------------------------
def __ex_put_node_set_dist_fact(self,nodeSetId,nodeSetDistFact):
node_set_id = c_longlong(nodeSetId)
node_set_dist_fact = (c_double * len(nodeSetDistFact))()
for i in range(len(nodeSetDistFact)):
node_set_dist_fact[i] = c_double(nodeSetDistFact[i])
EXODUS_LIB.ex_put_node_set_dist_fact(self.fileId,node_set_id,node_set_dist_fact)
# --------------------------------------------------------------------
def __ex_get_nset_var(self,timeStep,varId,id):
step = c_int(timeStep)
var_id = c_int(varId)
node_set_id = c_longlong(id)
(numNodeInSet,numDistFactInSet) = self.__ex_get_node_set_param(id)
num_node_in_set = c_longlong(numNodeInSet)
ns_var_vals = (c_double * numNodeInSet)()
EXODUS_LIB.ex_get_nset_var(self.fileId,step,var_id,node_set_id,num_node_in_set,ns_var_vals)
return list(ns_var_vals)
# --------------------------------------------------------------------
def __ex_get_nset_var_tab(self):
self.__ex_get_node_set_ids()
node_set_count = c_int(len(self.nodeSetIds))
variable_count = self.__ex_get_var_param("m")
truth_table = (c_int * (node_set_count.value * variable_count.value))()
EXODUS_LIB.ex_get_nset_var_tab(self.fileId,
node_set_count,
variable_count,
byref(truth_table))
truthTab = []
for val in truth_table:
if val:
truthTab.append(True)
else:
truthTab.append(False)
return truthTab
# --------------------------------------------------------------------
def __ex_put_nset_var_tab(self,truthTab):
self.__ex_get_node_set_ids()
num_blks = c_int(len(self.nodeSetIds))
num_vars = self.__ex_get_var_param("m")
truth_tab = (c_int * (num_blks.value*num_vars.value))()
for i in xrange(len(truthTab)):
boolVal = truthTab[i]
if boolVal:
truth_tab[i] = c_int(1)
else:
truth_tab[i] = c_int(0)
EXODUS_LIB.ex_put_nset_var_tab(self.fileId,num_blks,num_vars,truth_tab)
return True
# --------------------------------------------------------------------
def __ex_put_nset_var(self,timeStep,varId,id,values):
step = c_int(timeStep)
var_id = c_int(varId)
node_set_id = c_longlong(id)
(numNodeInSet,numDistFactInSet) = self.__ex_get_node_set_param(id)
num_node_in_set = c_longlong(numNodeInSet)
ns_var_vals = (c_double * numNodeInSet)()
for i in range(numNodeInSet):
ns_var_vals[i] = float(values[i])
EXODUS_LIB.ex_put_nset_var(self.fileId,step,var_id,node_set_id,num_node_in_set,ns_var_vals)
return True
# --------------------------------------------------------------------
  def __ex_get_coord(self):
    # Read all nodal coordinates into self.coordsX/Y/Z (c_double
    # arrays, one entry per node).
    self.coordsX = (c_double * self.numNodes.value)()
    self.coordsY = (c_double * self.numNodes.value)()
    self.coordsZ = (c_double * self.numNodes.value)()
    EXODUS_LIB.ex_get_coord(self.fileId,byref(self.coordsX),byref(self.coordsY),byref(self.coordsZ))
# --------------------------------------------------------------------
def __ex_put_coord(self,xCoords,yCoords,zCoords):
self.coordsX = (c_double * self.numNodes.value)()
self.coordsY = (c_double * self.numNodes.value)()
self.coordsZ = (c_double * self.numNodes.value)()
for i in range(self.numNodes.value):
self.coordsX[i] = float(xCoords[i])
self.coordsY[i] = float(yCoords[i])
self.coordsZ[i] = float(zCoords[i])
EXODUS_LIB.ex_put_coord(self.fileId,byref(self.coordsX),byref(self.coordsY),byref(self.coordsZ))
# --------------------------------------------------------------------
  def __ex_get_n_coord(self,startNodeId,numNodes):
    """Read coordinates for a contiguous range of nodes (wraps ex_get_n_coord).

    startNodeId: first node of the range (EXODUS node numbering)
    numNodes:    number of nodes to read
    Returns three plain Python lists: (xs, ys, zs).
    """
    start_node_num = c_longlong(startNodeId)
    num_nodes = c_longlong(numNodes)
    coordsX = (c_double * numNodes)()
    coordsY = (c_double * numNodes)()
    coordsZ = (c_double * numNodes)()
    EXODUS_LIB.ex_get_n_coord(self.fileId,start_node_num,num_nodes,byref(coordsX),byref(coordsY),byref(coordsZ))
    return list(coordsX),list(coordsY),list(coordsZ)
# --------------------------------------------------------------------
  def __ex_get_id_map(self,objType,inqType):
    """Read an id map for the given object type (wraps ex_get_id_map).

    objType: integer ex_entity_type code
    inqType: inquiry code used to size the map via ex_inquire
    Returns the map as a plain Python list of ints.
    """
    obj_type = c_int(objType)
    num_objs = c_int(self.__ex_inquire_int(inqType))
    numObjs = num_objs.value
    id_map = []
    # element width must match the library's configured integer size
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
      id_map = (c_longlong * numObjs)()
    else:
      id_map = (c_int * numObjs)()
    EXODUS_LIB.ex_get_id_map(self.fileId,obj_type,byref(id_map))
    idMap = []
    for i in xrange(numObjs):
      idMap.append(id_map[i])
    return idMap
# --------------------------------------------------------------------
  def __ex_put_id_map(self,objType,inqType,map):
    """Write an id map for the given object type (wraps ex_put_id_map).

    objType: integer ex_entity_type code
    inqType: inquiry code used to size/validate the map via ex_inquire
    map:     sequence of ids; its length must equal the inquiry result
    Returns True.
    """
    obj_type = c_int(objType)
    num_objs = c_int(self.__ex_inquire_int(inqType))
    numObjs = num_objs.value
    assert numObjs == len(map)
    id_map = []
    # element width must match the library's configured integer size
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
      id_map = (c_longlong * numObjs)()
      for i in xrange(numObjs):
        id_map[i] = c_longlong( map[i] )
    else:
      id_map = (c_int * numObjs)()
      for i in xrange(numObjs):
        id_map[i] = c_int( map[i] )
    EXODUS_LIB.ex_put_id_map(self.fileId,obj_type,byref(id_map))
    return True
# --------------------------------------------------------------------
  def __ex_get_elem_num_map(self):
    """Read the element number map (wraps the deprecated ex_get_elem_num_map).

    Returns the raw ctypes array (c_longlong or c_int per EX_MAPS_INT64_API).
    """
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_MAPS_INT64_API):
      elemNumMap = (c_longlong * self.numElem.value)()
    else:
      elemNumMap = (c_int * self.numElem.value)()
    EXODUS_LIB.ex_get_elem_num_map(self.fileId,byref(elemNumMap))
    return elemNumMap
# --------------------------------------------------------------------
  def __ex_get_node_num_map(self):
    """Read the node number map (wraps the deprecated ex_get_node_num_map).

    Returns the raw ctypes array (c_longlong or c_int per EX_MAPS_INT64_API).
    """
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_MAPS_INT64_API):
      nodeNumMap = (c_longlong * self.numNodes.value)()
    else:
      nodeNumMap = (c_int * self.numNodes.value)()
    EXODUS_LIB.ex_get_node_num_map(self.fileId,byref(nodeNumMap))
    return nodeNumMap
# --------------------------------------------------------------------
  def __ex_get_elem_order_map(self):
    """Read the element order map (wraps ex_get_map).

    Returns the raw ctypes array (c_longlong or c_int per EX_MAPS_INT64_API).
    """
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_MAPS_INT64_API):
      elemOrderMap = (c_longlong * self.numElem.value)()
    else:
      elemOrderMap = (c_int * self.numElem.value)()
    EXODUS_LIB.ex_get_map(self.fileId,byref(elemOrderMap))
    return elemOrderMap
# --------------------------------------------------------------------
  def __ex_get_elem_block(self,id):
    """Read an element block's parameters (wraps ex_get_elem_block).

    id: element block id
    Returns a tuple of raw ctypes objects:
    (elem_type buffer, num_elem_this_blk, num_nodes_per_elem, num_attr).
    """
    elem_block_id = c_longlong(id)
    elem_type = create_string_buffer(MAX_STR_LENGTH+1)
    # count widths depend on the library's bulk-data integer mode
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      num_elem_this_blk  = c_longlong(0)
      num_nodes_per_elem = c_longlong(0)
      num_attr           = c_longlong(0)
    else:
      num_elem_this_blk  = c_int(0)
      num_nodes_per_elem = c_int(0)
      num_attr           = c_int(0)
    EXODUS_LIB.ex_get_elem_block(self.fileId,elem_block_id,elem_type,
                                 byref(num_elem_this_blk),byref(num_nodes_per_elem),\
                                 byref(num_attr))
    return(elem_type,num_elem_this_blk,num_nodes_per_elem,num_attr)
# --------------------------------------------------------------------
  def __ex_put_elem_block(self,id,eType,numElems,numNodesPerElem,numAttrsPerElem):
    """Define an element block (wraps ex_put_elem_block).

    id:              element block id
    eType:           element type name (upper-cased before writing)
    numElems:        number of elements in the block
    numNodesPerElem: nodes per element
    numAttrsPerElem: attributes per element
    """
    elem_block_id      = c_longlong(id)
    elem_type          = create_string_buffer(eType.upper(),MAX_STR_LENGTH+1)
    num_elem_this_blk  = c_longlong(numElems)
    num_nodes_per_elem = c_longlong(numNodesPerElem)
    num_attr           = c_longlong(numAttrsPerElem)
    EXODUS_LIB.ex_put_elem_block(self.fileId,elem_block_id,elem_type,
                                 num_elem_this_blk,num_nodes_per_elem,
                                 num_attr)
# --------------------------------------------------------------------
  def __ex_get_elem_conn(self,id):
    """Read an element block's connectivity (wraps ex_get_elem_conn).

    id: element block id
    Returns (connectivity ctypes array, num_elem_this_blk, num_nodes_per_elem);
    the array is flat, num_elem * num_nodes_per_elem long.
    """
    (elem_type,num_elem_this_blk,num_nodes_per_elem,num_attr) = self.__ex_get_elem_block(id)
    elem_block_id = c_longlong(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      elem_block_connectivity = (c_longlong * (num_elem_this_blk.value * num_nodes_per_elem.value))()
    else:
      elem_block_connectivity = (c_int * (num_elem_this_blk.value * num_nodes_per_elem.value))()
    EXODUS_LIB.ex_get_elem_conn(self.fileId,elem_block_id,byref(elem_block_connectivity))
    return (elem_block_connectivity,num_elem_this_blk,num_nodes_per_elem)
# --------------------------------------------------------------------
  def __ex_put_elem_conn(self,id,connectivity):
    """Write an element block's connectivity (wraps ex_put_elem_conn).

    id:           element block id
    connectivity: flat sequence of node ids, num_elem * num_nodes_per_elem
                  long; the expected size comes from the block parameters
                  already stored in the file.
    """
    (elem_type,num_elem_this_blk,num_nodes_per_elem,num_attr) = self.__ex_get_elem_block(id)
    elem_block_id = c_longlong(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      elem_block_connectivity = (c_longlong * (num_elem_this_blk.value * num_nodes_per_elem.value))()
      for i in range(num_elem_this_blk.value * num_nodes_per_elem.value):
        elem_block_connectivity[i] = c_longlong(connectivity[i])
    else:
      elem_block_connectivity = (c_int * (num_elem_this_blk.value * num_nodes_per_elem.value))()
      for i in range(num_elem_this_blk.value * num_nodes_per_elem.value):
        elem_block_connectivity[i] = c_int(connectivity[i])
    EXODUS_LIB.ex_put_elem_conn(self.fileId,elem_block_id,elem_block_connectivity)
# --------------------------------------------------------------------
  def __ex_put_elem_attr(self,elemBlkID,Attr):
    """Write all element attributes for a block (wraps ex_put_elem_attr).

    elemBlkID: element block id
    Attr:      flat sequence of attribute values (num_attr * num_elem long)
    """
    elem_blk_id = c_longlong(elemBlkID)
    attrib = (c_double * len(Attr))()
    for i in range(len(Attr)):
      attrib[i] = c_double(Attr[i])
    EXODUS_LIB.ex_put_elem_attr(self.fileId,elem_blk_id,attrib)
# --------------------------------------------------------------------
  def __ex_get_elem_attr(self,elemBlkID):
    """Read all element attributes for a block (wraps ex_get_elem_attr).

    elemBlkID: element block id
    Returns a flat ctypes c_double array, num_attr * num_elem long.
    """
    elem_blk_id = c_longlong(elemBlkID)
    numAttrThisBlk = self.num_attr(elemBlkID)
    numElemsThisBlk = self.num_elems_in_blk(elemBlkID)
    totalAttr = numAttrThisBlk*numElemsThisBlk
    attrib = (c_double * totalAttr)()
    EXODUS_LIB.ex_get_elem_attr(self.fileId,elem_blk_id,byref(attrib))
    return attrib
# --------------------------------------------------------------------
  def __ex_get_var_param(self,varChar):
    """Return the number of variables of one kind (wraps ex_get_var_param).

    varChar: one-character kind code; must be one of 'n','g','e','m','s'
    Returns the count as a ctypes c_int.
    """
    assert varChar.lower() in 'ngems'
    var_char = c_char(varChar)
    num_vars = c_int()
    EXODUS_LIB.ex_get_var_param(self.fileId,byref(var_char),byref(num_vars))
    return num_vars
# --------------------------------------------------------------------
  def __ex_get_var_names(self,varChar):
    """Return the names of all variables of one kind (wraps ex_get_var_names).

    varChar: one-character kind code; must be one of 'n','g','e','m','s'
    Returns a list of name strings.
    """
    assert varChar.lower() in 'ngems'
    var_char = c_char(varChar)
    num_vars = self.__ex_get_var_param(varChar)
    # array of pointers to fixed-size string buffers, filled by the C call
    var_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * num_vars.value)()
    for i in range(num_vars.value):
      var_name_ptrs[i] = pointer(create_string_buffer(MAX_STR_LENGTH+1))
    EXODUS_LIB.ex_get_var_names(self.fileId,byref(var_char),num_vars,byref(var_name_ptrs))
    var_names = []
    for vnp in var_name_ptrs: var_names.append(vnp.contents.value)
    return var_names
# --------------------------------------------------------------------
  def __ex_get_elem_var_tab(self):
    """Read the element variable truth table (wraps ex_get_elem_var_tab).

    Returns a flat list of booleans, one per (block, variable) pair,
    block-major.
    """
    self.__ex_get_elem_blk_ids()
    num_blks = c_int(len(self.elemBlkIds))
    num_vars = self.__ex_get_var_param("e")
    truth_tab = (c_int * (num_blks.value * num_vars.value))()
    EXODUS_LIB.ex_get_elem_var_tab(self.fileId, num_blks, num_vars, byref(truth_tab))
    truthTab = []
    for val in truth_tab:
      if val:
        truthTab.append(True)
      else:
        truthTab.append(False)
    return truthTab
# --------------------------------------------------------------------
  def __ex_put_elem_var_tab(self,truthTab):
    """Write the element variable truth table (wraps ex_put_elem_var_tab).

    truthTab: flat list of booleans, one per (block, variable) pair,
    block-major.
    Returns True.
    """
    self.__ex_get_elem_blk_ids()
    num_blks = c_int(len(self.elemBlkIds))
    num_vars = self.__ex_get_var_param("e")
    truth_tab = (c_int * (num_blks.value*num_vars.value))()
    for i in xrange(len(truthTab)):
      boolVal = truthTab[i]
      if boolVal:
        truth_tab[i] = c_int(1)
      else:
        truth_tab[i] = c_int(0)
    EXODUS_LIB.ex_put_elem_var_tab(self.fileId,num_blks,num_vars,truth_tab)
    return True
# --------------------------------------------------------------------
  def __ex_get_var(self,timeStep,varType,varId,blkId,numValues):
    """Read one variable's values on one entity at one time step (wraps ex_get_var).

    timeStep:  1-based time step index
    varType:   integer ex_entity_type code
    varId:     1-based variable index
    blkId:     block/set id
    numValues: number of values to read
    Returns a ctypes c_double array of length numValues.
    """
    step       = c_int(timeStep)
    var_type   = c_int(varType)
    var_id     = c_int(varId)
    block_id   = c_longlong(blkId)
    num_values = c_longlong(numValues)
    var_vals = (c_double * num_values.value)()
    EXODUS_LIB.ex_get_var(self.fileId,step,var_type,var_id,block_id,num_values,var_vals)
    return var_vals
# --------------------------------------------------------------------
  def __ex_put_var(self,timeStep,varType,varId,blkId,numValues,values):
    """Write one variable's values on one entity at one time step (wraps ex_put_var).

    timeStep:  1-based time step index
    varType:   integer ex_entity_type code
    varId:     1-based variable index
    blkId:     block/set id
    numValues: number of values to write
    values:    sequence of floats of length numValues
    Returns True.
    """
    step       = c_int(timeStep)
    var_type   = c_int(varType)
    var_id     = c_int(varId)
    block_id   = c_longlong(blkId)
    num_values = c_longlong(numValues)
    var_vals = (c_double * num_values.value)()
    for i in range(num_values.value):
      var_vals[i] = float(values[i])
    EXODUS_LIB.ex_put_var(self.fileId,step,var_type,var_id,block_id,num_values,var_vals)
    return True
# --------------------------------------------------------------------
  def __ex_get_side_set_node_list_len(self,id):
    """Return the node-list length of a side set (wraps ex_get_side_set_node_list_len).

    id: side set id
    Returns the raw ctypes integer (c_longlong or c_int per EX_BULK_INT64_API).
    """
    side_set_id = c_longlong(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      side_set_node_list_len = c_longlong(0)
    else:
      side_set_node_list_len = c_int(0)
    EXODUS_LIB.ex_get_side_set_node_list_len(self.fileId,side_set_id,byref(side_set_node_list_len))
    return side_set_node_list_len
# --------------------------------------------------------------------
  def __ex_get_side_set_param(self,id):
    """Read a side set's parameters (wraps ex_get_side_set_param).

    id: side set id
    Returns plain Python ints: (num_side_in_set, num_dist_fact_in_set).
    """
    side_set_id = c_longlong(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      num_side_in_set = c_longlong(0)
      num_dist_fact_in_set = c_longlong(0)
    else:
      num_side_in_set = c_int(0)
      num_dist_fact_in_set = c_int(0)
    EXODUS_LIB.ex_get_side_set_param(self.fileId,side_set_id,byref(num_side_in_set),\
                                     byref(num_dist_fact_in_set))
    return (int(num_side_in_set.value),int(num_dist_fact_in_set.value))
# --------------------------------------------------------------------
  def __ex_put_side_set_param(self,id,numSides,numDistFacts):
    """Define a side set's parameters (wraps ex_put_side_set_param).

    id:           side set id
    numSides:     number of sides in the set
    numDistFacts: number of distribution factors in the set
    Returns True.
    """
    side_set_id = c_longlong(id)
    num_side_in_set = c_longlong(numSides)
    num_dist_fact_in_set = c_longlong(numDistFacts)
    EXODUS_LIB.ex_put_side_set_param(self.fileId,side_set_id,num_side_in_set,num_dist_fact_in_set)
    return True
# --------------------------------------------------------------------
  def __ex_get_side_set(self,sideSetId):
    """Read a side set's element and side lists (wraps ex_get_side_set).

    sideSetId: side set id
    Returns (elem_list, side_list) as parallel ctypes arrays, or a pair of
    empty Python lists when the set has no sides.
    """
    side_set_id = c_longlong(sideSetId)
    (num_side_in_set,num_dist_fact_in_set) = self.__ex_get_side_set_param(sideSetId)
    # avoid allocating zero-length ctypes arrays for empty sets
    if num_side_in_set == 0:
      return ([], [])
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      side_set_elem_list = (c_longlong * num_side_in_set)()
      side_set_side_list = (c_longlong * num_side_in_set)()
    else:
      side_set_elem_list = (c_int * num_side_in_set)()
      side_set_side_list = (c_int * num_side_in_set)()
    EXODUS_LIB.ex_get_side_set(self.fileId,side_set_id,\
                               byref(side_set_elem_list),\
                               byref(side_set_side_list) )
    return (side_set_elem_list,side_set_side_list)
# --------------------------------------------------------------------
  def __ex_put_side_set(self,id,sideSetElements,sideSetSides):
    """Write a side set's element and side lists (wraps ex_put_side_set).

    id:              side set id
    sideSetElements: sequence of element ids
    sideSetSides:    parallel sequence of local side numbers
    Returns True.
    """
    side_set_id = c_longlong(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      side_set_elem_list = (c_longlong * len(sideSetElements))()
      side_set_side_list = (c_longlong * len(sideSetSides))()
      for i in range(len(sideSetElements)):
        side_set_elem_list[i] = c_longlong(sideSetElements[i])
        side_set_side_list[i] = c_longlong(sideSetSides[i])
    else:
      side_set_elem_list = (c_int * len(sideSetElements))()
      side_set_side_list = (c_int * len(sideSetSides))()
      for i in range(len(sideSetElements)):
        side_set_elem_list[i] = c_int(sideSetElements[i])
        side_set_side_list[i] = c_int(sideSetSides[i])
    EXODUS_LIB.ex_put_side_set(self.fileId,side_set_id,side_set_elem_list,side_set_side_list)
    return True
# --------------------------------------------------------------------
  def __ex_get_sset_var_tab(self):
    """Read the side-set variable truth table (wraps ex_get_sset_var_tab).

    Returns a flat list of booleans, one per (side set, variable) pair,
    set-major.
    """
    self.__ex_get_side_set_ids()
    side_set_count = c_int(len(self.sideSetIds))
    variable_count = self.__ex_get_var_param("s")
    truth_table = (c_int * (side_set_count.value * variable_count.value))()
    EXODUS_LIB.ex_get_sset_var_tab(self.fileId,
                                   side_set_count,
                                   variable_count,
                                   byref(truth_table))
    truthTab = []
    for val in truth_table:
      if val:
        truthTab.append(True)
      else:
        truthTab.append(False)
    return truthTab
# --------------------------------------------------------------------
  def __ex_put_sset_var_tab(self,truthTab):
    """Write the side-set variable truth table (wraps ex_put_sset_var_tab).

    truthTab: flat list of booleans, one per (side set, variable) pair,
    set-major.
    Returns True.
    """
    self.__ex_get_side_set_ids()
    num_blks = c_int(len(self.sideSetIds))
    num_vars = self.__ex_get_var_param("s")
    truth_tab = (c_int * (num_blks.value*num_vars.value))()
    for i in xrange(len(truthTab)):
      boolVal = truthTab[i]
      if boolVal:
        truth_tab[i] = c_int(1)
      else:
        truth_tab[i] = c_int(0)
    EXODUS_LIB.ex_put_sset_var_tab(self.fileId,num_blks,num_vars,truth_tab)
    return True
# --------------------------------------------------------------------
  def __ex_get_side_set_dist_fact(self, sideSetId):
    """Read a side set's distribution factors (wraps ex_get_side_set_dist_fact).

    sideSetId: side set id
    Returns a ctypes c_double array sized by the set's node-list length.
    """
    side_set_id = c_longlong(sideSetId)
    side_set_node_list_len = self.__ex_get_side_set_node_list_len(sideSetId)
    set_dfs = (c_double * side_set_node_list_len.value)()
    EXODUS_LIB.ex_get_side_set_dist_fact(self.fileId,side_set_id,byref(set_dfs))
    return set_dfs
# --------------------------------------------------------------------
  def __ex_put_side_set_dist_fact(self,sideSetId,sideSetDistFact):
    """Write a side set's distribution factors (wraps ex_put_side_set_dist_fact).

    sideSetId:       side set id
    sideSetDistFact: sequence of distribution-factor values
    """
    side_set_id = c_longlong(sideSetId)
    side_set_dist_fact = (c_double * len(sideSetDistFact))()
    for i in range(len(sideSetDistFact)):
      side_set_dist_fact[i] = c_double(sideSetDistFact[i])
    EXODUS_LIB.ex_put_side_set_dist_fact(self.fileId,side_set_id,side_set_dist_fact)
# --------------------------------------------------------------------
  def __ex_get_side_set_node_list(self,id):
    """Read a side set's node-count and node lists (wraps ex_get_side_set_node_list).

    id: side set id
    Returns (node_count_list, node_list) as ctypes arrays: one node count
    per side, and the concatenated node ids of every side.
    """
    side_set_id = c_longlong(id)
    side_set_node_list_len = self.__ex_get_side_set_node_list_len(id)
    (num_side_in_set,num_dist_fact_in_set) = self.__ex_get_side_set_param(id)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_BULK_INT64_API):
      side_set_node_cnt_list = (c_longlong * num_side_in_set)()
      side_set_node_list = (c_longlong * side_set_node_list_len.value)()
    else:
      side_set_node_cnt_list = (c_int * num_side_in_set)()
      side_set_node_list = (c_int * side_set_node_list_len.value)()
    EXODUS_LIB.ex_get_side_set_node_list(self.fileId,side_set_id,\
                                         byref(side_set_node_cnt_list),\
                                         byref(side_set_node_list) )
    return (side_set_node_cnt_list,side_set_node_list)
# --------------------------------------------------------------------
  def __ex_get_sset_var(self,timeStep,varId,id):
    """Read one side-set variable's values at a time step (wraps ex_get_sset_var).

    timeStep: 1-based time step index
    varId:    1-based side-set variable index
    id:       side set id
    Returns a ctypes c_double array, one value per side in the set.
    """
    step = c_int(timeStep)
    var_id = c_int(varId)
    side_set_id = c_longlong(id)
    (numSideInSet,numDistFactInSet) = self.__ex_get_side_set_param(id)
    ss_var_vals = (c_double * numSideInSet)()
    num_side_in_set = c_longlong(numSideInSet)
    EXODUS_LIB.ex_get_sset_var(self.fileId,step,var_id,side_set_id,num_side_in_set,ss_var_vals)
    return ss_var_vals
# --------------------------------------------------------------------
  def __ex_put_sset_var(self,timeStep,varId,id,values):
    """Write one side-set variable's values at a time step (wraps ex_put_sset_var).

    timeStep: 1-based time step index
    varId:    1-based side-set variable index
    id:       side set id
    values:   sequence of floats, one per side in the set
    Returns True.
    """
    step = c_int(timeStep)
    var_id = c_int(varId)
    side_set_id = c_longlong(id)
    # set size is queried from the file, not taken from len(values)
    (numSideInSet,numDistFactInSet) = self.__ex_get_side_set_param(id)
    num_side_in_set = c_longlong(numSideInSet)
    ss_var_vals = (c_double * numSideInSet)()
    for i in range(numSideInSet):
      ss_var_vals[i] = float(values[i])
    EXODUS_LIB.ex_put_sset_var(self.fileId,step,var_id,side_set_id,num_side_in_set,ss_var_vals)
    return True
# --------------------------------------------------------------------
  def __ex_get_variable_param(self,varType):
    """Return the number of variables of an entity type (wraps ex_get_variable_param).

    varType: integer ex_entity_type code
    Returns the count as a ctypes c_int.
    """
    var_type = c_int(varType)
    num_vars = c_int(0)
    EXODUS_LIB.ex_get_variable_param(self.fileId,var_type,byref(num_vars))
    return num_vars
# --------------------------------------------------------------------
def __ex_put_variable_param(self,varType,numVars):
var_type = c_int(varType)
num_vars = c_int(numVars)
current_num = self.__ex_get_variable_param(varType)
if current_num.value == num_vars.value:
##print "value already set"
return True
errorInt = EXODUS_LIB.ex_put_variable_param(self.fileId,var_type,num_vars)
if errorInt != 0:
print("ERROR code =", errorInt)
raise Exception("ERROR: ex_put_variable_param had problems. This can only be called once per varType.")
return True
# --------------------------------------------------------------------
  def __ex_get_variable_name(self,varType,varId):
    """Read one variable's name (wraps ex_get_variable_name).

    varType: integer ex_entity_type code
    varId:   1-based variable index
    Returns the raw ctypes string buffer (use .value for the Python string).
    """
    var_type = c_int(varType)
    var_id = c_int(varId)
    name = create_string_buffer(MAX_STR_LENGTH+1)
    EXODUS_LIB.ex_get_variable_name(self.fileId,var_type,var_id,name)
    return name
# --------------------------------------------------------------------
  def __ex_put_variable_name(self,varType,varId,varName):
    """Write one variable's name (wraps ex_put_variable_name).

    varType: integer ex_entity_type code
    varId:   1-based variable index
    varName: name string (truncated to MAX_STR_LENGTH by the buffer)
    Returns True.
    """
    var_type = c_int(varType)
    var_id   = c_int(varId)
    name     = create_string_buffer(varName,MAX_STR_LENGTH+1)
    EXODUS_LIB.ex_put_variable_name(self.fileId,var_type,var_id,name)
    return True
# --------------------------------------------------------------------
  def __ex_get_elem_attr_names(self,blkId):
    """Read an element block's attribute names (wraps ex_get_elem_attr_names).

    blkId: element block id
    Returns a list of name strings, one per attribute.
    """
    object_id = c_int(blkId)
    num_attr = c_int(self.num_attr(blkId))
    len_name = self.__ex_inquire_int(ex_inquiry("EX_INQ_READ_NAME_LENGTH"))
    attr_name_ptrs = (POINTER(c_char * (len_name+1)) * num_attr.value)()
    for i in range(num_attr.value):
      attr_name_ptrs[i] = pointer(create_string_buffer(len_name+1))
    EXODUS_LIB.ex_get_elem_attr_names(self.fileId,object_id,byref(attr_name_ptrs))
    attr_names = []
    for cnp in attr_name_ptrs: attr_names.append(cnp.contents.value)
    return attr_names
# --------------------------------------------------------------------
  def __ex_put_elem_attr_names(self,blkId,varNames):
    """Write an element block's attribute names (wraps ex_put_elem_attr_names).

    blkId:    element block id
    varNames: list of name strings; length must equal the block's attribute count
    Returns True.
    """
    object_id = c_int(blkId)
    num_attr = c_int(self.num_attr(blkId))
    len_name = self.__ex_inquire_int(ex_inquiry("EX_INQ_READ_NAME_LENGTH"))
    attr_name_ptrs = (POINTER(c_char * (len_name+1)) * num_attr.value)()
    assert len(varNames) == num_attr.value
    for i in range(num_attr.value):
      attr_name_ptrs[i] = pointer(create_string_buffer(varNames[i],len_name+1))
    EXODUS_LIB.ex_put_elem_attr_names(self.fileId,object_id,byref(attr_name_ptrs))
    return True
# --------------------------------------------------------------------
  def __ex_get_prop_names(self,varType,inqType):
    """Read all property names for an object type (wraps ex_get_prop_names).

    varType: integer ex_entity_type code
    inqType: inquiry name used to size the list via ex_inquire
    Returns a list of property-name strings.
    """
    var_type = c_int(varType)
    num_props = c_int(self.__ex_inquire_int(ex_inquiry(inqType)))
    prop_name_ptrs = (POINTER(c_char * (MAX_STR_LENGTH+1)) * num_props.value)()
    for i in range(num_props.value):
      prop_name_ptrs[i] = pointer(create_string_buffer(MAX_STR_LENGTH+1))
    EXODUS_LIB.ex_get_prop_names(self.fileId,var_type,byref(prop_name_ptrs))
    prop_names = []
    for cnp in prop_name_ptrs: prop_names.append(cnp.contents.value)
    return prop_names
# --------------------------------------------------------------------
  def __ex_get_prop(self,objType,objId,propName):
    """Read one property value of one object (wraps ex_get_prop).

    objType:  integer ex_entity_type code
    objId:    object (block/set) id
    propName: property name string
    Returns the property value as a plain Python int.
    """
    obj_type = c_int(objType)
    obj_id = c_longlong(objId)
    prop_name = create_string_buffer(propName,MAX_STR_LENGTH+1)
    if (EXODUS_LIB.ex_int64_status(self.fileId) & EX_IDS_INT64_API):
      prop_val = c_longlong(0)
    else:
      prop_val = c_int(0)
    EXODUS_LIB.ex_get_prop(self.fileId,obj_type,obj_id,byref(prop_name),byref(prop_val))
    return prop_val.value
# --------------------------------------------------------------------
  def __ex_put_prop(self,objType,objId,propName,propVal):
    """Write one property value of one object (wraps ex_put_prop).

    objType:  integer ex_entity_type code
    objId:    object (block/set) id
    propName: property name string
    propVal:  integer property value
    Returns True.
    """
    obj_type = c_int(objType)
    obj_id = c_longlong(objId)
    prop_name = create_string_buffer(propName,MAX_STR_LENGTH+1)
    prop_val = c_longlong(propVal)
    EXODUS_LIB.ex_put_prop(self.fileId,obj_type,obj_id,byref(prop_name),prop_val)
    return True
# --------------------------------------------------------------------
  def __ex_update(self):
    """Flush pending data to the database file (wraps ex_update). Returns True."""
    EXODUS_LIB.ex_update(self.fileId)
    return True
# --------------------------------------------------------------------
# Utility Functions
# --------------------------------------------------------------------
def collectElemConnectivity(exodusHandle,connectivity):
  """
  Fill *connectivity* (which must be an empty list) with one entry per
  element across all blocks; each entry is the slice of node ids that
  make up that element.
  Usage:
    exodusHandle = exodus("file.g","r")
    connectivity = []
    collectElemConnectivity(exodusHandle,connectivity)
    exodusHandle.close()
  """
  if type(connectivity) is not list:
    raise Exception("ERROR: connectivity is not a list in call to collectElemConnectivity().")
  if connectivity:
    raise Exception("ERROR: connectivity is not empty in call to collectElemConnectivity().")
  for blockId in exodusHandle.get_elem_blk_ids():
    (blockConn,elemCount,nodesPerElem) = exodusHandle.get_elem_connectivity(blockId)
    for elemIndex in range(elemCount):
      # slice this element's nodes out of the flat block connectivity
      start = elemIndex * nodesPerElem
      connectivity.append( blockConn[start:start + nodesPerElem] )
# --------------------------------------------------------------------
def collectLocalNodeToLocalElems(exodusHandle,connectivity,localNodeToLocalElems):
  """
  Fill *localNodeToLocalElems* (which must be an empty list) so that entry n
  is the list of local element ids attached to local node n.
  Usage:
    exodusHandle = exodus("file.g","r")
    connectivity = [] ## If this is not empty it will assume it is already filled.
    localNodeToLocalElems = []
    collectLocalNodeToLocalElems(exodusHandle,connectivity,localNodeToLocalElems)
    exodusHandle.close()
  """
  if type(connectivity) is not list:
    raise Exception("ERROR: connectivity is not a list in call to collectLocalNodeToLocalElems().")
  if type(localNodeToLocalElems) is not list:
    raise Exception("ERROR: localNodeToLocalElems is not a list in call to collectLocalNodeToLocalElems().")
  if localNodeToLocalElems:
    raise Exception("ERROR: localNodeToLocalElems is not empty in call to collectLocalNodeToLocalElems().")
  if not connectivity:
    collectElemConnectivity(exodusHandle,connectivity)
  # one slot per node id plus slot 0, so 1-based node ids index directly
  for _ in range(exodusHandle.num_nodes()+1):
    localNodeToLocalElems.append([])
  for elemId, elemConn in enumerate(connectivity):
    for nodeId in elemConn:
      localNodeToLocalElems[nodeId].append(elemId)
# --------------------------------------------------------------------
def collectLocalElemToLocalElems(exodusHandle,connectivity,localNodeToLocalElems,localElemToLocalElems):
  """
  Fill *localElemToLocalElems* (which must be an empty list) so that entry e
  is the list of local element ids that share at least one node with element e
  (including e itself).
  Usage:
    exodusHandle = exodus("file.g","r")
    connectivity = [] ## If this is not empty it will assume it is already filled.
    localNodeToLocalElems = [] ## If this is not empty it will assume it is already filled.
    localElemToLocalElems = []
    collectLocalElemToLocalElems(exodusHandle,connectivity,localNodeToLocalElems,localElemToLocalElems)
    exodusHandle.close()
  """
  if type(connectivity) is not list:
    raise Exception("ERROR: connectivity is not a list in call to collectLocalElemToLocalElems().")
  if type(localNodeToLocalElems) is not list:
    raise Exception("ERROR: localNodeToLocalElems is not a list in call to collectLocalElemToLocalElems().")
  if type(localElemToLocalElems) is not list:
    raise Exception("ERROR: localElemToLocalElems is not a list in call to collectLocalElemToLocalElems().")
  if localElemToLocalElems:
    raise Exception("ERROR: localElemToLocalElems is not empty in call to collectLocalElemToLocalElems().")
  # build the prerequisite maps on demand
  if not connectivity:
    collectElemConnectivity(exodusHandle,connectivity)
  if not localNodeToLocalElems:
    collectLocalNodeToLocalElems(exodusHandle,connectivity,localNodeToLocalElems)
  elemCount = exodusHandle.num_elems()
  for _ in range(elemCount):
    localElemToLocalElems.append([])
  for elemId in range(elemCount):
    # union of every element touching any node of this element
    neighborSet = set()
    for nodeId in list(connectivity[elemId]):
      neighborSet.update( localNodeToLocalElems[nodeId] )
    localElemToLocalElems[elemId] = list( neighborSet )
# --------------------------------------------------------------------
def copy_mesh(fromFileName, toFileName, exoFromObj = None, array_type = 'ctype'):
  """
  This function creates an exodus file toFileName and copies only the mesh
  data from exodus file fromFileName, returning a file handle to toFileName.
  The user can either supply an exodus filename (fromFileName) or an exodus
  object (exoFromObj) to copy the mesh data from.

  Copied pieces: QA records, coordinates and names, node/element id maps,
  element blocks (connectivity, attributes, properties, names), node sets,
  and side sets.  Results/variables are NOT copied (see transfer_variables).
  Raises Exception if toFileName already exists.
  """
  debugPrint = False
  #If the user did not supply a exodus object to copy from, attempt to read an
  #exodus database with the name "fromFileName"
  if exoFromObj is None:
    exoFrom = exodus(fromFileName,"r", array_type = array_type)
  else:
    exoFrom = exoFromObj
  if ( os.path.isfile(toFileName) ):
    raise Exception("ERROR: ", toFileName, " file already exists cowardly exiting instead of overwriting in call to copy_mesh().")
  # global sizes drive the creation of the destination database
  title = exoFrom.title()
  numDim = exoFrom.num_dimensions()
  numNodes = exoFrom.num_nodes()
  numElems = exoFrom.num_elems()
  numBlks = exoFrom.num_blks()
  numNodeSets = exoFrom.num_node_sets()
  numSideSets = exoFrom.num_side_sets()
  exoTo = exodus( toFileName, mode = "w", array_type = array_type, \
                  title = title, numDims =numDim, \
                  numNodes = numNodes, numElems = numElems, numBlocks = numBlks, \
                  numNodeSets = numNodeSets, numSideSets = numSideSets )
  if debugPrint:
    print("Transfer QA records")
  qaRecords = exoFrom.get_qa_records()
  exoTo.put_qa_records( qaRecords )
  if debugPrint:
    print("Transfer Nodal Coordinates and Names")
  exoTo.put_coord_names( exoFrom.get_coord_names() )
  (xCoords,yCoords,zCoords) = exoFrom.get_coords()
  exoTo.put_coords(xCoords,yCoords,zCoords)
  if debugPrint:
    print("Transfer Node Id Map")
  nodeIdMap = exoFrom.get_node_id_map()
  exoTo.put_node_id_map(nodeIdMap)
  if debugPrint:
    print("Transfer Element Data")
  blkIds = exoFrom.get_elem_blk_ids()
  for blkId in blkIds:
    (elemType,numElem,nodesPerElem,numAttr) = exoFrom.elem_blk_info(blkId)
    exoTo.put_elem_blk_info(blkId,elemType,numElem,nodesPerElem,numAttr)
    (connectivity,numElem,nodesPerElem) = exoFrom.get_elem_connectivity(blkId)
    exoTo.put_elem_connectivity(blkId,connectivity)
    if numAttr > 0:
      attrNames = exoFrom.get_element_attribute_names(blkId)
      exoTo.put_element_attribute_names(blkId,attrNames)
      exoTo.put_elem_attr(blkId, exoFrom.get_elem_attr(blkId))
    elemProps = exoFrom.get_element_property_names()
    for elemProp in elemProps:
      propVal = exoFrom.get_element_property_value(blkId,elemProp)
      # the ID property is created implicitly by put_elem_blk_info
      if elemProp == "ID" and propVal == blkId:
        continue
      else:
        exoTo.put_element_property_value(blkId,elemProp,propVal)
    blockName = exoFrom.get_elem_blk_name(blkId)
    exoTo.put_elem_blk_name(blkId,blockName)
  if debugPrint:
    print("Transfer Element Id Map")
  elemIdMap = exoFrom.get_elem_id_map()
  exoTo.put_elem_id_map(elemIdMap)
  if debugPrint:
    print("Transfer Node Sets")
  if numNodeSets > 0:
    nodeSetIds = exoFrom.get_node_set_ids()
    for nsId in nodeSetIds:
      (numSetNodes,numSetDistFacts) = exoFrom.get_node_set_params(nsId)
      exoTo.put_node_set_params(nsId,numSetNodes,numSetDistFacts)
      nsNodes = exoFrom.get_node_set_nodes(nsId)
      exoTo.put_node_set(nsId,nsNodes)
      if numSetDistFacts > 0:
        nsDF = exoFrom.get_node_set_dist_facts(nsId)
        exoTo.put_node_set_dist_fact(nsId,nsDF)
      nodeSetName = exoFrom.get_node_set_name(nsId)
      exoTo.put_node_set_name(nsId,nodeSetName)
      nodeSetProps = exoFrom.get_node_set_property_names()
      for nodeSetProp in nodeSetProps:
        propVal = exoFrom.get_node_set_property_value(nsId,nodeSetProp)
        # skip the implicit ID property, as for element blocks above
        if nodeSetProp == "ID" and propVal == nsId:
          continue
        else:
          exoTo.put_node_set_property_value(nsId,nodeSetProp,propVal)
  if debugPrint:
    print("Transfer Side Sets")
  if numSideSets > 0:
    sideSetIds = exoFrom.get_side_set_ids()
    for ssId in sideSetIds:
      (numSetSides,numSetDistFacts) = exoFrom.get_side_set_params(ssId)
      exoTo.put_side_set_params(ssId,numSetSides,numSetDistFacts)
      (elemList,sideList) = exoFrom.get_side_set(ssId)
      exoTo.put_side_set(ssId,elemList,sideList)
      if numSetDistFacts > 0:
        ssDF = exoFrom.get_side_set_dist_fact(ssId)
        exoTo.put_side_set_dist_fact(ssId,ssDF)
      sideSetName = exoFrom.get_side_set_name(ssId)
      exoTo.put_side_set_name(ssId,sideSetName)
      sideSetProps = exoFrom.get_side_set_property_names()
      for sideSetProp in sideSetProps:
        propVal = exoFrom.get_side_set_property_value(ssId,sideSetProp)
        # skip the implicit ID property, as for element blocks above
        if sideSetProp == "ID" and propVal == ssId:
          continue
        else:
          exoTo.put_side_set_property_value(ssId,sideSetProp,propVal)
  #If the user did not supply an exodus object to copy from, then close the
  #database.
  if exoFromObj is None:
    exoFrom.close()
  return(exoTo)
def transfer_variables(exoFrom, exoTo, array_type = 'ctype',
                       additionalGlobalVariables=[], additionalNodalVariables=[], \
                       additionalElementVariables=[]):
  """
  This function transfers variables from exoFrom to exoTo and allows
  additional variables to be added with additionalGlobalVariables,
  additionalNodalVariables, and additionalElementVariables.  Additional
  variables values are set to their defaults so that the user can populate
  them later.
  exoFrom: exodus object to transfer from
  exoTo: exodus object to transfer to
  additionalGlobalVariables: list of global variable names to add.
  additionalNodalVaraibles: list of nodal variable names to add.
  additionalElementVariables: should be a list of element variable names to add to all blocks or
                              tuples ( name, blkIds ) where name is the element variable to add
                              and blkIds is a list of blkIds to add it to.

  NOTE(review): the mutable default arguments ([]) are shared across calls;
  harmless here because they are only read, never mutated.
  """
  ## IDEA: It may make sense to make transfer_variables() strictly transfer
  ## variables, and use add_variables() to add new variables.
  debugPrint = False
  if type(additionalGlobalVariables) is not list:
    raise Exception("ERROR: additionalGlobalVariables is not a list.")
  if type(additionalNodalVariables) is not list:
    raise Exception("ERROR: additionalNodalVariables is not a list.")
  if type(additionalElementVariables) is not list:
    raise Exception("ERROR: additionalElementVariables is not a list.")
  if debugPrint:
    print("Transfer Info records")
  numInfoRecs = exoFrom.num_info_records()
  if numInfoRecs > 0:
    infoRecs = exoFrom.get_info_records()
    exoTo.put_info_records( infoRecs )
  if debugPrint:
    print("Transfer time values")
  nSteps = exoFrom.num_times()
  # with no time steps there are no variable values to transfer
  if nSteps == 0:
    return(exoTo)
  timeVals = exoFrom.get_times()
  for step in xrange(nSteps):
    exoTo.put_time( step+1, timeVals[step] )
  if debugPrint:
    print("Add Global Variables")
  nNewGlobalVars = len(additionalGlobalVariables)
  nGlobalVars = exoFrom.get_global_variable_number() + nNewGlobalVars
  # new variables are initialized to 0.0 at every step
  defaultNewVarVals = []
  for i in xrange(nNewGlobalVars):
    defaultNewVarVals.append(0.0)
  if nGlobalVars > 0:
    exoTo.set_global_variable_number(nGlobalVars)
    gVarNames = exoFrom.get_global_variable_names()
    gVarNames.extend( additionalGlobalVariables )
    for nameIndex in xrange(nGlobalVars):
      globalVarName = gVarNames[nameIndex]
      exoTo.put_global_variable_name(globalVarName,nameIndex+1)
    for step in xrange(nSteps):
      gValues = exoFrom.get_all_global_variable_values(step+1)
      if array_type == 'numpy':
        gValues = exoTo.np.append(gValues, defaultNewVarVals)
      else:
        gValues.extend( defaultNewVarVals )
      exoTo.put_all_global_variable_values(step+1,gValues)
  if debugPrint:
    print("Add Nodal Variables")
  nNewNodalVars = len(additionalNodalVariables)
  nOrigNodalVars = exoFrom.get_node_variable_number()
  nNodalVars = nOrigNodalVars + nNewNodalVars
  if nNodalVars > 0:
    exoTo.set_node_variable_number(nNodalVars)
    nVarNames = exoFrom.get_node_variable_names()
    nVarNames.extend( additionalNodalVariables )
    for nameIndex in xrange(nNodalVars):
      nodalVarName = nVarNames[nameIndex]
      exoTo.put_node_variable_name(nodalVarName,nameIndex+1)
      # only original variables have values to copy; added ones stay default
      if nameIndex < nOrigNodalVars:
        for step in xrange(nSteps):
          nValues = exoFrom.get_node_variable_values(nodalVarName,step+1)
          exoTo.put_node_variable_values(nodalVarName,step+1,nValues)
  if debugPrint:
    print("Construct Truth Table for additionalElementVariables")
  blkIds = exoFrom.get_elem_blk_ids()
  numBlks = exoFrom.num_blks()
  newElemVariableNames = []
  newElemVariableBlocks = []
  for item in additionalElementVariables:
    # tuple form: (name, [blkIds]); string form: add to all blocks
    if type(item) is tuple:
      newElemVariableNames.append( item[0] )
      inBlks = []
      for blkId in item[1]:
        if blkId in blkIds:
          inBlks.append(blkId)
      newElemVariableBlocks.append( inBlks )
    elif type(item) is str:
      newElemVariableNames.append( item )
      newElemVariableBlocks.append( blkIds )
    else:
      print("Warning additionalElementVariable item ", item, " is not right type to add.")
      print("should be a string or tuple, skipping")
  if debugPrint:
    print("Add Element Variables")
  nNewElemVars = len(newElemVariableNames)
  nOrigElemVars = exoFrom.get_element_variable_number()
  nElemVars = nOrigElemVars + nNewElemVars
  if nElemVars > 0:
    exoTo.set_element_variable_number(nElemVars)
    origElemVarNames = exoFrom.get_element_variable_names()
    eVarNames = exoFrom.get_element_variable_names()
    eVarNames.extend( newElemVariableNames )
    truthTable = []
    if nOrigElemVars > 0:
      truthTable = exoFrom.get_element_variable_truth_table()
    if nNewElemVars > 0:
      # rebuild the block-major truth table, appending one entry per new
      # variable after each block's original entries
      newTruth = []
      for j in xrange(numBlks):
        for k in xrange(nOrigElemVars):
          index = j*nOrigElemVars + k
          newTruth.append( truthTable[index] )
        for m in xrange(nNewElemVars):
          if blkIds[j] in newElemVariableBlocks[m]:
            newTruth.append(True)
          else:
            newTruth.append(False)
      truthTable = newTruth
    exoTo.set_element_variable_truth_table(truthTable)
    for nameIndex in xrange(nElemVars):
      elemVarName = eVarNames[nameIndex]
      exoTo.put_element_variable_name(elemVarName,nameIndex+1)
    truthIndex = 0
    for blkId in blkIds:
      for eVarName in origElemVarNames:
        if truthTable[truthIndex]:
          for step in xrange(nSteps):
            eValues = exoFrom.get_element_variable_values(blkId,eVarName,step+1)
            exoTo.put_element_variable_values(blkId,eVarName,step+1,eValues)
        truthIndex = truthIndex + 1
      # skip over the new-variable slots appended for this block
      truthIndex = truthIndex + nNewElemVars
  ## TODO: Transfer Nodeset Variables
  ## TODO: Transfer Sideset Variables
  return(exoTo)
def add_variables(exo, global_vars = [], nodal_vars = [], element_vars = [], \
    array_type = 'ctype'):
    """
    This function adds variables to the exodus object. The values of the variables
    are set to their defaults so that the user can populate them later.
    exo: exodus database object (must not be open in read-only mode)
    global_vars: list of global variable names to add.
    nodal_vars: list of nodal variable names to add.
    element_vars: should be a list of element variable names to add to all blocks or
                  tuples ( name, blkIds ) where name is the element variable to add
                  and blkIds is a list of blkIds to add it to.
    array_type: 'ctype' or 'numpy'; controls how global value arrays are extended.
    Returns the same exodus object with space allocated for the new variables.
    Raises Exception if an argument is not a list or the database is read-only.
    Note: the mutable [] defaults are safe here because the argument lists are
    only read, never modified.
    """
    debugPrint = False
    if type(global_vars) is not list:
        raise Exception("ERROR: global_vars is not a list.")
    if type(nodal_vars) is not list:
        raise Exception("ERROR: nodal_vars is not a list.")
    if type(element_vars) is not list:
        raise Exception("ERROR: element_vars is not a list.")
    # BUG FIX: was "exo.modeChar is 'r'" -- identity comparison against a
    # string literal is implementation dependent (and a SyntaxWarning on
    # modern Python); use equality.
    if exo.modeChar == 'r':
        raise Exception("ERROR: variables cannot be added to an exodus object in read only mode")
    if debugPrint:
        print("Add Global Variables")
    n_new_vars = len(global_vars)
    n_old_vars = exo.get_global_variable_number()
    n_vars = n_old_vars + n_new_vars
    default_vals = [0.0] * n_new_vars
    if n_new_vars > 0:
        exo.set_global_variable_number(n_vars)
        for i, var_name in enumerate(global_vars):
            exo.put_global_variable_name(var_name, n_old_vars + i + 1)
        #One might wish to put all the values for a given global variable in the
        #database at once, but exo.put_global_variable_value() ends up loading
        #all the global variables for a given step and then putting them all back
        #in, so we might as well just use exo.put_all_global_variable_values().
        nSteps = exo.num_times()
        # FIX: 'xrange' is undefined on Python 3 (this file already uses the
        # print() function form); 'range' behaves identically here.
        for step in range(nSteps):
            gValues = exo.get_all_global_variable_values(step+1)
            if array_type == 'numpy':
                gValues = exo.np.append(gValues, default_vals)
            else:
                gValues.extend( default_vals )
            exo.put_all_global_variable_values(step+1, gValues)
    if debugPrint:
        print("Add Nodal Variables")
    n_new_vars = len(nodal_vars)
    n_old_vars = exo.get_node_variable_number()
    n_vars = n_old_vars + n_new_vars
    if n_new_vars > 0:
        exo.set_node_variable_number(n_vars)
        for i, var_name in enumerate(nodal_vars):
            exo.put_node_variable_name(var_name, i + n_old_vars + 1)
    if debugPrint:
        print("Construct Truth Table for additionalElementVariables")
    new_e_var_names = []
    new_e_var_blks = []
    blk_ids = exo.get_elem_blk_ids()
    for item in element_vars:
        if type(item) is tuple:
            new_e_var_names.append( item[0] )
            # keep only requested block ids that actually exist in the file
            in_blks = []
            for blk_id in item[1]:
                if blk_id in blk_ids:
                    in_blks.append(blk_id)
            new_e_var_blks.append( in_blks )
        elif type(item) is str:
            # a bare name is added on every block
            new_e_var_names.append( item )
            new_e_var_blks.append( blk_ids )
        else:
            print("Warning additionalElementVariable item ", item, " is not right type to add.")
            print("should be a string or tuple, skipping")
    if debugPrint:
        print("Add Element Variables")
    n_new_vars = len(new_e_var_names)
    n_old_vars = exo.get_element_variable_number()
    n_vars = n_old_vars + n_new_vars
    if n_new_vars > 0:
        exo.set_element_variable_number(n_vars)
        old_truth_table = []
        if n_old_vars > 0:
            old_truth_table = exo.get_element_variable_truth_table()
        # Rebuild the truth table row by row: per block, keep the old entries
        # and append one entry per new variable (True only on its blocks).
        truth_table = []
        n_blks = exo.num_blks()
        for j in range(n_blks):
            for k in range(n_old_vars):
                ndx = j * n_old_vars + k
                truth_table.append( old_truth_table[ndx] )
            for m in range(n_new_vars):
                if blk_ids[j] in new_e_var_blks[m]:
                    truth_table.append(True)
                else:
                    truth_table.append(False)
        exo.set_element_variable_truth_table(truth_table)
        for i, var_name in enumerate(new_e_var_names):
            exo.put_element_variable_name(var_name, n_old_vars + i + 1)
    ## TODO: Add Nodeset Variables
    ## TODO: Add Sideset Variables
    return(exo)
# --------------------------------------------------------------------
def copyTransfer(fromFileName, toFileName, array_type = 'ctype', \
    additionalGlobalVariables=[], additionalNodalVariables=[], additionalElementVariables=[]):
    """
    Create exodus file toFileName, copy everything from exodus file
    fromFileName into it, and return an open handle to toFileName.

    Extra storage is allocated for additionalGlobalVariables,
    additionalNodalVariables and additionalElementVariables if specified.

    additionalGlobalVariables: list of global variable names to add.
    additionalNodalVariables: list of nodal variable names to add.
    additionalElementVariables: list of element variable names to add to all
        blocks, or tuples ( name, blkIds ) where name is the element variable
        to add and blkIds is a list of blkIds to add it to.

    Usage:
    fromFileName = "input.e"
    toFileName = "output.e"
    addGlobalVariables = [] ## Do not add any new global variables
    addNodeVariables = ["node_dummy1","node_dummy2"] ## Add node_dummy1 and node_dummy2 as new node variables
    addElementVariables = [ ("elem_dummy1",[1,2,3]), "elem_dummy2" ] ## Add elem_dummy1 on blkIds 1,2,3 and elem_dummy2 on all blocks
    toFileHandle = copyTransfer(fromFileName, toFileName,
                                additionalGlobalVariables=addGlobalVariables,
                                additionalNodalVariables=addNodeVariables,
                                additionalElementVariables=addElementVariables)
    ## Fill in new variables
    toFileHandle.close()
    """
    srcExo = exodus(fromFileName, "r", array_type=array_type)
    # Duplicate the mesh first, then carry all variable data across together
    # with the requested additional variables.
    dstExo = copy_mesh(fromFileName, toFileName, exoFromObj=srcExo, array_type=array_type)
    dstExo = transfer_variables(srcExo, dstExo,
                                additionalGlobalVariables=additionalGlobalVariables,
                                additionalNodalVariables=additionalNodalVariables,
                                additionalElementVariables=additionalElementVariables,
                                array_type=array_type)
    srcExo.close()
    return dstExo
def ctype_to_numpy(self, c_array):
    """
    Wrap a c-type array as a numpy array.

    ctypes currently produce invalid PEP 3118 type codes, which causes numpy
    to issue a warning. This is a bug and can be ignored, so the warning is
    suppressed for the duration of the conversion.
    http://stackoverflow.com/questions/4964101/pep-3118-warning-when-using-ctypes-array-as-numpy-array
    """
    with self.warnings.catch_warnings():
        self.warnings.simplefilter('ignore')
        return self.np.ctypeslib.as_array(c_array)
| SalvusHub/salvus | src/py/pysalvus/model_handling/exodus.py | Python | mit | 170,412 | [
"NetCDF"
] | 33ea530cc6442313b89a981b22744166676a9e8663ad35cb944592364800a7a4 |
#!/usr/bin/env python
# Restart an existing NEB (nudged elastic band) calculation with Turbomole:
# the last 5 images are reloaded from neb.traj and optimization continues
# with the climbing-image option enabled.
import os
from ase.io import read
from ase.neb import NEB
from ase.calculators.turbomole import Turbomole
from ase.optimize import BFGS
# Endpoint geometries.
# NOTE(review): initial/final are not referenced below because the band is
# rebuilt from neb.traj -- confirm whether these reads are still needed.
initial = read('initial.coord')
final = read('final.coord')
# Turbomole reads its geometry from a file named 'coord'; seed it from the
# initial image.
os.system('rm -f coord; cp initial.coord coord')
#restart
# Reload the full band (last 5 configurations) from the previous run.
configs = read('neb.traj@-5:')
# climb=True enables the climbing-image variant of NEB.
band = NEB(configs, climb=True)
#Set calculators
for config in configs:
    config.set_calculator(Turbomole())
# Optimize:
# continue relaxing until the maximum force drops below fmax
relax = BFGS(band, trajectory='neb.traj')
relax.run(fmax=0.05)
| alexei-matveev/ase-local | doc/ase/calculators/turbomole_ex3_restart_diffuse_usingNEB.py | Python | gpl-2.0 | 501 | [
"ASE",
"TURBOMOLE"
] | 69110b7b4e9206392a2a6fe92871c228441702505267e8318b477d2fbdf17f8c |
import psi4
import numpy as np
import memory_profiler as mp
import time
import gc
"""
This is a simple script that verifies several ways of accessing numpy arrays
and ensures that their memory is properly cleaned.
"""
# If it's too small, something odd happens with the memory manager
mat_size = 10000
def snapshot_memory():
    """Return the current memory usage of this process in bytes."""
    megabytes = mp.memory_usage()[0]
    return megabytes * 1048576
def check_leak(func, tol=1.e6):
    """Run func and verify that memory usage returns to within tol bytes
    of its starting value; raise MemoryError otherwise."""
    before = snapshot_memory()
    func()
    growth = abs(before - snapshot_memory())
    # A megabyte is excusable due to various GC funcs
    if growth <= tol:
        print("Function %s: PASSED" % func.__name__)
    else:
        raise MemoryError("Function did not correctly clean up")
def build_mat():
    """Allocate a Matrix and return it without creating any numpy view."""
    return psi4.core.Matrix(mat_size, mat_size)
def build_view_mat():
    """Allocate a Matrix together with a numpy view via its .np attribute."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    return matrix, matrix.np
def build_viewh_mat():
    # NOTE(review): this function is currently byte-for-byte identical to
    # build_view_mat (both use the .np view); presumably it was meant to
    # exercise a different view accessor -- confirm the intent.
    mat = psi4.core.Matrix(mat_size, mat_size)
    view = mat.np
    return mat, view
def build_view_set_mat():
    """Allocate a Matrix, take its .np view, and write through the view."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    window = matrix.np
    window[:] = 5
    return matrix, window
def build_arr_mat():
    """Allocate a Matrix and wrap it with np.asarray (no copy)."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    return matrix, np.asarray(matrix)
def build_copy_mat():
    """Allocate a Matrix and copy its contents into a fresh numpy array."""
    matrix = psi4.core.Matrix(mat_size, mat_size)
    return matrix, np.array(matrix)
if __name__ == "__main__":
# Run the checks
start = snapshot_memory()
check_leak(build_mat)
check_leak(build_view_mat)
check_leak(build_viewh_mat)
check_leak(build_view_set_mat)
check_leak(build_arr_mat)
check_leak(build_copy_mat)
# Double check totals
diff = abs(start - snapshot_memory())
if diff > 1.e6:
raise MemoryError("\nA function leaked %d bytes of memory!" % diff)
else:
print("\nNo leaks detected!")
| jH0ward/psi4 | tests/pytest/test_np_views.py | Python | lgpl-3.0 | 1,834 | [
"Psi4"
] | 8705be81f68a9ac75438bca31d80f0326979cab97b8b66a069cca83d5e349aee |
from __future__ import (absolute_import, division, print_function)
from mantid.api import (DataProcessorAlgorithm, mtd, AlgorithmFactory,
FileProperty, FileAction,
MultipleFileProperty, WorkspaceProperty,
PropertyMode, Progress)
from mantid.simpleapi import (LoadIsawUB, MaskDetectors, ConvertUnits,
CropWorkspace, LoadInstrument,
SetGoniometer, SetUB, ConvertToMD,
MDNormSCD, DivideMD, MinusMD, Load,
DeleteWorkspace, RenameWorkspaces,
CreateSingleValuedWorkspace, LoadNexus,
MultiplyMD, LoadIsawDetCal, LoadMask)
from mantid.geometry import SpaceGroupFactory, SymmetryOperationFactory
from mantid.kernel import VisibleWhenProperty, PropertyCriterion, FloatArrayLengthValidator, FloatArrayProperty, Direction, Property
from mantid import logger
import numpy as np
class SingleCrystalDiffuseReduction(DataProcessorAlgorithm):
    """Single crystal diffuse scattering reduction: loads runs, converts to
    HKL MD space for each symmetry-equivalent UB, normalises with vanadium
    flux/solid-angle via MDNormSCD, and optionally subtracts a scaled
    background measurement."""

    # Intermediate workspaces created during reduction; removed both before
    # and after PyExec so repeated runs start from a clean state.
    temp_workspace_list = ['__sa', '__flux', '__run', '__md', '__data', '__norm',
                           '__bkg', '__bkg_md', '__bkg_data', '__bkg_norm', '__scaled_background',
                           'PreprocessedDetectorsWS']

    def category(self):
        return "Diffraction\\Reduction"

    def seeAlso(self):
        return [ "ConvertToMD","MDNormSCD" ]

    def name(self):
        return "SingleCrystalDiffuseReduction"

    def summary(self):
        # FIX: user-visible typo "substraction" -> "subtraction"
        return "Single Crystal Diffuse Scattering Reduction, normalisation, symmetry and background subtraction"

    def PyInit(self):
        """Declare all input/output properties and group them for the GUI."""
        # files to reduce
        self.declareProperty(MultipleFileProperty(name="Filename",
                                                  extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
                             "Files to combine in reduction")
        # background
        self.declareProperty(FileProperty(name="Background",defaultValue="",action=FileAction.OptionalLoad,
                                          extensions=["_event.nxs", ".nxs.h5", ".nxs"]),
                             "Background run")
        self.declareProperty("BackgroundScale", 1.0,
                             doc="The background will be scaled by this number before being subtracted.")
        # Filter by TOF
        self.copyProperties('LoadEventNexus', ['FilterByTofMin', 'FilterByTofMax'])
        # Vanadium SA and flux
        self.declareProperty(FileProperty(name="SolidAngle",defaultValue="",action=FileAction.Load,
                                          extensions=[".nxs"]),
                             doc="An input workspace containing momentum integrated vanadium (a measure"
                             "of the solid angle). See :ref:`MDnormSCD <algm-MDnormSCD>` for details")
        self.declareProperty(FileProperty(name="Flux",defaultValue="",action=FileAction.Load,
                                          extensions=[".nxs"]),
                             "An input workspace containing momentum dependent flux. See :ref:`MDnormSCD <algm-MDnormSCD>` for details")
        self.declareProperty('MomentumMin', Property.EMPTY_DBL,
                             doc="Minimum value in momentum. The max of this value and the flux momentum minimum will be used.")
        self.declareProperty('MomentumMax', Property.EMPTY_DBL,
                             doc="Maximum value in momentum. The min of this value and the flux momentum maximum will be used.")
        # UBMatrix
        self.declareProperty(FileProperty(name="UBMatrix",defaultValue="",action=FileAction.Load,
                                          extensions=[".mat", ".ub", ".txt"]),
                             doc="Path to an ISAW-style UB matrix text file. See :ref:`LoadIsawUB <algm-LoadIsawUB>`")
        # Goniometer
        self.declareProperty('SetGoniometer', False, "Set which Goniometer to use. See :ref:`SetGoniometer <algm-SetGoniometer>`")
        condition = VisibleWhenProperty("SetGoniometer", PropertyCriterion.IsNotDefault)
        self.copyProperties('SetGoniometer', ['Goniometers', 'Axis0', 'Axis1', 'Axis2'])
        self.setPropertySettings("Goniometers", condition)
        self.setPropertySettings('Axis0', condition)
        self.setPropertySettings('Axis1', condition)
        self.setPropertySettings('Axis2', condition)
        # Corrections
        self.declareProperty(FileProperty(name="LoadInstrument",defaultValue="",action=FileAction.OptionalLoad,
                                          extensions=[".xml"]),
                             "Load a different instrument IDF onto the data from a file. See :ref:`LoadInstrument <algm-LoadInstrument>`")
        self.declareProperty(FileProperty(name="DetCal",defaultValue="",action=FileAction.OptionalLoad,
                                          extensions=[".detcal"]),
                             "Load an ISAW DetCal calibration onto the data from a file. See :ref:`LoadIsawDetCal <algm-LoadIsawDetCal>`")
        self.declareProperty(FileProperty(name="MaskFile",defaultValue="",action=FileAction.OptionalLoad,
                                          extensions=[".xml",".msk"]),
                             "Masking file for masking. Supported file format is XML and ISIS ASCII. See :ref:`LoadMask <algm-LoadMask>`")
        # SymmetryOps, name, group number or list of symmetries
        self.declareProperty("SymmetryOps", "",
                             "If specified the symmetry will be applied, can be space group name or number, or list individual symmetries.")
        # Binning output
        self.copyProperties('ConvertToMD', ['Uproj', 'Vproj', 'Wproj'])
        self.declareProperty(FloatArrayProperty("BinningDim0", [-5.05,5.05,101], FloatArrayLengthValidator(3), direction=Direction.Input),
                             "Binning parameters for the 0th dimension. Enter it as a"
                             "comma-separated list of values with the"
                             "format: 'minimum,maximum,number_of_bins'.")
        self.declareProperty(FloatArrayProperty("BinningDim1", [-5.05,5.05,101], FloatArrayLengthValidator(3), direction=Direction.Input),
                             "Binning parameters for the 1st dimension. Enter it as a"
                             "comma-separated list of values with the"
                             "format: 'minimum,maximum,number_of_bins'.")
        self.declareProperty(FloatArrayProperty("BinningDim2", [-5.05,5.05,101], FloatArrayLengthValidator(3), direction=Direction.Input),
                             "Binning parameters for the 2nd dimension. Enter it as a"
                             "comma-separated list of values with the"
                             "format: 'minimum,maximum,number_of_bins'.")
        self.declareProperty('KeepTemporaryWorkspaces', False,
                             "If True the normalization and data workspaces in addition to the normalized data will be outputted")
        self.declareProperty(WorkspaceProperty("OutputWorkspace", "",
                                               optional=PropertyMode.Mandatory,
                                               direction=Direction.Output),
                             "Output Workspace. If background is subtracted _data and _background workspaces will also be made.")

        # Property groups for the GUI
        # Background
        self.setPropertyGroup("Background","Background")
        self.setPropertyGroup("BackgroundScale","Background")
        # Vanadium
        self.setPropertyGroup("SolidAngle","Vanadium")
        self.setPropertyGroup("Flux","Vanadium")
        self.setPropertyGroup("MomentumMin","Vanadium")
        self.setPropertyGroup("MomentumMax","Vanadium")
        # Goniometer
        self.setPropertyGroup("SetGoniometer","Goniometer")
        self.setPropertyGroup("Goniometers","Goniometer")
        self.setPropertyGroup("Axis0","Goniometer")
        self.setPropertyGroup("Axis1","Goniometer")
        self.setPropertyGroup("Axis2","Goniometer")
        # Corrections
        self.setPropertyGroup("LoadInstrument","Corrections")
        self.setPropertyGroup("DetCal","Corrections")
        self.setPropertyGroup("MaskFile","Corrections")
        # Projection and binning
        self.setPropertyGroup("Uproj","Projection and binning")
        self.setPropertyGroup("Vproj","Projection and binning")
        self.setPropertyGroup("Wproj","Projection and binning")
        self.setPropertyGroup("BinningDim0","Projection and binning")
        self.setPropertyGroup("BinningDim1","Projection and binning")
        self.setPropertyGroup("BinningDim2","Projection and binning")

    def validateInputs(self):
        """Check that SymmetryOps names a valid space group number, space
        group symbol, or semicolon-separated list of symmetry operations."""
        issues = dict()

        if self.getProperty("SymmetryOps").value:
            syms=self.getProperty("SymmetryOps").value
            try:
                # an integer is interpreted as a space group number
                if not SpaceGroupFactory.isSubscribedNumber(int(syms)):
                    issues["SymmetryOps"] = 'Space group number '+syms+' is not valid'
            except ValueError:
                # not an integer: try a symbol, then individual operations
                if not SpaceGroupFactory.isSubscribedSymbol(syms):
                    for sym in syms.split(';'):
                        if not SymmetryOperationFactory.exists(sym):
                            issues["SymmetryOps"] = sym+' is not valid symmetry or space group name'

        return issues

    def PyExec(self):
        """Run the reduction over every input run and symmetry-equivalent UB,
        accumulating data and normalization before the final division."""
        # remove possible old temp workspaces
        for ws in self.temp_workspace_list:
            if mtd.doesExist(ws):
                DeleteWorkspace(ws)

        _background = bool(self.getProperty("Background").value)
        _load_inst = bool(self.getProperty("LoadInstrument").value)
        _detcal = bool(self.getProperty("DetCal").value)
        _masking = bool(self.getProperty("MaskFile").value)
        _outWS_name = self.getPropertyValue("OutputWorkspace")

        UBList = self._generate_UBList()

        dim0_min, dim0_max, dim0_bins = self.getProperty('BinningDim0').value
        dim1_min, dim1_max, dim1_bins = self.getProperty('BinningDim1').value
        dim2_min, dim2_max, dim2_bins = self.getProperty('BinningDim2').value
        MinValues="{},{},{}".format(dim0_min,dim1_min,dim2_min)
        MaxValues="{},{},{}".format(dim0_max,dim1_max,dim2_max)
        AlignedDim0=",{},{},{}".format(dim0_min, dim0_max, int(dim0_bins))
        AlignedDim1=",{},{},{}".format(dim1_min, dim1_max, int(dim1_bins))
        AlignedDim2=",{},{},{}".format(dim2_min, dim2_max, int(dim2_bins))

        LoadNexus(Filename=self.getProperty("SolidAngle").value, OutputWorkspace='__sa')
        LoadNexus(Filename=self.getProperty("Flux").value, OutputWorkspace='__flux')

        if _masking:
            LoadMask(Instrument=mtd['__sa'].getInstrument().getName(),
                     InputFile=self.getProperty("MaskFile").value,
                     OutputWorkspace='__mask')
            MaskDetectors(Workspace='__sa',MaskedWorkspace='__mask')
            DeleteWorkspace('__mask')

        # Momentum range: start from the flux range, narrowed by the
        # user-supplied MomentumMin/MomentumMax when given.
        XMin = mtd['__sa'].getXDimension().getMinimum()
        XMax = mtd['__sa'].getXDimension().getMaximum()

        newXMin = self.getProperty("MomentumMin").value
        newXMax = self.getProperty("MomentumMax").value
        if newXMin != Property.EMPTY_DBL or newXMax != Property.EMPTY_DBL:
            if newXMin != Property.EMPTY_DBL:
                XMin = max(XMin, newXMin)
            if newXMax != Property.EMPTY_DBL:
                XMax = min(XMax, newXMax)
            logger.notice("Using momentum range {} to {} A^-1".format(XMin, XMax))
            CropWorkspace(InputWorkspace='__flux',OutputWorkspace='__flux',XMin=XMin,XMax=XMax)
            # rescale each flux spectrum to run from 0 to 1 over the new range
            for spectrumNumber in range(mtd['__flux'].getNumberHistograms()):
                Y = mtd['__flux'].readY(spectrumNumber)
                mtd['__flux'].setY(spectrumNumber,(Y-Y.min())/(Y.max()-Y.min()))

        if _background:
            Load(Filename=self.getProperty("Background").value,
                 OutputWorkspace='__bkg',
                 FilterByTofMin=self.getProperty("FilterByTofMin").value,
                 FilterByTofMax=self.getProperty("FilterByTofMax").value)
            if _load_inst:
                LoadInstrument(Workspace='__bkg', Filename=self.getProperty("LoadInstrument").value, RewriteSpectraMap=False)
            if _detcal:
                LoadIsawDetCal(InputWorkspace='__bkg', Filename=self.getProperty("DetCal").value)
            MaskDetectors(Workspace='__bkg',MaskedWorkspace='__sa')
            ConvertUnits(InputWorkspace='__bkg',OutputWorkspace='__bkg',Target='Momentum')
            CropWorkspace(InputWorkspace='__bkg',OutputWorkspace='__bkg',XMin=XMin,XMax=XMax)

        progress = Progress(self, 0.0, 1.0, len(UBList)*len(self.getProperty("Filename").value))

        for run in self.getProperty("Filename").value:
            logger.notice("Working on " + run)

            Load(Filename=run,
                 OutputWorkspace='__run',
                 FilterByTofMin=self.getProperty("FilterByTofMin").value,
                 FilterByTofMax=self.getProperty("FilterByTofMax").value)
            if _load_inst:
                LoadInstrument(Workspace='__run', Filename=self.getProperty("LoadInstrument").value, RewriteSpectraMap=False)
            if _detcal:
                LoadIsawDetCal(InputWorkspace='__run', Filename=self.getProperty("DetCal").value)
            MaskDetectors(Workspace='__run',MaskedWorkspace='__sa')
            ConvertUnits(InputWorkspace='__run',OutputWorkspace='__run',Target='Momentum')
            CropWorkspace(InputWorkspace='__run',OutputWorkspace='__run',XMin=XMin,XMax=XMax)

            if self.getProperty('SetGoniometer').value:
                SetGoniometer(Workspace='__run',
                              Goniometers=self.getProperty('Goniometers').value,
                              Axis0=self.getProperty('Axis0').value,
                              Axis1=self.getProperty('Axis1').value,
                              Axis2=self.getProperty('Axis2').value)

            # Set background Goniometer to be the same as data
            if _background:
                mtd['__bkg'].run().getGoniometer().setR(mtd['__run'].run().getGoniometer().getR())

            for ub in UBList:
                SetUB(Workspace='__run', UB=ub)
                ConvertToMD(InputWorkspace='__run',
                            OutputWorkspace='__md',
                            QDimensions='Q3D',
                            dEAnalysisMode='Elastic',
                            Q3DFrames='HKL',
                            QConversionScales='HKL',
                            Uproj=self.getProperty('Uproj').value,
                            Vproj=self.getProperty('Vproj').value,
                            # BUG FIX: was getProperty('wproj'); the declared
                            # property name is 'Wproj' (cf. the background
                            # branch below).
                            Wproj=self.getProperty('Wproj').value,
                            MinValues=MinValues,
                            MaxValues=MaxValues)
                # Accumulate data and normalization across runs/UBs by feeding
                # the previous result back in as the temporary workspace.
                MDNormSCD(InputWorkspace=mtd['__md'],
                          FluxWorkspace='__flux',
                          SolidAngleWorkspace='__sa',
                          OutputWorkspace='__data',
                          SkipSafetyCheck=True,
                          TemporaryDataWorkspace='__data' if mtd.doesExist('__data') else None,
                          OutputNormalizationWorkspace='__norm',
                          TemporaryNormalizationWorkspace='__norm' if mtd.doesExist('__norm') else None,
                          AlignedDim0=mtd['__md'].getDimension(0).name+AlignedDim0,
                          AlignedDim1=mtd['__md'].getDimension(1).name+AlignedDim1,
                          AlignedDim2=mtd['__md'].getDimension(2).name+AlignedDim2)
                DeleteWorkspace('__md')

                if _background:
                    # the background is processed with the same UB/binning
                    SetUB(Workspace='__bkg', UB=ub)
                    ConvertToMD(InputWorkspace='__bkg',
                                OutputWorkspace='__bkg_md',
                                QDimensions='Q3D',
                                dEAnalysisMode='Elastic',
                                Q3DFrames='HKL',
                                QConversionScales='HKL',
                                Uproj=self.getProperty('Uproj').value,
                                Vproj=self.getProperty('Vproj').value,
                                Wproj=self.getProperty('Wproj').value,
                                MinValues=MinValues,
                                MaxValues=MaxValues)
                    MDNormSCD(InputWorkspace='__bkg_md',
                              FluxWorkspace='__flux',
                              SolidAngleWorkspace='__sa',
                              SkipSafetyCheck=True,
                              OutputWorkspace='__bkg_data',
                              TemporaryDataWorkspace='__bkg_data' if mtd.doesExist('__bkg_data') else None,
                              OutputNormalizationWorkspace='__bkg_norm',
                              TemporaryNormalizationWorkspace='__bkg_norm' if mtd.doesExist('__bkg_norm') else None,
                              AlignedDim0=mtd['__bkg_md'].getDimension(0).name+AlignedDim0,
                              AlignedDim1=mtd['__bkg_md'].getDimension(1).name+AlignedDim1,
                              AlignedDim2=mtd['__bkg_md'].getDimension(2).name+AlignedDim2)
                    DeleteWorkspace('__bkg_md')
                progress.report()
            DeleteWorkspace('__run')

        if _background:
            # outWS = data / norm - bkg_data / bkg_norm * BackgroundScale
            DivideMD(LHSWorkspace='__data',RHSWorkspace='__norm',OutputWorkspace=_outWS_name+'_normalizedData')
            DivideMD(LHSWorkspace='__bkg_data',RHSWorkspace='__bkg_norm',OutputWorkspace=_outWS_name+'_normalizedBackground')
            CreateSingleValuedWorkspace(OutputWorkspace='__scale', DataValue=self.getProperty('BackgroundScale').value)
            MultiplyMD(LHSWorkspace=_outWS_name+'_normalizedBackground',
                       RHSWorkspace='__scale',
                       OutputWorkspace='__scaled_background')
            DeleteWorkspace('__scale')
            MinusMD(LHSWorkspace=_outWS_name+'_normalizedData',RHSWorkspace='__scaled_background',OutputWorkspace=_outWS_name)
            if self.getProperty('KeepTemporaryWorkspaces').value:
                RenameWorkspaces(InputWorkspaces=['__data','__norm','__bkg_data','__bkg_norm'],
                                 WorkspaceNames=[_outWS_name+'_data', _outWS_name+'_normalization',
                                                 _outWS_name+'_background_data',_outWS_name+'_background_normalization'])
        else:
            # outWS = data / norm
            DivideMD(LHSWorkspace='__data',RHSWorkspace='__norm',OutputWorkspace=_outWS_name)
            if self.getProperty('KeepTemporaryWorkspaces').value:
                RenameWorkspaces(InputWorkspaces=['__data','__norm'],
                                 WorkspaceNames=[_outWS_name+'_data', _outWS_name+'_normalization'])

        self.setProperty("OutputWorkspace", mtd[_outWS_name])

        # remove temp workspaces
        for ws in self.temp_workspace_list:
            if mtd.doesExist(ws):
                DeleteWorkspace(ws)

    def _generate_UBList(self):
        """Return the list of UB matrices to reduce with: the UB from the
        UBMatrix file transformed by each requested symmetry operation, or
        just the loaded UB when SymmetryOps is empty."""
        CreateSingleValuedWorkspace(OutputWorkspace='__ub')
        LoadIsawUB('__ub',self.getProperty("UBMatrix").value)
        ub=mtd['__ub'].sample().getOrientedLattice().getUB().copy()
        DeleteWorkspace(Workspace='__ub')

        symOps = self.getProperty("SymmetryOps").value
        if symOps:
            try:
                # a space group number is mapped to its first symbol
                symOps = SpaceGroupFactory.subscribedSpaceGroupSymbols(int(symOps))[0]
            except ValueError:
                pass
            if SpaceGroupFactory.isSubscribedSymbol(symOps):
                symOps = SpaceGroupFactory.createSpaceGroup(symOps).getSymmetryOperations()
            else:
                symOps = SymmetryOperationFactory.createSymOps(symOps)
            logger.information('Using symmetries: '+str([sym.getIdentifier() for sym in symOps]))

            ub_list=[]
            for sym in symOps:
                # Rows of UBtrans are the images of the hkl basis vectors
                # under this symmetry operation.
                UBtrans = np.zeros((3,3))
                UBtrans[0] = sym.transformHKL([1,0,0])
                UBtrans[1] = sym.transformHKL([0,1,0])
                UBtrans[2] = sym.transformHKL([0,0,1])
                # NOTE(review): np.matrix is deprecated in numpy; kept so that
                # 'ub*UBtrans' below remains a matrix product -- consider
                # np.array + '@' when upgrading.
                UBtrans=np.matrix(UBtrans.T)
                ub_list.append(ub*UBtrans)
            return ub_list
        else:
            return [ub]
AlgorithmFactory.subscribe(SingleCrystalDiffuseReduction)
| ScreamingUdder/mantid | Framework/PythonInterface/plugins/algorithms/WorkflowAlgorithms/SingleCrystalDiffuseReduction.py | Python | gpl-3.0 | 20,442 | [
"CRYSTAL"
] | cdd1c0b57eae1f12f77c0e3ee9076df7149eb419ae3a1d812edfe0a3f12dfe68 |
# coding=utf-8
# (The line above is necessary so that I can use 世界 in the
# *comment* below without Python getting all bent out of shape.)
# Copyright 2007-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Mercurial interface to codereview.appspot.com.
To configure, set the following options in
your repository's .hg/hgrc file.
[extensions]
codereview = /path/to/codereview.py
[codereview]
server = codereview.appspot.com
The server should be running Rietveld; see http://code.google.com/p/rietveld/.
In addition to the new commands, this extension introduces
the file pattern syntax @nnnnnn, where nnnnnn is a change list
number, to mean the files included in that change list, which
must be associated with the current client.
For example, if change 123456 contains the files x.go and y.go,
"hg diff @123456" is equivalent to"hg diff x.go y.go".
'''
import sys
# Refuse to run as a standalone script: this module only works when loaded
# by Mercurial as an extension (see the module docstring above).
if __name__ == "__main__":
	print >>sys.stderr, "This is a Mercurial extension and should not be invoked directly."
	sys.exit(2)

# We require Python 2.6 for the json package.
# NOTE(review): this is a lexical string comparison of sys.version; it is
# fine for 2.x but would misbehave for hypothetical two-digit minors.
if sys.version < '2.6':
	print >>sys.stderr, "The codereview extension requires Python 2.6 or newer."
	print >>sys.stderr, "You are running Python " + sys.version
	sys.exit(2)
import json
import os
import re
import stat
import subprocess
import threading
import time
from mercurial import commands as hg_commands
from mercurial import util as hg_util
# bind Plan 9 preferred dotfile location
if os.sys.platform == 'plan9':
	try:
		import plan9
		n = plan9.bind(os.path.expanduser("~/lib"), os.path.expanduser("~"), plan9.MBEFORE|plan9.MCREATE)
	except ImportError:
		pass

# Module-wide state, populated during extension setup.
defaultcc = None	# default CC list for new CLs
codereview_disabled = None	# non-None message disables the extension
real_rollback = None
releaseBranch = None
server = "codereview.appspot.com"	# code review server host
server_url_base = None
testing = None
#######################################################################
# Normally I would split this into multiple files, but it simplifies
# import path headaches to keep it all in one file. Sorry.
# The different parts of the file are separated by banners like this one.
#######################################################################
# Helpers
def RelativePath(path, cwd):
	"""Return path made relative to cwd when path lies inside cwd,
	otherwise return path unchanged."""
	# BUG FIX: the original checked path[n] == '/' after startswith(cwd),
	# which raises IndexError when path == cwd exactly.  Testing
	# startswith(cwd + '/') is equivalent for the intended case and safe.
	n = len(cwd)
	if path.startswith(cwd + '/'):
		return path[n+1:]
	return path
def Sub(l1, l2):
	"""Return the elements of l1 that do not appear in l2, preserving order."""
	kept = []
	for item in l1:
		if item not in l2:
			kept.append(item)
	return kept
def Add(l1, l2):
	"""Return a sorted list combining l1 with the members of l2
	not already present in l1."""
	merged = l1 + [item for item in l2 if item not in l1]
	return sorted(merged)
def Intersect(l1, l2):
	"""Return the elements of l1 that also appear in l2, in l1's order."""
	return list(filter(lambda item: item in l2, l1))
#######################################################################
# RE: UNICODE STRING HANDLING
#
# Python distinguishes between the str (string of bytes)
# and unicode (string of code points) types. Most operations
# work on either one just fine, but some (like regexp matching)
# require unicode, and others (like write) require str.
#
# As befits the language, Python hides the distinction between
# unicode and str by converting between them silently, but
# *only* if all the bytes/code points involved are 7-bit ASCII.
# This means that if you're not careful, your program works
# fine on "hello, world" and fails on "hello, 世界". And of course,
# the obvious way to be careful - use static types - is unavailable.
# So the only way is trial and error to find where to put explicit
# conversions.
#
# Because more functions do implicit conversion to str (string of bytes)
# than do implicit conversion to unicode (string of code points),
# the convention in this module is to represent all text as str,
# converting to unicode only when calling a unicode-only function
# and then converting back to str as soon as possible.
def typecheck(s, t):
	"""Abort with a diagnostic unless s is exactly of type t."""
	if type(s) is not t:
		raise hg_util.Abort("type check failed: %s has type %s != %s" % (repr(s), type(s), t))
# If we have to pass unicode instead of str, ustr does that conversion clearly.
def ustr(s):
	# s must already be a str (bytes on Python 2); decode it as UTF-8 to
	# obtain the unicode form for unicode-only APIs.
	typecheck(s, str)
	return s.decode("utf-8")
# Even with those, Mercurial still sometimes turns unicode into str
# and then tries to use it as ascii. Change Mercurial's default.
def set_mercurial_encoding_to_utf8():
	# Force Mercurial's default string encoding to utf-8 (see the comment
	# above about Mercurial converting unicode to str as ascii).
	from mercurial import encoding
	encoding.encoding = 'utf-8'
# Applied once at import time.
set_mercurial_encoding_to_utf8()
# Even with those we still run into problems.
# I tried to do things by the book but could not convince
# Mercurial to let me check in a change with UTF-8 in the
# CL description or author field, no matter how many conversions
# between str and unicode I inserted and despite changing the
# default encoding. I'm tired of this game, so set the default
# encoding for all of Python to 'utf-8', not 'ascii'.
def default_to_utf8():
	# Python 2 hack: reload(sys) restores setdefaultencoding (deleted by
	# site.py) so the interpreter-wide default encoding can be changed to
	# utf-8.  The current stdout objects are saved and restored because
	# reload(sys) resets them.
	import sys
	stdout, __stdout__ = sys.stdout, sys.__stdout__
	reload(sys) # site.py deleted setdefaultencoding; get it back
	sys.stdout, sys.__stdout__ = stdout, __stdout__
	sys.setdefaultencoding('utf-8')
# Applied once at import time.
default_to_utf8()
#######################################################################
# Status printer for long-running commands
# Most recent status message; StatusThread prints it periodically.
global_status = None
def set_status(s):
	# Record the current long-running operation for the background status
	# reporter; when verbose, also print it immediately.
	# NOTE(review): 'verbosity' is a module global defined elsewhere in
	# this file.
	if verbosity > 0:
		print >>sys.stderr, time.asctime(), s
	global global_status
	global_status = s
class StatusThread(threading.Thread):
	"""Background thread that, after a 30 second grace period, prints the
	current global_status message to stderr every 15 seconds."""
	def __init__(self):
		threading.Thread.__init__(self)
	def run(self):
		# pause a reasonable amount of time before
		# starting to display status messages, so that
		# most hg commands won't ever see them.
		time.sleep(30)

		# now show status every 15 seconds
		# (sleep to the next 15-second boundary so messages are aligned)
		while True:
			time.sleep(15 - time.time() % 15)
			s = global_status
			if s is None:
				continue
			if s == "":
				s = "(unknown status)"
			print >>sys.stderr, time.asctime(), s
def start_status_thread():
	"""Start the background status reporter as a daemon thread."""
	reporter = StatusThread()
	reporter.daemon = True # the process may exit while it is still running
	reporter.start()
#######################################################################
# Change list parsing.
#
# Change lists are stored in .hg/codereview/cl.nnnnnn
# where nnnnnn is the number assigned by the code review server.
# Most data about a change list is stored on the code review server
# too: the description, reviewer, and cc list are all stored there.
# The only thing in the cl.nnnnnn file is the list of relevant files.
# Also, the existence of the cl.nnnnnn file marks this repository
# as the one where the change list lives.
emptydiff = """Index: ~rietveld~placeholder~
===================================================================
diff --git a/~rietveld~placeholder~ b/~rietveld~placeholder~
new file mode 100644
"""
class CL(object):
	def __init__(self, name):
		"""Initialize an empty CL record; name is the change list number
		(as a string) assigned by the server, or "new" for a change that
		has not been uploaded yet."""
		typecheck(name, str)
		self.name = name
		self.desc = ''	# change description
		self.files = []	# files included in this change list
		self.reviewer = []	# reviewer addresses
		self.cc = []	# CC addresses
		self.url = ''
		self.local = False
		self.web = False
		self.copied_from = None # None means current user
		self.mailed = False	# whether the review mail has been sent
		self.private = False
		self.lgtm = []	# (who, line, _) LGTM records (see PendingText)
	def DiskText(self):
		"""Render this CL in the on-disk format written to
		.hg/codereview/cl.<name> by Flush."""
		cl = self
		s = ""
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n\n"
		if cl.private:
			s += "Private: " + str(self.private) + "\n"
		s += "Mailed: " + str(self.mailed) + "\n"
		s += "Description:\n"
		s += Indent(cl.desc, "\t")
		s += "Files:\n"
		for f in cl.files:
			s += "\t" + f + "\n"
		typecheck(s, str)
		return s
	def EditorText(self):
		"""Render this CL as the editable change form presented to the
		user; fields marked '# cannot edit' are informational only."""
		cl = self
		s = _change_prolog
		s += "\n"
		if cl.copied_from:
			s += "Author: " + cl.copied_from + "\n"
		if cl.url != '':
			s += 'URL: ' + cl.url + '	# cannot edit\n\n'
		if cl.private:
			s += "Private: True\n"
		s += "Reviewer: " + JoinComma(cl.reviewer) + "\n"
		s += "CC: " + JoinComma(cl.cc) + "\n"
		s += "\n"
		s += "Description:\n"
		if cl.desc == '':
			s += "\t<enter description here>\n"
		else:
			s += Indent(cl.desc, "\t")
		s += "\n"
		# the file list is editable only for local or brand-new CLs
		if cl.local or cl.name == "new":
			s += "Files:\n"
			for f in cl.files:
				s += "\t" + f + "\n"
			s += "\n"
		typecheck(s, str)
		return s
def PendingText(self, quick=False):
cl = self
s = cl.name + ":" + "\n"
s += Indent(cl.desc, "\t")
s += "\n"
if cl.copied_from:
s += "\tAuthor: " + cl.copied_from + "\n"
if not quick:
s += "\tReviewer: " + JoinComma(cl.reviewer) + "\n"
for (who, line, _) in cl.lgtm:
s += "\t\t" + who + ": " + line + "\n"
s += "\tCC: " + JoinComma(cl.cc) + "\n"
s += "\tFiles:\n"
for f in cl.files:
s += "\t\t" + f + "\n"
typecheck(s, str)
return s
def Flush(self, ui, repo):
if self.name == "new":
self.Upload(ui, repo, gofmt_just_warn=True, creating=True)
dir = CodeReviewDir(ui, repo)
path = dir + '/cl.' + self.name
f = open(path+'!', "w")
f.write(self.DiskText())
f.close()
if sys.platform == "win32" and os.path.isfile(path):
os.remove(path)
os.rename(path+'!', path)
if self.web and not self.copied_from:
EditDesc(self.name, desc=self.desc,
reviewers=JoinComma(self.reviewer), cc=JoinComma(self.cc),
private=self.private)
def Delete(self, ui, repo):
dir = CodeReviewDir(ui, repo)
os.unlink(dir + "/cl." + self.name)
def Subject(self, ui, repo):
s = line1(self.desc)
if len(s) > 60:
s = s[0:55] + "..."
if self.name != "new":
s = "code review %s: %s" % (self.name, s)
typecheck(s, str)
return branch_prefix(ui, repo) + s
def Upload(self, ui, repo, send_mail=False, gofmt=True, gofmt_just_warn=False, creating=False, quiet=False):
if not self.files and not creating:
ui.warn("no files in change list\n")
if ui.configbool("codereview", "force_gofmt", True) and gofmt:
CheckFormat(ui, repo, self.files, just_warn=gofmt_just_warn)
set_status("uploading CL metadata + diffs")
os.chdir(repo.root)
form_fields = [
("content_upload", "1"),
("reviewers", JoinComma(self.reviewer)),
("cc", JoinComma(self.cc)),
("description", self.desc),
("base_hashes", ""),
]
if self.name != "new":
form_fields.append(("issue", self.name))
vcs = None
# We do not include files when creating the issue,
# because we want the patch sets to record the repository
# and base revision they are diffs against. We use the patch
# set message for that purpose, but there is no message with
# the first patch set. Instead the message gets used as the
# new CL's overall subject. So omit the diffs when creating
# and then we'll run an immediate upload.
# This has the effect that every CL begins with an empty "Patch set 1".
if self.files and not creating:
vcs = MercurialVCS(upload_options, ui, repo)
data = vcs.GenerateDiff(self.files)
files = vcs.GetBaseFiles(data)
if len(data) > MAX_UPLOAD_SIZE:
uploaded_diff_file = []
form_fields.append(("separate_patches", "1"))
else:
uploaded_diff_file = [("data", "data.diff", data)]
else:
uploaded_diff_file = [("data", "data.diff", emptydiff)]
if vcs and self.name != "new":
form_fields.append(("subject", "diff -r " + vcs.base_rev + " " + ui.expandpath("default")))
else:
# First upload sets the subject for the CL itself.
form_fields.append(("subject", self.Subject(ui, repo)))
ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
response_body = MySend("/upload", body, content_type=ctype)
patchset = None
msg = response_body
lines = msg.splitlines()
if len(lines) >= 2:
msg = lines[0]
patchset = lines[1].strip()
patches = [x.split(" ", 1) for x in lines[2:]]
else:
print >>sys.stderr, "Server says there is nothing to upload (probably wrong):\n" + msg
if response_body.startswith("Issue updated.") and quiet:
pass
else:
ui.status(msg + "\n")
set_status("uploaded CL metadata + diffs")
if not response_body.startswith("Issue created.") and not response_body.startswith("Issue updated."):
raise hg_util.Abort("failed to update issue: " + response_body)
issue = msg[msg.rfind("/")+1:]
self.name = issue
if not self.url:
self.url = server_url_base + self.name
if not uploaded_diff_file:
set_status("uploading patches")
patches = UploadSeparatePatches(issue, rpc, patchset, data, upload_options)
if vcs:
set_status("uploading base files")
vcs.UploadBaseFiles(issue, rpc, patches, patchset, upload_options, files)
if patchset != "1":
MySend("/" + issue + "/upload_complete/" + patchset, payload="")
if send_mail:
set_status("sending mail")
MySend("/" + issue + "/mail", payload="")
self.web = True
set_status("flushing changes to disk")
self.Flush(ui, repo)
return
def Mail(self, ui, repo):
pmsg = "Hello " + JoinComma(self.reviewer)
if self.cc:
pmsg += " (cc: %s)" % (', '.join(self.cc),)
pmsg += ",\n"
pmsg += "\n"
repourl = ui.expandpath("default")
if not self.mailed:
pmsg += "I'd like you to review this change to"
branch = repo[None].branch()
if branch.startswith("dev."):
pmsg += " the " + branch + " branch of"
pmsg += "\n" + repourl + "\n"
else:
pmsg += "Please take another look.\n"
typecheck(pmsg, str)
PostMessage(ui, self.name, pmsg, subject=self.Subject(ui, repo))
self.mailed = True
self.Flush(ui, repo)
def GoodCLName(name):
	# A valid CL name is a non-empty run of decimal digits.
	typecheck(name, str)
	m = re.match("^[0-9]+$", name)
	return m
# Parse the editor/disk representation of a CL (see CL.EditorText and
# CL.DiskText) back into a CL object.
# Returns (cl, 0, '') on success or (None, lineno, errmsg) on error.
def ParseCL(text, name):
	typecheck(text, str)
	typecheck(name, str)
	sname = None	# name of the section currently being collected
	lineno = 0
	sections = {
		'Author': '',
		'Description': '',
		'Files': '',
		'URL': '',
		'Reviewer': '',
		'CC': '',
		'Mailed': '',
		'Private': '',
	}
	for line in text.split('\n'):
		lineno += 1
		line = line.rstrip()
		# Full-line comments are ignored.
		if line != '' and line[0] == '#':
			continue
		# Indented (or blank) lines continue the current section.
		if line == '' or line[0] == ' ' or line[0] == '\t':
			if sname == None and line != '':
				return None, lineno, 'text outside section'
			if sname != None:
				sections[sname] += line + '\n'
			continue
		# Otherwise the line must start a new "Name: [value]" section.
		p = line.find(':')
		if p >= 0:
			s, val = line[:p].strip(), line[p+1:].strip()
			if s in sections:
				sname = s
				if val != '':
					sections[sname] += val + '\n'
				continue
		return None, lineno, 'malformed section header'
	# Normalize each section: drop common indentation and trailing space.
	for k in sections:
		sections[k] = StripCommon(sections[k]).rstrip()
	cl = CL(name)
	if sections['Author']:
		cl.copied_from = sections['Author']
	cl.desc = sections['Description']
	# File list: one name per line, '#' starts a trailing comment.
	for line in sections['Files'].split('\n'):
		i = line.find('#')
		if i >= 0:
			line = line[0:i].rstrip()
		line = line.strip()
		if line == '':
			continue
		cl.files.append(line)
	cl.reviewer = SplitCommaSpace(sections['Reviewer'])
	cl.cc = SplitCommaSpace(sections['CC'])
	cl.url = sections['URL']
	if sections['Mailed'] != 'False':
		# Odd default, but avoids spurious mailings when
		# reading old CLs that do not have a Mailed: line.
		# CLs created with this update will always have
		# Mailed: False on disk.
		cl.mailed = True
	if sections['Private'] in ('True', 'true', 'Yes', 'yes'):
		cl.private = True
	if cl.desc == '<enter description here>':
		cl.desc = ''
	return cl, 0, ''
def SplitCommaSpace(s):
	# Split a comma-separated list, tolerating spaces after commas.
	# A blank string yields [] (re.split would give ['']).
	typecheck(s, str)
	stripped = s.strip()
	if not stripped:
		return []
	return re.split(", *", stripped)
def CutDomain(s):
	# Reduce an email address to its user name: keep everything before
	# the first '@' (the whole string if there is no '@').
	typecheck(s, str)
	at = s.find('@')
	if at < 0:
		return s
	return s[:at]
def JoinComma(l):
	# Join the strings in l with ", ", dropping duplicates while
	# keeping the order of first appearance.
	seen = {}
	uniq = []
	for s in l:
		typecheck(s, str)
		if s in seen:
			continue
		seen[s] = True
		uniq.append(s)
	return ", ".join(uniq)
def ExceptionDetail():
	# Format the exception currently being handled as "Type: message",
	# stripping the "<type '...'>" / "<class '...'>" wrapper from the
	# type's string form.
	etype, evalue = sys.exc_info()[:2]
	s = str(etype)
	for prefix in ("<type '", "<class '"):
		if s.startswith(prefix) and s.endswith("'>"):
			s = s[len(prefix):-2]
			break
	arg = str(evalue)
	if arg:
		s += ": " + arg
	return s
# Report whether the named CL has a local cl.<name> file in this repository.
def IsLocalCL(ui, repo, name):
	return GoodCLName(name) and os.access(CodeReviewDir(ui, repo) + "/cl." + name, 0)
# Load CL from disk and/or the web.
# Returns (cl, '') on success or (None, errmsg) on failure.
def LoadCL(ui, repo, name, web=True):
	typecheck(name, str)
	set_status("loading CL " + name)
	if not GoodCLName(name):
		return None, "invalid CL name"
	dir = CodeReviewDir(ui, repo)
	path = dir + "cl." + name
	if os.access(path, 0):
		# Local copy exists: parse it from disk.
		ff = open(path)
		text = ff.read()
		ff.close()
		cl, lineno, err = ParseCL(text, name)
		if err != "":
			return None, "malformed CL data: "+err
		cl.local = True
	else:
		cl = CL(name)
	if web:
		# Merge in the authoritative metadata from the review server.
		set_status("getting issue metadata from web")
		d = JSONGet(ui, "/api/" + name + "?messages=true")
		set_status(None)
		if d is None:
			return None, "cannot load CL %s from server" % (name,)
		if 'owner_email' not in d or 'issue' not in d or str(d['issue']) != name:
			return None, "malformed response loading CL data from code review server"
		cl.dict = d
		cl.reviewer = d.get('reviewers', [])
		cl.cc = d.get('cc', [])
		if cl.local and cl.copied_from and cl.desc:
			# local copy of CL written by someone else
			# and we saved a description. use that one,
			# so that committers can edit the description
			# before doing hg submit.
			pass
		else:
			cl.desc = d.get('description', "")
		cl.url = server_url_base + name
		cl.web = True
		cl.private = d.get('private', False) != False
		# Collect LGTM / NOT LGTM votes from the message stream.
		cl.lgtm = []
		for m in d.get('messages', []):
			if m.get('approval', False) == True or m.get('disapproval', False) == True:
				who = re.sub('@.*', '', m.get('sender', ''))
				text = re.sub("\n(.|\n)*", '', m.get('text', ''))
				cl.lgtm.append((who, text, m.get('approval', False)))
	set_status("loaded CL " + name)
	return cl, ''
class LoadCLThread(threading.Thread):
	# Loads a single CL in the background; used by LoadAllCL to fetch
	# many CLs from the server in parallel.  Result is left in self.cl
	# (None on failure, after warning).
	def __init__(self, ui, repo, dir, f, web):
		threading.Thread.__init__(self)
		self.ui = ui
		self.repo = repo
		self.dir = dir
		self.f = f	# file name of the form "cl.<number>"
		self.web = web
		self.cl = None
	def run(self):
		# f[3:] strips the "cl." prefix to get the CL number.
		cl, err = LoadCL(self.ui, self.repo, self.f[3:], web=self.web)
		if err != '':
			self.ui.warn("loading "+self.dir+self.f+": " + err + "\n")
			return
		self.cl = cl
# Load all the CLs from this repository.
# Returns a dict mapping CL name -> CL object.
def LoadAllCL(ui, repo, web=True):
	dir = CodeReviewDir(ui, repo)
	m = {}
	files = [f for f in os.listdir(dir) if f.startswith('cl.')]
	if not files:
		return m
	active = []
	first = True
	for f in files:
		t = LoadCLThread(ui, repo, dir, f, web)
		t.start()
		if web and first:
			# first request: wait in case it needs to authenticate
			# otherwise we get lots of user/password prompts
			# running in parallel.
			t.join()
			if t.cl:
				m[t.cl.name] = t.cl
			first = False
		else:
			active.append(t)
	# Collect the remaining threads' results.
	for t in active:
		t.join()
		if t.cl:
			m[t.cl.name] = t.cl
	return m
# Find repository root. On error, ui.warn and return None
def RepoDir(ui, repo):
	# The repository must live on the local file system.
	url = repo.url()
	if not url.startswith('file:'):
		ui.warn("repository %s is not in local file system\n" % (url,))
		return None
	# Drop the "file:" scheme and a single trailing slash, if any.
	path = url[len('file:'):]
	if path.endswith('/'):
		path = path[:-1]
	typecheck(path, str)
	return path
# Find (or make) code review directory. On error, ui.warn and return None
# Returns the directory path with a trailing '/'.
def CodeReviewDir(ui, repo):
	dir = RepoDir(ui, repo)
	if dir == None:
		return None
	dir += '/.hg/codereview/'
	if not os.path.isdir(dir):
		try:
			os.mkdir(dir, 0700)
		except:
			ui.warn('cannot mkdir %s: %s\n' % (dir, ExceptionDetail()))
			return None
	typecheck(dir, str)
	return dir
# Turn leading tabs into spaces, so that the common white space
# prefix doesn't get confused when people's editors write out
# some lines with spaces, some with tabs. Only a heuristic
# (some editors don't use 8 spaces either) but a useful one.
def TabsToSpaces(line):
	# Count the leading tabs, then rebuild with 8 spaces per tab.
	body = line.lstrip('\t')
	ntabs = len(line) - len(body)
	return ' ' * (8 * ntabs) + body
# Strip maximal common leading white space prefix from text
def StripCommon(text):
	typecheck(text, str)
	# First pass: compute ws, the longest whitespace prefix shared by
	# all non-blank lines (after tab expansion).
	ws = None
	for line in text.split('\n'):
		line = line.rstrip()
		if line == '':
			continue
		line = TabsToSpaces(line)
		white = line[:len(line)-len(line.lstrip())]
		if ws == None:
			ws = white
		else:
			# Shrink ws to the common prefix of ws and white.
			common = ''
			for i in range(min(len(white), len(ws))+1):
				if white[0:i] == ws[0:i]:
					common = white[0:i]
			ws = common
		if ws == '':
			break
	if ws == None:
		return text
	# Second pass: remove ws from each line, dropping leading blank
	# lines and squeezing trailing blank lines to one newline.
	t = ''
	for line in text.split('\n'):
		line = line.rstrip()
		line = TabsToSpaces(line)
		if line.startswith(ws):
			line = line[len(ws):]
		if line == '' and t == '':
			continue
		t += line + '\n'
	while len(t) >= 2 and t[-2:] == '\n\n':
		t = t[:-1]
	typecheck(t, str)
	return t
# Indent text with indent.
def Indent(text, indent):
	# Prefix every line of text with indent (including the trailing
	# empty piece produced by split, matching historical behavior).
	typecheck(text, str)
	typecheck(indent, str)
	pieces = [indent + line + '\n' for line in text.split('\n')]
	t = ''.join(pieces)
	typecheck(t, str)
	return t
# Return the first line of l
def line1(text):
	# partition gives the text before the first newline without
	# building the full list of lines.
	typecheck(text, str)
	first, _, _ = text.partition('\n')
	return first
# Boilerplate shown at the top of the editor buffer for "hg change".
_change_prolog = """# Change list.
# Lines beginning with # are ignored.
# Multi-line values should be indented.
"""
desc_re = '^(.+: |(tag )?(release|weekly)\.|fix build|undo CL)'
desc_msg = '''Your CL description appears not to use the standard form.
The first line of your change description is conventionally a
one-line summary of the change, prefixed by the primary affected package,
and is used as the subject for code review mail; the rest of the description
elaborates.
Examples:
encoding/rot13: new package
math: add IsInf, IsNaN
net: fix cname in LookupHost
unicode: update to Unicode 5.0.2
'''
# Ask the user a yes/no question; returns True for yes.
# Mercurial 2.7 changed the promptchoice API from a choices list to an
# inline "$$"-separated string.
# NOTE(review): hgversion >= "2.7" is a lexicographic string compare
# (e.g. "2.10" < "2.7"); presumably fine for the versions in use — confirm.
def promptyesno(ui, msg):
	if hgversion >= "2.7":
		return ui.promptchoice(msg + " $$ &yes $$ &no", 0) == 0
	else:
		return ui.promptchoice(msg, ["&yes", "&no"], 0) == 0
# Offer to "hg remove" the (locally deleted) file f.
def promptremove(ui, repo, f):
	if promptyesno(ui, "hg remove %s (y/n)?" % (f,)):
		if hg_commands.remove(ui, repo, 'path:'+f) != 0:
			ui.warn("error removing %s" % (f,))
# Offer to "hg add" the (untracked) file f.
def promptadd(ui, repo, f):
	if promptyesno(ui, "hg add %s (y/n)?" % (f,)):
		if hg_commands.add(ui, repo, 'path:'+f) != 0:
			ui.warn("error adding %s" % (f,))
# Run the user's editor on cl, validate the result, and copy the
# accepted fields back into cl.  Loops until the text parses and the
# user is satisfied.  Returns "" on success or an error message.
def EditCL(ui, repo, cl):
	set_status(None)	# do not show status
	s = cl.EditorText()
	while True:
		s = ui.edit(s, ui.username())
		# We can't trust Mercurial + Python not to die before making the change,
		# so, by popular demand, just scribble the most recent CL edit into
		# $(hg root)/last-change so that if Mercurial does die, people
		# can look there for their work.
		try:
			f = open(repo.root+"/last-change", "w")
			f.write(s)
			f.close()
		except:
			pass
		clx, line, err = ParseCL(s, cl.name)
		if err != '':
			if not promptyesno(ui, "error parsing change list: line %d: %s\nre-edit (y/n)?" % (line, err)):
				return "change list not modified"
			continue
		# Check description.
		if clx.desc == '':
			if promptyesno(ui, "change list should have a description\nre-edit (y/n)?"):
				continue
		elif re.search('<enter reason for undo>', clx.desc):
			if promptyesno(ui, "change list description omits reason for undo\nre-edit (y/n)?"):
				continue
		elif not re.match(desc_re, clx.desc.split('\n')[0]):
			if promptyesno(ui, desc_msg + "re-edit (y/n)?"):
				continue
		# Check file list for files that need to be hg added or hg removed
		# or simply aren't understood.
		pats = ['path:'+f for f in clx.files]
		changed = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
		deleted = hg_matchPattern(ui, repo, *pats, deleted=True)
		unknown = hg_matchPattern(ui, repo, *pats, unknown=True)
		ignored = hg_matchPattern(ui, repo, *pats, ignored=True)
		clean = hg_matchPattern(ui, repo, *pats, clean=True)
		files = []
		for f in clx.files:
			if f in changed:
				files.append(f)
				continue
			if f in deleted:
				promptremove(ui, repo, f)
				files.append(f)
				continue
			if f in unknown:
				promptadd(ui, repo, f)
				files.append(f)
				continue
			if f in ignored:
				ui.warn("error: %s is excluded by .hgignore; omitting\n" % (f,))
				continue
			if f in clean:
				ui.warn("warning: %s is listed in the CL but unchanged\n" % (f,))
				files.append(f)
				continue
			# Not known to Mercurial at all: keep plain files (with a
			# warning), drop directories and nonexistent paths.
			p = repo.root + '/' + f
			if os.path.isfile(p):
				ui.warn("warning: %s is a file but not known to hg\n" % (f,))
				files.append(f)
				continue
			if os.path.isdir(p):
				ui.warn("error: %s is a directory, not a file; omitting\n" % (f,))
				continue
			ui.warn("error: %s does not exist; omitting\n" % (f,))
		clx.files = files
		# Accept the edit: copy validated fields back into cl.
		cl.desc = clx.desc
		cl.reviewer = clx.reviewer
		cl.cc = clx.cc
		cl.files = clx.files
		cl.private = clx.private
		break
	return ""
# For use by submit, etc. (NOT by change)
# Get change list number or list of files from command line.
# If files are given, make a new change list.
# Returns (cl, '') on success or (None, errmsg) on failure.
def CommandLineCL(ui, repo, pats, opts, op="verb", defaultcc=None):
	if len(pats) > 0 and GoodCLName(pats[0]):
		# First argument is an existing CL number.
		if len(pats) != 1:
			return None, "cannot specify change number and file names"
		if opts.get('message'):
			return None, "cannot use -m with existing CL"
		cl, err = LoadCL(ui, repo, pats[0], web=True)
		if err != "":
			return None, err
	else:
		# Arguments are file patterns: build a new CL from them.
		cl = CL("new")
		cl.local = True
		cl.files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
		if not cl.files:
			return None, "no files changed (use hg %s <number> to use existing CL)" % op
	if opts.get('reviewer'):
		cl.reviewer = Add(cl.reviewer, SplitCommaSpace(opts.get('reviewer')))
	if opts.get('cc'):
		cl.cc = Add(cl.cc, SplitCommaSpace(opts.get('cc')))
	if defaultcc and not cl.private:
		cl.cc = Add(cl.cc, defaultcc)
	if cl.name == "new":
		# New CL needs a description: from -m or via the editor.
		if opts.get('message'):
			cl.desc = opts.get('message')
		else:
			err = EditCL(ui, repo, cl)
			if err != '':
				return None, err
	return cl, ""
#######################################################################
# Change list file management
# Return list of changed files in repository that match pats.
# The patterns came from the command line, so we warn
# if they have no effect or cannot be understood.
def ChangedFiles(ui, repo, pats, taken=None):
	taken = taken or {}
	# Run each pattern separately so that we can warn about
	# patterns that didn't do anything useful.
	for p in pats:
		for f in hg_matchPattern(ui, repo, p, unknown=True):
			promptadd(ui, repo, f)
		for f in hg_matchPattern(ui, repo, p, removed=True):
			promptremove(ui, repo, f)
		files = hg_matchPattern(ui, repo, p, modified=True, added=True, removed=True)
		for f in files:
			if f in taken:
				ui.warn("warning: %s already in CL %s\n" % (f, taken[f].name))
		if not files:
			ui.warn("warning: %s did not match any modified files\n" % (p,))
	# Again, all at once (eliminates duplicates)
	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True, removed=True)
	l.sort()
	# Exclude files already claimed by other CLs.
	if taken:
		l = Sub(l, taken.keys())
	return l
# Return list of changed files in repository that match pats and still exist.
# (opts is accepted for interface symmetry but unused here.)
def ChangedExistingFiles(ui, repo, pats, opts):
	l = hg_matchPattern(ui, repo, *pats, modified=True, added=True)
	l.sort()
	return l
# Return list of files claimed by existing CLs
# (a dict mapping file name -> owning CL object).
def Taken(ui, repo):
	all = LoadAllCL(ui, repo, web=False)
	taken = {}
	for _, cl in all.items():
		for f in cl.files:
			taken[f] = cl
	return taken
# Return list of changed files that are not claimed by other CLs
def DefaultFiles(ui, repo, pats):
	return ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
#######################################################################
# File format checking.
# Run all formatting checks (gofmt for Go files, tab indentation for
# C/assembly) over files; with just_warn, report instead of aborting.
def CheckFormat(ui, repo, files, just_warn=False):
	set_status("running gofmt")
	CheckGofmt(ui, repo, files, just_warn)
	CheckTabfmt(ui, repo, files, just_warn)
# Check that gofmt run on the list of files does not change them
def CheckGofmt(ui, repo, files, just_warn):
	files = gofmt_required(files)
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	# Skip files that do not exist in the working directory.
	files = [f for f in files if os.access(f, 0)]
	if not files:
		return
	try:
		cmd = subprocess.Popen(["gofmt", "-l"] + files, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=sys.platform != "win32")
		cmd.stdin.close()
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	# Read stdout and stderr concurrently and wait for exit.  Reading
	# stdout to EOF before touching stderr (as this code used to do)
	# can deadlock if gofmt fills the stderr pipe buffer first.
	data, errors = cmd.communicate()
	set_status("done with gofmt")
	if len(errors) > 0:
		ui.warn("gofmt errors:\n" + errors.rstrip() + "\n")
		return
	# gofmt -l lists the files that would change; any output is bad.
	if len(data) > 0:
		msg = "gofmt needs to format these files (run hg gofmt):\n" + Indent(data, "\t").rstrip()
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
# Check that *.[chys] files indent using tabs.
def CheckTabfmt(ui, repo, files, just_warn):
	# Only C, header, assembly files under src/, excluding yacc output.
	files = [f for f in files if f.startswith('src/') and re.search(r"\.[chys]$", f) and not re.search(r"\.tab\.[ch]$", f)]
	if not files:
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	files = [f for f in files if os.access(f, 0)]
	badfiles = []
	for f in files:
		try:
			fp = open(f, 'r')
			try:
				for line in fp:
					# Four leading spaces is enough to complain about,
					# except that some Plan 9 code uses four spaces as the label indent,
					# so allow that.
					if line.startswith('    ') and not re.match('    [A-Za-z0-9_]+:', line):
						badfiles.append(f)
						break
			finally:
				# Close explicitly; the old code leaked the handle until GC.
				fp.close()
		except:
			# ignore cannot open file, etc.
			pass
	if len(badfiles) > 0:
		msg = "these files use spaces for indentation (use tabs instead):\n\t" + "\n\t".join(badfiles)
		if just_warn:
			ui.warn("warning: " + msg + "\n")
		else:
			raise hg_util.Abort(msg)
	return
#######################################################################
# CONTRIBUTORS file parsing
# Cache of the parsed CONTRIBUTORS file, filled in by ReadContributors.
contributorsCache = None
# Optional URL to fetch CONTRIBUTORS from instead of the repo copy.
contributorsURL = None
# Parse the CONTRIBUTORS file (from contributorsURL if set, else the
# repository copy) into a dict mapping lowercased email -> (name, email),
# where email is the canonical (first-listed) address.  Cached globally.
def ReadContributors(ui, repo):
	global contributorsCache
	if contributorsCache is not None:
		return contributorsCache
	try:
		if contributorsURL is not None:
			opening = contributorsURL
			f = urllib2.urlopen(contributorsURL)
		else:
			opening = repo.root + '/CONTRIBUTORS'
			f = open(repo.root + '/CONTRIBUTORS', 'r')
	except:
		ui.write("warning: cannot open %s: %s\n" % (opening, ExceptionDetail()))
		return {}
	contributors = {}
	for line in f:
		# CONTRIBUTORS is a list of lines like:
		#	Person <email>
		#	Person <email> <alt-email>
		# The first email address is the one used in commit logs.
		if line.startswith('#'):
			continue
		m = re.match(r"([^<>]+\S)\s+(<[^<>\s]+>)((\s+<[^<>\s]+>)*)\s*$", line)
		if m:
			name = m.group(1)
			email = m.group(2)[1:-1]
			contributors[email.lower()] = (name, email)
			# Alternate addresses map to the same canonical identity.
			for extra in m.group(3).split():
				contributors[extra[1:-1].lower()] = (name, email)
	contributorsCache = contributors
	return contributors
# Like FindContributor, but abort if the user is not listed in
# CONTRIBUTORS.  Returns the canonical "Name <email>" line.
def CheckContributor(ui, repo, user=None):
	set_status("checking CONTRIBUTORS file")
	user, userline = FindContributor(ui, repo, user, warn=False)
	if not userline:
		raise hg_util.Abort("cannot find %s in CONTRIBUTORS" % (user,))
	return userline
# Look user (default: the configured ui.username) up in CONTRIBUTORS.
# Returns (email, "Name <email>") if found, else (user, None).
def FindContributor(ui, repo, user=None, warn=True):
	if not user:
		user = ui.config("ui", "username")
		if not user:
			raise hg_util.Abort("[ui] username is not configured in .hgrc")
	user = user.lower()
	# If given "Name <email>", reduce to the email address.
	m = re.match(r".*<(.*)>", user)
	if m:
		user = m.group(1)
	contributors = ReadContributors(ui, repo)
	if user not in contributors:
		if warn:
			ui.warn("warning: cannot find %s in CONTRIBUTORS\n" % (user,))
		return user, None
	user, email = contributors[user]
	return email, "%s <%s>" % (user, email)
#######################################################################
# Mercurial helper functions.
# Read http://mercurial.selenic.com/wiki/MercurialApi before writing any of these.
# We use the ui.pushbuffer/ui.popbuffer + hg_commands.xxx tricks for all interaction
# with Mercurial. It has proved the most stable as they make changes.
# Version string of the Mercurial we are running under.
hgversion = hg_util.version()
# We require Mercurial 1.9 and suggest Mercurial 2.1.
# The details of the scmutil package changed then,
# so allowing earlier versions would require extra band-aids below.
# Ubuntu 11.10 ships with Mercurial 1.9.1 as the default version.
hg_required = "1.9"
hg_suggested = "2.1"
# Message shown when the installed Mercurial is too old.
old_message = """
The code review extension requires Mercurial """+hg_required+""" or newer.
You are using Mercurial """+hgversion+""".
To install a new Mercurial, visit http://mercurial.selenic.com/downloads/.
"""
linux_message = """
You may need to clear your current Mercurial installation by running:
	sudo apt-get remove mercurial mercurial-common
	sudo rm -rf /etc/mercurial
"""
# Refuse to load under an unsupported Mercurial.
# NOTE(review): this is a lexicographic string compare of versions;
# presumably adequate for 1.x/2.x numbering — confirm.
if hgversion < hg_required:
	msg = old_message
	if os.access("/etc/mercurial", 0):
		msg += linux_message
	raise hg_util.Abort(msg)
from mercurial.hg import clean as hg_clean
from mercurial import cmdutil as hg_cmdutil
from mercurial import error as hg_error
from mercurial import match as hg_match
from mercurial import node as hg_node
class uiwrap(object):
	# Captures Mercurial ui output: construction pushes a buffer and
	# silences the ui; output() restores the previous verbosity settings
	# and returns everything written in between.
	def __init__(self, ui):
		self.ui = ui
		ui.pushbuffer()
		self.oldQuiet = ui.quiet
		ui.quiet = True
		self.oldVerbose = ui.verbose
		ui.verbose = False
	def output(self):
		ui = self.ui
		ui.quiet = self.oldQuiet
		ui.verbose = self.oldVerbose
		return ui.popbuffer()
def to_slash(path):
	# Normalize Windows path separators to forward slashes;
	# on other platforms the path is returned unchanged.
	if sys.platform != "win32":
		return path
	return path.replace('\\', '/')
# Run "hg status" with the given patterns/options and return the list
# of matched file names, repo-root-relative with forward slashes.
def hg_matchPattern(ui, repo, *pats, **opts):
	w = uiwrap(ui)
	hg_commands.status(ui, repo, *pats, **opts)
	text = w.output()
	ret = []
	prefix = to_slash(os.path.realpath(repo.root))+'/'
	for line in text.split('\n'):
		# Status lines look like "M path/to/file".
		f = line.split()
		if len(f) > 1:
			if len(pats) > 0:
				# Given patterns, Mercurial shows relative to cwd
				p = to_slash(os.path.realpath(f[1]))
				if not p.startswith(prefix):
					print >>sys.stderr, "File %s not in repo root %s.\n" % (p, prefix)
				else:
					ret.append(p[len(prefix):])
			else:
				# Without patterns, Mercurial shows relative to root (what we want)
				ret.append(to_slash(f[1]))
	return ret
# Return the output of "hg heads" as a string.
def hg_heads(ui, repo):
	w = uiwrap(ui)
	hg_commands.heads(ui, repo)
	return w.output()
# Mercurial output lines that carry no information for the user;
# filtered out of pull/update/push output by isNoise.
noise = [
	"",
	"resolving manifests",
	"searching for changes",
	"couldn't find merge tool hgmerge",
	"adding changesets",
	"adding manifests",
	"adding file changes",
	"all local heads known remotely",
]
# Report whether line is one of the known uninteresting Mercurial
# status lines listed in noise (empty lines count as noise).
def isNoise(line):
	# Membership test replaces the hand-rolled comparison loop.
	return str(line) in noise
# Return the output of "hg incoming"; a return code of 1 means
# "no changes" and is not an error.
def hg_incoming(ui, repo):
	w = uiwrap(ui)
	ret = hg_commands.incoming(ui, repo, force=False, bundle="")
	if ret and ret != 1:
		raise hg_util.Abort(ret)
	return w.output()
# Return the output of "hg log" with the given options.
def hg_log(ui, repo, **opts):
	# hg_commands.log requires these keys to be present; default them
	# to "".  setdefault replaces the Python-2-only has_key check and
	# behaves identically in Python 2 and 3.
	for k in ['date', 'keyword', 'rev', 'user']:
		opts.setdefault(k, "")
	w = uiwrap(ui)
	ret = hg_commands.log(ui, repo, **opts)
	if ret:
		raise hg_util.Abort(ret)
	return w.output()
# Return the output of "hg outgoing"; a return code of 1 means
# "no changes" and is not an error.
def hg_outgoing(ui, repo, **opts):
	w = uiwrap(ui)
	ret = hg_commands.outgoing(ui, repo, **opts)
	if ret and ret != 1:
		raise hg_util.Abort(ret)
	return w.output()
# Run "hg pull", rewriting its verbose file list into a terse
# mv/+/- form and dropping known noise lines.
def hg_pull(ui, repo, **opts):
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True # for file list
	err = hg_commands.pull(ui, repo, **opts)
	for line in w.output().split('\n'):
		if isNoise(line):
			continue
		if line.startswith('moving '):
			line = 'mv ' + line[len('moving '):]
		if line.startswith('getting ') and line.find(' to ') >= 0:
			line = 'mv ' + line[len('getting '):]
		if line.startswith('getting '):
			line = '+ ' + line[len('getting '):]
		if line.startswith('removing '):
			line = '- ' + line[len('removing '):]
		ui.write(line + '\n')
	return err
# Run "hg update", rewriting its verbose file list the same way
# hg_pull does (terse mv/+/- form, noise lines dropped).
def hg_update(ui, repo, **opts):
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True # for file list
	err = hg_commands.update(ui, repo, **opts)
	for line in w.output().split('\n'):
		if isNoise(line):
			continue
		if line.startswith('moving '):
			line = 'mv ' + line[len('moving '):]
		if line.startswith('getting ') and line.find(' to ') >= 0:
			line = 'mv ' + line[len('getting '):]
		if line.startswith('getting '):
			line = '+ ' + line[len('getting '):]
		if line.startswith('removing '):
			line = '- ' + line[len('removing '):]
		ui.write(line + '\n')
	return err
# Run "hg push", echoing its output minus the known noise lines.
def hg_push(ui, repo, **opts):
	w = uiwrap(ui)
	ui.quiet = False
	ui.verbose = True
	err = hg_commands.push(ui, repo, **opts)
	for line in w.output().split('\n'):
		if not isNoise(line):
			ui.write(line + '\n')
	return err
# Thin wrapper over "hg commit" (no output rewriting).
def hg_commit(ui, repo, *pats, **opts):
	return hg_commands.commit(ui, repo, *pats, **opts)
#######################################################################
# Mercurial precommit hook to disable commit except through this interface.
# Set to True only while this extension itself is committing;
# the precommit hook rejects commits while it is False.
commit_okay = False
# Precommit hook: returning True blocks the commit, False allows it.
# Direct "hg commit" is disabled so changes go through mail/upload/submit.
def precommithook(ui, repo, **opts):
	if hgversion >= "2.1":
		from mercurial import phases
		# NOTE(review): compares the raw config value against the
		# phases.secret constant — looks type-mismatched; confirm
		# against the Mercurial phases API before relying on it.
		if repo.ui.config('phases', 'new-commit') >= phases.secret:
			return False
	if commit_okay:
		return False # False means okay.
	ui.write("\ncodereview extension enabled; use mail, upload, or submit instead of commit\n\n")
	return True
#######################################################################
# @clnumber file pattern support
# We replace scmutil.match with the MatchAt wrapper to add the @clnumber pattern.
# Saved state for the scmutil.match monkey-patch installed by InstallMatch.
match_repo = None
match_ui = None
match_orig = None	# original scmutil.match, called by MatchAt
# Replace Mercurial's scmutil.match with MatchAt so that command-line
# file patterns may name a CL as @nnnnnn.
def InstallMatch(ui, repo):
	global match_repo
	global match_ui
	global match_orig
	match_ui = ui
	match_repo = repo
	from mercurial import scmutil
	match_orig = scmutil.match
	scmutil.match = MatchAt
# Replacement for scmutil.match: expands @clnumber (or @default)
# patterns into explicit path: patterns for the CL's files, then
# delegates to the original matcher.
def MatchAt(ctx, pats=None, opts=None, globbed=False, default='relpath'):
	taken = []	# the @... patterns consumed here
	files = []	# files those patterns expand to
	pats = pats or []
	opts = opts or {}
	for p in pats:
		if p.startswith('@'):
			taken.append(p)
			clname = p[1:]
			if clname == "default":
				files = DefaultFiles(match_ui, match_repo, [])
			else:
				if not GoodCLName(clname):
					raise hg_util.Abort("invalid CL name " + clname)
				cl, err = LoadCL(match_repo.ui, match_repo, clname, web=False)
				if err != '':
					raise hg_util.Abort("loading CL " + clname + ": " + err)
				if not cl.files:
					raise hg_util.Abort("no files in CL " + clname)
				files = Add(files, cl.files)
	pats = Sub(pats, taken) + ['path:'+f for f in files]
	# work-around for http://selenic.com/hg/rev/785bbc8634f8
	if not hasattr(ctx, 'match'):
		ctx = ctx[None]
	return match_orig(ctx, pats=pats, opts=opts, globbed=globbed, default=default)
#######################################################################
# Commands added by code review extension.
# Decorator marking a function as an hg command; currently a no-op
# kept for readability and future use.
def hgcommand(f):
	return f
#######################################################################
# hg change
@hgcommand
def change(ui, repo, *pats, **opts):
	"""create, edit or delete a change list

	Create, edit or delete a change list.
	A change list is a group of files to be reviewed and submitted together,
	plus a textual description of the change.
	Change lists are referred to by simple alphanumeric names.
	Changes must be reviewed before they can be submitted.
	In the absence of options, the change command opens the
	change list for editing in the default editor.
	Deleting a change with the -d or -D flag does not affect
	the contents of the files listed in that change. To revert
	the files listed in a change, use
		hg revert @123456
	before running hg change -d 123456.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	# dirty maps CL -> True for CLs needing a Flush at the end.
	dirty = {}
	if len(pats) > 0 and GoodCLName(pats[0]):
		# Editing an existing CL.
		name = pats[0]
		if len(pats) != 1:
			raise hg_util.Abort("cannot specify CL name and file patterns")
		pats = pats[1:]
		cl, err = LoadCL(ui, repo, name, web=True)
		if err != '':
			raise hg_util.Abort(err)
		if not cl.local and (opts["stdin"] or not opts["stdout"]):
			raise hg_util.Abort("cannot change non-local CL " + name)
	else:
		# Creating a new CL from the given file patterns.
		name = "new"
		cl = CL("new")
		if not workbranch(repo[None].branch()):
			raise hg_util.Abort("cannot create CL outside default branch; switch with 'hg update default'")
		dirty[cl] = True
		files = ChangedFiles(ui, repo, pats, taken=Taken(ui, repo))
	if opts["delete"] or opts["deletelocal"]:
		# -d deletes on the server and locally; -D deletes only locally.
		if opts["delete"] and opts["deletelocal"]:
			raise hg_util.Abort("cannot use -d and -D together")
		flag = "-d"
		if opts["deletelocal"]:
			flag = "-D"
		if name == "new":
			raise hg_util.Abort("cannot use "+flag+" with file patterns")
		if opts["stdin"] or opts["stdout"]:
			raise hg_util.Abort("cannot use "+flag+" with -i or -o")
		if not cl.local:
			raise hg_util.Abort("cannot change non-local CL " + name)
		if opts["delete"]:
			if cl.copied_from:
				raise hg_util.Abort("original author must delete CL; hg change -D will remove locally")
			PostMessage(ui, cl.name, "*** Abandoned ***", send_mail=cl.mailed)
			EditDesc(cl.name, closed=True, private=cl.private)
		cl.Delete(ui, repo)
		return
	if opts["stdin"]:
		# Read the CL text from stdin instead of an editor session.
		s = sys.stdin.read()
		clx, line, err = ParseCL(s, name)
		if err != '':
			raise hg_util.Abort("error parsing change list: line %d: %s" % (line, err))
		if clx.desc is not None:
			cl.desc = clx.desc;
			dirty[cl] = True
		if clx.reviewer is not None:
			cl.reviewer = clx.reviewer
			dirty[cl] = True
		if clx.cc is not None:
			cl.cc = clx.cc
			dirty[cl] = True
		if clx.files is not None:
			cl.files = clx.files
			dirty[cl] = True
		if clx.private != cl.private:
			cl.private = clx.private
			dirty[cl] = True
	if not opts["stdin"] and not opts["stdout"]:
		# Interactive path: open the CL in the editor.
		if name == "new":
			cl.files = files
		err = EditCL(ui, repo, cl)
		if err != "":
			raise hg_util.Abort(err)
		dirty[cl] = True
	# Persist (and, for a new CL, upload) everything that changed.
	for d, _ in dirty.items():
		name = d.name
		d.Flush(ui, repo)
		if name == "new":
			d.Upload(ui, repo, quiet=True)
	if opts["stdout"]:
		ui.write(cl.EditorText())
	elif opts["pending"]:
		ui.write(cl.PendingText())
	elif name == "new":
		if ui.quiet:
			ui.write(cl.name)
		else:
			ui.write("CL created: " + cl.url + "\n")
	return
#######################################################################
# hg code-login (broken?)
@hgcommand
def code_login(ui, repo, **opts):
	"""log in to code review server

	Logs in to the code review server, saving a cookie in
	a file in your home directory.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	# MySend(None) performs the authentication handshake only.
	MySend(None)
#######################################################################
# hg clpatch / undo / release-apply / download
# All concerned with applying or unapplying patches to the repository.
@hgcommand
def clpatch(ui, repo, clname, **opts):
	"""import a patch from the code review server

	Imports a patch from the code review server into the local client.
	If the local client has already modified any of the files that the
	patch modifies, this command will refuse to apply the patch.
	Submitting an imported patch will keep the original author's
	name as the Author: line but add your own name to a Committer: line.
	"""
	if not workbranch(repo[None].branch()):
		raise hg_util.Abort("cannot run hg clpatch outside default branch")
	# Shared implementation with undo/release-apply; mode selects behavior.
	err = clpatch_or_undo(ui, repo, clname, opts, mode="clpatch")
	if err:
		raise hg_util.Abort(err)
@hgcommand
def undo(ui, repo, clname, **opts):
	"""undo the effect of a CL
	Creates a new CL that undoes an earlier CL.
	After creating the CL, opens the CL text for editing so that
	you can add the reason for the undo to the description.
	"""
	if not workbranch(repo[None].branch()):
		raise hg_util.Abort("cannot run hg undo outside default branch")
	# Shared implementation; mode="undo" reverses the named CL's diff.
	err = clpatch_or_undo(ui, repo, clname, opts, mode="undo")
	if err:
		raise hg_util.Abort(err)
@hgcommand
def release_apply(ui, repo, clname, **opts):
	"""apply a CL to the release branch
	Creates a new CL copying a previously committed change
	from the main branch to the release branch.
	The current client must either be clean or already be in
	the release branch.
	The release branch must be created by starting with a
	clean client, disabling the code review plugin, and running:
	hg update weekly.YYYY-MM-DD
	hg branch release-branch.rNN
	hg commit -m 'create release-branch.rNN'
	hg push --new-branch
	Then re-enable the code review plugin.
	People can test the release branch by running
	hg update release-branch.rNN
	in a clean client. To return to the normal tree,
	hg update default
	Move changes since the weekly into the release branch
	using hg release-apply followed by the usual code review
	process and hg submit.
	When it comes time to tag the release, record the
	final long-form tag of the release-branch.rNN
	in the *default* branch's .hgtags file. That is, run
	hg update default
	and then edit .hgtags as you would for a weekly.
	"""
	c = repo[None]
	if not releaseBranch:
		raise hg_util.Abort("no active release branches")
	# Switch to the release branch first, but only from a clean client.
	if c.branch() != releaseBranch:
		if c.modified() or c.added() or c.removed():
			raise hg_util.Abort("uncommitted local changes - cannot switch branches")
		err = hg_clean(repo, releaseBranch)
		if err:
			raise hg_util.Abort(err)
	try:
		err = clpatch_or_undo(ui, repo, clname, opts, mode="backport")
		if err:
			raise hg_util.Abort(err)
	except Exception, e:
		# On any failure, return the client to the default branch
		# before propagating the error.
		hg_clean(repo, "default")
		raise e
def rev2clname(rev):
	"""Return the CL number named in a revision's description, or "".
	A description can contain several codereview URLs (earlier ones may be
	part of the user-written text); the last full-line URL is authoritative.
	"""
	matches = re.findall('(?m)^https?://codereview.appspot.com/([0-9]+)$', rev.description())
	return matches[-1] if matches else ""
# Boilerplate wrapped around the original CL description by
# clpatch_or_undo: undo* for "hg undo", backport* for "hg release-apply".
# The guillemet markers delimit the quoted original description.
undoHeader = """undo CL %s / %s
<enter reason for undo>
««« original CL description
"""
undoFooter = """
»»»
"""
backportHeader = """[%s] %s
««« CL %s / %s
"""
backportFooter = """
»»»
"""
# Implementation of clpatch/undo.
def clpatch_or_undo(ui, repo, clname, opts, mode):
	"""Shared implementation of clpatch, undo, and release-apply.
	mode is "clpatch", "undo", or "backport". Builds a patch (downloaded
	from Rietveld for clpatch, reverse/forward diff of a local revision
	otherwise), pipes it through hgapplydiff, and records the resulting
	CL. Returns an error string on failure; callers treat any true
	return value as an error.
	"""
	if codereview_disabled:
		return codereview_disabled
	if mode == "undo" or mode == "backport":
		# Find revision in Mercurial repository.
		# Assume CL number is 7+ decimal digits.
		# Otherwise is either change log sequence number (fewer decimal digits),
		# hexadecimal hash, or tag name.
		# Mercurial will fall over long before the change log
		# sequence numbers get to be 7 digits long.
		if re.match('^[0-9]{7,}$', clname):
			found = False
			for r in hg_log(ui, repo, keyword="codereview.appspot.com/"+clname, limit=100, template="{node}\n").split():
				rev = repo[r]
				# Last line with a code review URL is the actual review URL.
				# Earlier ones might be part of the CL description.
				n = rev2clname(rev)
				if n == clname:
					found = True
					break
			if not found:
				return "cannot find CL %s in local repository" % clname
		else:
			rev = repo[clname]
			if not rev:
				return "unknown revision %s" % clname
			clname = rev2clname(rev)
			if clname == "":
				return "cannot find CL name in revision description"
		# Create fresh CL and start with patch that would reverse the change.
		vers = hg_node.short(rev.node())
		cl = CL("new")
		desc = str(rev.description())
		if mode == "undo":
			cl.desc = (undoHeader % (clname, vers)) + desc + undoFooter
		else:
			# NOTE(review): appends undoFooter, not backportFooter — harmless
			# today because the two strings are identical, but confirm intent.
			cl.desc = (backportHeader % (releaseBranch, line1(desc), clname, vers)) + desc + undoFooter
		v1 = vers
		v0 = hg_node.short(rev.parents()[0].node())
		# undo diffs new->old (reversal); backport diffs old->new.
		if mode == "undo":
			arg = v1 + ":" + v0
		else:
			vers = v0
			arg = v0 + ":" + v1
		patch = RunShell(["hg", "diff", "--git", "-r", arg])
	else: # clpatch
		cl, vers, patch, err = DownloadCL(ui, repo, clname)
		if err != "":
			return err
		if patch == emptydiff:
			return "codereview issue %s has no diff" % clname
	# find current hg version (hg identify)
	ctx = repo[None]
	parents = ctx.parents()
	id = '+'.join([hg_node.short(p.node()) for p in parents])
	# if version does not match the patch version,
	# try to update the patch line numbers.
	if vers != "" and id != vers:
		# "vers in repo" gives the wrong answer
		# on some versions of Mercurial. Instead, do the actual
		# lookup and catch the exception.
		try:
			repo[vers].description()
		except:
			return "local repository is out of date; sync to get %s" % (vers)
		patch1, err = portPatch(repo, patch, vers, id)
		if err != "":
			if not opts["ignore_hgapplydiff_failure"]:
				return "codereview issue %s is out of date: %s (%s->%s)" % (clname, err, vers, id)
		else:
			patch = patch1
	argv = ["hgapplydiff"]
	if opts["no_incoming"] or mode == "backport":
		argv += ["--checksync=false"]
	try:
		cmd = subprocess.Popen(argv, shell=False, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, close_fds=sys.platform != "win32")
	except:
		return "hgapplydiff: " + ExceptionDetail() + "\nInstall hgapplydiff with:\n$ go get code.google.com/p/go.codereview/cmd/hgapplydiff\n"
	# hgapplydiff prints the list of files it touched on stdout.
	out, err = cmd.communicate(patch)
	if cmd.returncode != 0 and not opts["ignore_hgapplydiff_failure"]:
		return "hgapplydiff failed"
	cl.local = True
	cl.files = out.strip().split()
	if not cl.files and not opts["ignore_hgapplydiff_failure"]:
		return "codereview issue %s has no changed files" % clname
	files = ChangedFiles(ui, repo, [])
	extra = Sub(cl.files, files)
	if extra:
		ui.warn("warning: these files were listed in the patch but not changed:\n\t" + "\n\t".join(extra) + "\n")
	cl.Flush(ui, repo)
	if mode == "undo":
		# Let the user record the reason for the undo in the description.
		err = EditCL(ui, repo, cl)
		if err != "":
			return "CL created, but error editing: " + err
		cl.Flush(ui, repo)
	else:
		ui.write(cl.PendingText() + "\n")
# portPatch rewrites patch from being a patch against
# oldver to being a patch against newver.
def portPatch(repo, patch, oldver, newver):
	"""Shift a git-style diff's @@ hunk line numbers from oldver to newver.

	Returns (newpatch, err). On success err is ""; on failure newpatch is
	"" and err describes the problem. Callers must check err before using
	newpatch.
	"""
	lines = patch.splitlines(True) # True = keep \n
	delta = None
	for i in range(len(lines)):
		line = lines[i]
		if line.startswith('--- a/'):
			# Entering a new file section: compute its deltas once.
			file = line[6:-1]
			delta = fileDeltas(repo, file, oldver, newver)
		if not delta or not line.startswith('@@ '):
			continue
		# @@ -x,y +z,w @@ means the patch chunk replaces
		# the original file's line numbers x up to x+y with the
		# line numbers z up to z+w in the new file.
		# Find the delta from x in the original to the same
		# line in the current version and add that delta to both
		# x and z.
		m = re.match('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@', line)
		if not m:
			# Fixed: this path returned (None, err) while the other error
			# path returned ("", err); normalized to the (str, str) contract.
			return "", "error parsing patch line numbers"
		n1, len1, n2, len2 = int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4))
		d, err = lineDelta(delta, n1, len1)
		if err != "":
			return "", err
		n1 += d
		n2 += d
		lines[i] = "@@ -%d,%d +%d,%d @@\n" % (n1, len1, n2, len2)
	newpatch = ''.join(lines)
	return newpatch, ""
# fileDeltas computes the line-number deltas for one file's changes
# between oldver and newver by scanning the hunk headers of
# "hg diff --git -r oldver:newver path:file".
# Each triple (n, len, newdelta) says lines [n, n+len) were modified,
# and after that range the line numbers are shifted by newdelta from
# what they were before.
def fileDeltas(repo, file, oldver, newver):
	diff = RunShell(["hg", "diff", "--git", "-r", oldver + ":" + newver, "path:" + file], silent_ok=True)
	hunk = re.compile('@@ -([0-9]+),([0-9]+) \+([0-9]+),([0-9]+) @@')
	result = []
	for text in diff.splitlines():
		m = hunk.match(text)
		if m:
			oldstart, oldlen, newstart, newlen = [int(g) for g in m.groups()]
			result.append((oldstart, oldlen, newstart + newlen - (oldstart + oldlen)))
	return result
# lineDelta finds the appropriate line number delta to apply to the lines [n, n+len).
# It returns an error if those lines were rewritten by the patch.
def lineDelta(deltas, n, len):
	"""Return (delta, err) for the chunk covering lines [n, n+len).

	deltas is the ascending list from fileDeltas: (old, oldlen, newdelta)
	triples meaning lines [old, old+oldlen) changed and lines after that
	range are shifted by newdelta. err is "" unless the chunk overlaps a
	changed region, in which case the port cannot be done automatically.
	"""
	d = 0
	for (old, oldlen, newdelta) in deltas:
		if old >= n+len:
			# This change is entirely after our chunk; earlier deltas decide.
			break
		if old+oldlen > n:
			# BUG FIX: was "old+len > n", which measured the changed region
			# with the chunk's own length instead of oldlen and so missed
			# real overlaps. Here [old, old+oldlen) and [n, n+len) overlap.
			return 0, "patch and recent changes conflict"
		d = newdelta
	return d, ""
@hgcommand
def download(ui, repo, clname, **opts):
	"""download a change from the code review server
	Download prints a description of the given change list
	followed by its diff, downloaded from the code review server.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	cl, vers, patch, err = DownloadCL(ui, repo, clname)
	if err != "":
		# NOTE(review): errors are returned here rather than raised as
		# hg_util.Abort like sibling commands — confirm the hgcommand
		# wrapper reports returned errors to the user.
		return err
	ui.write(cl.EditorText() + "\n")
	ui.write(patch + "\n")
	return
#######################################################################
# hg file
@hgcommand
def file(ui, repo, clname, pat, *pats, **opts):
	"""assign files to or remove files from a change list
	Assign files to or (with -d) remove files from a change list.
	The -d option only removes files from the change list.
	It does not edit them or remove them from the repository.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	pats = tuple([pat] + list(pats))
	if not GoodCLName(clname):
		return "invalid CL name " + clname
	# dirty collects CLs whose file lists changed and need flushing.
	dirty = {}
	cl, err = LoadCL(ui, repo, clname, web=False)
	if err != '':
		return err
	if not cl.local:
		return "cannot change non-local CL " + clname
	files = ChangedFiles(ui, repo, pats)
	if opts["delete"]:
		# -d: drop the matched files from this CL only.
		oldfiles = Intersect(files, cl.files)
		if oldfiles:
			if not ui.quiet:
				ui.status("# Removing files from CL. To undo:\n")
				ui.status("#	cd %s\n" % (repo.root))
				for f in oldfiles:
					ui.status("#	hg file %s %s\n" % (cl.name, f))
			cl.files = Sub(cl.files, oldfiles)
			cl.Flush(ui, repo)
		else:
			ui.status("no such files in CL")
		return
	if not files:
		return "no such modified files"
	files = Sub(files, cl.files)
	# A file can belong to only one CL: steal any that are already taken.
	taken = Taken(ui, repo)
	warned = False
	for f in files:
		if f in taken:
			if not warned and not ui.quiet:
				ui.status("# Taking files from other CLs. To undo:\n")
				ui.status("#	cd %s\n" % (repo.root))
				warned = True
			ocl = taken[f]
			if not ui.quiet:
				ui.status("#	hg file %s %s\n" % (ocl.name, f))
			if ocl not in dirty:
				ocl.files = Sub(ocl.files, files)
				dirty[ocl] = True
	cl.files = Add(cl.files, files)
	dirty[cl] = True
	for d, _ in dirty.items():
		d.Flush(ui, repo)
	return
#######################################################################
# hg gofmt
@hgcommand
def gofmt(ui, repo, *pats, **opts):
	"""apply gofmt to modified files
	Applies gofmt to the modified files in the repository that match
	the given patterns.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	files = ChangedExistingFiles(ui, repo, pats, opts)
	files = gofmt_required(files)
	if not files:
		ui.status("no modified go files\n")
		return
	cwd = os.getcwd()
	files = [RelativePath(repo.root + '/' + f, cwd) for f in files]
	try:
		# -l lists files that would change; without --list also rewrite (-w).
		cmd = ["gofmt", "-l"]
		if not opts["list"]:
			cmd += ["-w"]
		if subprocess.call(cmd + files) != 0:
			raise hg_util.Abort("gofmt did not exit cleanly")
	except hg_error.Abort, e:
		# Pass our own Abort through untouched.
		raise
	except:
		raise hg_util.Abort("gofmt: " + ExceptionDetail())
	return
# A file needs gofmt if it is a .go file outside test/, or under
# test/bench/ (other files under test/ are deliberately left alone).
def gofmt_required(files):
	def wanted(name):
		if not name.endswith('.go'):
			return False
		return not name.startswith('test/') or name.startswith('test/bench/')
	return [name for name in files if wanted(name)]
#######################################################################
# hg mail
@hgcommand
def mail(ui, repo, *pats, **opts):
	"""mail a change for review
	Uploads a patch to the code review server and then sends mail
	to the reviewer and CC list asking for a review.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	cl, err = CommandLineCL(ui, repo, pats, opts, op="mail", defaultcc=defaultcc)
	if err != "":
		raise hg_util.Abort(err)
	cl.Upload(ui, repo, gofmt_just_warn=True)
	if not cl.reviewer:
		# If no reviewer is listed, assign the review to defaultcc.
		# This makes sure that it appears in the
		# codereview.appspot.com/user/defaultcc
		# page, so that it doesn't get dropped on the floor.
		if not defaultcc or cl.private:
			raise hg_util.Abort("no reviewers listed in CL")
		cl.cc = Sub(cl.cc, defaultcc)
		cl.reviewer = defaultcc
		cl.Flush(ui, repo)
	if cl.files == []:
		raise hg_util.Abort("no changed files, not sending mail")
	cl.Mail(ui, repo)
#######################################################################
# hg p / hg pq / hg ps / hg pending
@hgcommand
def ps(ui, repo, *pats, **opts):
	"""alias for hg p --short
	"""
	# Delegates to pending with the short-form flag forced on.
	opts['short'] = True
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pq(ui, repo, *pats, **opts):
	"""alias for hg p --quick
	"""
	# Delegates to pending with the no-server (quick) flag forced on.
	opts['quick'] = True
	return pending(ui, repo, *pats, **opts)
@hgcommand
def pending(ui, repo, *pats, **opts):
	"""show pending changes
	Lists pending changes followed by a list of unassigned but modified files.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	quick = opts.get('quick', False)
	short = opts.get('short', False)
	# Only consult the codereview server when neither fast mode is set.
	m = LoadAllCL(ui, repo, web=not quick and not short)
	# Python 2: dict.keys() returns a plain (sortable) list.
	names = m.keys()
	names.sort()
	for name in names:
		cl = m[name]
		if short:
			ui.write(name + "\t" + line1(cl.desc) + "\n")
		else:
			ui.write(cl.PendingText(quick=quick) + "\n")
	if short:
		return 0
	files = DefaultFiles(ui, repo, [])
	if len(files) > 0:
		s = "Changed files not in any CL:\n"
		for f in files:
			s += "\t" + f + "\n"
		ui.write(s)
#######################################################################
# hg submit
# Abort the current command because the local repository is behind the remote.
def need_sync():
	raise hg_util.Abort("local repository out of date; must sync before submit")
def branch_prefix(ui, repo):
	"""Return "[branchname] " for dev.* branches, "" otherwise.
	Commit messages made on development branches are tagged with the
	branch name; the default branch gets no prefix.
	"""
	name = repo[None].branch()
	if name.startswith("dev."):
		return "[" + name + "] "
	return ""
@hgcommand
def submit(ui, repo, *pats, **opts):
	"""submit change to remote repository
	Submits change to remote repository.
	Bails out if the local repository is not in sync with the remote one.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	# We already called this on startup but sometimes Mercurial forgets.
	set_mercurial_encoding_to_utf8()
	if not opts["no_incoming"] and hg_incoming(ui, repo):
		need_sync()
	cl, err = CommandLineCL(ui, repo, pats, opts, op="submit", defaultcc=defaultcc)
	if err != "":
		raise hg_util.Abort(err)
	# For a CL copied from someone else (clpatch), commit under the
	# original author's name; CheckContributor validates it.
	user = None
	if cl.copied_from:
		user = cl.copied_from
	userline = CheckContributor(ui, repo, user)
	typecheck(userline, str)
	# Build the LGTM=/TBR=/R=/CC= trailer block for the commit message.
	about = ""
	if not cl.lgtm and not opts.get('tbr') and needLGTM(cl):
		raise hg_util.Abort("this CL has not been LGTM'ed")
	if cl.lgtm:
		about += "LGTM=" + JoinComma([CutDomain(who) for (who, line, approval) in cl.lgtm if approval]) + "\n"
	reviewer = cl.reviewer
	if opts.get('tbr'):
		tbr = SplitCommaSpace(opts.get('tbr'))
		for name in tbr:
			if name.startswith('golang-'):
				raise hg_util.Abort("--tbr requires a person, not a mailing list")
		cl.reviewer = Add(cl.reviewer, tbr)
		about += "TBR=" + JoinComma([CutDomain(s) for s in tbr]) + "\n"
	if reviewer:
		about += "R=" + JoinComma([CutDomain(s) for s in reviewer]) + "\n"
	if cl.cc:
		about += "CC=" + JoinComma([CutDomain(s) for s in cl.cc]) + "\n"
	if not cl.reviewer and needLGTM(cl):
		raise hg_util.Abort("no reviewers listed in CL")
	if not cl.local:
		raise hg_util.Abort("cannot submit non-local CL")
	# upload, to sync current patch and also get change number if CL is new.
	if not cl.copied_from:
		cl.Upload(ui, repo, gofmt_just_warn=True)
	# check gofmt for real; allowed upload to warn in order to save CL.
	cl.Flush(ui, repo)
	CheckFormat(ui, repo, cl.files)
	about += "%s%s\n" % (server_url_base, cl.name)
	if cl.copied_from:
		about += "\nCommitter: " + CheckContributor(ui, repo, None) + "\n"
	typecheck(about, str)
	if not cl.mailed and not cl.copied_from: # in case this is TBR
		cl.Mail(ui, repo)
	# submit changes locally
	message = branch_prefix(ui, repo) + cl.desc.rstrip() + "\n\n" + about
	typecheck(message, str)
	set_status("pushing " + cl.name + " to remote server")
	if hg_outgoing(ui, repo):
		raise hg_util.Abort("local repository corrupt or out-of-phase with remote: found outgoing changes")
	old_heads = len(hg_heads(ui, repo).split())
	# Normally we commit listing the specific files in the CL.
	# If there are no changed files other than those in the CL, however,
	# let hg build the list, because then committing a merge works.
	# (You cannot name files for a merge commit, even if you name
	# all the files that would be committed by not naming any.)
	files = ['path:'+f for f in cl.files]
	if ChangedFiles(ui, repo, []) == cl.files:
		files = []
	# The precommit hook rejects commits unless commit_okay is set,
	# so that only this extension can commit.
	global commit_okay
	commit_okay = True
	ret = hg_commit(ui, repo, *files, message=message, user=userline)
	commit_okay = False
	if ret:
		raise hg_util.Abort("nothing changed")
	node = repo["-1"].node()
	# push to remote; if it fails for any reason, roll back
	try:
		new_heads = len(hg_heads(ui, repo).split())
		if old_heads != new_heads and not (old_heads == 0 and new_heads == 1):
			# Created new head, so we weren't up to date.
			need_sync()
		# Push changes to remote. If it works, we're committed. If not, roll back.
		try:
			if hg_push(ui, repo):
				raise hg_util.Abort("push error")
		except hg_error.Abort, e:
			if e.message.find("push creates new heads") >= 0:
				# Remote repository had changes we missed.
				need_sync()
			raise
		except urllib2.HTTPError, e:
			print >>sys.stderr, "pushing to remote server failed; do you have commit permissions?"
			raise
	except:
		# Undo the local commit before re-raising so the client
		# is left in its pre-submit state.
		real_rollback()
		raise
	# We're committed. Upload final patch, close review, add commit message.
	changeURL = hg_node.short(node)
	url = ui.expandpath("default")
	m = re.match("(^https?://([^@/]+@)?([^.]+)\.googlecode\.com/hg/?)" + "|" +
		"(^https?://([^@/]+@)?code\.google\.com/p/([^/.]+)(\.[^./]+)?/?)", url)
	if m:
		if m.group(1): # prj.googlecode.com/hg/ case
			changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(3), changeURL)
		elif m.group(4) and m.group(7): # code.google.com/p/prj.subrepo/ case
			changeURL = "https://code.google.com/p/%s/source/detail?r=%s&repo=%s" % (m.group(6), changeURL, m.group(7)[1:])
		elif m.group(4): # code.google.com/p/prj/ case
			changeURL = "https://code.google.com/p/%s/source/detail?r=%s" % (m.group(6), changeURL)
		else:
			print >>sys.stderr, "URL: ", url
	else:
		print >>sys.stderr, "URL: ", url
	pmsg = "*** Submitted as " + changeURL + " ***\n\n" + message
	# When posting, move reviewers to CC line,
	# so that the issue stops showing up in their "My Issues" page.
	PostMessage(ui, cl.name, pmsg, reviewers="", cc=JoinComma(cl.reviewer+cl.cc))
	if not cl.copied_from:
		EditDesc(cl.name, closed=True, private=cl.private)
	cl.Delete(ui, repo)
	c = repo[None]
	if c.branch() == releaseBranch and not c.modified() and not c.added() and not c.removed():
		ui.write("switching from %s to default branch.\n" % releaseBranch)
		err = hg_clean(repo, "default")
		if err:
			return err
	return 0
def needLGTM(cl):
	"""Report whether cl must carry an LGTM before submit.
	In this tree every branch returns False: the generated-A+C and
	doc/go1.*.txt exemptions are kept for parity, and the final case is
	also False because here there are only committers.
	"""
	reviewers = cl.reviewer
	fromGobot = ('gobot' in reviewers or 'gobot@swtch.com' in reviewers
		or 'gobot@golang.org' in reviewers)
	# A+C CLs generated by the a+c tool do not need LGTM.
	if fromGobot and cl.desc.startswith('A+C:') and 'Generated by a+c.' in cl.desc:
		return False
	# Neither do CLs touching only a doc/go1.*.txt file.
	if len(cl.files) == 1 and cl.files[0].startswith('doc/go1.') and cl.files[0].endswith('.txt'):
		return False
	# Other CLs would need LGTM,
	# but not on gofrontend where there is only committed.
	return False
#######################################################################
# hg sync
@hgcommand
def sync(ui, repo, **opts):
	"""synchronize with remote repository
	Incorporates recent changes from the remote repository
	into the local repository.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	if not opts["local"]:
		# If there are incoming CLs, pull -u will do the update.
		# If there are no incoming CLs, do hg update to make sure
		# that an update always happens regardless. This is less
		# surprising than update depending on incoming CLs.
		# It is important not to do both hg pull -u and hg update
		# in the same command, because the hg update will end
		# up marking resolve conflicts from the hg pull -u as resolved,
		# causing files with <<< >>> markers to not show up in
		# hg resolve -l. Yay Mercurial.
		if hg_incoming(ui, repo):
			err = hg_pull(ui, repo, update=True)
		else:
			err = hg_update(ui, repo)
		if err:
			return err
	# Close any local CLs that the incoming changes submitted.
	sync_changes(ui, repo)
# Close local CLs that were submitted upstream and prune stale file lists.
def sync_changes(ui, repo):
	# Look through recent change log descriptions to find
	# potential references to http://.*/our-CL-number.
	# Double-check them by looking at the Rietveld log.
	for rev in hg_log(ui, repo, limit=100, template="{node}\n").split():
		desc = repo[rev].description().strip()
		for clname in re.findall('(?m)^https?://(?:[^\n]+)/([0-9]+)$', desc):
			if IsLocalCL(ui, repo, clname) and IsRietveldSubmitted(ui, clname, repo[rev].hex()):
				ui.warn("CL %s submitted as %s; closing\n" % (clname, repo[rev]))
				cl, err = LoadCL(ui, repo, clname, web=False)
				if err != "":
					ui.warn("loading CL %s: %s\n" % (clname, err))
					continue
				if not cl.copied_from:
					# Only close the Rietveld issue for CLs we own.
					EditDesc(cl.name, closed=True, private=cl.private)
				cl.Delete(ui, repo)
	# Remove files that are not modified from the CLs in which they appear.
	all = LoadAllCL(ui, repo, web=False)
	changed = ChangedFiles(ui, repo, [])
	for cl in all.values():
		extra = Sub(cl.files, changed)
		if extra:
			ui.warn("Removing unmodified files from CL %s:\n" % (cl.name,))
			for f in extra:
				ui.warn("\t%s\n" % (f,))
			cl.files = Sub(cl.files, extra)
			cl.Flush(ui, repo)
		if not cl.files:
			if not cl.copied_from:
				ui.warn("CL %s has no files; delete (abandon) with hg change -d %s\n" % (cl.name, cl.name))
			else:
				ui.warn("CL %s has no files; delete locally with hg change -D %s\n" % (cl.name, cl.name))
	return 0
#######################################################################
# hg upload
@hgcommand
def upload(ui, repo, name, **opts):
	"""upload diffs to the code review server
	Uploads the current modifications for a given change to the server.
	"""
	if codereview_disabled:
		raise hg_util.Abort(codereview_disabled)
	# Quiet mode: the only output should be the issue URL printed below.
	repo.ui.quiet = True
	cl, err = LoadCL(ui, repo, name, web=True)
	if err != "":
		raise hg_util.Abort(err)
	if not cl.local:
		raise hg_util.Abort("cannot upload non-local change")
	cl.Upload(ui, repo)
	print "%s%s\n" % (server_url_base, cl.name)
	return 0
#######################################################################
# Table of commands, supplied to Mercurial for installation.
# Review-related command-line options shared by the mail and submit
# commands (Mercurial option tuples: shortname, longname, default, help).
review_opts = [
	('r', 'reviewer', '', 'add reviewer'),
	('', 'cc', '', 'add cc'),
	('', 'tbr', '', 'add future reviewer'),
	('m', 'message', '', 'change description (for new change)'),
]
# Each entry maps a command name to (function, option table, synopsis),
# the shape Mercurial expects from an extension's cmdtable.
cmdtable = {
	# The ^ means to show this command in the help text that
	# is printed when running hg with no arguments.
	"^change": (
		change,
		[
			('d', 'delete', None, 'delete existing change list'),
			('D', 'deletelocal', None, 'delete locally, but do not change CL on server'),
			('i', 'stdin', None, 'read change list from standard input'),
			('o', 'stdout', None, 'print change list to standard output'),
			('p', 'pending', None, 'print pending summary to standard output'),
		],
		"[-d | -D] [-i] [-o] change# or FILE ..."
	),
	"^clpatch": (
		clpatch,
		[
			('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# Would prefer to call this codereview-login, but then
	# hg help codereview prints the help for this command
	# instead of the help for the extension.
	"code-login": (
		code_login,
		[],
		"",
	),
	"^download": (
		download,
		[],
		"change#"
	),
	"^file": (
		file,
		[
			('d', 'delete', None, 'delete files from change list (but not repository)'),
		],
		"[-d] change# FILE ..."
	),
	"^gofmt": (
		gofmt,
		[
			('l', 'list', None, 'list files that would change, but do not edit them'),
		],
		"FILE ..."
	),
	"^pending|p": (
		pending,
		[
			('s', 'short', False, 'show short result form'),
			('', 'quick', False, 'do not consult codereview server'),
		],
		"[FILE ...]"
	),
	"^ps": (
		ps,
		[],
		"[FILE ...]"
	),
	"^pq": (
		pq,
		[],
		"[FILE ...]"
	),
	"^mail": (
		mail,
		review_opts + [
		] + hg_commands.walkopts,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^release-apply": (
		release_apply,
		[
			('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	# TODO: release-start, release-tag, weekly-tag
	"^submit": (
		submit,
		review_opts + [
			('', 'no_incoming', None, 'disable initial incoming check (for testing)'),
		] + hg_commands.walkopts + hg_commands.commitopts + hg_commands.commitopts2,
		"[-r reviewer] [--cc cc] [change# | file ...]"
	),
	"^sync": (
		sync,
		[
			('', 'local', None, 'do not pull changes from remote repository')
		],
		"[--local]",
	),
	"^undo": (
		undo,
		[
			('', 'ignore_hgapplydiff_failure', None, 'create CL metadata even if hgapplydiff fails'),
			('', 'no_incoming', None, 'disable check for incoming changes'),
		],
		"change#"
	),
	"^upload": (
		upload,
		[],
		"change#"
	),
}
#######################################################################
# Mercurial extension initialization
# Installed over repo.rollback in reposetup so that users cannot
# remove commits behind the extension's back; see hg undo instead.
def norollback(*pats, **opts):
	"""(disabled when using this extension)"""
	raise hg_util.Abort("codereview extension enabled; use undo instead of rollback")
# reposetup runs for every repository touched by a push/pull; this flag
# makes the one-time initialization there happen only once.
codereview_init = False
def uisetup(ui):
	# "testing" switches on test-mode behavior elsewhere in the extension
	# (e.g. MySend1 rewrites https:// URLs to http://).
	global testing
	testing = ui.config("codereview", "testing")
	# Disable the Mercurial commands that might change the repository.
	# Only commands in this extension are supposed to do that.
	ui.setconfig("hooks", "pre-commit.codereview", precommithook) # runs before 'hg commit'
	ui.setconfig("hooks", "precommit.codereview", precommithook) # catches all cases
def reposetup(ui, repo):
	"""One-time per-process setup: read codereview.cfg, validate the
	default remote path, wire up Rietveld, and disable rollback.
	Sets the module-global codereview_disabled (an explanatory string)
	instead of raising when the repository cannot support code review.
	"""
	global codereview_disabled
	global defaultcc
	# reposetup gets called both for the local repository
	# and also for any repository we are pulling or pushing to.
	# Only initialize the first time.
	global codereview_init
	if codereview_init:
		return
	codereview_init = True
	start_status_thread()
	# Read repository-specific options from support/codereview/codereview.cfg or codereview.cfg.
	root = ''
	try:
		root = repo.root
	except:
		# Yes, repo might not have root; see issue 959.
		codereview_disabled = 'codereview disabled: repository has no root'
		return
	repo_config_path = ''
	p1 = root + '/support/codereview/codereview.cfg'
	p2 = root + '/codereview.cfg'
	if os.access(p1, os.F_OK):
		repo_config_path = p1
	else:
		repo_config_path = p2
	try:
		f = open(repo_config_path)
		try:
			for line in f:
				if line.startswith('defaultcc:'):
					defaultcc = SplitCommaSpace(line[len('defaultcc:'):])
				if line.startswith('contributors:'):
					global contributorsURL
					contributorsURL = line[len('contributors:'):].strip()
		finally:
			# Fixed: the config file handle was previously never closed.
			f.close()
	except:
		codereview_disabled = 'codereview disabled: cannot open ' + repo_config_path
		return
	remote = ui.config("paths", "default", "")
	if remote.find("://") < 0 and not testing:
		raise hg_util.Abort("codereview: default path '%s' is not a URL" % (remote,))
	InstallMatch(ui, repo)
	RietveldSetup(ui, repo)
	# Rollback removes an existing commit. Don't do that either.
	global real_rollback
	real_rollback = repo.rollback
	repo.rollback = norollback
#######################################################################
# Wrappers around upload.py for interacting with Rietveld
from HTMLParser import HTMLParser
# HTML form parser
class FormParser(HTMLParser):
	"""Collects the fields of an HTML form.
	After feeding a page, self.map holds name -> value for every
	<input> and <textarea> element encountered.
	"""
	def __init__(self):
		self.map = {}       # field name -> field value
		self.curtag = None  # name of the <textarea> currently open, if any
		self.curdata = None # text accumulated for that textarea
		HTMLParser.__init__(self)
	def handle_starttag(self, tag, attrs):
		# <input>: value comes from attributes; record it immediately.
		if tag == "input":
			key = None
			value = ''
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
				if a[0] == 'value':
					value = a[1]
			if key is not None:
				self.map[key] = value
		# <textarea>: value is the element body; start accumulating.
		if tag == "textarea":
			key = None
			for a in attrs:
				if a[0] == 'name':
					key = a[1]
			if key is not None:
				self.curtag = key
				self.curdata = ''
	def handle_endtag(self, tag):
		# Closing </textarea> commits the accumulated body.
		if tag == "textarea" and self.curtag is not None:
			self.map[self.curtag] = self.curdata
			self.curtag = None
			self.curdata = None
	def handle_charref(self, name):
		# Numeric entity (&#NNN;) -> character (Python 2 unichr).
		self.handle_data(unichr(int(name)))
	def handle_entityref(self, name):
		# Named entity; unknown names are passed through literally.
		import htmlentitydefs
		if name in htmlentitydefs.entitydefs:
			self.handle_data(htmlentitydefs.entitydefs[name])
		else:
			self.handle_data("&" + name + ";")
	def handle_data(self, data):
		# Only accumulate while inside a <textarea>.
		if self.curdata is not None:
			self.curdata += data
def JSONGet(ui, path):
	"""Fetch path from the code review server and return the parsed,
	fix_json-normalized JSON value, or None (with a warning) on any
	failure.
	"""
	try:
		raw = MySend(path, force_auth=False)
		typecheck(raw, str)
		return fix_json(json.loads(raw))
	except:
		ui.warn("JSONGet %s: %s\n" % (path, ExceptionDetail()))
		return None
# Clean up json parser output to match our expectations:
#   * all strings are UTF-8-encoded str, not unicode.
#   * missing fields are missing, not None,
#     so that d.get("foo", defaultvalue) works.
# Containers are normalized in place and the (possibly re-encoded)
# value is returned.
def fix_json(x):
	t = type(x)
	if t in (str, int, float, bool, type(None)):
		pass
	elif t is unicode:
		x = x.encode("utf-8")
	elif t is list:
		for i, item in enumerate(x):
			x[i] = fix_json(item)
	elif t is dict:
		# Collect None-valued keys first; deleting during iteration is unsafe.
		dead = [k for k in x if x[k] is None]
		for k in x:
			if x[k] is not None:
				x[k] = fix_json(x[k])
		for k in dead:
			del x[k]
	else:
		raise hg_util.Abort("unknown type " + str(type(x)) + " in fix_json")
	if type(x) is str:
		x = x.replace('\r\n', '\n')
	return x
def IsRietveldSubmitted(ui, clname, hex):
	"""Report whether Rietveld issue clname was submitted as changeset hex.
	Scans the issue's messages for the '*** Submitted as ... ***' note and
	prefix-matches the recorded revision (at least 8 hex digits) against hex.
	"""
	response = JSONGet(ui, "/api/" + clname + "?messages=true")
	if response is None:
		return False
	if testing:
		pattern = '\*\*\* Submitted as ([0-9a-f]+) \*\*\*'
	else:
		pattern = '\*\*\* Submitted as [^*]*?r=([0-9a-f]+)[^ ]* \*\*\*'
	for msg in response.get("messages", []):
		m = re.match(pattern, msg.get("text", ""))
		if m is None:
			continue
		shortrev = m.group(1)
		if len(shortrev) >= 8 and hex.startswith(shortrev):
			return True
	return False
def IsRietveldMailed(cl):
	"""Report whether the review-request mail for cl was already sent,
	detected via the standard greeting in the issue's message log."""
	for msg in cl.dict.get("messages", []):
		if "I'd like you to review this change" in msg.get("text", ""):
			return True
	return False
def DownloadCL(ui, repo, clname):
	"""Fetch CL clname and its newest patch set from Rietveld.
	Returns (cl, vers, diffdata, err): the CL object, the hg revision
	the diff was made against ("" if unknown), the raw diff text, and
	an error string ("" on success; the other values are None on error).
	"""
	set_status("downloading CL " + clname)
	cl, err = LoadCL(ui, repo, clname, web=True)
	if err != "":
		return None, None, None, "error loading CL %s: %s" % (clname, err)
	# Find most recent diff
	diffs = cl.dict.get("patchsets", [])
	if not diffs:
		return None, None, None, "CL has no patch sets"
	patchid = diffs[-1]
	patchset = JSONGet(ui, "/api/" + clname + "/" + str(patchid))
	if patchset is None:
		return None, None, None, "error loading CL patchset %s/%d" % (clname, patchid)
	if patchset.get("patchset", 0) != patchid:
		return None, None, None, "malformed patchset information"
	# The patchset message "diff -r VERS ..." records the base revision.
	vers = ""
	msg = patchset.get("message", "").split()
	if len(msg) >= 3 and msg[0] == "diff" and msg[1] == "-r":
		vers = msg[2]
	diff = "/download/issue" + clname + "_" + str(patchid) + ".diff"
	diffdata = MySend(diff, force_auth=False)
	# Print warning if email is not in CONTRIBUTORS file.
	email = cl.dict.get("owner_email", "")
	if not email:
		return None, None, None, "cannot find owner for %s" % (clname)
	him = FindContributor(ui, repo, email)
	me = FindContributor(ui, repo, None)
	if him == me:
		cl.mailed = IsRietveldMailed(cl)
	else:
		# Someone else's CL: remember the original author for submit.
		cl.copied_from = email
	return cl, vers, diffdata, ""
def MySend(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Run MySend1 maybe twice, because Rietveld is unreliable.
	A single HTTP 500 from the server is retried once after a
	2-second pause; every other failure propagates immediately.
	"""
	try:
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
	except Exception, e:
		if type(e) != urllib2.HTTPError or e.code != 500: # only retry on HTTP 500 error
			raise
		print >>sys.stderr, "Loading "+request_path+": "+ExceptionDetail()+"; trying again in 2 seconds."
		time.sleep(2)
		return MySend1(request_path, payload, content_type, timeout, force_auth, **kwargs)
# Like upload.py Send but only authenticates when the
# redirect is to www.google.com/accounts. This keeps
# unnecessary redirects from happening during testing.
def MySend1(request_path, payload=None,
		content_type="application/octet-stream",
		timeout=None, force_auth=True,
		**kwargs):
	"""Sends an RPC and returns the response.
	Args:
		request_path: The path to send the request to, eg /api/appversion/create.
		payload: The body of the request, or None to send an empty request.
		content_type: The Content-Type header to use.
		timeout: timeout in seconds; default None i.e. no timeout.
			(Note: for large requests on OS X, the timeout doesn't work right.)
		kwargs: Any keyword arguments are converted into query string parameters.
	Returns:
		The response body, as a string.
	"""
	# TODO: Don't require authentication. Let the server say
	# whether it is necessary.
	global rpc
	if rpc == None:
		rpc = GetRpcServer(upload_options)
	self = rpc
	if not self.authenticated and force_auth:
		self._Authenticate()
	# request_path None means "authenticate only" (see code_login).
	if request_path is None:
		return
	if timeout is None:
		timeout = 30 # seconds
	# Apply the timeout process-wide for the duration of this call.
	old_timeout = socket.getdefaulttimeout()
	socket.setdefaulttimeout(timeout)
	try:
		tries = 0
		while True:
			tries += 1
			args = dict(kwargs)
			url = "https://%s%s" % (self.host, request_path)
			if testing:
				url = url.replace("https://", "http://")
			if args:
				url += "?" + urllib.urlencode(args)
			req = self._CreateRequest(url=url, data=payload)
			req.add_header("Content-Type", content_type)
			try:
				f = self.opener.open(req)
				response = f.read()
				f.close()
				# Translate \r\n into \n, because Rietveld doesn't.
				response = response.replace('\r\n', '\n')
				# who knows what urllib will give us
				if type(response) == unicode:
					response = response.encode("utf-8")
				typecheck(response, str)
				return response
			except urllib2.HTTPError, e:
				if tries > 3:
					raise
				elif e.code == 401:
					# Not authenticated: log in and retry.
					self._Authenticate()
				elif e.code == 302:
					# Only follow redirects that demand a Google login.
					loc = e.info()["location"]
					if not loc.startswith('https://www.google.com/a') or loc.find('/ServiceLogin') < 0:
						return ''
					self._Authenticate()
				else:
					raise
	finally:
		socket.setdefaulttimeout(old_timeout)
def GetForm(url):
    """Fetch url via MySend and return its HTML form fields as a dict.

    Keys and values are returned as UTF-8 encoded byte strings; \r\n
    line endings in the values are normalized to \n.
    """
    f = FormParser()
    f.feed(ustr(MySend(url))) # f.feed wants unicode
    f.close()
    # convert back to utf-8 to restore sanity
    m = {}
    for k,v in f.map.items():
        m[k.encode("utf-8")] = v.replace("\r\n", "\n").encode("utf-8")
    return m
def EditDesc(issue, subject=None, desc=None, reviewers=None, cc=None, closed=False, private=False):
    """Upload new metadata (subject, description, reviewers, cc, flags)
    for an existing Rietveld issue; exits with status 2 on server error."""
    set_status("uploading change to description")
    form_fields = GetForm("/" + issue + "/edit")
    # Only overwrite the fields the caller actually supplied.
    text_updates = (
        ('subject', subject),
        ('description', desc),
        ('reviewers', reviewers),
        ('cc', cc),
    )
    for field_name, field_value in text_updates:
        if field_value is not None:
            form_fields[field_name] = field_value
    # Checkbox-style flags are only sent when enabled.
    for flag_name, enabled in (('closed', closed), ('private', private)):
        if enabled:
            form_fields[flag_name] = "checked"
    ctype, body = EncodeMultipartFormData(form_fields.items(), [])
    response = MySend("/" + issue + "/edit", body, content_type=ctype)
    if response != "":
        print >>sys.stderr, "Error editing description:\n" + "Sent form: \n", form_fields, "\n", response
        sys.exit(2)
def PostMessage(ui, issue, message, reviewers=None, cc=None, send_mail=True, subject=None):
set_status("uploading message")
form_fields = GetForm("/" + issue + "/publish")
if reviewers is not None:
form_fields['reviewers'] = reviewers
if cc is not None:
form_fields['cc'] = cc
if send_mail:
form_fields['send_mail'] = "checked"
else:
del form_fields['send_mail']
if subject is not None:
form_fields['subject'] = subject
form_fields['message'] = message
form_fields['message_only'] = '1' # Don't include draft comments
if reviewers is not None or cc is not None:
form_fields['message_only'] = '' # Must set '' in order to override cc/reviewer
ctype = "applications/x-www-form-urlencoded"
body = urllib.urlencode(form_fields)
response = MySend("/" + issue + "/publish", body, content_type=ctype)
if response != "":
print response
sys.exit(2)
class opt(object):
    """Plain attribute bag mimicking upload.py's parsed-options object;
    RietveldSetup fills it in field by field."""
    pass
def RietveldSetup(ui, repo):
    """Initialize the module's global Rietveld state from Mercurial config.

    Reads the codereview.server / codereview.email / force_google_account
    settings, builds the upload_options object consumed by GetRpcServer,
    and records the newest release-branch.go* name from the repository's
    branch map in the releaseBranch global.
    """
    global force_google_account
    global rpc
    global server
    global server_url_base
    global upload_options
    global verbosity
    if not ui.verbose:
        verbosity = 0
    # Config options.
    x = ui.config("codereview", "server")
    if x is not None:
        server = x
    # TODO(rsc): Take from ui.username?
    email = None
    x = ui.config("codereview", "email")
    if x is not None:
        email = x
    server_url_base = "https://" + server + "/"
    if testing:
        server_url_base = server_url_base.replace("https://", "http://")
    force_google_account = ui.configbool("codereview", "force_google_account", False)
    upload_options = opt()
    upload_options.email = email
    upload_options.host = None
    upload_options.verbose = 0
    upload_options.description = None
    upload_options.description_file = None
    upload_options.reviewers = None
    upload_options.cc = None
    upload_options.message = None
    upload_options.issue = None
    upload_options.download_base = False
    upload_options.send_mail = False
    upload_options.vcs = None
    upload_options.server = server
    upload_options.save_cookies = True
    if testing:
        upload_options.save_cookies = False
        upload_options.email = "test@example.com"
    rpc = None
    global releaseBranch
    tags = repo.branchmap().keys()
    if 'release-branch.go10' in tags:
        # NOTE(rsc): This tags.sort is going to get the wrong
        # answer when comparing release-branch.go9 with
        # release-branch.go10. It will be a while before we care.
        raise hg_util.Abort('tags.sort needs to be fixed for release-branch.go10')
    tags.sort()
    # The lexically largest release-branch.go* name wins.
    for t in tags:
        if t.startswith('release-branch.go'):
            releaseBranch = t
def workbranch(name):
    """Report whether name is a branch that work may be done on.

    Work happens on the "default" branch or on any dev.* feature branch.
    """
    if name == "default":
        return True
    return name.startswith('dev.')
#######################################################################
# http://codereview.appspot.com/static/upload.py, heavily edited.
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import cookielib
import getpass
import logging
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
# The logging verbosity:
#  0: Errors only.
#  1: Status messages.
#  2: Info logs.
#  3: Debug logs.
verbosity = 1

# Max size of patch or base file accepted for upload (bytes).
MAX_UPLOAD_SIZE = 900 * 1024

# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = [
    'application/javascript',
    'application/x-javascript',
    'application/x-freemind'
]
def GetEmail(prompt):
    """Prompts the user for their email address and returns it.

    The last used email address is saved to a file and offered up as a suggestion
    to the user. If the user presses enter without typing in anything the last
    used email address is used. If the user enters a new address, it is saved
    for next time we prompt.
    """
    last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
    last_email = ""
    if os.path.exists(last_email_file_name):
        try:
            last_email_file = open(last_email_file_name, "r")
            last_email = last_email_file.readline().strip("\n")
            last_email_file.close()
            prompt += " [%s]" % last_email
        except IOError, e:
            # Best effort only: an unreadable cache file just means no
            # suggestion is offered.
            pass
    email = raw_input(prompt + ": ").strip()
    if email:
        try:
            last_email_file = open(last_email_file_name, "w")
            last_email_file.write(email)
            last_email_file.close()
        except IOError, e:
            # Failing to save the address for next time is not fatal.
            pass
    else:
        email = last_email
    return email
def StatusUpdate(msg):
    """Print a status message to stdout.

    If the module-level 'verbosity' is greater than 0, print the message.

    Args:
      msg: The string to print.
    """
    if verbosity > 0:
        print msg
def ErrorExit(msg):
    """Print an error message to stderr and exit."""
    print >>sys.stderr, msg
    # Exit status 1 signals failure to the calling process.
    sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
    """Raised to indicate there was an error authenticating with ClientLogin."""
    def __init__(self, url, code, msg, headers, args):
        urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
        # args is the parsed key=value response dict from ClientLogin;
        # its "Error" entry carries the failure reason string.
        self.args = args
        # .reason is now a read-only property based on .msg
        # this means we ignore 'msg', but that seems to work fine.
        self.msg = args["Error"]
class AbstractRpcServer(object):
    """Provides a common interface for a simple RPC server."""

    def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False):
        """Creates a new HttpRpcServer.

        Args:
          host: The host to send requests to.
          auth_function: A function that takes no arguments and returns an
            (email, password) tuple when called. Will be called if authentication
            is required.
          host_override: The host header to send to the server (defaults to host).
          extra_headers: A dict of extra headers to append to every request.
          save_cookies: If True, save the authentication cookies to local disk.
            If False, use an in-memory cookiejar instead. Subclasses must
            implement this functionality. Defaults to False.
        """
        self.host = host
        self.host_override = host_override
        self.auth_function = auth_function
        self.authenticated = False
        self.extra_headers = extra_headers
        self.save_cookies = save_cookies
        # _GetOpener is supplied by the concrete subclass.
        self.opener = self._GetOpener()
        if self.host_override:
            logging.info("Server: %s; Host: %s", self.host, self.host_override)
        else:
            logging.info("Server: %s", self.host)

    def _GetOpener(self):
        """Returns an OpenerDirector for making HTTP requests.

        Returns:
          A urllib2.OpenerDirector object.
        """
        raise NotImplementedError()

    def _CreateRequest(self, url, data=None):
        """Creates a new urllib request with the configured extra headers."""
        logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
        req = urllib2.Request(url, data=data)
        if self.host_override:
            req.add_header("Host", self.host_override)
        for key, value in self.extra_headers.iteritems():
            req.add_header(key, value)
        return req

    def _GetAuthToken(self, email, password):
        """Uses ClientLogin to authenticate the user, returning an auth token.

        Args:
          email: The user's email address
          password: The user's password

        Raises:
          ClientLoginError: If there was an error authenticating with ClientLogin.
          HTTPError: If there was some other form of HTTP error.

        Returns:
          The authentication token returned by ClientLogin.
        """
        account_type = "GOOGLE"
        if self.host.endswith(".google.com") and not force_google_account:
            # Needed for use inside Google.
            account_type = "HOSTED"
        req = self._CreateRequest(
            url="https://www.google.com/accounts/ClientLogin",
            data=urllib.urlencode({
                "Email": email,
                "Passwd": password,
                "service": "ah",
                "source": "rietveld-codereview-upload",
                "accountType": account_type,
            }),
        )
        try:
            response = self.opener.open(req)
            response_body = response.read()
            # Response body is newline-separated key=value pairs;
            # the "Auth" entry holds the token.
            response_dict = dict(x.split("=") for x in response_body.split("\n") if x)
            return response_dict["Auth"]
        except urllib2.HTTPError, e:
            if e.code == 403:
                # 403 carries a parseable error body (e.g. Error=BadAuthentication).
                body = e.read()
                response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
                raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict)
            else:
                raise

    def _GetAuthCookie(self, auth_token):
        """Fetches authentication cookies for an authentication token.

        Args:
          auth_token: The authentication token returned by ClientLogin.

        Raises:
          HTTPError: If there was an error fetching the authentication cookies.
        """
        # This is a dummy value to allow us to identify when we're successful.
        continue_location = "http://localhost/"
        args = {"continue": continue_location, "auth": auth_token}
        reqUrl = "https://%s/_ah/login?%s" % (self.host, urllib.urlencode(args))
        if testing:
            reqUrl = reqUrl.replace("https://", "http://")
        req = self._CreateRequest(reqUrl)
        try:
            response = self.opener.open(req)
        except urllib2.HTTPError, e:
            response = e
        # Success is a 302 redirect back to our dummy continue location;
        # anything else means the cookie fetch failed.
        if (response.code != 302 or
                response.info()["location"] != continue_location):
            raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp)
        self.authenticated = True

    def _Authenticate(self):
        """Authenticates the user.

        The authentication process works as follows:
         1) We get a username and password from the user
         2) We use ClientLogin to obtain an AUTH token for the user
            (see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
         3) We pass the auth token to /_ah/login on the server to obtain an
            authentication cookie. If login was successful, it tries to redirect
            us to the URL we provided.

        If we attempt to access the upload API without first obtaining an
        authentication cookie, it returns a 401 response (or a 302) and
        directs us to authenticate ourselves with ClientLogin.
        """
        # Give the user up to three attempts at valid credentials.
        for i in range(3):
            credentials = self.auth_function()
            try:
                auth_token = self._GetAuthToken(credentials[0], credentials[1])
            except ClientLoginError, e:
                # Bad password: re-prompt; everything else is terminal
                # (break to give up silently, or raise for unknown errors).
                if e.msg == "BadAuthentication":
                    print >>sys.stderr, "Invalid username or password."
                    continue
                if e.msg == "CaptchaRequired":
                    print >>sys.stderr, (
                        "Please go to\n"
                        "https://www.google.com/accounts/DisplayUnlockCaptcha\n"
                        "and verify you are a human. Then try again.")
                    break
                if e.msg == "NotVerified":
                    print >>sys.stderr, "Account not verified."
                    break
                if e.msg == "TermsNotAgreed":
                    print >>sys.stderr, "User has not agreed to TOS."
                    break
                if e.msg == "AccountDeleted":
                    print >>sys.stderr, "The user account has been deleted."
                    break
                if e.msg == "AccountDisabled":
                    print >>sys.stderr, "The user account has been disabled."
                    break
                if e.msg == "ServiceDisabled":
                    print >>sys.stderr, "The user's access to the service has been disabled."
                    break
                if e.msg == "ServiceUnavailable":
                    print >>sys.stderr, "The service is not available; try again later."
                    break
                raise
            self._GetAuthCookie(auth_token)
            return

    def Send(self, request_path, payload=None,
             content_type="application/octet-stream",
             timeout=None,
             **kwargs):
        """Sends an RPC and returns the response.

        Args:
          request_path: The path to send the request to, eg /api/appversion/create.
          payload: The body of the request, or None to send an empty request.
          content_type: The Content-Type header to use.
          timeout: timeout in seconds; default None i.e. no timeout.
            (Note: for large requests on OS X, the timeout doesn't work right.)
          kwargs: Any keyword arguments are converted into query string parameters.

        Returns:
          The response body, as a string.
        """
        # TODO: Don't require authentication. Let the server say
        # whether it is necessary.
        if not self.authenticated:
            self._Authenticate()
        # Install the timeout globally for the duration of the request and
        # restore the previous default afterwards.
        old_timeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout(timeout)
        try:
            tries = 0
            while True:
                tries += 1
                args = dict(kwargs)
                url = "https://%s%s" % (self.host, request_path)
                if testing:
                    url = url.replace("https://", "http://")
                if args:
                    url += "?" + urllib.urlencode(args)
                req = self._CreateRequest(url=url, data=payload)
                req.add_header("Content-Type", content_type)
                try:
                    f = self.opener.open(req)
                    response = f.read()
                    f.close()
                    return response
                except urllib2.HTTPError, e:
                    # 401/302 mean we need (re-)authentication; retry up
                    # to 3 times before giving up.
                    if tries > 3:
                        raise
                    elif e.code == 401 or e.code == 302:
                        self._Authenticate()
                    else:
                        raise
        finally:
            socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
    """Provides a simplified RPC-style interface for HTTP requests."""

    def _Authenticate(self):
        """Save the cookie jar after authentication."""
        super(HttpRpcServer, self)._Authenticate()
        if self.save_cookies:
            StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
            self.cookie_jar.save()

    def _GetOpener(self):
        """Returns an OpenerDirector that supports cookies and ignores redirects.

        Returns:
          A urllib2.OpenerDirector object.
        """
        # Build the opener by hand (instead of build_opener) so no
        # HTTPRedirectHandler is installed: redirects must surface as
        # HTTPErrors for the auth logic to see them.
        opener = urllib2.OpenerDirector()
        opener.add_handler(urllib2.ProxyHandler())
        opener.add_handler(urllib2.UnknownHandler())
        opener.add_handler(urllib2.HTTPHandler())
        opener.add_handler(urllib2.HTTPDefaultErrorHandler())
        opener.add_handler(urllib2.HTTPSHandler())
        opener.add_handler(urllib2.HTTPErrorProcessor())
        if self.save_cookies:
            self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies_" + server)
            self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
            if os.path.exists(self.cookie_file):
                try:
                    self.cookie_jar.load()
                    self.authenticated = True
                    StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file)
                except (cookielib.LoadError, IOError):
                    # Failed to load cookies - just ignore them.
                    pass
            else:
                # Create an empty cookie file with mode 600
                fd = os.open(self.cookie_file, os.O_CREAT, 0600)
                os.close(fd)
            # Always chmod the cookie file
            os.chmod(self.cookie_file, 0600)
        else:
            # Don't save cookies across runs of update.py.
            self.cookie_jar = cookielib.CookieJar()
        opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
        return opener
def GetRpcServer(options):
    """Returns an instance of an AbstractRpcServer.

    Returns:
      A new AbstractRpcServer, on which RPC calls can be made.
    """
    rpc_server_class = HttpRpcServer

    def GetUserCredentials():
        """Prompts the user for a username and password."""
        # Disable status prints so they don't obscure the password prompt.
        global global_status
        st = global_status
        global_status = None
        email = options.email
        if email is None:
            email = GetEmail("Email (login for uploading to %s)" % options.server)
        password = getpass.getpass("Password for %s: " % email)
        # Put status back.
        global_status = st
        return (email, password)

    # If this is the dev_appserver, use fake authentication.
    host = (options.host or options.server).lower()
    if host == "localhost" or host.startswith("localhost:"):
        email = options.email
        if email is None:
            email = "test@example.com"
            logging.info("Using debug user %s. Override with --email" % email)
        server = rpc_server_class(
            options.server,
            lambda: (email, "password"),
            host_override=options.host,
            extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email},
            save_cookies=options.save_cookies)
        # Don't try to talk to ClientLogin.
        server.authenticated = True
        return server

    return rpc_server_class(options.server, GetUserCredentials,
                            host_override=options.host, save_cookies=options.save_cookies)
def EncodeMultipartFormData(fields, files):
    """Encode form fields for multipart/form-data.

    Args:
      fields: A sequence of (name, value) elements for regular form fields.
      files: A sequence of (name, filename, value) elements for data to be
        uploaded as files.
    Returns:
      (content_type, body) ready for httplib.HTTP instance.
    Source:
      http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
    """
    BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
    CRLF = '\r\n'
    parts = []
    # Plain form fields: boundary, disposition header, blank line, value.
    for (name, value) in fields:
        typecheck(name, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"' % name,
            '',
            value,
        ])
    # File fields additionally carry a filename and a guessed content type.
    for (name, filename, value) in files:
        typecheck(name, str)
        typecheck(filename, str)
        typecheck(value, str)
        parts.extend([
            '--' + BOUNDARY,
            'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
            'Content-Type: %s' % GetContentType(filename),
            '',
            value,
        ])
    parts.append('--' + BOUNDARY + '--')
    parts.append('')
    body = CRLF.join(parts)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def GetContentType(filename):
    """Helper to guess the content-type from the filename."""
    guessed, _ = mimetypes.guess_type(filename)
    if guessed is None:
        # Unknown extension: fall back to the generic binary type.
        return 'application/octet-stream'
    return guessed
# Use a shell for subcommands on Windows to get a PATH search
# (subprocess on Windows otherwise bypasses PATH lookup for bare names).
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True, env=os.environ):
    """Executes a command and returns the output from stdout and the return code.

    Args:
      command: Command to execute.
      print_output: If True, the output is printed to stdout.
        If False, both stdout and stderr are ignored.
      universal_newlines: Use universal_newlines flag (default: True).
      env: Environment mapping for the child process (default: os.environ).

    Returns:
      Tuple (output, return code)
    """
    logging.info("Running %s", command)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         shell=use_shell, universal_newlines=universal_newlines, env=env)
    if print_output:
        # Stream stdout line by line while also accumulating it.
        output_array = []
        while True:
            line = p.stdout.readline()
            if not line:
                break
            print line.strip("\n")
            output_array.append(line)
        output = "".join(output_array)
    else:
        output = p.stdout.read()
    p.wait()
    # stderr is read only after the child exits; echoed only in
    # print_output mode.
    errout = p.stderr.read()
    if print_output and errout:
        print >>sys.stderr, errout
    p.stdout.close()
    p.stderr.close()
    return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=os.environ):
    """Run command and return its stdout, aborting the program on failure.

    Exits via ErrorExit on a non-zero exit status, or on empty output
    unless silent_ok is set.
    """
    output, status = RunShellWithReturnCode(command, print_output, universal_newlines, env)
    if status:
        ErrorExit("Got error status from %s:\n%s" % (command, output))
    if not (silent_ok or output):
        ErrorExit("No output from %s" % command)
    return output
class VersionControlSystem(object):
    """Abstract base class providing an interface to the VCS."""

    def __init__(self, options):
        """Constructor.

        Args:
          options: Command line options.
        """
        self.options = options

    def GenerateDiff(self, args):
        """Return the current diff as a string.

        Args:
          args: Extra arguments to pass to the diff command.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def CheckForUnknownFiles(self):
        """Show an "are you sure?" prompt if there are unknown files."""
        unknown_files = self.GetUnknownFiles()
        if unknown_files:
            print "The following files are not added to version control:"
            for line in unknown_files:
                print line
            prompt = "Are you sure to continue?(y/N) "
            answer = raw_input(prompt).strip()
            if answer != "y":
                ErrorExit("User aborted")

    def GetBaseFile(self, filename):
        """Get the content of the upstream version of a file.

        Returns:
          A tuple (base_content, new_content, is_binary, status)
            base_content: The contents of the base file.
            new_content: For text files, this is empty. For binary files, this is
              the contents of the new file, since the diff output won't contain
              information to reconstruct the current file.
            is_binary: True iff the file is binary.
            status: The status of the file.
        """
        raise NotImplementedError(
            "abstract method -- subclass %s must override" % self.__class__)

    def GetBaseFiles(self, diff):
        """Helper that calls GetBase file for each file in the patch.

        Returns:
          A dictionary that maps from filename to GetBaseFile's tuple. Filenames
          are retrieved based on lines that start with "Index:" or
          "Property changes on:".
        """
        files = {}
        for line in diff.splitlines(True):
            if line.startswith('Index:') or line.startswith('Property changes on:'):
                unused, filename = line.split(':', 1)
                # On Windows if a file has property changes its filename uses '\'
                # instead of '/'.
                filename = to_slash(filename.strip())
                files[filename] = self.GetBaseFile(filename)
        return files

    def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
                        files):
        """Uploads the base files (and if necessary, the current ones as well)."""

        def UploadFile(filename, file_id, content, is_binary, status, is_base):
            """Uploads a file to the server."""
            set_status("uploading " + filename)
            file_too_large = False
            if is_base:
                type = "base"
            else:
                type = "current"
            if len(content) > MAX_UPLOAD_SIZE:
                # Still register the file with the server, but send an
                # empty body flagged as too large.
                print ("Not uploading the %s file for %s because it's too large." %
                       (type, filename))
                file_too_large = True
                content = ""
            checksum = md5(content).hexdigest()
            if options.verbose > 0 and not file_too_large:
                print "Uploading %s file for %s" % (type, filename)
            url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
            form_fields = [
                ("filename", filename),
                ("status", status),
                ("checksum", checksum),
                ("is_binary", str(is_binary)),
                ("is_current", str(not is_base)),
            ]
            if file_too_large:
                form_fields.append(("file_too_large", "1"))
            if options.email:
                form_fields.append(("user", options.email))
            ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)])
            response_body = rpc_server.Send(url, body, content_type=ctype)
            if not response_body.startswith("OK"):
                StatusUpdate(" --> %s" % response_body)
                sys.exit(1)

        # Don't want to spawn too many threads, nor do we want to
        # hit Rietveld too hard, or it will start serving 500 errors.
        # When 8 works, it's no better than 4, and sometimes 8 is
        # too many for Rietveld to handle.
        MAX_PARALLEL_UPLOADS = 4
        sema = threading.BoundedSemaphore(MAX_PARALLEL_UPLOADS)
        upload_threads = []
        finished_upload_threads = []

        class UploadFileThread(threading.Thread):
            # Worker thread: uploads one file, then records itself as
            # finished and releases its semaphore slot.
            def __init__(self, args):
                threading.Thread.__init__(self)
                self.args = args
            def run(self):
                UploadFile(*self.args)
                finished_upload_threads.append(self)
                sema.release()

        def StartUploadFile(*args):
            # Block until an upload slot is free, reap any finished
            # workers, then start a new upload thread.
            sema.acquire()
            while len(finished_upload_threads) > 0:
                t = finished_upload_threads.pop()
                upload_threads.remove(t)
                t.join()
            t = UploadFileThread(args)
            upload_threads.append(t)
            t.start()

        def WaitForUploads():
            for t in upload_threads:
                t.join()

        # Invert patch_list into a filename -> patch-key map.
        patches = dict()
        [patches.setdefault(v, k) for k, v in patch_list]
        for filename in patches.keys():
            base_content, new_content, is_binary, status = files[filename]
            file_id_str = patches.get(filename)
            if file_id_str.find("nobase") != -1:
                # "nobase" keys mean the server doesn't want the base file;
                # the numeric id follows the last underscore.
                base_content = None
                file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
            file_id = int(file_id_str)
            if base_content != None:
                StartUploadFile(filename, file_id, base_content, is_binary, status, True)
            if new_content != None:
                StartUploadFile(filename, file_id, new_content, is_binary, status, False)
        WaitForUploads()

    def IsImage(self, filename):
        """Returns true if the filename has an image extension."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False
        return mimetype.startswith("image/")

    def IsBinary(self, filename):
        """Returns true if the guessed mimetype isn't in the text group."""
        mimetype = mimetypes.guess_type(filename)[0]
        if not mimetype:
            return False # e.g. README, "real" binaries usually have an extension
        # special case for text files which don't start with text/
        if mimetype in TEXT_MIMETYPES:
            return False
        return not mimetype.startswith("text/")
class FakeMercurialUI(object):
    """Minimal stand-in for a Mercurial ui object that captures output.

    Lets hg commands run in-process while their would-be console output is
    accumulated in self.output; config lookups are delegated to the real
    global_ui.
    """
    def __init__(self):
        self.quiet = True
        self.output = ''
    def write(self, *args, **opts):
        self.output += ' '.join(args)
    def copy(self):
        return self
    def status(self, *args, **opts):
        pass
    def formatter(self, topic, opts):
        from mercurial.formatter import plainformatter
        return plainformatter(self, topic, opts)
    def readconfig(self, *args, **opts):
        pass
    def expandpath(self, *args, **opts):
        return global_ui.expandpath(*args, **opts)
    def configitems(self, *args, **opts):
        return global_ui.configitems(*args, **opts)
    def config(self, *args, **opts):
        return global_ui.config(*args, **opts)
use_hg_shell = False # set to True to shell out to hg always; slower
class MercurialVCS(VersionControlSystem):
    """Implementation of the VersionControlSystem interface for Mercurial."""

    def __init__(self, options, ui, repo):
        super(MercurialVCS, self).__init__(options)
        self.ui = ui
        self.repo = repo
        # Cached output of "hg status -C"; filled lazily by get_hg_status.
        self.status = None
        # Absolute path to repository (we can be in a subdir)
        self.repo_dir = os.path.normpath(repo.root)
        # Compute the subdir
        cwd = os.path.normpath(os.getcwd())
        assert cwd.startswith(self.repo_dir)
        self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
        # With a mercurial queue applied, diff against the queue parent.
        mqparent, err = RunShellWithReturnCode(['hg', 'log', '--rev', 'qparent', '--template={node}'])
        if not err and mqparent != "":
            self.base_rev = mqparent
        else:
            out = RunShell(["hg", "parents", "-q", "--template={node} {branch}"], silent_ok=True).strip()
            if not out:
                # No revisions; use 0 to mean a repository with nothing.
                out = "0:0 default"
            # Find parent along current branch.
            branch = repo[None].branch()
            base = ""
            for line in out.splitlines():
                fields = line.strip().split(' ')
                if fields[1] == branch:
                    base = fields[0]
                    break
            if base == "":
                # Use the first parent
                base = out.strip().split(' ')[0]
            self.base_rev = base

    def _GetRelPath(self, filename):
        """Get relative path of a file according to the current directory,
        given its logical path in the repo."""
        assert filename.startswith(self.subdir), (filename, self.subdir)
        return filename[len(self.subdir):].lstrip(r"\/")

    def GenerateDiff(self, extra_args):
        """Return an svn-style diff of the working tree against base_rev."""
        # If no file specified, restrict to the current subdir
        extra_args = extra_args or ["."]
        cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
        data = RunShell(cmd, silent_ok=True)
        svndiff = []
        filecount = 0
        for line in data.splitlines():
            m = re.match("diff --git a/(\S+) b/(\S+)", line)
            if m:
                # Modify line to make it look like as it comes from svn diff.
                # With this modification no changes on the server side are required
                # to make upload.py work with Mercurial repos.
                # NOTE: for proper handling of moved/copied files, we have to use
                # the second filename.
                filename = m.group(2)
                svndiff.append("Index: %s" % filename)
                svndiff.append("=" * 67)
                filecount += 1
                logging.info(line)
            else:
                svndiff.append(line)
        if not filecount:
            ErrorExit("No valid patches found in output from hg diff")
        return "\n".join(svndiff) + "\n"

    def GetUnknownFiles(self):
        """Return a list of files unknown to the VCS."""
        args = []
        status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
                          silent_ok=True)
        unknown_files = []
        for line in status.splitlines():
            st, fn = line.split(" ", 1)
            if st == "?":
                unknown_files.append(fn)
        return unknown_files

    def get_hg_status(self, rev, path):
        """Return the "hg status -C" line(s) for path at rev.

        Returns a one-element list for a plain status line, or a
        two-element list when the following line is a copy-source line.
        """
        # We'd like to use 'hg status -C path', but that is buggy
        # (see http://mercurial.selenic.com/bts/issue3023).
        # Instead, run 'hg status -C' without a path
        # and skim the output for the path we want.
        if self.status is None:
            if use_hg_shell:
                out = RunShell(["hg", "status", "-C", "--rev", rev])
            else:
                fui = FakeMercurialUI()
                ret = hg_commands.status(fui, self.repo, *[], **{'rev': [rev], 'copies': True})
                if ret:
                    raise hg_util.Abort(ret)
                out = fui.output
            self.status = out.splitlines()
        for i in range(len(self.status)):
            # line is
            #    A path
            #    M path
            # etc
            line = to_slash(self.status[i])
            if line[2:] == path:
                # NOTE(review): the literal below looks whitespace-collapsed;
                # upstream upload.py compares the next line's first two
                # characters against two spaces ('  ') to detect a
                # copy-source continuation line -- verify before relying on it.
                if i+1 < len(self.status) and self.status[i+1][:2] == ' ':
                    return self.status[i:i+2]
                return self.status[i:i+1]
        raise hg_util.Abort("no status for " + path)

    def GetBaseFile(self, filename):
        """Return (base_content, new_content, is_binary, status) for filename."""
        set_status("inspecting " + filename)
        # "hg status" and "hg cat" both take a path relative to the current subdir
        # rather than to the repo root, but "hg diff" has given us the full path
        # to the repo root.
        base_content = ""
        new_content = None
        is_binary = False
        oldrelpath = relpath = self._GetRelPath(filename)
        out = self.get_hg_status(self.base_rev, relpath)
        status, what = out[0].split(' ', 1)
        if len(out) > 1 and status == "A" and what == relpath:
            # An add with a copy source is really a modification of the source.
            oldrelpath = out[1].strip()
            status = "M"
        if ":" in self.base_rev:
            base_rev = self.base_rev.split(":", 1)[0]
        else:
            base_rev = self.base_rev
        if status != "A":
            if use_hg_shell:
                base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath], silent_ok=True)
            else:
                base_content = str(self.repo[base_rev][oldrelpath].data())
            is_binary = "\0" in base_content # Mercurial's heuristic
        if status != "R":
            new_content = open(relpath, "rb").read()
            is_binary = is_binary or "\0" in new_content
        if is_binary and base_content and use_hg_shell:
            # Fetch again without converting newlines
            base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
                                    silent_ok=True, universal_newlines=False)
        if not is_binary or not self.IsImage(relpath):
            new_content = None
        return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
    """Splits a patch into separate pieces for each file.

    Args:
      data: A string containing the output of svn diff.

    Returns:
      A list of 2-tuple (filename, text) where text is the svn diff output
      pertaining to filename.
    """
    patches = []
    current_name = None
    current_lines = []
    for line in data.splitlines(True):
        next_name = None
        if line.startswith('Index:'):
            next_name = line.split(':', 1)[1].strip()
        elif line.startswith('Property changes on:'):
            # When a file is modified, paths use '/' between directories,
            # however when a property is modified '\' is used on Windows.
            # Make them the same otherwise the file shows up twice.
            prop_name = to_slash(line.split(':', 1)[1].strip())
            if prop_name != current_name:
                # File has property changes but no modifications;
                # start a new diff section for it.
                next_name = prop_name
        if next_name:
            # Flush the section collected so far and begin a new one.
            if current_name and current_lines:
                patches.append((current_name, ''.join(current_lines)))
            current_name = next_name
            current_lines = [line]
            continue
        if current_lines is not None:
            current_lines.append(line)
    if current_name and current_lines:
        patches.append((current_name, ''.join(current_lines)))
    return patches
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
    """Uploads a separate patch for each file in the diff output.

    Returns a list of [patch_key, filename] for each file.
    """
    patches = SplitPatch(data)
    rv = []
    for patch in patches:
        set_status("uploading patch for " + patch[0])
        if len(patch[1]) > MAX_UPLOAD_SIZE:
            # Oversized patches are skipped entirely (not registered).
            print ("Not uploading the patch for " + patch[0] +
                   " because the file is too large.")
            continue
        form_fields = [("filename", patch[0])]
        if not options.download_base:
            form_fields.append(("content_upload", "1"))
        files = [("data", "data.diff", patch[1])]
        ctype, body = EncodeMultipartFormData(form_fields, files)
        url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
        print "Uploading patch for " + patch[0]
        response_body = rpc_server.Send(url, body, content_type=ctype)
        # The server replies "OK" on the first line, then the patch key.
        lines = response_body.splitlines()
        if not lines or lines[0] != "OK":
            StatusUpdate(" --> %s" % response_body)
            sys.exit(1)
        rv.append([lines[1], patch[0]])
    return rv
| anlhord/gofrontend | support/codereview/codereview.py | Python | bsd-3-clause | 111,287 | [
"VisIt"
] | 7d02c127dfffdf428c0c722fae1bb1fa1448940f705850377997897d7253e2c0 |
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
# Import early because this import has monkey-patching side effects
from scancode.pool import get_pool
import codecs
from collections import OrderedDict
from functools import partial
import os
from os.path import expanduser
from os.path import abspath
import sys
from time import time
import traceback
from types import GeneratorType
import click
click.disable_unicode_literals_warning = True
from click.termui import style
from commoncode import filetype
from commoncode import fileutils
from commoncode import ignore
import plugincode.output
from scancode import __version__ as version
from scancode.api import get_copyrights
from scancode.api import get_emails
from scancode.api import get_file_infos
from scancode.api import _empty_file_infos
from scancode.api import get_licenses
from scancode.api import get_package_infos
from scancode.api import get_urls
from scancode.cache import ScanFileCache
from scancode.cache import get_scans_cache_class
from scancode.interrupt import DEFAULT_TIMEOUT
from scancode.interrupt import interruptible
from scancode.interrupt import TimeoutError
from scancode.utils import BaseCommand
from scancode.utils import compute_fn_max_len
from scancode.utils import fixed_width_file_name
from scancode.utils import get_relative_path
from scancode.utils import progressmanager
# Shortcut: echo to stderr so progress/error messages never pollute the
# scan results written to stdout.
echo_stderr = partial(click.secho, err=True)
# Python 2 and 3 support
try:
    # Python 2: rebind str/bytes so text handling is unicode-based
    unicode
    str_orig = str
    bytes = str
    str = unicode
except NameError:
    # Python 3: str is already unicode
    unicode = str
# this will init the plugins
plugincode.output.initialize()
info_text = '''
ScanCode scans code and other files for origin and license.
Visit https://github.com/nexB/scancode-toolkit/ for support and download.
'''
# Load the NOTICE file shipped next to this module and split it into the
# notice proper, the acknowledgment snippet and the extra notices, using
# blank-line delimiters. NOTE(review): this raises at import time if the
# NOTICE file is missing or not formatted with these exact delimiters.
notice_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'NOTICE')
notice_text = open(notice_path).read()
delimiter = '\n\n\n'
[notice_text, extra_notice_text] = notice_text.split(delimiter, 1)
extra_notice_text = delimiter + extra_notice_text
delimiter = '\n\n '
[notice_text, acknowledgment_text] = notice_text.split(delimiter, 1)
acknowledgment_text = delimiter + acknowledgment_text
# single-line form of the acknowledgment, embedded in scan outputs
notice = acknowledgment_text.strip().replace(' ', '')
def print_about(ctx, param, value):
    """Eager --about callback: print the ScanCode about/notice text and exit."""
    if ctx.resilient_parsing or not value:
        return
    about_blurb = info_text + notice_text + acknowledgment_text + extra_notice_text
    click.echo(about_blurb)
    ctx.exit()
examples_text = '''
Scancode command lines examples:
(Note for Windows: use '\\' back slash instead of '/' forward slash for paths.)
Scan the 'samples' directory for licenses and copyrights. Save scan results to
an HTML app file for interactive scan results navigation. When the scan is done,
open 'scancode_result.html' in your web browser. Note that additional app files
are saved in a directory named 'scancode_result_files':
scancode --format html-app samples/ scancode_result.html
Scan a directory for licenses and copyrights. Save scan results to an
HTML file:
scancode --format html samples/zlib scancode_result.html
Scan a single file for copyrights. Print scan results to stdout as JSON:
scancode --copyright samples/zlib/zlib.h
Scan a single file for licenses, print verbose progress to stderr as each
file is scanned. Save scan to a JSON file:
scancode --license --verbose samples/zlib/zlib.h licenses.json
Scan a directory explicitly for licenses and copyrights. Redirect JSON scan
results to a file:
scancode -f json -l -c samples/zlib/ > scan.json
Scan a directory while ignoring a single file. Print scan results to stdout as JSON:
scancode --ignore README samples/
Scan a directory while ignoring all files with txt extension. Print scan results to
stdout as JSON (It is recommended to use quoted glob patterns to prevent pattern
expansion by the shell):
scancode --ignore "*.txt" samples/
Special characters supported in GLOB pattern:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any character not in seq
For a literal match, wrap the meta-characters in brackets. For example, '[?]' matches the character '?'.
For glob see https://en.wikipedia.org/wiki/Glob_(programming).
Note: Glob patterns cannot be applied to path as strings, for e.g.
scancode --ignore "samples*licenses" samples/
will not ignore "samples/JGroups/licenses".
Scan a directory while ignoring multiple files (or glob patterns). Print the scan
results to stdout as JSON:
scancode --ignore README --ignore "*.txt" samples/
To extract archives, see the 'extractcode' command instead.
'''
def print_examples(ctx, param, value):
    """Eager --examples callback: print command-line usage examples and exit."""
    if ctx.resilient_parsing or not value:
        return
    click.echo(examples_text)
    ctx.exit()
def print_version(ctx, param, value):
    """Eager --version callback: print the ScanCode version and exit."""
    if ctx.resilient_parsing or not value:
        return
    click.echo('ScanCode version %s' % version)
    ctx.exit()
def reindex_licenses(ctx, param, value):
    """Eager --reindex-licenses callback: check/rebuild the cached license
    index, then exit."""
    if ctx.resilient_parsing or not value:
        return
    # imported lazily: loading the license machinery is expensive and only
    # needed for this one code path
    from licensedcode import cache
    click.echo('Checking and rebuilding the license index...')
    cache.reindex()
    click.echo('Done.')
    ctx.exit()
epilog_text = '''\b\bExamples (use --examples for more):
\b
Scan the 'samples' directory for licenses and copyrights.
Save scan results to a JSON file:
scancode --format json samples scancode_result.json
\b
Scan the 'samples' directory for licenses and copyrights. Save scan results to
an HTML app file for interactive web browser results navigation. Additional app
files are saved to the 'myscan_files' directory:
scancode --format html-app samples myscan.html
Note: when you run scancode, a progress bar is displayed with a counter of the
number of files processed. Use --verbose to display file-by-file progress.
'''
class ScanCommand(BaseCommand):
    # Short usage hint appended by BaseCommand to usage-error messages.
    short_usage_help = '''
Try 'scancode --help' for help on options and arguments.'''

    def get_params(self, ctx):
        """
        Add options returned by plugins to the params list
        """
        # NOTE(review): super(BaseCommand, self) skips BaseCommand's own
        # get_params and dispatches to BaseCommand's parent (click.Command).
        # Confirm this is intentional and not a typo for
        # super(ScanCommand, self).
        return super(BaseCommand, self).get_params(ctx)
def validate_formats(ctx, param, value):
    """
    click callback validating the --format value: either the (case
    insensitive) name of a registered format plugin, or the path to a
    custom template file. Return the normalized value.
    Raise a click.BadParameter on errors.
    """
    value_lower = value.lower()
    if value_lower in plugincode.output.get_format_plugins():
        return value_lower
    # otherwise treat the value as a user-provided custom template file path
    if not os.path.isfile(value):
        # fix: typo in the user-facing message ("Unknwow" -> "Unknown")
        raise click.BadParameter('Unknown <format> or invalid template file path: "%(value)s" does not exist or is not readable.' % locals())
    return value
def validate_exclusive(ctx, exclusive_options):
    """
    Validate that at most one of the ``exclusive_options`` (a list of
    keys into ctx.params) was selected on the command line.
    Raise a click.UsageError on errors; return None otherwise.
    """
    ctx_params = ctx.params
    selected_options = [ctx_params[eop] for eop in exclusive_options if ctx_params[eop]]
    if len(selected_options) > 1:
        msg = ' and '.join('`' + eo.replace('_', '-') + '`' for eo in exclusive_options)
        # fix: grammar in the user-facing message
        # ("mutually exclusion options" -> "mutually exclusive options")
        msg += ' are mutually exclusive options. You can use only one of them.'
        raise click.UsageError(msg)
@click.command(name='scancode', epilog=epilog_text, cls=ScanCommand)
@click.pass_context
# ensure that the input path is always Unicode
@click.argument('input', metavar='<input>', type=click.Path(exists=True, readable=True, path_type=str))
@click.argument('output_file', default='-', metavar='<output_file>', type=click.File(mode='wb', lazy=False))
# Note that click's 'default' option is set to 'false' here despite these being documented to be enabled by default in
# order to more elegantly enable all of these (see code below) if *none* of the command line options are specified.
@click.option('-c', '--copyright', is_flag=True, default=False, help='Scan <input> for copyrights. [default]')
@click.option('-l', '--license', is_flag=True, default=False, help='Scan <input> for licenses. [default]')
@click.option('-p', '--package', is_flag=True, default=False, help='Scan <input> for packages. [default]')
@click.option('-e', '--email', is_flag=True, default=False, help='Scan <input> for emails.')
@click.option('-u', '--url', is_flag=True, default=False, help='Scan <input> for urls.')
@click.option('-i', '--info', is_flag=True, default=False, help='Include information such as size, type, etc.')
@click.option('--license-score', is_flag=False, default=0, type=int, show_default=True,
              help='Do not return license matches with scores lower than this score. A number between 0 and 100.')
@click.option('--license-text', is_flag=True, default=False,
              help='Include the detected licenses matched text. Has no effect unless --license is requested.')
@click.option('--only-findings', is_flag=True, default=False,
              help='Only return files or directories with findings for the requested scans. Files without findings are omitted.')
@click.option('--strip-root', is_flag=True, default=False,
              help='Strip the root directory segment of all paths. The default is to always '
                   'include the last directory segment of the scanned path such that all paths have a common root directory. '
                   'This cannot be combined with `--full-root` option.')
@click.option('--full-root', is_flag=True, default=False,
              help='Report full, absolute paths. The default is to always '
                   'include the last directory segment of the scanned path such that all paths have a common root directory. '
                   'This cannot be combined with the `--strip-root` option.')
@click.option('-f', '--format', is_flag=False, default='json', show_default=True, metavar='<format>',
              help=('Set <output_file> format to one of: %s or use <format> '
                    'as the path to a custom template file' % ', '.join(plugincode.output.get_format_plugins())),
              callback=validate_formats)
@click.option('--ignore', default=None, multiple=True, metavar='<pattern>',
              help=('Ignore files matching <pattern>.'))
@click.option('--verbose', is_flag=True, default=False, help='Print verbose file-by-file progress messages.')
@click.option('--quiet', is_flag=True, default=False, help='Do not print summary or progress messages.')
@click.option('-n', '--processes', is_flag=False, default=1, type=int, show_default=True, help='Scan <input> using n parallel processes.')
@click.help_option('-h', '--help')
@click.option('--examples', is_flag=True, is_eager=True, callback=print_examples, help=('Show command examples and exit.'))
@click.option('--about', is_flag=True, is_eager=True, callback=print_about, help='Show information about ScanCode and licensing and exit.')
@click.option('--version', is_flag=True, is_eager=True, callback=print_version, help='Show the version and exit.')
@click.option('--diag', is_flag=True, default=False, help='Include additional diagnostic information such as error messages or result details.')
@click.option('--timeout', is_flag=False, default=DEFAULT_TIMEOUT, type=float, show_default=True, help='Stop scanning a file if scanning takes longer than a timeout in seconds.')
@click.option('--reindex-licenses', is_flag=True, default=False, is_eager=True, callback=reindex_licenses, help='Force a check and possible reindexing of the cached license index.')
def scancode(ctx,
             input, output_file,
             copyright, license, package,
             email, url, info,
             license_score, license_text, only_findings, strip_root, full_root,
             format, ignore, verbose, quiet, processes,
             diag, timeout, *args, **kwargs):
    """scan the <input> file or directory for origin clues and license and save results to the <output_file>.

    The scan results are printed to stdout if <output_file> is not provided.
    Error and progress is printed to stderr.
    """
    validate_exclusive(ctx, ['strip_root', 'full_root'])

    # scan name -> enabled flag, in the order scans are reported
    possible_scans = OrderedDict([
        ('infos', info),
        ('licenses', license),
        ('copyrights', copyright),
        ('packages', package),
        ('emails', email),
        ('urls', url)
    ])

    # command-line option name -> value, echoed back in the scan header
    options = OrderedDict([
        ('--copyright', copyright),
        ('--license', license),
        ('--package', package),
        ('--email', email),
        ('--url', url),
        ('--info', info),
        ('--license-score', license_score),
        ('--license-text', license_text),
        ('--only-findings', only_findings),
        ('--strip-root', strip_root),
        ('--full-root', full_root),
        ('--ignore', ignore),
        ('--format', format),
        ('--diag', diag),
    ])

    # Use default scan options when no options are provided on the command line.
    if not any(possible_scans.values()):
        possible_scans['copyrights'] = True
        possible_scans['licenses'] = True
        possible_scans['packages'] = True
        options['--copyright'] = True
        options['--license'] = True
        options['--package'] = True

    # A hack to force info being exposed for SPDX output in order to reuse calculated file SHA1s.
    if format in ('spdx-tv', 'spdx-rdf'):
        possible_scans['infos'] = True

    # Drop disabled (False) options from the reported options mapping.
    # fix: iterate over a snapshot of the keys because entries are deleted
    # inside the loop; deleting while iterating the OrderedDict itself
    # raises "RuntimeError: dictionary changed size during iteration".
    for key in list(options.keys()):
        if key == "--license-score":
            # keep --license-score even when 0 (since 0 == False would delete it)
            continue
        if options[key] == False:
            del options[key]

    get_licenses_with_score = partial(get_licenses, min_score=license_score, include_text=license_text, diag=diag)

    # List of scan functions in the same order as "possible_scans".
    scan_functions = [
        None,  # For "infos" there is no separate scan function, they are always gathered, though not always exposed.
        get_licenses_with_score,
        get_copyrights,
        get_package_infos,
        get_emails,
        get_urls
    ]

    # FIXME: this is does not make sense to use tuple and positional values
    scanners = OrderedDict(zip(possible_scans.keys(), zip(possible_scans.values(), scan_functions)))

    scans_cache_class = get_scans_cache_class()
    user_ignore = {patt: 'User ignore: Supplied by --ignore' for patt in ignore}
    try:
        files_count, results, success = scan(
            input_path=input,
            scanners=scanners,
            verbose=verbose,
            quiet=quiet,
            processes=processes,
            timeout=timeout,
            diag=diag,
            scans_cache_class=scans_cache_class,
            strip_root=strip_root,
            full_root=full_root,
            ignore=user_ignore)
        if not quiet:
            echo_stderr('Saving results.', fg='green')
        # FIXME: we should have simpler args: a scan "header" and scan results
        save_results(scanners, only_findings, files_count, results, format, options, input, output_file)
    finally:
        # cleanup the on-disk scan cache in all cases
        cache = scans_cache_class()
        cache.clear()

    rc = 0 if success else 1
    ctx.exit(rc)
def scan(input_path,
         scanners,
         verbose=False, quiet=False,
         processes=1, timeout=DEFAULT_TIMEOUT,
         diag=False,
         scans_cache_class=None,
         strip_root=False,
         full_root=False,
         ignore=None):
    """
    Return a tuple of (files_count, scan_results, success) where
    scan_results is an iterable and success is a boolean.

    Run each requested scan proper: each individual file scan is cached
    on disk to free memory. Then the whole set of scans is loaded from
    the cache and streamed at the end.

    :param input_path: file or directory path to scan.
    :param scanners: OrderedDict of scan name -> (enabled flag, scan function).
    :param processes: number of parallel worker processes.
    :param timeout: per-file scan timeout in seconds.
    :param scans_cache_class: required cache class used to persist results.
    :param ignore: mapping of glob pattern -> reason for files to skip.
    """
    assert scans_cache_class
    scan_summary = OrderedDict()
    scan_summary['scanned_path'] = input_path
    scan_summary['processes'] = processes
    # Display scan start details
    ############################
    # FIXME: it does not make sense to use tuple and positional values
    scans = [k for k, v in scanners.items() if v[0]]
    _scans = ', '.join(scans)
    if not quiet:
        echo_stderr('Scanning files for: %(_scans)s with %(processes)d process(es)...' % locals())
    scan_summary['scans'] = scans[:]
    scan_start = time()
    indexing_time = 0
    # FIXME: It does not make sense to use tuple and positional values
    with_licenses, _ = scanners.get('licenses', (False, ''))
    if with_licenses:
        # build index outside of the main loop for speed
        # this also ensures that forked processes will get the index on POSIX naturally
        if not quiet:
            echo_stderr('Building license detection index...', fg='green', nl=False)
        from licensedcode.cache import get_index
        get_index(False)
        indexing_time = time() - scan_start
        if not quiet:
            echo_stderr('Done.', fg='green', nl=True)
    scan_summary['indexing_time'] = indexing_time
    # TODO: handle pickling errors as in ./scancode -cilp samples/ -n3: note they are only caused by a FanoutCache
    # TODO: handle other exceptions properly to avoid any hanging
    # maxtasksperchild helps with recycling processes in case of leaks
    pool = get_pool(processes=processes, maxtasksperchild=1000)
    ignore = ignore or {}
    resources = resource_paths(input_path, ignore)
    logfile_path = scans_cache_class().cache_files_log
    paths_with_error = []
    files_count = 0
    with codecs.open(logfile_path, 'w', encoding='utf-8') as logfile_fd:
        # every scanned path is logged so the cache can be replayed later
        logged_resources = _resource_logger(logfile_fd, resources)
        scanit = partial(_scanit, scanners=scanners, scans_cache_class=scans_cache_class,
                         diag=diag, timeout=timeout)
        max_file_name_len = compute_fn_max_len()
        # do not display a file name in progress bar if there is less than 5 chars available.
        display_fn = bool(max_file_name_len > 10)
        try:
            # Using chunksize is documented as much more efficient in the Python doc.
            # Yet "1" still provides a better and more progressive feedback.
            # With imap_unordered, results are returned as soon as ready and out of order.
            scanned_files = pool.imap_unordered(scanit, logged_resources, chunksize=1)
            pool.close()
            if not quiet:
                echo_stderr('Scanning files...', fg='green')

            def scan_event(item):
                """Progress event displayed each time a file is scanned"""
                if quiet or not item or not display_fn:
                    return ''
                _scan_success, _scanned_path = item
                if verbose:
                    _progress_line = _scanned_path
                else:
                    _progress_line = fixed_width_file_name(_scanned_path, max_file_name_len)
                return style('Scanned: ') + style(_progress_line, fg=_scan_success and 'green' or 'red')

            # NOTE(review): scanning_errors is assigned but never used below.
            scanning_errors = []
            files_count = 0
            with progressmanager(
                scanned_files, item_show_func=scan_event, show_pos=True,
                verbose=verbose, quiet=quiet, file=sys.stderr) as scanned:
                while True:
                    try:
                        # Python 2 iterator protocol (.next())
                        result = scanned.next()
                        scan_success, scanned_rel_path = result
                        if not scan_success:
                            paths_with_error.append(scanned_rel_path)
                        files_count += 1
                    except StopIteration:
                        break
                    except KeyboardInterrupt:
                        print('\nAborted with Ctrl+C!')
                        pool.terminate()
                        break
        finally:
            # ensure the pool is really dead to work around a Python 2.7.3 bug:
            # http://bugs.python.org/issue15101
            pool.terminate()
    # TODO: add stats to results somehow
    # Compute stats
    ##########################
    scan_summary['files_count'] = files_count
    scan_summary['files_with_errors'] = paths_with_error
    total_time = time() - scan_start
    scanning_time = total_time - indexing_time
    scan_summary['total_time'] = total_time
    scan_summary['scanning_time'] = scanning_time
    # NOTE(review): raises ZeroDivisionError when scanning_time is exactly 0
    # (e.g. an empty or extremely fast scan) -- confirm and guard if needed.
    files_scanned_per_second = round(float(files_count) / scanning_time , 2)
    scan_summary['files_scanned_per_second'] = files_scanned_per_second
    if not quiet:
        # Display stats
        ##########################
        echo_stderr('Scanning done.', fg=paths_with_error and 'red' or 'green')
        if paths_with_error:
            if diag:
                echo_stderr('Some files failed to scan properly:', fg='red')
                # iterate cached results to collect all scan errors
                cached_scan = scans_cache_class()
                root_dir = _get_root_dir(input_path, strip_root, full_root)
                scan_results = cached_scan.iterate(scans, root_dir, paths_subset=paths_with_error)
                for scan_result in scan_results:
                    errored_path = scan_result.get('path', '')
                    echo_stderr('Path: ' + errored_path, fg='red')
                    for error in scan_result.get('scan_errors', []):
                        for emsg in error.splitlines(False):
                            echo_stderr('  ' + emsg)
                        echo_stderr('')
            else:
                echo_stderr('Some files failed to scan properly. Use the --diag option for additional details:', fg='red')
                for errored_path in paths_with_error:
                    echo_stderr(' ' + errored_path, fg='red')
        echo_stderr('Scan statistics: %(files_count)d files scanned in %(total_time)ds.' % locals())
        echo_stderr('Scan options:    %(_scans)s with %(processes)d process(es).' % locals())
        echo_stderr('Scanning speed:  %(files_scanned_per_second)s files per sec.' % locals())
        echo_stderr('Scanning time:   %(scanning_time)ds.' % locals())
        echo_stderr('Indexing time:   %(indexing_time)ds.' % locals(), reset=True)
    success = not paths_with_error
    # finally return an iterator on cached results
    cached_scan = scans_cache_class()
    root_dir = _get_root_dir(input_path, strip_root, full_root)
    return files_count, cached_scan.iterate(scans, root_dir), success
def _get_root_dir(input_path, strip_root=False, full_root=False):
"""
Return a root dir name or None.
On Windows, the path uses POSIX (forward slash) separators.
"""
if strip_root:
return
scanned_path = os.path.abspath(os.path.normpath(os.path.expanduser(input_path)))
scanned_path = fileutils.as_posixpath(scanned_path)
if full_root:
return scanned_path
if filetype.is_dir(scanned_path):
root_dir = scanned_path
else:
root_dir = fileutils.parent_directory(scanned_path)
return fileutils.file_name(root_dir)
def _resource_logger(logfile_fd, resources):
    """
    Write the relative path of each resource to the open ``logfile_fd``
    file descriptor, passing every (posix_path, rel_path) tuple through
    unchanged.
    """
    log = ScanFileCache.log_file_path
    for abs_posix, relative in resources:
        log(logfile_fd, relative)
        yield abs_posix, relative
def _scanit(paths, scanners, scans_cache_class, diag, timeout=DEFAULT_TIMEOUT):
    """
    Run scans and cache results on disk. Return a tuple of (success, scanned
    relative path) where success is True on success, False on error. Note that
    this is really only a wrapper function used as an execution unit for
    parallel processing.
    """
    abs_path, rel_path = paths
    # always fetch infos and cache.
    infos = OrderedDict()
    infos['path'] = rel_path
    infos.update(scan_infos(abs_path, diag=diag))
    success = True
    scans_cache = scans_cache_class()
    is_cached = scans_cache.put_info(rel_path, infos)
    # note: "flag and function" expressions return the function if flag is True
    # note: the order of the scans matters to show things in logical order
    # NOTE(review): this relies on Python 2 map() returning a list; under
    # Python 3 the zip() below would exhaust the iterator and any() would
    # always be False -- must be rewritten if this file is ported.
    scanner_functions = map(lambda t : t[0] and t[1], scanners.values())
    scanners = OrderedDict(zip(scanners.keys(), scanner_functions))
    if any(scanner_functions):
        # Skip other scans if already cached
        # FIXME: ENSURE we only do this for files not directories
        if not is_cached:
            # run the scan as an interruptible task
            scans_runner = partial(scan_one, abs_path, scanners, diag)
            # quota keyword args for interruptible
            success, scan_result = interruptible(scans_runner, timeout=timeout)
            if not success:
                # Use scan errors as the scan result for that file on failure this is
                # a top-level error not attached to a specific scanner, hence the
                # "scan" key is used for these errors
                scan_result = {'scan_errors': [scan_result]}
            scans_cache.put_scan(rel_path, infos, scan_result)
            # do not report success if some other errors happened
            if scan_result.get('scan_errors'):
                success = False
    return success, rel_path
def resource_paths(base_path, user_ignores):
    """
    Yield (absolute path, base_path-relative path) tuples for every file
    found at ``base_path`` (either a directory or a single file), given an
    absolute base_path. Only files are yielded, never directories.

    The absolute path is a native OS path. The base_path-relative path is
    a POSIX path, guaranteed to be unicode; it may be URL-encoded and may
    not be suitable to address an actual file.
    """
    base_path = os.path.abspath(os.path.normpath(os.path.expanduser(base_path)))
    base_is_dir = filetype.is_dir(base_path)
    base_len = len(base_path)
    # merge the caller-supplied ignores with the default VCS ignores
    combined_ignores = dict(user_ignores)
    combined_ignores.update(ignore.ignores_VCS)
    is_ignored = partial(ignore.is_ignored, ignores=combined_ignores, unignores={})
    for abs_path in fileutils.resource_iter(base_path, ignored=is_ignored):
        posix = fileutils.as_posixpath(abs_path)
        # keep the path relative to the original base_path
        yield abs_path, get_relative_path(posix, base_len, base_is_dir)
def scan_infos(input_file, diag=False):
    """
    Scan one file or directory and return its file_infos mapping. The
    returned mapping always carries a 'scan_errors' key with a (possibly
    empty) list of error messages. When ``diag`` is True, additional
    diagnostic messages (a traceback) are included.
    """
    messages = []
    try:
        infos = get_file_infos(input_file)
    except Exception as exc:
        # never fail: fall back to empty infos and record the error instead
        infos = _empty_file_infos()
        messages = ['ERROR: infos: ' + exc.message]
        if diag:
            messages.append('ERROR: infos: ' + traceback.format_exc())
    # errors are stored last in the mapping
    infos['scan_errors'] = messages
    return infos
def scan_one(input_file, scanners, diag=False):
    """
    Scan one file or directory by calling every scan function in the
    ``scanners`` mapping of (scan name -> scan function or None) and
    return an OrderedDict of results. The result carries a 'scan_errors'
    key with a list of error messages. When ``diag`` is True, the error
    messages include detailed diagnostics such as a traceback.
    """
    results = OrderedDict()
    errors = []
    for name, func in scanners.items():
        if not func:
            # disabled scan (e.g. the "infos" placeholder)
            continue
        try:
            detail = func(input_file)
            if isinstance(detail, GeneratorType):
                # materialize lazy results so they can be cached/serialized
                detail = list(detail)
            results[name] = detail
        except TimeoutError:
            # timeouts are handled by the caller: propagate
            raise
        except Exception as exc:
            # never fail: keep an empty result for this scan and record errors
            results[name] = []
            err_messages = ['ERROR: ' + name + ': ' + exc.message]
            if diag:
                err_messages.append('ERROR: ' + name + ': ' + traceback.format_exc())
            errors.extend(err_messages)
    # errors go last, after the scans proper
    results['scan_errors'] = errors
    return results
def has_findings(active_scans, file_data):
    """
    Return True if ``file_data`` holds a non-empty result for at least
    one of the scan names listed in ``active_scans``.
    """
    for scan_name in active_scans:
        if file_data.get(scan_name):
            return True
    return False
def save_results(scanners, only_findings, files_count, results, format, options, input, output_file):
    """
    Save scan results to file or screen.

    :param scanners: OrderedDict of scan name -> (enabled flag, scan function).
    :param only_findings: if True, drop files without findings from results.
    :param results: iterable of per-file scan result mappings.
    :param format: a format plugin name or a custom template file path.
    :param options: mapping of command line options echoed in the output header.
    :param output_file: open file object; may be sys.stdout.
    """
    if only_findings:
        # Find all scans that are both enabled and have a valid function
        # reference. This deliberately filters out the "info" scan
        # (which always has a "None" function reference) as there is no
        # dedicated "infos" key in the results that "has_findings()"
        # could check.
        # FIXME: we should not use positional things tuples for v[0], v[1] that are mysterious values for now
        active_scans = [k for k, v in scanners.items() if v[0] and v[1]]
        # FIXME: this is forcing all the scan results to be loaded in memory
        # and defeats lazy loading from cache
        # FIXME: we should instead use a generator or use a filter
        # function that pass to the scan results loader iterator
        results = [file_data for file_data in results if has_findings(active_scans, file_data)]
        # FIXME: computing len beforehand will need a list and therefore needs loading
        # it all ahead of time and defeats caching entirely
        files_count = len(results)
    # note: in tests, sys.stdout is not used, but is instead some io
    # wrapper with no name attributes. We use this to check if this is a
    # real filesystem file or not.
    # note: sys.stdout.name == '<stdout>' so it has a name.
    is_real_file = hasattr(output_file, 'name')
    if output_file != sys.stdout and is_real_file:
        # we are writing to a real filesystem file: create directories!
        parent_dir = os.path.dirname(output_file.name)
        if parent_dir:
            fileutils.create_dir(abspath(expanduser(parent_dir)))
    # Write scan results to file or screen as a formatted output ...
    # ... using a user-provided custom format template
    format_plugins = plugincode.output.get_format_plugins()
    if format not in format_plugins:
        # format may be a custom template file path
        if not os.path.isfile(format):
            # this check was done before in the CLI validation, but this
            # is done again if the function is used directly
            echo_stderr('\nInvalid template: must be a file.', fg='red')
        else:
            from formattedcode import format_templated
            # FIXME: carrying an echo function does not make sense
            format_templated.write_custom(
                results, output_file, _echo=echo_stderr, template_path=format)
    # ... or using the selected format plugin
    else:
        writer = format_plugins[format]
        # FIXME: carrying an echo function does not make sense
        # FIXME: do not use input as a variable name
        writer(files_count=files_count, version=version, notice=notice,
               scanned_files=results,
               options=options,
               input=input, output_file=output_file, _echo=echo_stderr)
| yashdsaraf/scancode-toolkit | src/scancode/cli.py | Python | apache-2.0 | 32,883 | [
"VisIt"
] | 39af1efa5c3deedb5ff2430aaba7daaecec822614e6d0c0432e35dfd933149a6 |
# # # # #
# Tool to downscale the CMIP5 data from the PCMDI group.
# # # # #
def shiftgrid(lon0,datain,lonsin,start=True,cyclic=360.0):
    """
    Shift global lat/lon grid east or west.

    Arguments:
        lon0: starting longitude for shifted grid (ending longitude if
            start=False). lon0 must be on input grid (within the range
            of lonsin).
        datain: original data with longitude the right-most dimension.
        lonsin: original longitudes.

    Keywords:
        start: if True, lon0 represents the starting longitude of the
            new grid. if False, lon0 is the ending longitude.
            Default True.
        cyclic: width of periodic domain (default 360)

    returns ``dataout,lonsout`` (data and longitudes on shifted grid).
    """
    # fix: the docstring above must be the FIRST statement of the function.
    # It was previously placed after the numpy import, which made it a plain
    # discarded string expression instead of the function's __doc__.
    import numpy as np
    if np.fabs(lonsin[-1]-lonsin[0]-cyclic) > 1.e-4:
        # grid does not include a cyclic (duplicated) end point: use all data
        start_idx = 0
    else:
        # cyclic grid: skip the duplicated end point when wrapping around
        start_idx = 1
    if lon0 < lonsin[0] or lon0 > lonsin[-1]:
        raise ValueError('lon0 outside of range of lonsin')
    i0 = np.argmin(np.fabs(lonsin-lon0))
    i0_shift = len(lonsin)-i0
    # preserve masked-ness of the inputs in the outputs
    if np.ma.isMA(datain):
        dataout = np.ma.zeros(datain.shape,datain.dtype)
    else:
        dataout = np.zeros(datain.shape,datain.dtype)
    if np.ma.isMA(lonsin):
        lonsout = np.ma.zeros(lonsin.shape,lonsin.dtype)
    else:
        lonsout = np.zeros(lonsin.shape,lonsin.dtype)
    if start:
        lonsout[0:i0_shift] = lonsin[i0:]
    else:
        lonsout[0:i0_shift] = lonsin[i0:]-cyclic
    dataout[...,0:i0_shift] = datain[...,i0:]
    if start:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]+cyclic
    else:
        lonsout[i0_shift:] = lonsin[start_idx:i0+start_idx]
    dataout[...,i0_shift:] = datain[...,start_idx:i0+start_idx]
    return dataout,lonsout
def cru_generator( n, cru_clim_list ):
    '''
    generator that will produce the cru climatologies in cru_clim_list,
    replicated n times (once for each year to downscale).
    '''
    # fix: removed the unused local `months` list and the unused `count`
    # from enumerate() -- neither was referenced anywhere in the body.
    for _ in range( n ):
        for clim in cru_clim_list:
            yield clim
def standardized_fn_to_vars( fn ):
    '''
    Break a filename that follows this project's downscaling naming
    convention into its named parts and return them as a dict.
    '''
    fields = [ 'variable', 'cmor_table', 'model', 'scenario', 'experiment', 'begin_time', 'end_time' ]
    base = os.path.basename( fn )
    parts = base.split( '.' )[0].split( '_' )
    return dict( zip( fields, parts ) )
def downscale( src, dst, cru, src_crs, src_affine, dst_crs, dst_affine, output_filename, dst_meta, variable,\
            method='cubic_spline', operation='add', output_dtype='float32', **kwargs ):
    '''
    Reproject a (modeled) anomaly grid onto the CRU grid, combine it with the
    CRU climatology, and write the result to a GeoTIFF.

    operation can be one of two keywords for the operation to perform the delta downscaling
    - keyword strings are one of: 'add'= addition, 'mult'=multiplication, or 'div'=division (not implemented)
    - method can be one of 'cubic_spline', 'nearest', 'bilinear' and must be input as a string.
    - output_dtype can be one of 'int32', 'float32'

    Returns the output_filename that was written.
    '''
    from rasterio.warp import reproject, RESAMPLING
    def add( cru, anom ):
        # additive delta (e.g. temperatures)
        return cru + anom
    def mult( cru, anom ):
        # multiplicative delta (e.g. precipitation ratios)
        return cru * anom
    def div( cru, anom ):
        # return cru / anom
        # this one may not be useful, but the placeholder is here
        return NotImplementedError
    # switch to deal with numeric output dtypes
    dtypes_switch = {'int32':np.int32, 'float32':np.float32}
    # switch to deal with different resampling types
    method_switch = { 'nearest':RESAMPLING.nearest, 'bilinear':RESAMPLING.bilinear, 'cubic_spline':RESAMPLING.cubic_spline }
    method = method_switch[ method ]
    # reproject src to dst
    out = np.zeros( dst.shape )
    reproject( src,
                out,
                src_transform=src_affine,
                src_crs=src_crs,
                dst_transform=dst_affine,
                dst_crs=dst_crs,
                resampling=method )
    # switch to deal with different downscaling operators
    operation_switch = { 'add':add, 'mult':mult, 'div':div }
    downscaled = operation_switch[ operation ]( cru, out )
    # reset any > 100 values to 95 if the variable is cld or hur
    # (percent-valued variables cannot exceed 100)
    if variable == 'clt' or variable == 'hur' or variable == 'cld':
        downscaled[ downscaled > 100.0 ] = 95.0
    # give the proper fill values to the oob regions
    # NOTE(review): assumes `cru` (and hence `downscaled`) is a numpy masked
    # array -- .fill_value/.filled() would fail on a plain ndarray; confirm.
    downscaled.fill_value = dst_meta['nodata']
    downscaled = downscaled.filled()
    # this is a geotiff creator so lets pass in the lzw compression
    dst_meta.update( compress='lzw' )
    # NOTE: `out` is rebound here from the reprojected array to the open dataset
    with rasterio.open( output_filename, 'w', **dst_meta ) as out:
        out.write( downscaled.astype( dtypes_switch[ output_dtype ] ), 1 )
    return output_filename
def run( args ):
	'''
	thin wrapper that unpacks a keyword-argument dict into downscale().

	multiprocessing.map only passes a single argument to the mapped
	callable, so the per-timestep arguments travel packed in one dict.
	'''
	return downscale( **args )
if __name__ == '__main__':
import pandas as pd
import numpy as np
import os, sys, re, xray, rasterio, glob, argparse
from rasterio import Affine as A
from rasterio.warp import reproject, RESAMPLING
from pathos import multiprocessing as mp
# parse the commandline arguments
parser = argparse.ArgumentParser( description='preprocess cmip5 input netcdf files to a common type and single files' )
parser.add_argument( "-mi", "--modeled_fn", nargs='?', const=None, action='store', dest='modeled_fn', type=str, help="path to modeled input filename (NetCDF); default:None" )
parser.add_argument( "-hi", "--historical_fn", nargs='?', const=None, action='store', dest='historical_fn', type=str, help="path to historical input filename (NetCDF); default:None" )
parser.add_argument( "-o", "--output_path", action='store', dest='output_path', type=str, help="string path to the output folder containing the new downscaled outputs" )
parser.add_argument( "-cbt", "--climatology_begin_time", nargs='?', const='196101', action='store', dest='climatology_begin', type=str, help="string in format YYYYMM or YYYY of the beginning month and potentially (year) of the climatology period" )
parser.add_argument( "-cet", "--climatology_end_time", nargs='?', const='199012', action='store', dest='climatology_end', type=str, help="string in format YYYYMM or YYYY of the ending month and potentially (year) of the climatology period" )
parser.add_argument( "-plev", "--plev", nargs='?', const=None, action='store', dest='plev', type=int, help="integer value (in millibars) of the desired pressure level to extract, if there is one." )
parser.add_argument( "-cru", "--cru_path", action='store', dest='cru_path', type=str, help="path to the directory storing the cru climatology data derived from CL2.0" )
parser.add_argument( "-at", "--anomalies_calc_type", nargs='?', const='absolute', action='store', dest='anomalies_calc_type', type=str, help="string of 'proportional' or 'absolute' to inform of anomalies calculation type to perform." )
parser.add_argument( "-m", "--metric", nargs='?', const='metric', action='store', dest='metric', type=str, help="string of whatever the metric type is of the outputs to put in the filename." )
parser.add_argument( "-dso", "--downscale_operation", action='store', dest='downscale_operation', type=str, help="string of 'add', 'mult', 'div', which refers to the type or downscaling operation to use." )
parser.add_argument( "-nc", "--ncores", nargs='?', const=2, action='store', dest='ncores', type=int, help="integer valueof number of cores to use. default:2" )
# parse args
args = parser.parse_args()
# unpack args
modeled_fn = args.modeled_fn
historical_fn = args.historical_fn
# temporary
# modeled_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/MRI-CGCM3/clt/clt_Amon_MRI-CGCM3_rcp26_r1i1p1_200601_210012.nc'
# historical_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/MRI-CGCM3/clt/clt_Amon_MRI-CGCM3_rcp85_r1i1p1_200601_210012.nc'
output_path = args.output_path
climatology_begin = args.climatology_begin
climatology_end = args.climatology_end
plev = args.plev
cru_path = args.cru_path
anomalies_calc_type = args.anomalies_calc_type
metric = args.metric
downscale_operation = args.downscale_operation
ncores = args.ncores
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# [NOTE]: hardwired raster metadata meeting the ALFRESCO Model's needs for
# perfectly aligned inputs this is used as template metadata that
# is used in output generation. template raster filename below:
# '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/
# TEM_Data/templates/tas_mean_C_AR5_GFDL-CM3_historical_01_1860.tif'
meta_3338 = {'affine': A(2000.0, 0.0, -2173223.206087799,
0.0, -2000.0, 2548412.932644147),
'count': 1,
'crs': {'init':'epsg:3338'},
'driver': u'GTiff',
'dtype': 'float32',
'height': 1186,
'nodata': -3.4e+38,
'width': 3218,
'compress':'lzw'}
# output template numpy array same dimensions as the template
dst = np.empty( (1186, 3218) )
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# condition to deal with reading in historical data if needed.
if modeled_fn is not None and historical_fn is not None:
print 'here'
# parse the input name for some file metadata
output_naming_dict = standardized_fn_to_vars( modeled_fn )
# this is to maintain cleanliness
variable = output_naming_dict[ 'variable' ]
# read in both modeled and historical
ds = xray.open_dataset( modeled_fn )
ds = ds[ variable ].load()
clim_ds = xray.open_dataset( historical_fn )
clim_ds = clim_ds[ variable ].load()
# generate climatology / anomalies
clim_ds = clim_ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = clim_ds.groupby( 'time.month' ).mean( 'time' )
# find the begin/end years of the prepped files
dates = ds.time.to_pandas()
years = dates.apply( lambda x: x.year )
begin_time = years.min()
end_time = years.max()
print 'here'
del clim_ds
elif historical_fn is not None and modeled_fn is None:
# parse the input name for some file metadata
output_naming_dict = standardized_fn_to_vars( historical_fn )
# this is to maintain cleanliness
variable = output_naming_dict[ 'variable' ]
# read in historical
ds = xray.open_dataset( historical_fn )
ds = ds[ variable ].load()
# generate climatology / anomalies
climatology = ds.loc[ {'time':slice(climatology_begin,climatology_end)} ]
climatology = climatology.groupby( 'time.month' ).mean( 'time' )
# find the begin/end years of the prepped files
dates = ds.time.to_pandas()
years = dates.apply( lambda x: x.year )
begin_time = years.min()
end_time = years.max()
else:
NameError( 'ERROR: must have both modeled_fn and historical_fn, or just historical_fn' )
# standardize the output pathing
if output_naming_dict[ 'variable' ] == 'clt':
variable_out = 'cld'
print 'here'
else:
variable_out = output_naming_dict[ 'variable' ]
output_path = os.path.join( output_path, 'ar5', output_naming_dict['model'], variable_out, 'downscaled' )
if not os.path.exists( output_path ):
os.makedirs( output_path )
# if there is a pressure level to extract, extract it
if plev is not None:
plevel, = np.where( ds.plev == plev )
ds = ds[ :, plevel[0], ... ]
climatology = climatology[ :, plevel[0], ... ]
# deal with different anomaly calculation types
if anomalies_calc_type == 'absolute':
anomalies = ds.groupby( 'time.month' ) - climatology
elif anomalies_calc_type == 'proportional':
print 'here'
anomalies = ds.groupby( 'time.month' ) / climatology
else:
NameError( 'anomalies_calc_type can only be one of "absolute" or "proportional"' )
# some setup of the output raster metadata
time_len, rows, cols = anomalies.shape
crs = 'epsg:4326'
affine = A( *[np.diff( ds.lon )[ 0 ], 0.0, -180.0, 0.0, -np.diff( ds.lat )[ 0 ], 90.0] )
count = time_len
resolution = ( np.diff( ds.lat )[ 0 ], np.diff( ds.lon )[ 0 ] )
# close the dataset and clean it up
ds = None
# shift the grid to Greenwich Centering
dat, lons = shiftgrid( 180., anomalies[:], anomalies.lon.data, start=False )
# metadata for input?
meta_4326 = {'affine':affine,
'height':rows,
'width':cols,
'crs':crs,
'driver':'GTiff',
'dtype':np.float32,
'count':time_len,
'compress':'lzw' }
# build some filenames for the outputs to be generated
months = [ '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12' ]
years = [ str(year) for year in range( begin_time, end_time + 1, 1 ) ]
# combine the months and the years
combinations = [ (month, year) for year in years for month in months ]
output_filenames = [ os.path.join( output_path, '_'.join([variable_out, 'metric', output_naming_dict['model'], output_naming_dict['scenario'], output_naming_dict['experiment'], month, year]) + '.tif' ) for month, year in combinations ]
print 'here'
# load the baseline CRU CL2.0 data
# [NOTE]: THIS ASSUMES THEY ARE THE ONLY FILES IN THE DIRECTORY -- COULD BE A GOTCHA
cru_files = glob.glob( os.path.join( cru_path, '*.tif' ) )
cru_files.sort()
cru_stack = [ rasterio.open( fn ).read( 1 ) for fn in cru_files ]
# this is a hack to make a masked array with the cru data
cru_stack = [ np.ma.masked_where( cru == cru.min(), cru ) for cru in cru_stack ]
cru_gen = cru_generator( len(output_filenames), cru_stack )
print 'here'
# cleanup some uneeded vars that are hogging RAM
del climatology, anomalies
# run in parallel using PATHOS
pool = mp.Pool( processes=ncores )
args_list = zip( np.vsplit( dat, time_len ), output_filenames, cru_gen )
del dat, cru_gen, cru_stack
out = pool.map( run, [{'src':src, 'output_filename':fn, 'dst':dst, 'cru':cru, 'src_crs':meta_4326[ 'crs' ], 'src_affine':meta_4326[ 'affine' ], \
'dst_crs':meta_3338[ 'crs' ], 'dst_affine':meta_3338[ 'affine' ], 'dst_meta':meta_3338, 'operation':downscale_operation, 'variable':variable } \
for src,fn,cru in args_list ] )
pool.close()
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# # # # # # # # SOME EXAMPLES OF USE # # # # # # # # # # # # # #
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# # TO RUN THE CLOUDS DOWNSCALING USE THIS EXAMPLE:
# NOTE(review): everything from here down is example/driver code that executes
# at import time (it is NOT guarded by __name__ == '__main__') -- presumably
# left live on purpose in this one-off "fix" script; confirm before importing
# this module from anywhere else.
import os
import pandas as pd
import numpy as np

# change to the script repo
os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
variable = 'clt' # AR5 naming convention cloud fraction

# to run the futures:
prepped_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped'
# walk the prepped directory and group the NetCDF files by folder, keeping
# only folders whose files match the target variable
file_groups = [ [os.path.join(root,f) for f in files] for root, sub, files in os.walk( prepped_dir ) if len(files) > 0 and files[0].endswith('.nc') and variable in files[0] ]
variable = 'cld' # swap it back for the cru naming convention
def make_rcp_file_pairs( file_group ):
	'''pair the group's single historical file with each of its rcp files.'''
	# there is only one historical per group since these have been
	# pre-processed to a single file and date range, so the comprehension
	# pops exactly once
	historical = [ file_group.pop( idx ) for idx, fn in enumerate( file_group ) if 'historical' in fn ]
	# repeat the historical filename so it lines up against every rcp file
	repeated_historical = np.repeat( historical, len( file_group ) ).tolist()
	return zip( repeated_historical, file_group )
# pair each group's historical file against its rcp files
grouped_pairs = [ make_rcp_file_pairs( file_group ) for file_group in file_groups ]
for file_group in grouped_pairs:
	for historical_fn, modeled_fn in file_group:
		# this fix script only re-runs the MRI-CGCM3 rcp26 case
		if 'MRI-CGCM3_rcp26_' in modeled_fn:
			print( 'running: %s' % os.path.basename( modeled_fn ) )
			output_path = '/Data/malindgren/cru_november_final'
			climatology_begin = '1961-01'
			climatology_end = '1990-12'
			cru_path = os.path.join( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/cru_cl20', variable, 'akcan' )
			anomalies_calc_type = 'proportional'
			metric = 'pct'
			downscale_operation = 'mult'
			ncores = '30'
			# future modeled data
			# build the args
			# NOTE(review): the loop variables modeled_fn/historical_fn are
			# overwritten here with hardcoded paths -- looks intentional for
			# this one-off fix run; confirm before reusing this script
			modeled_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/MRI-CGCM3/clt/clt_Amon_MRI-CGCM3_rcp26_r1i1p1_200601_210012.nc'
			historical_fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped/clt_prepped/MRI-CGCM3/clt/clt_Amon_MRI-CGCM3_historical_r1i1p1_185001_200512.nc'
			args_tuples = [ ( 'mi', modeled_fn ),
							( 'hi', historical_fn ),
							( 'o', output_path ),
							( 'cbt', climatology_begin ),
							( 'cet', climatology_end ),
							( 'cru', cru_path ),
							( 'at', anomalies_calc_type ),
							( 'm', metric ),
							( 'dso', downscale_operation ),
							( 'nc', ncores ) ]
			# flatten the (flag, value) tuples into a single CLI string
			args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
			os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
			del modeled_fn
			# NOTE(review): the self-assignments below are no-ops, apparently
			# left over from editing the (now commented-out) historical-run
			# branch below; 'plev' only exists here because the __main__
			# block above ran first when invoked as a script -- verify
			output_path = output_path
			climatology_begin = climatology_begin
			climatology_end = climatology_end
			plev = plev
			cru_path = cru_path
			anomalies_calc_type = anomalies_calc_type
			metric = metric
			downscale_operation = downscale_operation
			ncores = ncores
# # now historical modeled data
# # build the args
# print( 'running: %s' % os.path.basename( historical_fn ) )
# args_tuples = [ ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython -- ar5_model_data_downscaling.py ' + args )
# # # # # # # # # # # # # # # # # # # #
# # TO RUN THE TEMPERATURE DOWNSCALING USE THIS EXAMPLE:
# import os
# import pandas as pd
# import numpy as np
# # change to the script repo
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# variable = 'tas'
# # to run the futures:
# prepped_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
# file_groups = [ [os.path.join(root,f) for f in files] for root, sub, files in os.walk( prepped_dir ) if len(files) > 0 and files[0].endswith('.nc') and variable in files[0] ]
# def make_rcp_file_pairs( file_group ):
# # there is only one historical per group since these have been pre-processed to a single file and date range
# historical = [ file_group.pop( count ) for count, i in enumerate( file_group ) if 'historical' in i ]
# return zip( np.repeat( historical, len(file_group) ).tolist(), file_group )
# grouped_pairs = [ make_rcp_file_pairs( file_group ) for file_group in file_groups ]
# for file_group in grouped_pairs:
# for historical_fn, modeled_fn in file_group:
# print( 'running: %s' % os.path.basename( modeled_fn ) )
# output_path = '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final'
# climatology_begin = '1961-01'
# climatology_end = '1990-12'
# cru_path = os.path.join( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/cru_cl20',variable,'akcan' )
# anomalies_calc_type = 'absolute'
# metric = 'C'
# downscale_operation = 'add'
# ncores = '30'
# # future modeled data
# # # build the args
# args_tuples = [ ( 'mi', modeled_fn ),
# ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'python clt_ar5_model_data_downscaling.py ' + args )
# del modeled_fn
# # now historical modeled data
# # # build the args by pop(-ping) out the first entry which is modeled_fn
# args_tuples.pop(0)
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython -- ar5_model_data_downscaling.py ' + args )
# # # # # # # # # # # # # # # # # # # #
# # TO RUN THE RELATIVE HUMIDITY DOWNSCALING USE THIS EXAMPLE:
# import os
# import pandas as pd
# import numpy as np
# # change to the script repo
# os.chdir( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/CODE/tem_ar5_inputs/downscale_cmip5/bin' )
# # variable we are running
# variable = 'hur'
# # to run the futures:
# prepped_dir = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/data/prepped'
# file_groups = [ [os.path.join(root,f) for f in files] for root, sub, files in os.walk( prepped_dir ) if len(files) > 0 and files[0].endswith('.nc') and variable in files[0] ]
# def make_rcp_file_pairs( file_group ):
# # there is only one historical per group since these have been pre-processed to a single file and date range
# historical = [ file_group.pop( count ) for count, i in enumerate( file_group ) if 'historical' in i ]
# return zip( np.repeat( historical, len(file_group) ).tolist(), file_group )
# grouped_pairs = [ make_rcp_file_pairs( file_group ) for file_group in file_groups ]
# for file_group in grouped_pairs:
# for historical_fn, modeled_fn in file_group:
# print( 'running: %s' % os.path.basename( modeled_fn ) )
# output_path = '/Data/malindgren/cru_november_final'
# plev = '1000'
# climatology_begin = '1961-01'
# climatology_end = '1990-12'
# cru_path = os.path.join( '/workspace/Shared/Tech_Projects/ALFRESCO_Inputs/project_data/TEM_Data/cru_november_final/cru_cl20',variable,'akcan' )
# anomalies_calc_type = 'proportional'
# metric = 'pct'
# downscale_operation = 'mult'
# ncores = '32'
# # future modeled data
# args_tuples = [ ( 'mi', modeled_fn ),
# ( 'hi', historical_fn ),
# ( 'o', output_path ),
# ( 'plev', plev ),
# ( 'cbt', climatology_begin ),
# ( 'cet', climatology_end ),
# ( 'cru', cru_path ),
# ( 'at', anomalies_calc_type ),
# ( 'm', metric ),
# ( 'dso', downscale_operation ),
# ( 'nc', ncores ) ]
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'python ar5_model_data_downscaling.py ' + args )
# del modeled_fn
# # now historical modeled data
# # # build the args by pop(-ping) out the first entry which is modeled_fn
# args_tuples.pop(0)
# args = ''.join([ ' -'+flag+' '+value for flag, value in args_tuples ])
# os.system( 'ipython -- ar5_model_data_downscaling.py ' + args )
| ua-snap/downscale | old/old_bin/ar5_model_data_downscaling_fix.py | Python | mit | 22,871 | [
"NetCDF"
] | 53b2d64aedd7e2224eb06696896df98889e05f92c45b8b0f72c5203d0435c585 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffxparser(RPackage):
    """Package for parsing Affymetrix files (CDF, CEL, CHP, BPMAP, BAR).
    It provides methods for fast and memory efficient parsing of
    Affymetrix files using the Affymetrix' Fusion SDK. Both ASCII-
    and binary-based files are supported. Currently, there are methods
    for reading chip definition file (CDF) and a cell intensity file (CEL).
    These files can be read either in full or in part. For example,
    probe signals from a few probesets can be extracted very quickly
    from a set of CEL files into a convenient list structure."""

    # Bioconductor landing page for the package
    homepage = "https://www.bioconductor.org/packages/affxparser/"
    # fetched from the Bioconductor git mirror rather than a tarball
    url      = "https://git.bioconductor.org/packages/affxparser"
    list_url = homepage

    # pinned to an exact commit of the release branch for reproducibility
    version('1.48.0', git='https://git.bioconductor.org/packages/affxparser', commit='2461ea88f310b59c4a9a997a4b3dadedbd65a4aa')

    # Bioconductor 3.5 releases track the R 3.4.x series
    depends_on('r@3.4.0:3.4.9', when='@1.48.0')
| lgarren/spack | var/spack/repos/builtin/packages/r-affxparser/package.py | Python | lgpl-2.1 | 2,176 | [
"Bioconductor"
] | 792ff34e63cfe6c775372d29bb3897a4fbf078f44b95973e93c7c5ef06b4ac56 |
""" Compute the centerlines according to Kienholz et al (2014) - with
modifications.
The output is a list of Centerline objects, stored as a list in a pickle.
The order of the list is important since the lines are
sorted per order (hydrological flow level), from the lower orders (upstream)
to the higher orders (downstream). Several tools later on rely on this order
so don't mess with it.
References::
Kienholz, C., Rich, J. L., Arendt, a. a., and Hock, R. (2014).
A new method for deriving glacier centerlines applied to glaciers in
Alaska and northwest Canada. The Cryosphere, 8(2), 503-519.
doi:10.5194/tc-8-503-2014
"""
from __future__ import absolute_import, division
from six.moves import zip
# Built ins
import logging
import copy
from itertools import groupby
# External libs
import numpy as np
from pandas import Series as pdSeries
import shapely.ops
import geopandas as gpd
import scipy.signal
from scipy.ndimage.filters import gaussian_filter1d
from scipy.ndimage.morphology import distance_transform_edt
from skimage.graph import route_through_array
import netCDF4
import shapely.geometry as shpg
import scipy.signal
from scipy.interpolate import RegularGridInterpolator
# Locals
import oggm.cfg as cfg
from oggm.cfg import GAUSSIAN_KERNEL
from salem import lazy_property
from oggm.utils import tuple2int, line_interpol, interp_nans
from oggm import entity_task, divide_task
# Module logger
log = logging.getLogger(__name__)
class Centerline(object):
    """A Centerline has geometrical and flow rooting properties.

    It is instantiated and updated by _join_lines() exclusively.
    """

    def __init__(self, line, dx=None, surface_h=None, is_glacier=None):
        """ Instantiate.

        Parameters
        ----------
        line: Shapely LineString

        Properties
        ----------
        #TODO: document properties
        """

        self.line = None  # Shapely LineString
        self.head = None  # Shapely Point (first line vertex)
        self.tail = None  # Shapely Point (last line vertex)
        self.dis_on_line = None  # 1D array of each vertex's distance along the line
        self.nx = None  # int, number of vertices in the line
        self.is_glacier = None  # bool array flagging on-glacier vertices
        self.set_line(line, is_glacier=is_glacier)  # Init all previous properties

        self.order = None  # Hydrological flow level (~ Strahler number)

        # These are computed at run time by compute_centerlines
        self.flows_to = None  # pointer to a Centerline object (when available)
        self.flows_to_point = None  # point of the junction in flows_to
        self.inflows = []  # list of Centerline instances (when available)
        self.inflow_points = []  # junction points

        # Optional attrs
        self.dx = dx  # dx in pixels (assumes the line is on constant dx)
        self._surface_h = surface_h  # backing store for the surface_h property
        self._widths = None  # backing store for the widths property
        self.touches_border = None

        # Set by external funcs
        self.geometrical_widths = None  # these are kept for plotting and such
        self.apparent_mb = None  # Apparent MB, NOT weighted by width.
        self.flux = None  # Flux (kg m-2)

    def set_flows_to(self, other, check_tail=True, last_point=False):
        """Find the closest point in "other" and sets all the corresponding
        attributes. Btw, it modifies the state of "other" too.

        Parameters
        ----------
        other: an other centerline
        """

        self.flows_to = other

        if check_tail:
            # Project the point and Check that its not too close
            prdis = other.line.project(self.tail, normalized=False)
            ind_closest = np.argmin(np.abs(other.dis_on_line - prdis))
            # NOTE(review): np.asscalar was removed in numpy >= 1.23;
            # .item() is the modern equivalent
            ind_closest = np.asscalar(ind_closest)
            n = len(other.dis_on_line)
            # keep the junction away from the receiving line's endpoints;
            # the margin shrinks as the line gets shorter
            if n >= 9:
                ind_closest = np.clip(ind_closest, 4, n-5)
            elif n >= 7:
                ind_closest = np.clip(ind_closest, 3, n-4)
            elif n >= 5:
                ind_closest = np.clip(ind_closest, 2, n-3)
            p = shpg.Point(other.line.coords[int(ind_closest)])
            self.flows_to_point = p
        elif last_point:
            # flow into the receiving line's tail vertex
            self.flows_to_point = other.tail
        else:
            # just the closest
            self.flows_to_point = _projection_point(other, self.tail)
        # register this line as a tributary of "other"
        other.inflow_points.append(self.flows_to_point)
        other.inflows.append(self)

    def set_line(self, line, is_glacier=None):
        """Update the Shapely LineString coordinate.

        Parameters
        ----------
        line: a shapely.geometry.LineString
        """

        self.nx = len(line.coords)
        self.line = line
        # curvilinear distance of each vertex along the line
        dis = [line.project(shpg.Point(co)) for co in line.coords]
        self.dis_on_line = np.array(dis)
        xx, yy = line.xy
        self.head = shpg.Point(xx[0], yy[0])
        self.tail = shpg.Point(xx[-1], yy[-1])
        if is_glacier is None:
            # default: every vertex is considered on-glacier
            self.is_glacier = np.ones(self.nx).astype(np.bool)
        else:
            assert len(is_glacier) == self.nx
            self.is_glacier = np.asarray(is_glacier)

    @lazy_property
    def flows_to_indice(self):
        """Indices instead of geometry"""

        ind = None
        tofind = self.flows_to_point.coords[0]
        # linear scan for the junction vertex in the receiving line
        # NOTE(review): if no vertex matches exactly, `ind` stays None and
        # the assert fires -- flows_to_point is always a vertex of flows_to
        # by construction in set_flows_to
        for i, p in enumerate(self.flows_to.line.coords):
            if p == tofind:
                ind = i
        assert ind is not None
        return ind

    @lazy_property
    def inflow_indices(self):
        """Indices instead of geometries"""

        inds = []
        for p in self.inflow_points:
            # each junction point must match a vertex of this line exactly
            ind = [i for (i, pi) in enumerate(self.line.coords)
                   if (p.coords[0] == pi)]
            inds.append(ind[0])
        assert len(inds) == len(self.inflow_points)
        return inds

    @lazy_property
    def normals(self):
        """List of (n1, n2) normal vectors at each point.

        We use second order derivatives for smoother widths.
        """

        pcoords = np.array(self.line.coords)

        normals = []
        # First -- one-sided forward difference
        normal = np.array(pcoords[1, :] - pcoords[0, :])
        normals.append(_normalize(normal))
        # Second -- centered difference over one neighbor each side
        normal = np.array(pcoords[2, :] - pcoords[0, :])
        normals.append(_normalize(normal))
        # Others -- weighted centered difference over two neighbors each side
        for (bbef, bef, cur, aft, aaft) in zip(pcoords[:-4, :],
                                               pcoords[1:-3, :],
                                               pcoords[2:-2, :],
                                               pcoords[3:-1, :],
                                               pcoords[4:, :]):
            normal = np.array(aaft + 2*aft - 2*bef - bbef)
            normals.append(_normalize(normal))
        # One before last -- centered difference again
        normal = np.array(pcoords[-1, :] - pcoords[-3, :])
        normals.append(_normalize(normal))
        # Last -- one-sided backward difference
        normal = np.array(pcoords[-1, :] - pcoords[-2, :])
        normals.append(_normalize(normal))
        return normals

    @property
    def widths(self):
        """Needed for overriding later"""
        return self._widths

    @widths.setter
    def widths(self, value):
        self._widths = value

    @property
    def surface_h(self):
        """Needed for overriding later"""
        return self._surface_h

    @surface_h.setter
    def surface_h(self, value):
        self._surface_h = value

    def set_apparent_mb(self, mb):
        """Set the apparent mb and flux for the flowline.

        MB is expected in kg m-2 yr-1 (= mm w.e. yr-1)

        This should happen in line order, otherwise it will be wrong.
        """

        self.apparent_mb = mb

        # Add MB to current flux and sum
        # no more changes should happen after that
        self.flux += mb * self.widths * self.dx
        self.flux = np.cumsum(self.flux)

        # Add to outflow. That's why it should happen in order:
        # the accumulated flux of this line is spread over a few points of
        # the receiving line, centered on the junction, with gaussian weights
        if self.flows_to is not None:
            n = len(self.flows_to.line.coords)
            ide = self.flows_to_indice
            if n >= 9:
                gk = GAUSSIAN_KERNEL[9]
                self.flows_to.flux[ide-4:ide+5] += gk * self.flux[-1]
            elif n >= 7:
                gk = GAUSSIAN_KERNEL[7]
                self.flows_to.flux[ide-3:ide+4] += gk * self.flux[-1]
            elif n >= 5:
                gk = GAUSSIAN_KERNEL[5]
                self.flows_to.flux[ide-2:ide+3] += gk * self.flux[-1]
def _filter_heads(heads, heads_height, radius, polygon):
    """Filter the head candidates following Kienholz et al. (2014), Ch. 4.1.2

    Parameters
    ----------
    heads : list of shapely.geometry.Point instances
        The heads to filter out (in raster coordinates).
    heads_height : list
        The heads altitudes.
    radius : float
        The radius around each head to search for potential challengers
    polygon : shapely.geometry.Polygon class instance
        The glacier geometry (in raster coordinates).

    Returns
    -------
    a list of shapely.geometry.Point instances with the "bad ones" removed
    """

    heads = copy.copy(heads)
    heads_height = copy.copy(heads_height)

    i = 0
    # I think a "while" here is ok: we remove the heads forwards only
    while i < len(heads):
        head = heads[i]
        pbuffer = head.buffer(radius)
        inter_poly = pbuffer.intersection(polygon.exterior)
        # BUGFIX: geometry-type strings are compared with ==/!= below --
        # the original 'is'/'is not' identity checks only worked through
        # CPython string interning
        if inter_poly.type in ['MultiPolygon',
                               'GeometryCollection',
                               'MultiLineString']:
            # In the case of a junction point, we have to do a check
            # http://lists.gispython.org/pipermail/community/
            # 2015-January/003357.html
            if inter_poly.type == 'MultiLineString':
                inter_poly = shapely.ops.linemerge(inter_poly)

            if inter_poly.type != 'LineString':
                # keep the local polygon only
                for sub_poly in inter_poly:
                    if sub_poly.intersects(head):
                        inter_poly = sub_poly
                        break
        elif inter_poly.type == 'LineString':
            inter_poly = shpg.Polygon(np.asarray(inter_poly.xy).T)
        elif inter_poly.type == 'Polygon':
            pass
        else:
            extext = 'Geometry collection not expected: {}'.format(
                inter_poly.type)
            raise NotImplementedError(extext)

        # Find other points in radius and in polygon
        _heads = [head]
        _z = [heads_height[i]]
        for op, z in zip(heads[i+1:], heads_height[i+1:]):
            if inter_poly.intersects(op):
                _heads.append(op)
                _z.append(z)

        # If alone, go to the next point
        if len(_heads) == 1:
            i += 1
            continue

        # If not, keep the highest
        _w = np.argmax(_z)
        for head in _heads:
            if not (head is _heads[_w]):
                heads_height = np.delete(heads_height, heads.index(head))
                heads.remove(head)

    return heads, heads_height
def _filter_lines(lines, heads, k, r):
    """Filter the centerline candidates by length.

    Kienholz et al. (2014), Ch. 4.3.1

    Parameters
    ----------
    lines : list of shapely.geometry.LineString instances
        The lines to filter out (in raster coordinates).
    heads : list of shapely.geometry.Point instances
        The heads corresponding to the lines.
    k : float
        A buffer (in raster coordinates) to cut around the selected lines
    r : float
        The lines shorter than r will be filtered out.

    Returns
    -------
    (lines, heads) a list of the new lines and corresponding heads
    """

    olines = []
    oheads = []
    ilines = copy.copy(lines)

    while len(ilines) > 0:  # loop as long as we haven't filtered all lines
        if len(olines) > 0:  # enter this after the first step only
            toremove = lastline.buffer(k)  # buffer centerlines the last line
            tokeep = []
            for l in ilines:
                # loop over all remaining lines and compute their diff
                # to the last longest line
                diff = l.difference(toremove)
                # BUGFIX: '==' instead of the original 'is' identity check
                # against a str literal (only worked via CPython interning)
                if diff.type == 'MultiLineString':
                    # Remove the lines that have no head
                    diff = list(diff)
                    for il in diff:
                        hashead = False
                        for h in heads:
                            if il.intersects(h):
                                hashead = True
                                diff = il
                                break
                        if hashead:
                            break
                        else:
                            raise RuntimeError('Head not found')
                # keep this head line only if it's long enough
                if diff.length > r:
                    # Fun fact. The heads can be cut by the buffer too
                    diff = shpg.LineString(l.coords[0:2] + diff.coords[2:])
                    tokeep.append(diff)
            ilines = tokeep

        # it could happen that we're done at this point
        if len(ilines) == 0:
            break

        # Otherwise keep the longest one and continue
        lengths = np.array([])
        for l in ilines:
            lengths = np.append(lengths, l.length)
        l = ilines[np.argmax(lengths)]
        ilines.remove(l)
        if len(olines) > 0:
            # the cutted line's last point is not guaranteed
            # to on straight coordinates. Remove it
            olines.append(shpg.LineString(np.asarray(l.xy)[:, 0:-1].T))
        else:
            olines.append(l)
        lastline = l

    # add the corresponding head to each line
    for l in olines:
        for h in heads:
            if l.intersects(h):
                oheads.append(h)
                break

    return olines, oheads
def _filter_lines_slope(lines, topo, gdir):
    """Filter the centerline candidates by slope: if they go up, remove

    Kienholz et al. (2014), Ch. 4.3.1

    Parameters
    ----------
    lines : list of shapely.geometry.LineString instances
        The lines to filter out (in raster coordinates).
    topo : the glacier topogaphy
    gdir : the glacier directory for simplicity

    Returns
    -------
    the filtered list of lines
    """

    # discretization / smoothing parameters shared with initialize_flowlines
    flowline_dx = cfg.PARAMS['flowline_dx']
    n_junction_pix = int(cfg.PARAMS['flowline_junction_pix'])
    smooth_window = cfg.PARAMS['flowline_height_smooth']

    # Here we use a conservative value
    slope_threshold = np.deg2rad(cfg.PARAMS['min_slope'])

    # Bilinear interpolation of the topography
    # Geometries coordinates are in "pixel centered" convention, i.e
    # (0, 0) is also located in the center of the pixel
    grid_axes = (np.arange(0, gdir.grid.ny-0.1, 1),
                 np.arange(0, gdir.grid.nx-0.1, 1))
    interpolator = RegularGridInterpolator(grid_axes, topo)

    # the main (first) line is always kept
    kept = [lines[0]]
    for candidate in lines[1:]:
        # The code below mimicks what initialize_flowlines will do
        # this is a bit smelly but necessary
        # For tributaries, remove the tail
        resampled = line_interpol(candidate, flowline_dx)[0:-n_junction_pix]
        cx, cy = shpg.LineString(resampled).xy

        # Interpolate heights along the resampled line and smooth them
        heights = gaussian_filter1d(interpolator((cy, cx)), smooth_window)

        # Finally slope (positive means going down-glacier)
        slope = np.arctan(-np.gradient(heights, flowline_dx*gdir.grid.dx))

        # arbitrary threshold with which we filter the lines, otherwise bye bye
        if np.sum(slope >= slope_threshold) >= 5:
            kept.append(candidate)

    return kept
def _projection_point(centerline, point):
    """Projects a point on a line and returns the closest vertex of the line.

    The result is guaranteed to be an existing vertex of the centerline.

    Parameters
    ----------
    centerline : Centerline instance
    point : Shapely Point geometry

    Returns
    -------
    flow_point: the Shapely Point vertex closest to the projection
    """

    # curvilinear distance of the projection of `point` along the line
    prdis = centerline.line.project(point, normalized=False)
    # vertex whose distance along the line is closest to the projection
    # BUGFIX: int() replaces np.asscalar, which was removed in numpy 1.23
    ind_closest = int(np.argmin(np.abs(centerline.dis_on_line - prdis)))
    flow_point = shpg.Point(centerline.line.coords[ind_closest])
    return flow_point
def _join_lines(lines):
    """Re-joins the lines that have been cut by _filter_lines

    Compute the rooting scheme.

    Parameters
    ----------
    lines: list of shapely lines instances

    Returns
    -------
    Centerline instances, updated with flow routing properties
    """
    # Convert to Centerline objects, in reversed order
    olines = [Centerline(l) for l in lines[::-1]]
    nl = len(olines)
    if nl == 1:
        return olines
    # per construction the line cannot flow in a line placed before in the list
    for i, l in enumerate(olines):
        # tail of the current line
        last_point = shpg.Point(*l.line.coords[-1])
        # candidate receivers: only the lines placed after this one
        totest = olines[i+1:]
        dis = [last_point.distance(t.line) for t in totest]
        flow_to = totest[np.argmin(dis)]
        # vertex on the receiving line closest to our tail
        flow_point = _projection_point(flow_to, last_point)
        # Interpolate to finish the line, brute force:
        # we interpolate 20 points, round them, remove consecutive duplicates
        endline = shpg.LineString([last_point, flow_point])
        endline = shpg.LineString([endline.interpolate(x, normalized=True)
                                   for x in np.linspace(0., 1., num=20)])
        # we keep all coords without the first AND the last
        grouped = groupby(map(tuple, np.rint(endline.coords)))
        endline = [x[0] for x in grouped][1:-1]
        # We're done: extend the line down to the receiver and register it
        l.set_line(shpg.LineString(l.line.coords[:] + endline))
        l.set_flows_to(flow_to, check_tail=False)
        # The last one has nowhere to flow
        if i+2 == nl:
            break
    return olines[::-1]
def _line_order(line):
"""Recursive search for the line's hydrological level.
Parameters
----------
line: a Centerline instance
Returns
-------
The line;s order
"""
if len(line.inflows) == 0:
return 0
else:
levels = [_line_order(s) for s in line.inflows]
return np.max(levels) + 1
def _make_costgrid(mask, ext, z):
    """Computes a costgrid following Kienholz et al. (2014) Eq. (2)

    Parameters
    ----------
    mask : numpy.array
        The glacier mask.
    ext : numpy.array
        The glacier boundaries' mask.
    z : numpy.array
        The terrain height.

    Returns
    -------
    numpy.array of the costgrid, np.inf outside of the glacier mask
    """
    # Distance to the glacier boundary (in pixels), undefined outside.
    # Note: the np.NaN / np.Inf aliases were removed in NumPy 2.0,
    # use np.nan / np.inf instead.
    dis = np.where(mask, distance_transform_edt(mask), np.nan)
    z = np.where(mask, z, np.nan)
    dmax = np.nanmax(dis)
    zmax = np.nanmax(z)
    zmin = np.nanmin(z)
    cost = ((dmax - dis) / dmax * cfg.PARAMS['f1']) ** cfg.PARAMS['a'] + \
           ((z - zmin) / (zmax - zmin) * cfg.PARAMS['f2']) ** cfg.PARAMS['b']
    # This is new: we make the cost to go over boundaries
    # arbitrary high to avoid the lines to jump over adjacent boundaries
    cost[np.where(ext)] = np.nanmax(cost[np.where(ext)]) * 50
    return np.where(mask, cost, np.inf)
def _get_terminus_coord(gdir, ext_yx, zoutline):
    """This finds the terminus coordinate of the glacier.

    There is a special case for marine terminating glaciers: instead of
    the outline minimum, the middle of the lowest-altitude area is used.

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    ext_yx : the (y, x) coordinates of the glacier outline
    zoutline : ndarray of the altitudes along the outline

    Returns
    -------
    the (y, x) coordinates of the terminus, as np.int64
    """
    perc = cfg.PARAMS['terminus_search_percentile']
    deltah = cfg.PARAMS['terminus_search_altitude_range']
    if gdir.is_tidewater and (perc > 0):
        # There is calving
        # find the lowest percentile
        plow = np.percentile(zoutline, perc).astype(np.int64)
        # the minimum altitude in the glacier
        mini = np.min(zoutline)
        # indices of where in the outline the altitude is lower than the qth
        # percentile and lower than 100m higher, than the minimum altitude
        ind = np.where((zoutline < plow) & (zoutline < (mini + deltah)))[0]
        # We take the middle of this area.
        # Note: the np.int alias was removed in NumPy 1.24 - use int.
        ind_term = ind[np.round(len(ind) / 2.).astype(int)]
    else:
        # easy: just the minimum
        ind_term = np.argmin(zoutline)
    return np.asarray(ext_yx)[:, ind_term].astype(np.int64)
def _normalize(n):
"""Computes the normals of a vector n.
Returns
-------
the two normals (n1, n2)
"""
nn = n / np.sqrt(np.sum(n*n))
n1 = np.array([-nn[1], nn[0]])
n2 = np.array([nn[1], -nn[0]])
return n1, n2
@entity_task(log, writes=['centerlines', 'gridded_data'])
@divide_task(log, add_0=False)
def compute_centerlines(gdir, div_id=None):
    """Compute the centerlines following Kienholz et al., (2014).

    Workflow: find head candidates on the glacier outline, build a cost
    grid over the glacier, route from each head to the terminus with a
    least-cost path, then filter and re-join the resulting lines.
    They are then sorted according to the modified Strahler number:
    http://en.wikipedia.org/wiki/Strahler_number

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    div_id : int, optional
        the glacier divide to process (set by the ``divide_task`` decorator)
    """
    # Params
    single_fl = not cfg.PARAMS['use_multiple_flowlines']
    do_filter_slope = cfg.PARAMS['filter_min_slope']
    # per-glacier override: force a single flowline for listed RGI ids
    if 'force_one_flowline' in cfg.PARAMS:
        if gdir.rgi_id in cfg.PARAMS['force_one_flowline']:
            single_fl = True
    # open
    geom = gdir.read_pickle('geometries', div_id=div_id)
    grids_file = gdir.get_filepath('gridded_data', div_id=div_id)
    with netCDF4.Dataset(grids_file) as nc:
        # Variables
        glacier_mask = nc.variables['glacier_mask'][:]
        glacier_ext = nc.variables['glacier_ext'][:]
        topo = nc.variables['topo_smoothed'][:]
    poly_pix = geom['polygon_pix']
    # Find for local maximas on the outline
    x, y = tuple2int(poly_pix.exterior.xy)
    ext_yx = tuple(reversed(poly_pix.exterior.xy))
    zoutline = topo[y[:-1], x[:-1]]  # last point is first point
    # Size of the half window to use to look for local maximas
    maxorder = np.rint(cfg.PARAMS['localmax_window'] / gdir.grid.dx)
    maxorder = np.clip(maxorder, 5., np.rint((len(zoutline) / 5.)))
    heads_idx = scipy.signal.argrelmax(zoutline, mode='wrap',
                                       order=maxorder.astype(np.int64))
    if single_fl or len(heads_idx[0]) <= 1:
        # small glaciers with one or less heads: take the absolute max
        heads_idx = (np.atleast_1d(np.argmax(zoutline)),)
    # Remove the heads that are too low (below the 33rd altitude percentile)
    zglacier = topo[np.where(glacier_mask)]
    head_threshold = np.percentile(zglacier, (1./3.)*100)
    heads_idx = heads_idx[0][np.where(zoutline[heads_idx] > head_threshold)]
    heads = np.asarray(ext_yx)[:, heads_idx]
    heads_z = zoutline[heads_idx]
    # careful, the coords are in y, x order!
    heads = [shpg.Point(x, y) for y, x in zip(heads[0, :],
                                              heads[1, :])]
    # get radius of the buffer according to Kienholz eq. (1)
    radius = cfg.PARAMS['q1'] * geom['polygon_area'] + cfg.PARAMS['q2']
    radius = np.clip(radius, 0, cfg.PARAMS['rmax'])
    radius /= gdir.grid.dx  # in raster coordinates
    # Plus our criteria, quite usefull to remove short lines:
    radius += cfg.PARAMS['flowline_junction_pix'] * cfg.PARAMS['flowline_dx']
    log.debug('%s: radius in raster coordinates: %.2f',
              gdir.rgi_id, radius)
    # OK. Filter and see.
    log.debug('%s: number of heads before radius filter: %d',
              gdir.rgi_id, len(heads))
    heads, heads_z = _filter_heads(heads, heads_z, radius, poly_pix)
    log.debug('%s: number of heads after radius filter: %d',
              gdir.rgi_id, len(heads))
    # Cost array
    costgrid = _make_costgrid(glacier_mask, glacier_ext, topo)
    # Terminus
    t_coord = _get_terminus_coord(gdir, ext_yx, zoutline)
    # Compute the routes (least-cost path from each head to the terminus)
    lines = []
    for h in heads:
        h_coord = np.asarray(h.xy)[::-1].astype(np.int64)
        indices, _ = route_through_array(costgrid, h_coord, t_coord)
        lines.append(shpg.LineString(np.array(indices)[:, [1, 0]]))
    log.debug('%s: computed the routes', gdir.rgi_id)
    # Filter the shortest lines out
    dx_cls = cfg.PARAMS['flowline_dx']
    radius = cfg.PARAMS['flowline_junction_pix'] * dx_cls
    radius += 6 * dx_cls
    olines, _ = _filter_lines(lines, heads, cfg.PARAMS['kbuffer'], radius)
    log.debug('%s: number of heads after lines filter: %d',
              gdir.rgi_id, len(olines))
    # Filter the lines which are going up instead of down
    if do_filter_slope:
        olines = _filter_lines_slope(olines, topo, gdir)
        log.debug('%s: number of heads after slope filter: %d',
                  gdir.rgi_id, len(olines))
    # And rejoin the cutted tails
    olines = _join_lines(olines)
    # Adds the line level
    for cl in olines:
        cl.order = _line_order(cl)
    # And sort them per order !!! several downstream tasks rely on this
    cls = []
    for i in np.argsort([cl.order for cl in olines]):
        cls.append(olines[i])
    # Final check
    if len(cls) == 0:
        raise RuntimeError('{} : no centerline found!'.format(gdir.rgi_id))
    # Write the data
    gdir.write_pickle(cls, 'centerlines', div_id=div_id)
    # Netcdf: also store the cost grid for diagnostics
    with netCDF4.Dataset(grids_file, 'a') as nc:
        if 'cost_grid' in nc.variables:
            # Overwrite
            nc.variables['cost_grid'][:] = costgrid
        else:
            # Create
            v = nc.createVariable('cost_grid', 'f4', ('y', 'x', ), zlib=True)
            v.units = '-'
            v.long_name = 'Centerlines cost grid'
            v[:] = costgrid
@entity_task(log, writes=['downstream_lines', 'major_divide'])
def compute_downstream_lines(gdir):
    """Compute the lines continuing the glacier (one per divide).

    The idea is simple: starting from the glacier tail, compute all the routes
    to all local minimas found at the domain edge. The cheapest is "The One".

    The task also determines a so-called "major flowline" which is
    simply the flowline starting at the lowest point on the glacier. Other
    downstream lines might either flow in the major flowline, another
    downstream or out of the domain.

    The rest of the job (merging all centerlines + downstreams into
    one single glacier) is realized by
    :py:func:`~oggm.tasks.init_present_time_glacier`).

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    """
    with netCDF4.Dataset(gdir.get_filepath('gridded_data', div_id=0)) as nc:
        topo = nc.variables['topo_smoothed'][:]
        glacier_ext = nc.variables['glacier_ext'][:]
    # Look for the starting points (the tails of the main centerlines)
    heads = []
    head_alts = []
    div_ids = list(gdir.divide_ids)
    for div_id in div_ids:
        p = gdir.read_pickle('centerlines', div_id=div_id)[-1].tail
        head_alts.append(topo[int(p.y), int(p.x)])
        heads.append((int(p.y), int(p.x)))
    # Find the lowest first
    major_id = np.argmin(head_alts)
    # For tidewater glaciers no need for all this
    # I actually think tidewater glaciers can't be divided anyway
    if gdir.is_tidewater:
        gdir.write_pickle(div_ids[major_id], 'major_divide', div_id=0)
        return
    # Make going up very costly.
    # Note: np.Inf was removed in NumPy 2.0 - use np.inf below.
    topo = topo**4
    # We add an artificial cost as distance from the glacier
    # This should not have too much influence on mountain glaciers but helps
    # for tidewater-candidates
    topo = topo + distance_transform_edt(1 - glacier_ext)
    # Make going up very costly (the cost is raised a second time on purpose)
    topo = topo**2
    # Variables we gonna need: the outer side of the domain
    xmesh, ymesh = np.meshgrid(np.arange(0, gdir.grid.nx, 1, dtype=np.int64),
                               np.arange(0, gdir.grid.ny, 1, dtype=np.int64))
    _h = [topo[:, 0], topo[0, :], topo[:, -1], topo[-1, :]]
    _x = [xmesh[:, 0], xmesh[0, :], xmesh[:, -1], xmesh[-1, :]]
    _y = [ymesh[:, 0], ymesh[0, :], ymesh[:, -1], ymesh[-1, :]]
    # For all heads, find their way out of the domain
    lines = []
    for head in heads:
        min_cost = np.inf
        min_len = np.inf
        line = None
        for h, x, y in zip(_h, _x, _y):
            # candidate exit points: local altitude minima on this border
            ids = scipy.signal.argrelmin(h, order=10, mode='wrap')
            if np.all(h == 0):
                # Test every fifth (we don't really care)
                ids = [np.arange(0, len(h), 5)]
            for i in ids[0]:
                lids, cost = route_through_array(topo, head, (y[i], x[i]))
                # keep the cheapest route; ties broken by shortest route
                if ((cost < min_cost) or
                        ((cost == min_cost) and (len(lids) < min_len))):
                    min_cost = cost
                    min_len = len(lids)
                    line = shpg.LineString(np.array(lids)[:, [1, 0]])
        if line is None:
            raise RuntimeError('Downstream line not found')
        lines.append(line)
    # If we have divides some lines can merge. We use geopandas to group them
    gdf = gpd.GeoDataFrame(geometry=lines)
    gdf['div_id'] = div_ids
    union = gdf.buffer(cfg.PARAMS['kbuffer']).unary_union
    if type(union) is not shpg.MultiPolygon:
        assert type(union) is shpg.Polygon
        union = [union]
    # See which lines belong to each group
    odf = gdf.copy()
    odf['is_major'] = False
    odf['group'] = -1
    for i, poly in enumerate(union):
        inter = gdf.intersects(poly)
        odf.loc[inter, 'group'] = i
        group = gdf.loc[inter].copy()
        # sort them by length, shorter is major
        group['length'] = group.length
        group = group.sort_values('length')
        odf.loc[group.iloc[[0]].index, 'is_major'] = True
        odf.loc[group.iloc[1:].index, 'is_major'] = False
    # If needed we interrupt the route at the glacier boundary
    geom = gdir.read_pickle('geometries', div_id=0)['polygon_pix']
    odf_div = odf.loc[~odf.is_major]
    for i, ent in odf_div.iterrows():
        line = ent.geometry.difference(geom)
        if type(line) is shpg.MultiLineString:
            # keep the longest piece only
            lens = [l.length for l in line]
            line = line[np.argmax(lens)]
        assert type(line) is shpg.LineString
        odf.loc[i, 'geometry'] = line
    # Write the intermediate data
    major_divide = div_ids[major_id]
    gdir.write_pickle(major_divide, 'major_divide', div_id=0)
    gdir.write_pickle(odf, 'downstream_lines', div_id=0)
    # Ok now merge all this together in a big, nice glacier
    odf = odf.set_index('div_id')
    major_group = odf.loc[major_divide].group
    # We loop over the groups of downstream
    radius = cfg.PARAMS['flowline_junction_pix'] * cfg.PARAMS['flowline_dx']
    radius += 6 * cfg.PARAMS['flowline_dx']
    final_lines = []
    for group in np.unique(np.sort(odf.group)):
        _odf = odf.loc[odf.group == group]
        # Read all divides and add the downstream to the major line
        lines = []
        heads = []
        for div_id in np.unique(np.sort(_odf.index)):
            cls = gdir.read_pickle('centerlines', div_id=div_id)
            dl = _odf.loc[div_id].geometry
            for fl in cls:
                line = fl.line
                if fl is cls[-1]:
                    line = shpg.LineString(list(line.coords) + dl.coords[1:])
                lines.append(line)
                heads.append(fl.head)
        # Filter the shortest lines out
        olines, _ = _filter_lines(lines, heads, cfg.PARAMS['kbuffer'], radius)
        # And rejoin the cutted tails
        olines = _join_lines(olines)
        final_lines.append(olines)
    # The lines are sorted by length now
    maj_lines = final_lines.pop(major_group)
    flow_to = maj_lines[0]
    for int_lines in final_lines:
        l = int_lines[0]
        l.set_flows_to(flow_to, check_tail=False, last_point=True)
    # Ok, merge
    olines = maj_lines
    for fl in final_lines:
        olines += fl
    # Adds the line level
    for cl in olines:
        cl.order = _line_order(cl)
    # And sort them per order !!! several downstream tasks rely on this
    cls = []
    for i in np.argsort([cl.order for cl in olines]):
        cls.append(olines[i])
    # Final check
    if len(cls) == 0:
        raise RuntimeError('{} : problem by downstream!'.format(gdir.rgi_id))
    # Write the data
    gdir.write_pickle(cls, 'centerlines', div_id=0)
def _approx_parabola(x, y, y0=0):
"""Fit a parabola to the equation y = a x**2 + y0
Parameters
----------
x : array
the x axis variabls
y : array
the dependant variable
y0 : float, optional
the intercept
Returns
-------
[a, 0, y0]
"""
# y=ax**2+y0
x, y = np.array(x), np.array(y)
a = np.sum(x ** 2 * (y - y0)) / np.sum(x ** 4)
return np.array([a, 0, y0])
def _parabola_error(x, y, f):
# f is an array represents polynom
x, y = np.array(x), np.array(y)
with np.errstate(divide='ignore', invalid='ignore'):
out = sum(abs((np.polyval(f, x) - y) / y)) / len(x)
return out
class HashablePoint(shpg.Point):
    """A shapely Point usable in sets and as dict key (hashed by coords)."""

    def __hash__(self):
        return hash((self.x, self.y))
def _parabolic_bed_from_topo(gdir, idl, interpolator):
    """Fit a parabolic bedshape to the topography along the line's normals.

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    idl : the flowline providing the points, normals and is_glacier mask
    interpolator : RegularGridInterpolator over the (smoothed) topography

    Returns
    -------
    ndarray with one parabola coefficient per point on ``idl``, gaps
    interpolated and the series smoothed.
    """
    # Volume area scaling formula for the probable ice thickness
    h_mean = 0.034 * gdir.rgi_area_km2**0.375 * 1000
    gnx, gny = gdir.grid.nx, gdir.grid.ny
    # Far Factor
    r = 40
    # number of points
    cs_n = 20
    # normals
    ns = [i[0] for i in idl.normals]
    cs = []
    donot_compute = []
    # Build the cross-sections (points sampled along each normal)
    for pcoords, n, isgl in zip(idl.line.coords, ns, idl.is_glacier):
        xi, yi = pcoords
        vx, vy = n
        modul = np.sqrt(vx ** 2 + vy ** 2)
        ci = []
        _isborder = False
        for ro in np.linspace(0, r / 2.0, cs_n):
            t = ro / modul
            cp1 = HashablePoint(xi + t * vx, yi + t * vy)
            cp2 = HashablePoint(xi - t * vx, yi - t * vy)
            # check if out of the frame
            if not (0 < cp2.y < gny - 1) or \
                    not (0 < cp2.x < gnx - 1) or \
                    not (0 < cp1.y < gny - 1) or \
                    not (0 < cp1.x < gnx - 1):
                _isborder = True
            ci.append((cp1, ro))
            ci.append((cp2, -ro))
        ci = list(set(ci))
        cs.append(ci)
        donot_compute.append(_isborder or isgl)
    bed = []
    # Note: the np.NaN alias was removed in NumPy 2.0 - use np.nan.
    for ic, (cc, dontcomp) in enumerate(zip(cs, donot_compute)):
        if dontcomp:
            bed.append(np.nan)
            continue
        z = []
        ro = []
        for i in cc:
            z.append(interpolator((i[0].y, i[0].x)))
            ro.append(i[1])
        aso = np.argsort(ro)
        ro, z = np.array(ro)[aso], np.array(z)[aso]
        # find top of parabola
        roHead = ro[np.argmin(z)]
        zero = np.argmin(z)  # it is index of roHead/zHead
        zHead = np.amin(z)
        # distance of each cross-section point to the expected ice surface
        dsts = abs(h_mean + zHead - z)
        # find local minima in set of distances
        extr = scipy.signal.argrelextrema(dsts, np.less, mode='wrap')
        if len(extr[0]) == 0:
            bed.append(np.nan)
            continue
        # from local minima find that with the minimum |x|
        idx = extr[0][np.argmin(abs(ro[extr]))]
        # x<0 => x=0
        # (|x|+x)/2
        roN = ro[int((abs(zero - abs(zero - idx)) + zero - abs(
            zero - idx)) / 2):zero + abs(zero - idx) + 1]
        zN = z[int((abs(zero - abs(zero - idx)) + zero - abs(
            zero - idx)) / 2):zero + abs(zero - idx) + 1]
        roNx = roN - roHead
        # zN=zN-zHead#
        p = _approx_parabola(roNx, zN, y0=zHead)
        # shift parabola to the ds-line
        p2 = np.copy(p)
        p2[2] = z[ro == 0]
        err = _parabola_error(roN, zN, p2) * 100
        # The original implementation of @anton-ub stored all three parabola
        # params. We just keep the one important here for now
        if err < 1.5:
            bed.append(p2[0])
        else:
            bed.append(np.nan)
    bed = np.asarray(bed)
    assert len(bed) == idl.nx
    pvalid = np.sum(np.isfinite(bed)) / len(bed) * 100
    log.debug('%s: percentage of valid parabolas total: %d',
              gdir.rgi_id, int(pvalid))
    bedg = bed[~ idl.is_glacier]
    if len(bedg) > 0:
        pvalid = np.sum(np.isfinite(bedg)) / len(bedg) * 100
        log.debug('%s: percentage of valid parabolas out glacier: %d',
                  gdir.rgi_id, int(pvalid))
        if pvalid < 10:
            log.warning('{}: {}% of valid bedshapes.'.format(gdir.rgi_id,
                                                             int(pvalid)))
    # interpolation, filling the gaps
    default = cfg.PARAMS['default_parabolic_bedshape']
    bed_int = interp_nans(bed, default=default)
    # Scale for dx (we worked in grid coords but need meters)
    bed_int = bed_int / gdir.grid.dx**2
    # Smoothing
    bed_ma = pdSeries(bed_int)
    bed_ma = bed_ma.rolling(window=5, center=True, min_periods=1).mean()
    return bed_ma.values
@entity_task(log, writes=['downstream_bed'])
def compute_downstream_bedshape(gdir):
    """The bedshape obtained by fitting a parabola to the line's normals.

    Parameters
    ----------
    gdir : oggm.GlacierDirectory
    """
    # get the entire glacier only
    if gdir.is_tidewater:
        cls = gdir.read_pickle('inversion_flowlines', div_id=1)
    else:
        cls = gdir.read_pickle('inversion_flowlines', div_id=0)
    # Topography
    with netCDF4.Dataset(gdir.get_filepath('gridded_data', div_id=0)) as nc:
        topo = nc.variables['topo_smoothed'][:]
        x = nc.variables['x'][:]
        y = nc.variables['y'][:]
    # Bilinear interpolation in pixel-centered grid coordinates
    xy = (np.arange(0, len(y)-0.1, 1), np.arange(0, len(x)-0.1, 1))
    interpolator = RegularGridInterpolator(xy, topo)
    bedshapes = []
    for cl in cls:
        # one parabolic bedshape value per point on the flowline
        bs = _parabolic_bed_from_topo(gdir, cl, interpolator)
        assert len(bs) == cl.nx
        assert np.all(np.isfinite(bs))
        bedshapes.append(bs)
    # write output
    gdir.write_pickle(bedshapes, 'downstream_bed')
| jlandmann/oggm | oggm/core/preprocessing/centerlines.py | Python | gpl-3.0 | 37,648 | [
"NetCDF"
] | 6cb13cf0a77baf42456b1cc278c61c49cd7493e81bbba2cb5941fd88d95b37f0 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
import pandas as pd
from scipy.linalg import svd, lstsq
from ._ordination_results import OrdinationResults
from ._utils import corr, svd_rank, scale
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def cca(y, x, scaling=1):
    r"""Compute canonical (also known as constrained) correspondence
    analysis.

    Canonical (or constrained) correspondence analysis is a
    multivariate ordination technique. It appeared in community
    ecology [1]_ and relates community composition to the variation in
    the environment (or in other factors). It works from data on
    abundances or counts of samples and constraints variables,
    and outputs ordination axes that maximize sample separation among species.

    It is better suited to extract the niches of taxa than linear
    multivariate methods because it assumes unimodal response curves
    (habitat preferences are often unimodal functions of habitat
    variables [2]_).

    As more environmental variables are added, the result gets more
    similar to unconstrained ordination, so only the variables that
    are deemed explanatory should be included in the analysis.

    Parameters
    ----------
    y : DataFrame
        Samples by features table (n, m)
    x : DataFrame
        Samples by constraints table (n, q)
    scaling : int, {1, 2}, optional
        Scaling type 1 maintains :math:`\chi^2` distances between rows.
        Scaling type 2 preserves :math:`\chi^2` distances between columns.
        For a more detailed explanation of the interpretation, check Legendre &
        Legendre 1998, section 9.4.3.

    Returns
    -------
    OrdinationResults
        Object that stores the cca results.

    Raises
    ------
    ValueError
        If `x` and `y` have different number of rows
        If `y` contains negative values
        If `y` contains a row of only 0's.
    NotImplementedError
        If scaling is not 1 or 2.

    See Also
    --------
    ca
    rda
    OrdinationResults

    Notes
    -----
    The algorithm is based on [3]_, \S 11.2, and is expected to give
    the same results as ``cca(y, x)`` in R's package vegan, except
    that this implementation won't drop constraining variables due to
    perfect collinearity: the user needs to choose which ones to
    input.

    Canonical *correspondence* analysis shouldn't be confused with
    canonical *correlation* analysis (CCorA, but sometimes called
    CCA), a different technique to search for multivariate
    relationships between two datasets. Canonical correlation analysis
    is a statistical tool that, given two vectors of random variables,
    finds linear combinations that have maximum correlation with each
    other. In some sense, it assumes linear responses of "species" to
    "environmental variables" and is not well suited to analyze
    ecological data.

    References
    ----------
    .. [1] Cajo J. F. Ter Braak, "Canonical Correspondence Analysis: A
        New Eigenvector Technique for Multivariate Direct Gradient
        Analysis", Ecology 67.5 (1986), pp. 1167-1179.
    .. [2] Cajo J.F. Braak and Piet F.M. Verdonschot, "Canonical
        correspondence analysis and related multivariate methods in
        aquatic ecology", Aquatic Sciences 57.3 (1995), pp. 255-289.
    .. [3] Legendre P. and Legendre L. 1998. Numerical
        Ecology. Elsevier, Amsterdam.
    """
    # `DataFrame.as_matrix` was removed in pandas 1.0 - use np.asarray
    Y = np.asarray(y)
    X = np.asarray(x)

    # Perform parameter sanity checks
    if X.shape[0] != Y.shape[0]:
        # Report each table's own row count (they were swapped before)
        raise ValueError("The samples by features table 'y' and the samples by"
                         " constraints table 'x' must have the same number of "
                         " rows. 'y': {0} 'x': {1}".format(Y.shape[0],
                                                           X.shape[0]))
    if Y.min() < 0:
        raise ValueError(
            "The samples by features table 'y' must be nonnegative")
    row_max = Y.max(axis=1)
    if np.any(row_max <= 0):
        # Or else the lstsq call to compute Y_hat breaks
        raise ValueError("The samples by features table 'y' cannot contain a "
                         "row with only 0's")
    if scaling not in {1, 2}:
        raise NotImplementedError(
            "Scaling {0} not implemented.".format(scaling))

    # Step 1 (similar to Pearson chi-square statistic)
    grand_total = Y.sum()
    Q = Y / grand_total  # Relative frequencies of Y (contingency table)

    # Features and sample weights (marginal totals)
    column_marginals = Q.sum(axis=0)
    row_marginals = Q.sum(axis=1)

    # Formula 9.32 in Lagrange & Lagrange (1998). Notice that it's an
    # scaled version of the contribution of each cell towards Pearson
    # chi-square statistic.
    expected = np.outer(row_marginals, column_marginals)
    Q_bar = (Q - expected) / np.sqrt(expected)

    # Step 2. Standardize columns of X with respect to sample weights,
    # using the maximum likelihood variance estimator (Legendre &
    # Legendre 1998, p. 595)
    X = scale(X, weights=row_marginals, ddof=0)

    # Step 3. Weighted multiple regression.
    X_weighted = row_marginals[:, None]**0.5 * X
    B, _, rank_lstsq, _ = lstsq(X_weighted, Q_bar)
    Y_hat = X_weighted.dot(B)
    Y_res = Q_bar - Y_hat

    # Step 4. Eigenvalue decomposition
    u, s, vt = svd(Y_hat, full_matrices=False)
    rank = svd_rank(Y_hat.shape, s)
    s = s[:rank]
    u = u[:, :rank]
    vt = vt[:rank]
    U = vt.T

    # Step 5. Eq. 9.38
    U_hat = Q_bar.dot(U) * s**-1

    # Residuals analysis
    u_res, s_res, vt_res = svd(Y_res, full_matrices=False)
    rank = svd_rank(Y_res.shape, s_res)
    s_res = s_res[:rank]
    u_res = u_res[:, :rank]
    vt_res = vt_res[:rank]
    U_res = vt_res.T
    U_hat_res = Y_res.dot(U_res) * s_res**-1

    eigenvalues = np.r_[s, s_res]**2

    # Scalings (p. 596 L&L 1998):
    # feature scores, scaling 1
    V = (column_marginals**-0.5)[:, None] * U
    # sample scores, scaling 2
    V_hat = (row_marginals**-0.5)[:, None] * U_hat
    # sample scores, scaling 1
    F = V_hat * s
    # feature scores, scaling 2
    F_hat = V * s

    # Sample scores which are linear combinations of constraint
    # variables
    Z_scaling1 = ((row_marginals**-0.5)[:, None] *
                  Y_hat.dot(U))
    Z_scaling2 = Z_scaling1 * s**-1

    # Feature residual scores, scaling 1
    V_res = (column_marginals**-0.5)[:, None] * U_res
    # Sample residual scores, scaling 2
    V_hat_res = (row_marginals**-0.5)[:, None] * U_hat_res
    # Sample residual scores, scaling 1
    F_res = V_hat_res * s_res
    # Feature residual scores, scaling 2
    F_hat_res = V_res * s_res

    eigvals = eigenvalues
    if scaling == 1:
        features_scores = np.hstack((V, V_res))
        sample_scores = np.hstack((F, F_res))
        sample_constraints = np.hstack((Z_scaling1, F_res))
    elif scaling == 2:
        features_scores = np.hstack((F_hat, F_hat_res))
        sample_scores = np.hstack((V_hat, V_hat_res))
        sample_constraints = np.hstack((Z_scaling2, V_hat_res))

    biplot_scores = corr(X_weighted, u)

    pc_ids = ['CCA%d' % (i+1) for i in range(len(eigenvalues))]
    sample_ids = y.index
    feature_ids = y.columns
    eigvals = pd.Series(eigenvalues, index=pc_ids)
    samples = pd.DataFrame(sample_scores,
                           columns=pc_ids, index=sample_ids)
    features = pd.DataFrame(features_scores,
                            columns=pc_ids, index=feature_ids)

    biplot_scores = pd.DataFrame(biplot_scores,
                                 index=x.columns,
                                 columns=pc_ids[:biplot_scores.shape[1]])
    sample_constraints = pd.DataFrame(sample_constraints,
                                      index=sample_ids, columns=pc_ids)

    return OrdinationResults(
        "CCA", "Canonical Correspondence Analysis", eigvals, samples,
        features=features, biplot_scores=biplot_scores,
        sample_constraints=sample_constraints,
        proportion_explained=eigvals / eigvals.sum())
| kdmurray91/scikit-bio | skbio/stats/ordination/_canonical_correspondence_analysis.py | Python | bsd-3-clause | 8,409 | [
"scikit-bio"
] | 4227d3dba4058aeb94c175032d71ee41c53c9b8a39ab19c74c19b9a0a06658a0 |
""" POOL XML Catalog Class
This class handles simple XML-based File Catalog following the
POOL project schema. It presents a DIRAC generic File Catalog interface
although not complete and with several extensions
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import os
import xml.dom.minidom
from DIRAC import S_OK, S_ERROR
class PoolFile(object):
    """
    A Pool XML File Catalog entry: one GUID with its physical (PFN)
    and logical (LFN) file names.

    @author A.Tsaregorodtsev
    """

    def __init__(self, dom=None):
        """Build an entry, optionally from a POOL <File> DOM element.

        :param dom: xml.dom element of a <File> node; when None an empty
                    entry is created
        """
        self.guid = ""
        # list of (pfn, filetype, se) tuples
        self.pfns = []
        # list of logical file names
        self.lfns = []

        if dom:
            self.guid = dom.getAttribute("ID")
            physs = dom.getElementsByTagName("physical")
            for p in physs:
                pfns = p.getElementsByTagName("pfn")
                for pfn in pfns:
                    ftype = pfn.getAttribute("filetype")
                    name = pfn.getAttribute("name")
                    # Get the SE name if any
                    se = pfn.getAttribute("se")
                    se = se if se else "Unknown"
                    self.pfns.append((name, ftype, se))
            logics = dom.getElementsByTagName("logical")
            for l in logics:
                # do not know yet the Pool lfn xml schema
                lfns = l.getElementsByTagName("lfn")
                for lfn in lfns:
                    name = lfn.getAttribute("name")
                    self.lfns.append(name)

    def dump(self):
        """Dumps the contents to the standard output"""
        print("\nPool Catalog file entry:")
        print("   guid:", self.guid)
        if len(self.lfns) > 0:
            print("   lfns:")
            for l in self.lfns:
                print("       ", l)
        if len(self.pfns) > 0:
            print("   pfns:")
            for p in self.pfns:
                print("       ", p[0], "type:", p[1], "SE:", p[2])

    def getPfns(self):
        """Retrieves all the PFNs as (pfn, se) tuples"""
        result = []
        for p in self.pfns:
            result.append((p[0], p[2]))
        return result

    def getLfns(self):
        """Retrieves all the LFNs"""
        result = []
        for l in self.lfns:
            result.append(l)
        return result

    def addLfn(self, lfn):
        """Adds one LFN"""
        self.lfns.append(lfn)

    def addPfn(self, pfn, pfntype=None, se=None):
        """Adds one PFN (default type ROOT_All, default SE Unknown)"""
        sename = "Unknown"
        if se:
            sename = se
        if pfntype:
            self.pfns.append((pfn, pfntype, sename))
        else:
            self.pfns.append((pfn, "ROOT_All", sename))

    def toXML(self, metadata):
        """Output the contents as an XML string

        :param metadata: when true, also emit <metadata> elements for
                         each PFN
        """
        doc = xml.dom.minidom.Document()
        fileElt = doc.createElement("File")
        fileElt.setAttribute("ID", self.guid)
        if self.pfns:
            physicalElt = doc.createElement("physical")
            fileElt.appendChild(physicalElt)
            for p in self.pfns:
                pfnElt = doc.createElement("pfn")
                physicalElt.appendChild(pfnElt)
                # minidom escapes <, > and & in attribute values on
                # serialization; unescape any pre-escaped entities first
                # so that e.g. an input "&amp;" does not end up
                # double-escaped as "&amp;amp;" in the POOL XML slice.
                fixedp = p[0].replace("&amp;", "&")
                fixedp = fixedp.replace("&lt;", "<")
                fixedp = fixedp.replace("&gt;", ">")
                pfnElt.setAttribute("filetype", p[1])
                pfnElt.setAttribute("name", fixedp)
                pfnElt.setAttribute("se", p[2])
            if metadata:
                for p in self.pfns:
                    metadataElt = doc.createElement("metadata")
                    physicalElt.appendChild(metadataElt)
                    metadataElt.setAttribute("att_name", p[0])
                    metadataElt.setAttribute("att_value", p[2])
        if self.lfns:
            logicalElt = doc.createElement("logical")
            fileElt.appendChild(logicalElt)
            for l in self.lfns:
                lfnElt = doc.createElement("lfn")
                logicalElt.appendChild(lfnElt)
                lfnElt.setAttribute("name", l)
        return fileElt.toprettyxml(indent=" ")
class PoolXMLCatalog(object):
"""A Pool XML File Catalog"""
    def __init__(self, xmlfile=""):
        """PoolXMLCatalog constructor.

        Constructor takes one of the following argument types:
        xml string; list of xml strings; file name; list of file names

        :param xmlfile: catalog source(s); when an existing file name is
                        given it also becomes the backend file for flush()
        """
        # mapping guid -> PoolFile entry
        self.files = {}
        # file that receives the catalog contents on flush()
        self.backend_file = None
        self.name = "Pool"

        # Get the dom representation of the catalog
        if xmlfile:
            if not isinstance(xmlfile, list):
                if os.path.isfile(xmlfile):
                    self.backend_file = xmlfile
                xmlfile = [xmlfile]
            for xmlf in xmlfile:
                # each source may be a file on disk or a raw XML string
                if os.path.isfile(xmlf):
                    self.dom = xml.dom.minidom.parse(xmlf)
                else:
                    self.dom = xml.dom.minidom.parseString(xmlf)
                # NOTE(review): self.dom keeps only the last parsed document;
                # the entries of all sources are merged into self.files
                self.analyseCatalog(self.dom)
    def setBackend(self, fname):
        """Set the backend file name

        Sets the name of the file which will receive the contents of the
        catalog when the flush() method will be called

        :param fname: path of the backend file
        """
        self.backend_file = fname
    def flush(self):
        """Flush the contents of the catalog to a file

        Flushes the contents of the catalog to a file from which
        the catalog was instantiated or which was set explicitly
        with setBackend() method
        """
        # keep the previous version as a backup copy
        if os.path.exists(self.backend_file):
            os.rename(self.backend_file, self.backend_file + ".bak")

        with open(self.backend_file, "w") as fp:
            fp.write(self.toXML())
    def getName(self):
        """Get the catalog type name

        :return: S_OK carrying the catalog type name ("Pool")
        """
        return S_OK(self.name)
    def analyseCatalog(self, dom):
        """Create the catalog from a DOM object

        Creates the contents of the catalog from the DOM XML object

        :param dom: parsed POOL XML catalog document
        """
        catalog = dom.getElementsByTagName("POOLFILECATALOG")[0]
        pfiles = catalog.getElementsByTagName("File")
        # one PoolFile entry per <File> element, keyed by its GUID
        for p in pfiles:
            guid = p.getAttribute("ID")
            pf = PoolFile(p)
            self.files[guid] = pf
def dump(self):
"""Dump catalog
Dumps the contents of the catalog to the std output
"""
for _guid, pfile in self.files.items():
pfile.dump()
def getFileByGuid(self, guid):
"""Get PoolFile object by GUID"""
if guid in self.files:
return self.files[guid]
return None
def getGuidByPfn(self, pfn):
"""Get GUID for a given PFN"""
for guid, pfile in self.files.items():
for p in pfile.pfns:
if pfn == p[0]:
return guid
return ""
def getGuidByLfn(self, lfn):
"""Get GUID for a given LFN"""
for guid, pfile in self.files.items():
for l in pfile.lfns:
if lfn == l:
return guid
return ""
def getTypeByPfn(self, pfn):
"""Get Type for a given PFN"""
for _guid, pfile in self.files.items():
for p in pfile.pfns:
if pfn == p[0]:
return p[1]
return ""
def exists(self, lfn):
"""Check for the given LFN existence"""
if self.getGuidByLfn(lfn):
return 1
return 0
def getLfnsList(self):
"""Get list of LFNs in catalogue."""
lfnsList = []
for guid in self.files:
lfn = self.files[guid].getLfns()
lfnsList.append(lfn[0])
return lfnsList
def getLfnsByGuid(self, guid):
"""Get LFN for a given GUID"""
lfn = ""
if guid in self.files:
lfns = self.files[guid].getLfns()
lfn = lfns[0]
if lfn:
return S_OK(lfn)
else:
return S_ERROR("GUID " + guid + " not found in the catalog")
def getPfnsByGuid(self, guid):
"""Get replicas for a given GUID"""
result = S_OK()
repdict = {}
if guid in self.files:
pfns = self.files[guid].getPfns()
for pfn, se in pfns:
repdict[se] = pfn
else:
return S_ERROR("GUID " + guid + " not found in the catalog")
result["Replicas"] = repdict
return result
def getPfnsByLfn(self, lfn):
"""Get replicas for a given LFN"""
guid = self.getGuidByLfn(lfn)
return self.getPfnsByGuid(guid)
def removeFileByGuid(self, guid):
"""Remove file for a given GUID"""
for g, _pfile in self.files.items():
if guid == g:
del self.files[guid]
def removeFileByLfn(self, lfn):
    """Remove every file whose LFN list contains *lfn*.

    Iterates over a snapshot of the items so entries can be deleted
    without mutating the dict during iteration (the previous code raised
    RuntimeError on Python 3 as soon as a match was removed).
    """
    for guid, pfile in list(self.files.items()):
        if lfn in pfile.lfns:
            del self.files[guid]
def addFile(self, fileTuple):
    """Register one file (a tuple) or several files (a list of tuples).

    Each tuple is (lfn, pfn, se, guid, pfnType).  Returns an S_OK result
    carrying "Successful"/"Failed" dictionaries keyed by LFN.
    """
    if isinstance(fileTuple, tuple):
        files = [fileTuple]
    elif isinstance(fileTuple, list):
        files = fileTuple
    else:
        return S_ERROR("PoolXMLCatalog.addFile: Must supply a file tuple of list of tuples")
    successful = {}
    for lfn, pfn, se, guid, pfnType in files:
        new_file = PoolFile()
        new_file.guid = guid
        if lfn:
            new_file.addLfn(lfn)
        if pfn:
            new_file.addPfn(pfn, pfnType, se)
        self.files[guid] = new_file
        successful[lfn] = True
    return S_OK({"Failed": {}, "Successful": successful})
def addReplica(self, replicaTuple):
    """Add one replica (a tuple) or several replicas (a list of tuples).

    Each tuple has the form (lfn, pfn, se, master) where master is True
    or False; the master flag is currently unused.  Returns an S_OK
    result carrying "Successful"/"Failed" dictionaries keyed by LFN.
    """
    if isinstance(replicaTuple, tuple):
        replicas = [replicaTuple]
    elif isinstance(replicaTuple, list):
        replicas = replicaTuple
    else:
        return S_ERROR("PoolXMLCatalog.addReplica: Must supply a replica tuple of list of tuples")
    failed = {}
    successful = {}
    for lfn, pfn, se, _master in replicas:
        guid = self.getGuidByLfn(lfn)
        if not guid:
            failed[lfn] = "LFN not found"
            continue
        self.files[guid].addPfn(pfn, None, se)
        successful[lfn] = True
    return S_OK({"Failed": failed, "Successful": successful})
def addPfnByGuid(self, lfn, guid, pfn, pfntype=None, se=None):
    """Add a PFN for *guid*; create the file entry if the GUID is new.

    Not part of the standard POOL interface.
    """
    if guid in self.files:
        self.files[guid].addPfn(pfn, pfntype, se)
    else:
        # Bug fix: addFile() expects a tuple (or a list of tuples); the
        # previous code passed a flat list, which made addFile try to
        # unpack each scalar element and fail.
        self.addFile((lfn, pfn, se, guid, pfntype))
def addLfnByGuid(self, guid, lfn, pfn, se, pfntype):
    """Add an LFN for *guid*; create the file entry if the GUID is new.

    Not part of the standard POOL interface.
    """
    if guid in self.files:
        self.files[guid].addLfn(lfn)
    else:
        # Bug fix: addFile() expects a tuple, not a flat list (the flat
        # list would be iterated element-by-element and mis-unpacked).
        self.addFile((lfn, pfn, se, guid, pfntype))
def toXML(self, metadata=False):
    """Serialise the catalogue to an XML string.

    Args:
        metadata: forwarded to each PoolFile.toXML() call.
    """
    header = """<?xml version="1.0" encoding="UTF-8" standalone="no" ?>
<!-- Edited By PoolXMLCatalog.py -->
<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
<POOLFILECATALOG>\n\n"""
    body = "".join(pfile.toXML(metadata) for pfile in self.files.values())
    return header + body + "\n</POOLFILECATALOG>\n"
| ic-hep/DIRAC | src/DIRAC/Resources/Catalog/PoolXMLCatalog.py | Python | gpl-3.0 | 11,925 | [
"DIRAC"
] | 65b874fd56940c88dcec10bb5ceba7ea861b92c9763ed940657d319976b7eedd |
""" Tests minimise module.
Tests the :meth:`GridSearch.fin_minimum`, method - an alternative to
calling :meth:`numpy.argmin` and :meth:`numpy.nanmin`, which are used
by default.
The main part of this test is verifying the :class:`GridSearch` class,
particularly the `meth:`GridSearch.minimise` method.
"""
import numpy
import echidna.core.spectra as spectra
import echidna.fit.test_statistic as test_statistic
from echidna.core.config import (GlobalFitConfig, SpectraFitConfig,
SpectraConfig)
from echidna.fit.minimise import GridSearch
import unittest
import copy
class TestGridSearch(unittest.TestCase):
    """ Tests for the class :class:`echidna.limit.minimise.GridSearch`.
    """
    def setUp(self):
        """ Set up attributes for tests.
        Attributes:
          _A (:class:`spectra.Spectra`): Test spectrum A - to use in test
          _B (:class:`spectra.Spectra`): Test spectrum B - to use in test
          _C (:class:`spectra.Spectra`): Test spectrum C - to use in test
          _test_statistic (:class:`test_statistic.BakerCousinsChi`): Test
            statistic to use in test
          _default_grid_search (:class:`GridSearch`): GridSearch using
            default method (numpy) for minimisation.
          _grid_search (:class:`GridSearch`): GridSearch using
            :meth:`GridSearch.find_minimum`, to find minimum.
        """
        # Create spectra
        num_decays = 1.e4
        spectra_config = SpectraConfig.load(
            {"parameters":
             {"x":
              {"low": 2.0,
               "high": 3.0,
               "bins": 10}}},
            name="spectra_config")
        spectrum = spectra.Spectra("spectra", num_decays, spectra_config)
        # Fill spectrum with random Gaussian data
        # ~90% of sampled decays are filled (10% randomly dropped).
        for i_decay in range(int(num_decays)):
            x = numpy.random.normal(loc=2.5, scale=0.1)
            if numpy.random.uniform() > 0.1:
                spectrum.fill(x=x)
        # Save three copies of spectrum
        # NOTE(review): copy.copy is shallow -- assumes Spectra.scale()
        # does not mutate arrays shared between A, B and C; confirm.
        self._A = copy.copy(spectrum)
        self._A._name = "A"
        self._B = copy.copy(spectrum)
        self._B._name = "B"
        self._C = copy.copy(spectrum)
        self._C._name = "C"
        # Make Global fit config
        fit_config = GlobalFitConfig.load({
            "global_fit_parameters": {
                "x": {}}})
        # Per-spectrum rate priors are chosen so the expected best fit is
        # the Pythagorean triple 5, 12, 13 (see test_minimise below).
        fit_config_A = SpectraFitConfig.load({
            "spectral_fit_parameters": {
                "rate": {
                    "prior": 5.0,
                    "sigma": None,
                    "low": 4.0,
                    "high": 6.0,
                    "bins": 11}}},
            spectra_name=self._A.get_name())
        fit_config_B = SpectraFitConfig.load({
            "spectral_fit_parameters": {
                "rate": {
                    "prior": 12.0,
                    "sigma": None,
                    "low": 11.0,
                    "high": 13.0,
                    "bins": 11}}},
            spectra_name=self._B.get_name())
        fit_config_C = SpectraFitConfig.load({
            "spectral_fit_parameters": {
                "rate": {
                    "prior": 13.0,
                    "sigma": None,
                    "low": 12.0,
                    "high": 14.0,
                    "bins": 11}}},
            spectra_name=self._C.get_name())
        fit_config.add_config(fit_config_A)
        fit_config.add_config(fit_config_B)
        fit_config.add_config(fit_config_C)
        # Initialise test statistic
        self._test_statistic = test_statistic.BakerCousinsChi(per_bin=True)
        # Initialise default GridSearch
        self._default_grid_search = GridSearch(fit_config,
                                               spectra_config,
                                               "default_grid_search")
        # Initialise GridSearch using find_minimum
        self._grid_search = GridSearch(fit_config,
                                       spectra_config,
                                       "grid_search",
                                       use_numpy=False)

    def _funct(self, *args):
        """ Callable to pass to minimiser.
        Fits A**2 + B**2 to C**2.
        Returns:
          :class:`numpy.ndarray`: Test statistic values
          float: Penalty term - in this case always zero.
        """
        a = args[0]
        b = args[1]
        c = args[2]
        # Scale spectra
        self._A.scale(a ** 2)
        self._B.scale(b ** 2)
        self._C.scale(c ** 2)
        observed = self._C.project("x")
        expected = self._A.project("x") + self._B.project("x")
        # Return test statistics and penalty term (always zero)
        return self._test_statistic.compute_statistic(observed, expected), 0.

    def test_find_minimum(self):
        """ Test the :meth:`GridSearch.find_minimum` method.
        Tests:
          * That :meth:`GridSearch.find_minimum` correctly locates the
            minimum.
          * That :meth:`GridSearch.find_minimum` exhibits the correct
            behaviour when there are two minima.
        """
        # Create a 100 * 100 * 100 array of uniform random numbers, in the
        # range (0.01 < x < 1.0)
        shape = tuple(numpy.repeat(100, 3))
        array = numpy.random.uniform(low=0.01, high=1.0, size=shape)
        # Generate a random coordinate position
        coords = tuple(numpy.random.randint(low=0, high=100, size=3))
        # Generate random minimum < 0.01
        # (strictly below every other array value, so it is the unique minimum)
        minimum = numpy.random.uniform(low=0, high=0.01)
        # Set minimum at generated coordinates
        array[coords[0], coords[1], coords[2]] = minimum
        # Find minimum of array
        fit_min, fit_coords = self._default_grid_search.find_minimum(array)
        self.assertIsInstance(fit_min, float)
        self.assertIsInstance(fit_coords, tuple)
        self.assertEqual(fit_min, minimum)
        self.assertEqual(fit_coords, coords)
        # Now try with two equal minima
        coords2 = tuple(numpy.random.randint(low=0, high=100, size=3))
        # Set second minimum at second generated coordinates
        array[coords2[0], coords2[1], coords2[2]] = minimum
        # Find minimum of array
        fit_min, fit_coords = self._default_grid_search.find_minimum(array)
        self.assertEqual(fit_min, minimum)
        # With ties, the coordinate earliest along the first axis wins.
        if coords[0] < coords2[0]:  # coords should be returned as best fit
            self.assertEqual(fit_coords, coords)
        else:  # coords2 should be returned as best fit
            self.assertEqual(fit_coords, coords2)

    def test_minimise(self):
        """ Test the :meth:`GridSearch.minimise` method.
        Floats A, B and C (rates) and fits A^2 + B^2 to C^2, using a
        a :class:`test_statistic.BakerCousinsChi` test statistic.
        The fitted rates should form the pythagorean triple:
        5^2 + 12^2 = 13^2.
        """
        fit_A = 5.0
        fit_B = 12.0
        fit_C = 13.0
        # Test default grid search, with numpy
        minimum = self._default_grid_search.minimise(self._funct,
                                                     self._test_statistic)
        self.assertIsInstance(minimum, float)
        results = self._default_grid_search.get_summary()
        self.assertAlmostEqual(results.get("A_rate").get("best_fit"), fit_A)
        self.assertAlmostEqual(results.get("B_rate").get("best_fit"), fit_B)
        self.assertAlmostEqual(results.get("C_rate").get("best_fit"), fit_C)
        # Try grid search using find_minimum
        self._grid_search.minimise(self._funct, self._test_statistic)
        results = self._grid_search.get_summary()
        self.assertAlmostEqual(results.get("A_rate").get("best_fit"), fit_A)
        self.assertAlmostEqual(results.get("B_rate").get("best_fit"), fit_B)
        self.assertAlmostEqual(results.get("C_rate").get("best_fit"), fit_C)
| ashleyrback/echidna | echidna/test/test_minimise.py | Python | mit | 7,878 | [
"Gaussian"
] | 765b34d3f60ffc674b30409f972e2b38159217c041c603c61ad74be620b9c27d |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
********************************
**espresso.integrator.ExtForce**
********************************
"""
from espresso.esutil import cxxinit
from espresso import pmi
from espresso.integrator.Extension import *
from _espresso import integrator_ExtForce
class ExtForceLocal(ExtensionLocal, integrator_ExtForce):
    """The (local) external force part.

    Binds the C++ ``integrator_ExtForce`` extension on the worker ranks
    that belong to the active PMI CPU group (or on every rank when no PMI
    communicator is active).
    """

    def __init__(self, system, extForce, particleGroup=None):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Idiom fix: compare against None with `is`, not `==` (PEP 8);
            # `==` would invoke particleGroup.__eq__ if defined.
            # A missing or empty particle group means "apply to all particles".
            if particleGroup is None or particleGroup.size() == 0:
                cxxinit(self, integrator_ExtForce, system, extForce)
            else:
                cxxinit(self, integrator_ExtForce, system, extForce, particleGroup)
# Controller-side proxy: forwards the listed calls and properties to the
# worker-side ExtForceLocal instances through PMI.
if pmi.isController :
    class ExtForce(Extension):
        # PMI proxy metaclass generates the forwarding machinery.
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espresso.integrator.ExtForceLocal',
            pmicall = ['setExtForce', 'getExtForce'],
            pmiproperty = [ 'particleGroup' ]
            )
| BackupTheBerlios/espressopp | src/integrator/ExtForce.py | Python | gpl-3.0 | 1,927 | [
"ESPResSo"
] | 170e591f357e3b0ceaf934f644e07d8249aefbc27b8d6ce1df6e2ee91b69d44e |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
# flake8: ignore=E712
import os
import warnings
from itertools import groupby
from typing import List, Optional, Union
import numpy as np
import pandas as pd
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.entries.compatibility import (
Compatibility,
CompatibilityError,
MaterialsProject2020Compatibility,
)
from pymatgen.entries.computed_entries import (
ComputedEntry,
ComputedStructureEntry,
ConstantEnergyAdjustment,
)
from pymatgen.entries.entry_tools import EntrySet
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
__author__ = "Ryan Kingsbury"
__copyright__ = "Copyright 2019-2021, The Materials Project"
__version__ = "0.1"
__email__ = "RKingsbury@lbl.gov"
__date__ = "October 2021"
class MaterialsProjectDFTMixingScheme(Compatibility):
"""
This class implements the Materials Project mixing scheme, which allows mixing of
energies from different DFT functionals. Note that this should only be used for
VASP calculations using the MaterialsProject parameters (e.g. MPRelaxSet or
MPScanRelaxSet). Using this compatibility scheme on runs with different parameters
may lead to unexpected results.
This is the scheme used by the Materials Project to generate Phase Diagrams containing
a mixture of GGA(+U) and R2SCAN calculations. However in principle it can be used to
mix energies from any two functionals.
"""
def __init__(
    self,
    structure_matcher: Optional[StructureMatcher] = None,
    run_type_1: str = "GGA(+U)",
    run_type_2: str = "R2SCAN",
    compat_1: Optional[Compatibility] = MaterialsProject2020Compatibility(),
    compat_2: Optional[Compatibility] = None,
    fuzzy_matching: bool = True,
):
    """
    Instantiate the mixing scheme. The init method creates a generator class that
    contains relevant settings (e.g., StrutureMatcher instance, Compatibility settings
    for each functional) for processing groups of entries.

    Args:
        structure_matcher (StructureMatcher): StructureMatcher object used to determine
            whether calculations from different functionals describe the same material.
            Defaults to a new StructureMatcher() per instance when None.
        run_type_1: The first DFT run_type. Typically this is the majority or run type or
            the "base case" onto which the other calculations are referenced. Valid choices
            are any run_type recognized by Vasprun.run_type, such as "LDA", "GGA", "GGA+U",
            "PBEsol", "SCAN", or "R2SCAN". The class will ignore any entries that have a
            run_type different than run_type_1 or run_type_2.
            The list of run_type_1 entries provided to process_entries MUST form a complete
            Phase Diagram in order for the mixing scheme to work. If this condition is not
            satisfied, processing the entries will fail.
            Note that the special string "GGA(+U)" (default) will treat both GGA and GGA+U
            calculations as a single type. This option exists because GGA/GGA+U mixing is
            already handled by MaterialsProject2020Compatibility.
        run_type_2: The second DFT run_type. Typically this is the run_type that is 'preferred'
            but has fewer calculations. If run_type_1 and run_type_2 calculations exist for all
            materials, run_type_2 energies will be used (hence the 'preferred' status). The class
            will ignore any entries that have a run_type different than run_type_1 or run_type_2.
        compat_1: Compatibility class used to pre-process entries of run_type_1.
            Defaults to MaterialsProjectCompatibility2020.
        compat_2: Compatibility class used to pre-process entries of run_type_2.
            Defaults to None.
        fuzzy_matching: Whether to use less strict structure matching logic for
            diatomic elements O2, N2, F2, H2, and Cl2 as well as I and Br. Outputs of DFT
            relaxations using
            different functionals frequently fail to structure match for these elements
            even though they come from the same original material. Fuzzy structure matching
            considers the materials equivalent if the formula, number of sites, and
            space group are all identical. If there are multiple materials of run_type_2
            that satisfy these criteria, the one with lowest energy is considered to
            match.
    """
    self.name = "MP DFT mixing scheme"
    # Fix for the mutable-default-argument anti-pattern: the previous
    # signature evaluated StructureMatcher() once at definition time, so
    # every instance shared (and could mutate) the same matcher.  A None
    # sentinel gives each instance its own matcher by default.
    # (compat_1 deliberately keeps its shared default instance: None is a
    # meaningful caller value there, signalling "no preprocessing".)
    self.structure_matcher = structure_matcher if structure_matcher is not None else StructureMatcher()
    if run_type_1 == run_type_2:
        raise ValueError(
            f"You specified the same run_type {run_type_1} for both run_type_1 and run_type_2. "
            "The mixing scheme is meaningless unless run_type_1 and run_type_2 are different"
        )
    self.run_type_1 = run_type_1
    self.run_type_2 = run_type_2
    # "GGA(+U)" is a pseudo run_type expanding to both GGA and GGA+U.
    if self.run_type_1 == "GGA(+U)":
        self.valid_rtypes_1 = ["GGA", "GGA+U"]
    else:
        self.valid_rtypes_1 = [self.run_type_1]
    if self.run_type_2 == "GGA(+U)":
        self.valid_rtypes_2 = ["GGA", "GGA+U"]
    else:
        self.valid_rtypes_2 = [self.run_type_2]
    self.compat_1 = compat_1
    self.compat_2 = compat_2
    self.fuzzy_matching = fuzzy_matching
def process_entries(
    self,
    entries: Union[ComputedStructureEntry, ComputedEntry, list],
    clean: bool = True,
    verbose: bool = True,
    mixing_state_data=None,
):
    """
    Process a sequence of entries with the DFT mixing scheme. Note
    that this method will change the data of the original entries.

    Args:
        entries: ComputedEntry or [ComputedEntry]. Pass all entries as a single list, even if they are
            computed with different functionals or require different preprocessing. This list will
            automatically be filtered based on run_type_1 and run_type_2, and processed according to
            compat_1 and compat_2.
            Note that under typical use, when mixing_state_data=None, the entries MUST be
            ComputedStructureEntry. They will be matched using structure_matcher.
        clean: bool, whether to remove any previously-applied energy adjustments.
            If True, all EnergyAdjustment are removed prior to processing the Entry.
            Default is True.
        verbose: bool, whether to print verbose error messages about the mixing scheme. Default is True.
        mixing_state_data: A DataFrame containing information about which Entries
            correspond to the same materials, which are stable on the phase diagrams of
            the respective run_types, etc. If None (default), it will be generated from the
            list of entries using MaterialsProjectDFTMixingScheme.get_mixing_state_data.
            This argument is included to facilitate use of the mixing scheme in high-throughput
            databases where an alternative to get_mixing_state_data is desirable for performance
            reasons. In general, it should always be left at the default value (None) to avoid
            inconsistencies between the mixing state data and the properties of the
            ComputedStructureEntry in entries.

    Returns:
        A list of adjusted entries. Entries in the original list which
        are not compatible are excluded.
    """
    processed_entry_list: List = []
    # We can't operate on single entries in this scheme
    if len(entries) == 1:
        warnings.warn(f"{self.__class__.__name__} cannot process single entries. Supply a list of entries.")
        return processed_entry_list
    # if clean is True, remove all previous adjustments from the entry
    # this code must be placed before the next block, because we don't want to remove
    # any corrections added by compat_1 or compat_2.
    if clean:
        for entry in entries:
            # BUG FIX: calling list.remove() inside
            # "for ea in entry.energy_adjustments" mutated the list being
            # iterated, which silently skipped every other adjustment.
            # Iterating over a copy removes ALL previous adjustments.
            for ea in list(entry.energy_adjustments):
                entry.energy_adjustments.remove(ea)
    entries_type_1, entries_type_2 = self._filter_and_sort_entries(entries, verbose=verbose)
    if mixing_state_data is None:
        if verbose:
            print("  Generating mixing state data from provided entries.")
        mixing_state_data = self.get_mixing_state_data(entries_type_1 + entries_type_2, verbose=False)
    if verbose:
        # how many stable entries from run_type_1 do we have in run_type_2?
        hull_entries_2 = 0
        stable_df = mixing_state_data[mixing_state_data["is_stable_1"]]
        if len(stable_df) > 0:
            hull_entries_2 = sum(stable_df["energy_2"].notna())
        print(
            f"  Entries contain {self.run_type_2} calculations for {hull_entries_2} of {len(stable_df)} "
            f"{self.run_type_1} hull entries."
        )
        if hull_entries_2 == len(stable_df):
            print(f"  {self.run_type_1} energies will be adjusted to the {self.run_type_2} scale")
        else:
            print(f"  {self.run_type_2} energies will be adjusted to the {self.run_type_1} scale")
            if hull_entries_2 > 0:
                print(
                    f"  The energy above hull for {self.run_type_2} materials at compositions with "
                    f"{self.run_type_2} hull entries will be preserved. For other compositions, "
                    f"Energies of {self.run_type_2} materials will be set equal to those of "
                    f"matching {self.run_type_1} materials"
                )
    # the code below is identical to code inside process_entries in the base
    # Compatibility class, except that an extra kwarg is passed to get_adjustments
    for entry in entries_type_1 + entries_type_2:
        ignore_entry = False
        # get the energy adjustments
        try:
            adjustments = self.get_adjustments(entry, mixing_state_data)
        except CompatibilityError as exc:
            if "WARNING!" in str(exc):
                warnings.warn(str(exc))
            elif verbose:
                print(f"  {exc}")
            ignore_entry = True
            continue
        for ea in adjustments:
            # Has this correction already been applied?
            if (ea.name, ea.cls, ea.value) in [(ea.name, ea.cls, ea.value) for ea in entry.energy_adjustments]:
                # we already applied this exact correction. Do nothing.
                pass
            elif (ea.name, ea.cls) in [(ea.name, ea.cls) for ea in entry.energy_adjustments]:
                # we already applied a correction with the same name
                # but a different value. Something is wrong.
                ignore_entry = True
                warnings.warn(
                    "Entry {} already has an energy adjustment called {}, but its "
                    "value differs from the value of {:.3f} calculated here. This "
                    "Entry will be discarded.".format(entry.entry_id, ea.name, ea.value)
                )
            else:
                # Add the correction to the energy_adjustments list
                entry.energy_adjustments.append(ea)
        if not ignore_entry:
            processed_entry_list.append(entry)
    if verbose:
        count_type_1 = len([e for e in processed_entry_list if e.parameters["run_type"] in self.valid_rtypes_1])
        count_type_2 = len([e for e in processed_entry_list if e.parameters["run_type"] in self.valid_rtypes_2])
        print(
            f"\nProcessing complete. Mixed entries contain {count_type_1} {self.run_type_1} and {count_type_2} "
            f"{self.run_type_2} entries.\n"
        )
        self.display_entries(processed_entry_list)
    return processed_entry_list
def get_adjustments(self, entry, mixing_state_data: pd.DataFrame = None):
    """
    Returns the corrections applied to a particular entry. Note that get_adjustments is not
    intended to be called directly in the R2SCAN mixing scheme. Call process_entries instead,
    and it will pass the required arguments to get_adjustments.

    Args:
        entry: A ComputedEntry object. The entry must be a member of the list of entries
            used to create mixing_state_data.
        mixing_state_data: A DataFrame containing information about which Entries
            correspond to the same materials, which are stable on the phase diagrams of
            the respective run_types, etc. Can be generated from a list of entries using
            MaterialsProjectDFTMixingScheme.get_mixing_state_data. This argument is included to
            facilitate use of the mixing scheme in high-throughput databases where an alternative
            to get_mixing_state_data is desirable for performance reasons. In general, it should
            always be left at the default value (None) to avoid inconsistencies between the mixing
            state data and the properties of the ComputedStructureEntry.

    Returns:
        [EnergyAdjustment]: Energy adjustments to be applied to entry.

    Raises:
        CompatibilityError if the DFT mixing scheme cannot be applied to the entry.
        Messages prefixed "WARNING!" are surfaced as warnings by process_entries;
        all others are treated as routine discards.
    """
    adjustments: List[ConstantEnergyAdjustment] = []
    run_type = entry.parameters.get("run_type")
    if mixing_state_data is None:
        raise CompatibilityError(
            "WARNING! `mixing_state_data` DataFrame is None. No energy adjustments will be applied."
        )
    # Sanity check: the run_type_1 hull must be complete before any mixing.
    if not all(mixing_state_data["hull_energy_1"].notna()):
        if any(mixing_state_data["entry_id_1"].notna()):
            raise CompatibilityError(
                f"WARNING! {self.run_type_1} entries do not form a complete PhaseDiagram."
                " No energy adjustments will be applied."
            )
    if run_type not in self.valid_rtypes_1 + self.valid_rtypes_2:
        raise CompatibilityError(
            f"WARNING! Invalid run_type {run_type} for entry {entry.entry_id}. Must be one of "
            f"{self.valid_rtypes_1 + self.valid_rtypes_2}. This entry will be ignored."
        )
    # Verify that the entry is included in the mixing state data
    if (entry.entry_id not in mixing_state_data["entry_id_1"].values) and (
        entry.entry_id not in mixing_state_data["entry_id_2"].values
    ):
        raise CompatibilityError(
            f"WARNING! Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
            f"because it was not found in the mixing state data. This can occur when there are duplicate "
            "structures. In such cases, only the lowest energy entry with that structure appears in the "
            "mixing state data."
        )
    # Verify that the entry's energy has not been modified since mixing state data was generated
    if (entry.energy_per_atom not in mixing_state_data["energy_1"].values) and (
        entry.energy_per_atom not in mixing_state_data["energy_2"].values
    ):
        raise CompatibilityError(
            f"WARNING! Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
            "because it's energy has been modified since the mixing state data was generated."
        )
    # Compute the energy correction for mixing. The correction value depends on how many of the
    # run_type_1 stable entries are present as run_type_2 calculations
    # First case - ALL run_type_1 stable entries are present in run_type_2
    # In this scenario we construct the hull using run_type_2 energies. We discard any
    # run_type_1 entries that already exist in run_type_2 and correct other run_type_1
    # energies to have the same e_above_hull on the run_type_2 hull as they had on the run_type_1 hull
    if all(mixing_state_data[mixing_state_data["is_stable_1"]]["entry_id_2"].notna()):
        if run_type in self.valid_rtypes_2:  # pylint: disable=R1705
            # For run_type_2 entries, there is no correction
            return adjustments
        # Discard GGA ground states whose structures already exist in R2SCAN.
        else:
            df_slice = mixing_state_data[(mixing_state_data["entry_id_1"] == entry.entry_id)]
            if df_slice["entry_id_2"].notna().item():
                # there is a matching run_type_2 entry, so we will discard this entry
                if df_slice["is_stable_1"].item():
                    # this is a GGA ground state.
                    raise CompatibilityError(
                        f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
                        f"because it is a {self.run_type_1} ground state that matches a {self.run_type_2} "
                        "material."
                    )
                raise CompatibilityError(
                    f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
                    f"because there is a matching {self.run_type_2} material."
                )
            # If a GGA is not present in R2SCAN, correct its energy to give the same
            # e_above_hull on the R2SCAN hull that it would have on the GGA hull
            hull_energy_1 = df_slice["hull_energy_1"].iloc[0]
            hull_energy_2 = df_slice["hull_energy_2"].iloc[0]
            # Correction shifts the entry by the per-atom hull offset times site count.
            correction = (hull_energy_2 - hull_energy_1) * entry.composition.num_atoms
            adjustments.append(
                ConstantEnergyAdjustment(
                    correction,
                    0.0,
                    name=f"MP {self.run_type_1}/{self.run_type_2} mixing adjustment",
                    cls=self.as_dict(),
                    description=f"Place {self.run_type_1} energy onto the {self.run_type_2} hull",
                )
            )
            return adjustments
    # Second case - there are run_type_2 energies available for at least some run_type_1
    # stable entries. Here, we can correct run_type_2 energies at certain compositions
    # to preserve their e_above_hull on the run_type_1 hull
    elif any(mixing_state_data[mixing_state_data["is_stable_1"]]["entry_id_2"].notna()):
        if run_type in self.valid_rtypes_1:  # pylint: disable=R1705
            df_slice = mixing_state_data[mixing_state_data["entry_id_1"] == entry.entry_id]
            if df_slice["entry_id_2"].notna().item():
                # there is a matching run_type_2 entry. We should discard this entry
                if df_slice["is_stable_1"].item():
                    # this is a GGA ground state.
                    raise CompatibilityError(
                        f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
                        f"because it is a {self.run_type_1} ground state that matches a {self.run_type_2} "
                        "material."
                    )
                raise CompatibilityError(
                    f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
                    f"because there is a matching {self.run_type_2} material"
                )
            # For other run_type_1 entries, there is no correction
            return adjustments
        else:
            # for run_type_2, determine whether there is a run_type_2 ground state at this composition
            df_slice = mixing_state_data[mixing_state_data["formula"] == entry.composition.reduced_formula]
            if any(df_slice[df_slice["is_stable_1"]]["entry_id_2"].notna()):
                # there is a run_type_2 entry corresponding to the run_type_1 ground state
                # adjust the run_type_2 energy to preserve the e_above_hull
                gs_energy_type_2 = df_slice[df_slice["is_stable_1"]]["energy_2"].item()
                e_above_hull = entry.energy_per_atom - gs_energy_type_2
                hull_energy_1 = df_slice["hull_energy_1"].iloc[0]
                correction = (hull_energy_1 + e_above_hull - entry.energy_per_atom) * entry.composition.num_atoms
                adjustments.append(
                    ConstantEnergyAdjustment(
                        correction,
                        0.0,
                        name=f"MP {self.run_type_1}/{self.run_type_2} mixing adjustment",
                        cls=self.as_dict(),
                        description=f"Place {self.run_type_2} energy onto the {self.run_type_1} hull",
                    )
                )
                return adjustments
            # this composition is not stable in run_type_1. If the run_type_2 entry matches a run_type_1
            # entry, we can adjust the run_type_2 energy to match the run_type_1 energy.
            if any(df_slice[df_slice["entry_id_2"] == entry.entry_id]["entry_id_1"].notna()):
                # adjust the energy of the run_type_2 entry to match that of the run_type_1 entry
                type_1_energy = df_slice[df_slice["entry_id_2"] == entry.entry_id]["energy_1"].iloc[0]
                correction = (type_1_energy - entry.energy_per_atom) * entry.composition.num_atoms
                adjustments.append(
                    ConstantEnergyAdjustment(
                        correction,
                        0.0,
                        name=f"MP {self.run_type_1}/{self.run_type_2} mixing adjustment",
                        cls=self.as_dict(),
                        description=f"Replace {self.run_type_2} energy with {self.run_type_1} energy",
                    )
                )
                return adjustments
            # there is no run_type_1 entry that matches this material, and no ground state. Discard.
            raise CompatibilityError(
                f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
                f"because there is no matching {self.run_type_1} entry and no {self.run_type_2} "
                "ground state at this composition."
            )
    # Third case - there are no run_type_2 energies available for any run_type_1
    # ground states. There's no way to use the run_type_2 energies in this case.
    elif all(mixing_state_data[mixing_state_data["is_stable_1"]]["entry_id_2"].isna()):
        if run_type in self.valid_rtypes_1:
            # nothing to do for run_type_1, return as is
            return adjustments
        # for run_type_2, discard the entry
        raise CompatibilityError(
            f"Discarding {run_type} entry {entry.entry_id} for {entry.composition.formula} "
            f"because there are no {self.run_type_2} ground states at this composition."
        )
    # this statement is here to make pylint happy by guaranteeing a return or raise
    else:
        raise CompatibilityError(
            "WARNING! If you see this Exception it means you have encountered"
            f"an edge case in {self.__class__.__name__}. Inspect your input carefully and post a bug report."
        )
def get_mixing_state_data(self, entries: List[ComputedStructureEntry], verbose: bool = False):
    """
    Generate internal state data to be passed to get_adjustments.

    Args:
        entries: The list of ComputedStructureEntry to process. It is assumed that the entries have
            already been filtered using _filter_and_sort_entries() to remove any irrelevant run types,
            apply compat_1 and compat_2, and confirm that all have unique entry_id.

    Returns:
        DataFrame: A pandas DataFrame that contains information associating structures from
            different functionals with specific materials and establishing how many run_type_1
            ground states have been computed with run_type_2. The DataFrame contains one row
            for each distinct material (Structure), with the following columns:
                formula: str the reduced_formula
                spacegroup: int the spacegroup
                num_sites: int the number of sites in the Structure
                entry_id_1: the entry_id of the run_type_1 entry
                entry_id_2: the entry_id of the run_type_2 entry
                run_type_1: Optional[str] the run_type_1 value
                run_type_2: Optional[str] the run_type_2 value
                energy_1: float or nan the ground state energy in run_type_1 in eV/atom
                energy_2: float or nan the ground state energy in run_type_2 in eV/atom
                is_stable_1: bool whether this material is stable on the run_type_1 PhaseDiagram
                hull_energy_1: float or nan the energy of the run_type_1 hull at this composition in eV/atom
                hull_energy_2: float or nan the energy of the run_type_1 hull at this composition in eV/atom

        None: Returns None if the supplied ComputedStructureEntry are insufficient for applying
            the mixing scheme.
    """
    filtered_entries = []
    for entry in entries:
        if not isinstance(entry, ComputedStructureEntry):
            # Bug fix in the warning text: the concatenated string literals
            # previously read "will beignored" and "structures for all"
            # lost its separating space.
            warnings.warn(
                "Entry {} is not a ComputedStructureEntry and will be "
                "ignored. The DFT mixing scheme requires structures for "
                "all entries".format(entry.entry_id)
            )
            continue
        filtered_entries.append(entry)
    # separate by run_type
    entries_type_1 = [e for e in filtered_entries if e.parameters["run_type"] in self.valid_rtypes_1]
    entries_type_2 = [e for e in filtered_entries if e.parameters["run_type"] in self.valid_rtypes_2]
    # construct PhaseDiagram for each run_type, if possible
    pd_type_1, pd_type_2 = None, None
    try:
        pd_type_1 = PhaseDiagram(entries_type_1)
    except ValueError:
        warnings.warn(f"{self.run_type_1} entries do not form a complete PhaseDiagram.")
    try:
        pd_type_2 = PhaseDiagram(entries_type_2)
    except ValueError:
        warnings.warn(f"{self.run_type_2} entries do not form a complete PhaseDiagram.")
    # Objective: loop through all the entries, group them by structure matching (or fuzzy structure matching
    # where relevant). For each group, put a row in a pandas DataFrame with the composition of the run_type_1 entry,
    # the run_type_2 entry, whether or not that entry is a ground state (not necessarily on the hull), its energy,
    # and the energy of the hull at that composition
    all_entries = list(entries_type_1) + list(entries_type_2)
    row_list = []
    columns = [
        "formula",
        "spacegroup",
        "num_sites",
        "is_stable_1",
        "entry_id_1",
        "entry_id_2",
        "run_type_1",
        "run_type_2",
        "energy_1",
        "energy_2",
        "hull_energy_1",
        "hull_energy_2",
    ]

    def _get_sg(struc) -> int:
        """helper function to get spacegroup with a loose tolerance"""
        try:
            return struc.get_space_group_info(symprec=0.1)[1]
        except Exception:
            # -1 sorts all un-analyzable structures into one pre-group
            return -1

    # loop through all structures
    # this logic follows emmet.builders.vasp.materials.MaterialsBuilder.filter_and_group_tasks
    structures = []
    for entry in all_entries:
        s = entry.structure
        s.entry_id = entry.entry_id
        structures.append(s)
    # First group by composition, then by spacegroup number, then by structure matching
    for comp, compgroup in groupby(sorted(structures, key=lambda s: s.composition), key=lambda s: s.composition):
        l_compgroup = list(compgroup)
        # group by spacegroup, then by number of sites (for diatmics) or by structure matching
        for sg, pregroup in groupby(sorted(l_compgroup, key=_get_sg), key=_get_sg):
            l_pregroup = list(pregroup)
            if comp.reduced_formula in ["O2", "H2", "Cl2", "F2", "N2", "I", "Br", "H2O"] and self.fuzzy_matching:
                # group by number of sites
                for n, sitegroup in groupby(
                    sorted(l_pregroup, key=lambda s: s.num_sites), key=lambda s: s.num_sites
                ):
                    l_sitegroup = list(sitegroup)
                    row_list.append(
                        self._populate_df_row(l_sitegroup, comp, sg, n, pd_type_1, pd_type_2, all_entries)
                    )
            else:
                for group in self.structure_matcher.group_structures(l_pregroup):
                    grp = list(group)
                    n = group[0].num_sites
                    # StructureMatcher.group_structures returns a list of lists,
                    # so each group should be a list containing matched structures
                    row_list.append(self._populate_df_row(grp, comp, sg, n, pd_type_1, pd_type_2, all_entries))
    mixing_state_data = pd.DataFrame(row_list, columns=columns)
    mixing_state_data.sort_values(
        ["formula", "energy_1", "spacegroup", "num_sites"], inplace=True, ignore_index=True
    )
    return mixing_state_data
def _filter_and_sort_entries(self, entries, verbose=True):
"""
Given a single list of entries, separate them by run_type and return two lists, one containin
only entries of each run_type
"""
filtered_entries = []
for entry in entries:
if not entry.parameters.get("run_type"):
warnings.warn(
"Entry {} is missing parameters.run_type! This field"
"is required. This entry will be ignored.".format(entry.entry_id)
)
continue
if entry.parameters.get("run_type") not in self.valid_rtypes_1 + self.valid_rtypes_2:
warnings.warn(
f"Invalid run_type {entry.parameters.get('run_type')} for entry {entry.entry_id}. Must be one of "
f"{self.valid_rtypes_1 + self.valid_rtypes_2}. This entry will be ignored."
)
continue
if entry.entry_id is None:
warnings.warn(
f"Entry_id for {entry.composition.reduced_formula} entry {entry.entry_id} is invalid. "
"Unique entry_ids are required for every ComputedStructureEntry. This entry will be ignored."
)
continue
filtered_entries.append(entry)
filtered_entry_ids = {e.entry_id for e in filtered_entries}
if len(filtered_entry_ids) != len(filtered_entries):
raise ValueError(
"The provided ComputedStructureEntry do not all have unique entry_ids."
" Unique entry_ids are required for every ComputedStructureEntry."
)
# separate by run_type
entries_type_1 = [e for e in filtered_entries if e.parameters["run_type"] in self.valid_rtypes_1]
entries_type_2 = [e for e in filtered_entries if e.parameters["run_type"] in self.valid_rtypes_2]
if verbose:
print(
f"Processing {len(entries_type_1)} {self.run_type_1} and {len(entries_type_2)} "
f"{self.run_type_2} entries..."
)
# preprocess entries with any corrections
# make an EntrySet to enable some useful methods like .chemsys and .is_ground_state
if self.compat_1:
entries_type_1 = self.compat_1.process_entries(entries_type_1)
if verbose:
print(
f" Processed {len(entries_type_1)} compatible {self.run_type_1} entries with "
f"{self.compat_1.__class__.__name__}"
)
entries_type_1 = EntrySet(entries_type_1)
if self.compat_2:
entries_type_2 = self.compat_2.process_entries(entries_type_2)
if verbose:
print(
f" Processed {len(entries_type_2)} compatible {self.run_type_2} entries with "
f"{self.compat_2.__class__.__name__}"
)
entries_type_2 = EntrySet(entries_type_2)
# make sure both sets of entries belong to the same chemical system
# assuming there are any gga entries at all
if len(entries_type_1.chemsys) > 0:
chemsys = entries_type_1.chemsys
if not entries_type_2.chemsys <= entries_type_1.chemsys:
warnings.warn(
f" {self.run_type_2} entries chemical system {entries_type_2.chemsys} is larger than "
f"{self.run_type_1} entries chemical system {entries_type_1.chemsys}. Entries outside the "
f"{self.run_type_1} chemical system will be discarded"
)
entries_type_2 = entries_type_2.get_subset_in_chemsys(chemsys)
else:
# if only run_type_2 entries are present, then they define the chemsys
chemsys = entries_type_2.chemsys
if verbose:
print(f" Entries belong to the {chemsys} chemical system")
return list(entries_type_1), list(entries_type_2)
def _populate_df_row(self, struct_group, comp, sg, n, pd_type_1, pd_type_2, all_entries):
"""
helper function to populate a row of the mixing state DataFrame, given
a list of matched structures
"""
# within the group of matched structures, keep the lowest energy entry from
# each run_type
entries_type_1 = sorted(
(
e
for e in all_entries
if e.entry_id in [s.entry_id for s in struct_group] and e.parameters["run_type"] in self.valid_rtypes_1
),
key=lambda x: x.energy_per_atom,
)
first_entry = entries_type_1[0] if len(entries_type_1) > 0 else None
entries_type_2 = sorted(
(
e
for e in all_entries
if e.entry_id in [s.entry_id for s in struct_group] and e.parameters["run_type"] in self.valid_rtypes_2
),
key=lambda x: x.energy_per_atom,
)
second_entry = entries_type_2[0] if len(entries_type_2) > 0 else None
# generate info for the DataFrame
stable_1 = False
id1 = first_entry.entry_id if first_entry else None
id2 = second_entry.entry_id if second_entry else None
rt1 = first_entry.parameters["run_type"] if first_entry else None
rt2 = second_entry.parameters["run_type"] if second_entry else None
# are the entries the lowest energy at this composition?
energy_1 = first_entry.energy_per_atom if first_entry else np.nan
energy_2 = second_entry.energy_per_atom if second_entry else np.nan
# are they stable?
if pd_type_1:
stable_1 = first_entry in pd_type_1.stable_entries
# get the respective hull energies at this composition, if available
hull_energy_1, hull_energy_2 = np.nan, np.nan
if pd_type_1:
hull_energy_1 = pd_type_1.get_hull_energy_per_atom(comp)
if pd_type_2:
hull_energy_2 = pd_type_2.get_hull_energy_per_atom(comp)
return [
comp.reduced_formula,
sg,
n,
stable_1,
id1,
id2,
rt1,
rt2,
energy_1,
energy_2,
hull_energy_1,
hull_energy_2,
]
@staticmethod
def display_entries(entries):
"""
Generate a pretty printout of key properties of a list of ComputedEntry
"""
entries = sorted(entries, key=lambda e: (e.composition.reduced_formula, e.energy_per_atom))
try:
pd = PhaseDiagram(entries)
except ValueError:
return None
print(
"{:<12}{:<12}{:<12}{:<10}{:<8} {:<9} {:<9}".format(
"entry_id", "formula", "spacegroup", "run_type", "eV/atom", "corr/atom", "e_above_hull"
)
)
for e in entries:
print(
"{:<12}{:<12}{:<12}{:<10}{:<8.3f} {:<9.3f} {:<9.3f}".format(
e.entry_id,
e.composition.reduced_formula,
e.structure.get_space_group_info()[0],
e.parameters["run_type"],
e.energy_per_atom,
e.correction / e.composition.num_atoms,
pd.get_e_above_hull(e),
)
)
return None
| materialsproject/pymatgen | pymatgen/entries/mixing_scheme.py | Python | mit | 37,977 | [
"VASP",
"pymatgen"
] | c972b88d4a4bddea09f828af4b60259bf52229e3fe75593fc800f7e86e27348b |
#File: gini_plot.py
#Created: Mon 12 Mar 2012 03:00:01 PM CDT
#Last Change: Wed 03 Oct 2012 04:08:38 PM CDT
#Author: Steven Boada
import pylab as pyl
import cPickle as pickle

# Load the galaxy sample and drop objects without a Gini measurement.
galaxies = pickle.load(open('galaxies.pickle', 'rb'))
galaxies = [g for g in galaxies if g.Gini is not None]

# Two stacked panels: high-ICD galaxies on top, the rest below.
fig = pyl.figure(1, figsize=(4, 6))
ax_top = fig.add_subplot(211)
ax_bottom = fig.add_subplot(212)

for galaxy in galaxies:
    target = ax_top if galaxy.ICD_IH > 0.2 else ax_bottom
    target.scatter(galaxy.M20, galaxy.Gini, s=50, c='0.8')

# Dividing lines in the Gini-M20 plane.
m1 = pyl.arange(-3.0, 0.0, 0.1)
m2 = pyl.arange(-3.0, -1.68, 0.1)
for axis in (ax_top, ax_bottom):
    axis.plot(m1, -0.14 * m1 + 0.33, color='g', lw=2)
    axis.plot(m2, 0.14 * m2 + 0.80, color='b', lw=2)
    axis.set_xlim(0, -3)
    axis.set_ylim(0.3, 0.8)
    axis.set_xticks([-3, -2, -1, 0])

ax_bottom.set_xlabel(r"$M_{20}$")
ax_top.set_ylabel("G")
ax_bottom.set_ylabel("G")
ax_top.set_title(r'$\xi[i_{775}, H_{160}] >$ 4%')
ax_bottom.set_title(r'$\xi[i_{775}, H_{160}] <$ 4%')

pyl.show()
| boada/ICD | sandbox/legacy_plot_code/plot_G_vs_M20.py | Python | mit | 1,313 | [
"Galaxy"
] | 957c87313ea3ac429bc6ef5f1015d485e8574c733afce440eb8bc4cfc10e7a57 |
# -*- coding: utf-8 -*-
#
# Minecraft-Mods documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 14 15:21:55 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# -- General configuration ------------------------------------------------

# No Sphinx extensions are required for this project.
extensions = []

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Source files are reStructuredText.
source_suffix = '.rst'

# The document that holds the root of the toctree.
master_doc = 'index'

# Project metadata.
project = u'Minecraft-Mods'
copyright = u'2016, Brian McMahan'
author = u'Brian McMahan'

# The short X.Y version and the full release string.
version = u'0.1'
release = u'0.1'

# Language for autogenerated content (None = English).
language = None

# Patterns, relative to the source directory, to ignore when looking
# for source files.
exclude_patterns = []

# Syntax highlighting style.
pygments_style = 'sphinx'

# Do not render todo/todoList directives.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# Builtin theme and static asset path.
html_theme = 'default'
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'Minecraft-Modsdoc'

# -- Options for LaTeX output ---------------------------------------------

# All LaTeX settings use Sphinx defaults.
latex_elements = {
}

# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, 'Minecraft-Mods.tex', u'Minecraft-Mods Documentation',
     u'Brian McMahan', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'minecraft-mods', u'Minecraft-Mods Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# (source start file, target name, title, author, dir menu entry,
#  description, category).
texinfo_documents = [
    (master_doc, 'Minecraft-Mods', u'Minecraft-Mods Documentation',
     author, 'Minecraft-Mods', 'One line description of project.',
     'Miscellaneous'),
]
| Heroes-Academy/Minecraft_Spring_2016 | docs/source/conf.py | Python | mit | 9,223 | [
"Brian"
] | 9d97539b9666aba4f986c01caeaf503ab1eef4ad71a14e1221b54b3a6dbb42d0 |
""" Simple multi-layer perception neural network using Minpy """
# import minpy
import minpy.numpy as np
from minpy.nn import layers
from minpy.nn.model import ModelBase
from minpy.nn.solver import Solver
from minpy.nn.io import NDArrayIter
from examples.utils.data_utils import get_CIFAR10_data
from minpy.context import set_context, gpu
# set_context(gpu(0)) # set the global context as gpu(0)
# Training configuration and CIFAR-10 input dimensions.
batch_size = 128
input_size = (3, 32, 32)
# RGB width height
flattened_input_size = 3 * 32 * 32
hidden_size = 512
num_classes = 10
class TwoLayerNet(ModelBase):
    """Two-layer fully-connected network for CIFAR-10 classification.

    Architecture: affine -> ReLU -> batchnorm -> affine -> dropout.
    """
    def __init__(self):
        super(TwoLayerNet, self).__init__()
        # Define model parameters (weights/biases for the two affine layers,
        # scale/shift plus running statistics for batch normalization).
        self.add_param(name='w1', shape=(flattened_input_size, hidden_size)) \
            .add_param(name='b1', shape=(hidden_size,)) \
            .add_param(name='w2', shape=(hidden_size, num_classes)) \
            .add_param(name='b2', shape=(num_classes,)) \
            .add_param(name='gamma', shape=(hidden_size,),
                       init_rule='constant', init_config={'value': 1.0}) \
            .add_param(name='beta', shape=(hidden_size,),
                       init_rule='constant') \
            .add_aux_param(name='running_mean', value=None) \
            .add_aux_param(name='running_var', value=None)

    def forward(self, X, mode):
        """Compute class scores for a batch of images.

        Args:
            X: batch of input images.
            mode: 'train' or 'test'; passed through to dropout.

        Returns:
            Class scores with one row per input image.
        """
        # Flatten each image to a row vector. Use X.shape[0] rather than the
        # global batch_size so a final, partially-filled batch also works,
        # and reuse the flattened_input_size constant instead of repeating
        # the 3 * 32 * 32 arithmetic.
        X = np.reshape(X, (X.shape[0], flattened_input_size))
        # First affine layer (fully-connected layer).
        y1 = layers.affine(X, self.params['w1'], self.params['b1'])
        # ReLU activation.
        y2 = layers.relu(y1)
        # Batch normalization (also updates the running statistics).
        y3, self.aux_params['running_mean'], \
            self.aux_params['running_var'] = layers.batchnorm(
                y2, self.params['gamma'], self.params['beta'],
                running_mean=self.aux_params['running_mean'],
                running_var=self.aux_params['running_var'])
        # Second affine layer producing class scores.
        y4 = layers.affine(y3, self.params['w2'], self.params['b2'])
        # Dropout with probability 0.5 (behavior depends on mode).
        y5 = layers.dropout(y4, 0.5, mode=mode)
        return y5

    def loss(self, predict, y):
        """Softmax cross-entropy loss between predicted scores and labels."""
        # this function must be convex for gradient calculation
        return layers.softmax_loss(predict, y)
def main():
    """Train the two-layer network on CIFAR-10 with SGD + momentum."""
    # Location of the CIFAR-10 batches on disk.
    import os
    data_dir = os.path.expandvars('$HOME/data/minpy/cifar-10-batches-py')
    # Build the model.
    net = TwoLayerNet()
    # Data iterators for the training and test splits.
    data = get_CIFAR10_data(data_dir)
    train_iter = NDArrayIter(data=data['X_train'],
                             label=data['y_train'],
                             batch_size=batch_size,
                             shuffle=True)
    test_iter = NDArrayIter(data=data['X_test'],
                            label=data['y_test'],
                            batch_size=batch_size,
                            shuffle=False)
    # The Solver handles parameter initialization, backpropagation and the
    # optimization loop.
    trainer = Solver(net,
                     train_iter,
                     test_iter,
                     num_epochs=10,
                     init_rule='gaussian',
                     init_config={
                         'stdvar': 0.001
                     },
                     update_rule='sgd_momentum',
                     optim_config={
                         'learning_rate': 1e-4,
                         'momentum': 0.9
                     },
                     verbose=True,
                     print_every=20)
    # Initialize model parameters, then train.
    trainer.init()
    trainer.train()


if __name__ == '__main__':
    main()
| shadowleaves/deep_learning | twolayer/minpy_batchnorm.py | Python | mit | 3,754 | [
"Gaussian"
] | 55dbbabffba38d6f3980a5848c741ff9b190868771c6e4223f25266a6c977859 |
# Copyright 2006, 2007 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.SeqIO support for the "phylip" (PHYLIP) file format.
You are expected to use this module via the Bio.SeqIO functions.
Note:
In TREE_PUZZLE (Schmidt et al. 2003) and PHYML (Guindon and Gascuel 2003)
a dot/period (".") in a sequence is interpreted as meaning the same
character as in the first sequence. The PHYLIP 3.6 documentation says:
"a period was also previously allowed but it is no longer allowed,
because it sometimes is used in different senses in other programs"
At the time of writing, we do nothing special with a dot/period.
"""
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Interfaces import SequenceWriter
from sets import Set
#This is a generator function!
#TODO - Should the default be Gapped(single_letter_alphabet) instead?
def PhylipIterator(handle, alphabet = single_letter_alphabet) :
    """Reads a Phylip alignment file returning a SeqRecord object iterator.

    Record identifiers are limited to at most 10 characters.

    It only copes with interlaced phylip files!  Sequential files won't work
    where the sequences are split over multiple lines.

    For more information on the file format, please see:
    http://evolution.genetics.washington.edu/phylip/doc/sequence.html
    http://evolution.genetics.washington.edu/phylip/doc/main.html#inputfiles

    Raises a ValueError if the header or any alignment block is malformed,
    or if any sequence does not match the declared alignment length.
    """
    line = handle.readline()
    if not line: return #empty file, nothing to yield
    line = line.strip()
    #Header must be two whitespace separated integers: the number of
    #sequences and the alignment length.  Note str.split() with no
    #arguments already discards empty strings, so no filter is needed.
    parts = line.split()
    if len(parts) != 2 :
        raise ValueError("First line should have two integers")
    try :
        number_of_seqs = int(parts[0])
        length_of_seqs = int(parts[1])
    except ValueError:
        raise ValueError("First line should have two integers")
    ids = []
    seqs = []
    #Expects STRICT truncation/padding to 10 characters
    #Does not require any white space between name and seq.
    for i in range(0,number_of_seqs) :
        line = handle.readline().rstrip()
        ids.append(line[:10].strip()) #first ten characters
        seqs.append([line[10:].strip().replace(" ","")])
    line=""
    while True :
        #Skip any blank lines between blocks...
        while ""==line.strip():
            line = handle.readline()
            if not line : break #end of file
        if not line : break
        #Each subsequent block holds one continuation line per sequence,
        #in the same order as the first block.
        for i in range(0,number_of_seqs) :
            seqs[i].append(line.strip().replace(" ",""))
            line = handle.readline()
            if (not line) and i+1 < number_of_seqs :
                raise ValueError("End of file mid-block")
        if not line : break #end of file
    for i in range(0,number_of_seqs) :
        seq = "".join(seqs[i])
        if len(seq) != length_of_seqs :
            raise ValueError("Sequence %i length %i, expected length %i" \
                % (i+1, len(seq), length_of_seqs))
        yield SeqRecord(Seq(seq, alphabet), id=ids[i], name=ids[i], description="")
class PhylipWriter(SequenceWriter):
    """Write interlaced Phylip sequence alignments.

    For more information on the file format, please see:
    http://evolution.genetics.washington.edu/phylip/doc/sequence.html
    http://evolution.genetics.washington.edu/phylip/doc/main.html#inputfiles

    All sequences must be the same length."""
    def __init__(self, handle, truncate=10):
        """Creates the writer object.

        handle   - output file handle
        truncate - identifiers are truncated/padded to this many characters
                   (PHYLIP's strict format expects exactly 10)

        Use the method write_file() to actually record your sequence records."""
        self.handle = handle
        self.truncate = truncate
    def write_file(self, records) :
        """Use this to write an entire file containing the given records.

        If records is an iterator that does not support len(records) or
        records[index] then it is converted into a list.

        Raises a ValueError for zero records, unequal or empty sequences,
        or identifiers that clash once truncated.
        """
        #Need length, and multiple passes - an iterator will not do.
        records = list(records)
        if len(records)==0 :
            raise ValueError("Must have at least one sequence")
        length_of_sequences = len(records[0].seq)
        for record in records :
            if length_of_sequences != len(record.seq) :
                raise ValueError("Sequences must all be the same length")
        if length_of_sequences <= 0 :
            raise ValueError("Non-empty sequences are required")
        #Use the builtin set here; the old "sets" module is deprecated.
        if len(records) > len(set(r.id[:self.truncate] for r in records)) :
            raise ValueError("Repeated identifier, possibly due to truncation")
        handle = self.handle
        # From experimentation, the use of tabs is not understood by the
        # EMBOSS suite. The nature of the expected white space is not
        # defined, simply "These are in free format, separated by blanks"
        handle.write(" %i %s\n" % (len(records), length_of_sequences))
        block=0
        while True :
            for record in records :
                if block==0 :
                    #Per the PHYLIP 3.6 documentation the name must be exactly
                    #ten characters, padded with blanks, and must not contain
                    #parentheses, square brackets, colon, semicolon or comma.
                    #Remove/replace the banned characters, then truncate and
                    #right pad to the expected length.  (This explanation was
                    #previously a string literal statement inside the loop,
                    #evaluated on every pass for no effect.)
                    name = record.id.strip()
                    for char in "[]()," :
                        name = name.replace(char,"")
                    for char in ":;" :
                        name = name.replace(char,"|")
                    handle.write(name[:self.truncate].ljust(self.truncate))
                else :
                    #Continuation rows get a blank name column.
                    handle.write(" "*self.truncate)
                #Write five chunks of ten letters per line...
                for chunk in range(0,5) :
                    i = block*50 + chunk*10
                    seq_segment = record.seq.tostring()[i:i+10]
                    #TODO - Force any gaps to be '-' character? Look at the alphabet...
                    #TODO - How to cope with '?' or '.' in the sequence?
                    handle.write(" %s" % seq_segment)
                    if i+10 > length_of_sequences : break
                handle.write("\n")
            block=block+1
            if block*50 > length_of_sequences : break
            handle.write("\n")
        #Don't close the handle. Doing so would prevent this code
        #from writing concatenated phylip files which are used
        #in phylogenetic bootstrapping
if __name__=="__main__" :
print "Testing"
phylip_text=""" 8 286
V_Harveyi_ --MKNWIKVA VAAIA--LSA A--------- ---------T VQAATEVKVG
B_subtilis MKMKKWTVLV VAALLAVLSA CG-------- ----NGNSSS KEDDNVLHVG
B_subtilis MKKALLALFM VVSIAALAAC GAGNDNQSKD NAKDGDLWAS IKKKGVLTVG
YA80_HAEIN MKKLLFTTAL LTGAIAFSTF ---------- -SHAGEIADR VEKTKTLLVG
FLIY_ECOLI MKLAHLGRQA LMGVMAVALV AG---MSVKS FADEG-LLNK VKERGTLLVG
E_coli_Gln --MKSVLKVS LAALTLAFAV S--------- ---------S HAADKKLVVA
Deinococcu -MKKSLLSLK LSGLLVPSVL ALS------- -LSACSSPSS TLNQGTLKIA
HISJ_E_COL MKKLVLSLSL VLAFSSATAA F--------- ---------- AAIPQNIRIG
MSGRYFPFTF VKQ--DKLQG FEVDMWDEIG KRNDYKIEYV TANFSGLFGL
ATGQSYPFAY KEN--GKLTG FDVEVMEAVA KKIDMKLDWK LLEFSGLMGE
TEGTYEPFTY HDKDTDKLTG YDVEVITEVA KRLGLKVDFK ETQWGSMFAG
TEGTYAPFTF HDK-SGKLTG FDVEVIRKVA EKLGLKVEFK ETQWDAMYAG
LEGTYPPFSF QGD-DGKLTG FEVEFAQQLA KHLGVEASLK PTKWDGMLAS
TDTAFVPFEF KQG--DKYVG FDVDLWAAIA KELKLDYELK PMDFSGIIPA
MEGTYPPFTS KNE-QGELVG FDVDIAKAVA QKLNLKPEFV LTEWSGILAG
TDPTYAPFES KNS-QGELVG FDIDLAKELC KRINTQCTFV ENPLDALIPS
LETGRIDTIS NQITMTDARK AKYLFADPYV VDG-AQITVR KGNDSIQGVE
LQTGKLDTIS NQVAVTDERK ETYNFTKPYA YAG-TQIVVK KDNTDIKSVD
LNSKRFDVVA NQVG-KTDRE DKYDFSDKYT TSR-AVVVTK KDNNDIKSEA
LNAKRFDVIA NQTNPSPERL KKYSFTTPYN YSG-GVIVTK SSDNSIKSFE
LDSKRIDVVI NQVTISDERK KKYDFSTPYT ISGIQALVKK GNEGTIKTAD
LQTKNVDLAL AGITITDERK KAIDFSDGYY KSG-LLVMVK ANNNDVKSVK
LQANKYDVIV NQVGITPERQ NSIGFSQPYA YSRPEIIVAK NNTFNPQSLA
LKAKKIDAIM SSLSITEKRQ QEIAFTDKLY AADSRLVVAK NSDIQP-TVE
DLAGKTVAVN LGSNFEQLLR DYDKDGKINI KTYDT--GIE HDVALGRADA
DLKGKTVAAV LGSNHAKNLE SKDPDKKINI KTYETQEGTL KDVAYGRVDA
DVKGKTSAQS LTSNYNKLAT N----AGAKV EGVEGMAQAL QMIQQARVDM
DLKGRKSAQS ATSNWGKDAK A----AGAQI LVVDGLAQSL ELIKQGRAEA
DLKGKKVGVG LGTNYEEWLR QNV--QGVDV RTYDDDPTKY QDLRVGRIDA
DLDGKVVAVK SGTGSVDYAK AN--IKTKDL RQFPNIDNAY MELGTNRADA
DLKGKRVGST LGSNYEKQLI DTG---DIKI VTYPGAPEIL ADLVAGRIDA
SLKGKRVGVL QGTTQETFGN EHWAPKGIEI VSYQGQDNIY SDLTAGRIDA
FIMDRLSALE -LIKKT-GLP LQLAGEPFET I-----QNAW PFVDNEKGRK
YVNSRTVLIA -QIKKT-GLP LKLAGDPIVY E-----QVAF PFAKDDAHDK
TYNDKLAVLN -YLKTSGNKN VKIAFETGEP Q-----STYF TFRKGS--GE
TINDKLAVLD -YFKQHPNSG LKIAYDRGDK T-----PTAF AFLQGE--DA
ILVDRLAALD -LVKKT-NDT LAVTGEAFSR Q-----ESGV ALRKGN--ED
VLHDTPNILY -FIKTAGNGQ FKAVGDSLEA Q-----QYGI AFPKGS--DE
AYNDRLVVNY -IINDQ-KLP VRGAGQIGDA A-----PVGI ALKKGN--SA
AFQDEVAASE GFLKQPVGKD YKFGGPSVKD EKLFGVGTGM GLRKED--NE
LQAEVNKALA EMRADGTVEK ISVKWFGADI TK----
LRKKVNKALD ELRKDGTLKK LSEKYFNEDI TVEQKH
VVDQVNKALK EMKEDGTLSK ISKKWFGEDV SK----
LITKFNQVLE ALRQDGTLKQ ISIEWFGYDI TQ----
LLKAVNDAIA EMQKDGTLQA LSEKWFGADV TK----
LRDKVNGALK TLRENGTYNE IYKKWFGTEP K-----
LKDQIDKALT EMRSDGTFEK ISQKWFGQDV GQP---
LREALNKAFA EMRADGTYEK LAKKYFDFDV YGG---
"""
from cStringIO import StringIO
handle = StringIO(phylip_text)
count=0
for record in PhylipIterator(handle) :
count=count+1
print record.id
#print record.seq.tostring()
assert count == 8
expected="""mkklvlslsl vlafssataa faaipqniri gtdptyapfe sknsqgelvg
fdidlakelc krintqctfv enpldalips lkakkidaim sslsitekrq qeiaftdkly
aadsrlvvak nsdiqptves lkgkrvgvlq gttqetfgne hwapkgieiv syqgqdniys
dltagridaafqdevaaseg flkqpvgkdy kfggpsvkde klfgvgtgmg lrkednelre
alnkafaemradgtyeklak kyfdfdvygg""".replace(" ","").replace("\n","").upper()
assert record.seq.tostring().replace("-","") == expected
#From here:
#http://atgc.lirmm.fr/phyml/usersguide.html
phylip_text2="""5 60
Tax1 CCATCTCACGGTCGGTACGATACACCTGCTTTTGGCAG
Tax2 CCATCTCACGGTCAGTAAGATACACCTGCTTTTGGCGG
Tax3 CCATCTCCCGCTCAGTAAGATACCCCTGCTGTTGGCGG
Tax4 TCATCTCATGGTCAATAAGATACTCCTGCTTTTGGCGG
Tax5 CCATCTCACGGTCGGTAAGATACACCTGCTTTTGGCGG
GAAATGGTCAATATTACAAGGT
GAAATGGTCAACATTAAAAGAT
GAAATCGTCAATATTAAAAGGT
GAAATGGTCAATCTTAAAAGGT
GAAATGGTCAATATTAAAAGGT"""
phylip_text3="""5 60
Tax1 CCATCTCACGGTCGGTACGATACACCTGCTTTTGGCAGGAAATGGTCAATATTACAAGGT
Tax2 CCATCTCACGGTCAGTAAGATACACCTGCTTTTGGCGGGAAATGGTCAACATTAAAAGAT
Tax3 CCATCTCCCGCTCAGTAAGATACCCCTGCTGTTGGCGGGAAATCGTCAATATTAAAAGGT
Tax4 TCATCTCATGGTCAATAAGATACTCCTGCTTTTGGCGGGAAATGGTCAATCTTAAAAGGT
Tax5 CCATCTCACGGTCGGTAAGATACACCTGCTTTTGGCGGGAAATGGTCAATATTAAAAGGT"""
handle = StringIO(phylip_text2)
list2 = list(PhylipIterator(handle))
handle.close()
assert len(list2)==5
handle = StringIO(phylip_text3)
list3 = list(PhylipIterator(handle))
handle.close()
assert len(list3)==5
for i in range(0,5) :
list2[i].id == list3[i].id
list2[i].seq.tostring() == list3[i].seq.tostring()
#From here:
#http://evolution.genetics.washington.edu/phylip/doc/sequence.html
#Note the lack of any white space between names 2 and 3 and their seqs.
phylip_text4=""" 5 42
Turkey AAGCTNGGGC ATTTCAGGGT
Salmo gairAAGCCTTGGC AGTGCAGGGT
H. SapiensACCGGTTGGC CGTTCAGGGT
Chimp AAACCCTTGC CGTTACGCTT
Gorilla AAACCCTTGC CGGTACGCTT
GAGCCCGGGC AATACAGGGT AT
GAGCCGTGGC CGGGCACGGT AT
ACAGGTTGGC CGTTCAGGGT AA
AAACCGAGGC CGGGACACTC AT
AAACCATTGC CGGTACGCTT AA"""
#From here:
#http://evolution.genetics.washington.edu/phylip/doc/sequence.html
phylip_text5=""" 5 42
Turkey AAGCTNGGGC ATTTCAGGGT
GAGCCCGGGC AATACAGGGT AT
Salmo gairAAGCCTTGGC AGTGCAGGGT
GAGCCGTGGC CGGGCACGGT AT
H. SapiensACCGGTTGGC CGTTCAGGGT
ACAGGTTGGC CGTTCAGGGT AA
Chimp AAACCCTTGC CGTTACGCTT
AAACCGAGGC CGGGACACTC AT
Gorilla AAACCCTTGC CGGTACGCTT
AAACCATTGC CGGTACGCTT AA"""
phylip_text5a=""" 5 42
Turkey AAGCTNGGGC ATTTCAGGGT GAGCCCGGGC AATACAGGGT AT
Salmo gairAAGCCTTGGC AGTGCAGGGT GAGCCGTGGC CGGGCACGGT AT
H. SapiensACCGGTTGGC CGTTCAGGGT ACAGGTTGGC CGTTCAGGGT AA
Chimp AAACCCTTGC CGTTACGCTT AAACCGAGGC CGGGACACTC AT
Gorilla AAACCCTTGC CGGTACGCTT AAACCATTGC CGGTACGCTT AA"""
handle = StringIO(phylip_text4)
list4 = list(PhylipIterator(handle))
handle.close()
assert len(list4)==5
handle = StringIO(phylip_text5)
try :
list5 = list(PhylipIterator(handle))
assert len(list5)==5
print "That should have failed..."
except ValueError :
print "Evil multiline non-interlaced example failed as expected"
handle.close()
handle = StringIO(phylip_text5a)
list5 = list(PhylipIterator(handle))
handle.close()
assert len(list5)==5
for i in range(0,5) :
list4[i].id == list5[i].id
list4[i].seq.tostring() == list5[i].seq.tostring()
"""
handle = StringIO(phylip_text)
out_handle=open("/tmp/test.phy","w")
writer = PhylipWriter(out_handle)
writer.write_file(PhylipIterator(handle))
out_handle.close()
print "---------------------"
print open("/tmp/test.phy").read()
"""
| dbmi-pitt/DIKB-Micropublication | scripts/mp-scripts/Bio/SeqIO/PhylipIO.py | Python | apache-2.0 | 14,785 | [
"Biopython"
] | 7cd21cb22cb6ca6861e263dce13d9250860e7e8a0eb74c8485a69964bcbd347a |
# ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""
This module is used to store/retrieve user profiles on the local machine.
A user profile can contain information such as connection parameters to the
database or the language the user wants to use in the application.
For this module to function correctly, the `settings` should have an attribute
named `CAMELOT_DBPROFILES_CIPHER`. This is a 'secret' per application string
that is used to encrypt the profile information as it is stored on the local
machine.
"""
import base64
import logging
from PyQt4 import QtCore
from camelot.core.conf import settings
logger = logging.getLogger('camelot.core.dbprofiles')
def get_cipher():
from Crypto.Cipher import ARC4
if hasattr( settings, 'CAMELOT_DBPROFILES_CIPHER' ):
key = getattr( settings, 'CAMELOT_DBPROFILES_CIPHER' )
else:
key = 'The Knights Who Say Ni'
return ARC4.new( key )
def get_languagecode(profile=None):
"""
:return: two-letter ISO 639 language code
"""
if not profile:
profile = selected_profile_info()
return selected_profile_info()['locale_language'][:2]
def get_countrycode(profile=None):
"""
:return: two-letter ISO 3166 country code
"""
if not profile:
profile = selected_profile_info()
return selected_profile_info()['locale_language'][2:]
def _encode_setting(value):
return base64.b64encode( get_cipher().encrypt( unicode(value).encode('utf-8' ) ) )
def _decode_setting(value):
return get_cipher().decrypt( base64.b64decode( value ) ).decode('utf-8')
def selected_profile_info():
"""
:return: a dict with the info of the selected profile
"""
profiles = fetch_profiles()
profilename = last_used_profile()
try:
return profiles[profilename]
except KeyError:
logger.error( u'no profile named %s, available profiles are '%profilename )
for key in profiles.keys():
logger.error( u' - %s'%key )
raise
def connection_string_from_profile( profile ):
connection_string = '%s://'%profile['dialect']
if profile['user'] or profile['pass']:
connection_string = connection_string + '%s:%s@'%( profile['user'],
profile['pass'] )
if profile['host']:
connection_string = connection_string + profile['host']
if profile['port']:
connection_string = connection_string + ':%s'%profile['port']
connection_string = connection_string + '/%s'%profile['database']
return connection_string
def engine_from_profile():
"""
Create a SQLAlchemy Engine from the selected profile
"""
from sqlalchemy import create_engine
profile = selected_profile_info()
connect_args = dict()
if profile['dialect'] == 'mysql':
connect_args['charset'] = 'utf8'
connection_string = connection_string_from_profile( profile )
return create_engine(connection_string, pool_recycle=True, connect_args=connect_args)
def media_root_from_profile():
"""
Return the media root from the selected profile
"""
profile = selected_profile_info()
return profile['media_location']
def stylesheet_from_profile():
profile = selected_profile_info()
from camelot.view import art
return art.read( 'stylesheet/office2007_' + profile.get('stylesheet', 'blue') + '.qss' )
def last_used_profile():
settings = QtCore.QSettings()
return unicode(settings.value('last_used_database_profile',
QtCore.QVariant('')).toString(), 'utf-8')
def fetch_profiles(from_file=None):
profiles = {}
try:
if from_file is None:
settings = QtCore.QSettings()
else:
settings = QtCore.QSettings(from_file, QtCore.QSettings.IniFormat)
size = settings.beginReadArray('database_profiles')
if size == 0:
return profiles
for index in range(size):
settings.setArrayIndex(index)
info = {}
profilename = unicode(settings.value('profilename', QtCore.QVariant('')).toString(), 'utf-8')
if not profilename:
continue # well we should not really be doing anything
info['dialect'] = _decode_setting(settings.value('dialect', QtCore.QVariant('')).toString())
info['host'] = _decode_setting(settings.value('host', QtCore.QVariant('')).toString())
info['port'] = _decode_setting(settings.value('port', QtCore.QVariant('')).toString())
info['database'] = _decode_setting(settings.value('database', QtCore.QVariant('')).toString())
info['user'] = _decode_setting(settings.value('user', QtCore.QVariant('')).toString())
info['pass'] = _decode_setting(settings.value('pass', QtCore.QVariant('')).toString())
info['media_location'] = _decode_setting(settings.value('media_location', QtCore.QVariant('')).toString())
info['locale_language'] = _decode_setting(settings.value('locale_language', QtCore.QVariant('')).toString())
info['proxy_host'] = _decode_setting(settings.value('proxy_host', QtCore.QVariant('')).toString())
info['proxy_port'] = _decode_setting(settings.value('proxy_port', QtCore.QVariant('')).toString())
info['proxy_username'] = _decode_setting(settings.value('proxy_username', QtCore.QVariant('')).toString())
info['proxy_password'] = _decode_setting(settings.value('proxy_password', QtCore.QVariant('')).toString())
profiles[profilename] = info
settings.endArray()
except Exception, e:
logger.warn('Could not read existing profiles, proceed with what was available', exc_info=e)
return profiles
def store_profiles(profiles, to_file=None):
if to_file is None:
settings = QtCore.QSettings()
else:
settings = QtCore.QSettings(to_file, QtCore.QSettings.IniFormat)
settings.beginWriteArray('database_profiles')
for index, (profilename, info) in enumerate(profiles.items()):
settings.setArrayIndex(index)
settings.setValue('profilename', QtCore.QVariant(unicode(profilename).encode('utf-8')))
settings.setValue('dialect', QtCore.QVariant(_encode_setting(info['dialect'])))
settings.setValue('host', QtCore.QVariant(_encode_setting(info['host'])))
settings.setValue('port', QtCore.QVariant(_encode_setting(info['port'])))
settings.setValue('database', QtCore.QVariant(_encode_setting(info['database'])))
settings.setValue('user', QtCore.QVariant(_encode_setting(info['user'])))
settings.setValue('pass', QtCore.QVariant(_encode_setting(info['pass'])))
settings.setValue('media_location', QtCore.QVariant(_encode_setting(info['media_location'])))
settings.setValue('locale_language', QtCore.QVariant(_encode_setting(info['locale_language'])))
settings.setValue('proxy_host', QtCore.QVariant(_encode_setting(info['proxy_host'])))
settings.setValue('proxy_port', QtCore.QVariant(_encode_setting(info['proxy_port'])))
settings.setValue('proxy_username', QtCore.QVariant(_encode_setting(info['proxy_username'])))
settings.setValue('proxy_password', QtCore.QVariant(_encode_setting(info['proxy_password'])))
settings.endArray()
def use_chosen_profile(profilename):
settings = QtCore.QSettings()
settings.setValue('last_used_database_profile', unicode(profilename).encode('utf-8') )
class EmptyProxy():
@classmethod
def hostName(cls):
return ''
@classmethod
def user(cls):
return ''
@classmethod
def port(cls):
return ''
@classmethod
def password(cls):
return ''
def get_network_proxy():
# turn this temporary off, because it freezes the app on winblows
return EmptyProxy()
#from PyQt4 import QtNetwork
#proxy = None
#query = QtNetwork.QNetworkProxyQuery(QtCore.QUrl('http://aws.amazon.com'))
##proxies = QtNetwork.QNetworkProxyFactory.systemProxyForQuery(query)
#if proxies:
#logger.info('Proxy servers found: %s' % ['%s:%s' %
#(str(proxy.hostName()),str(proxy.port())) for proxy in proxies])
#if proxies[0].hostName():
#proxy = proxies[0]
## we still need some empty values for the profile
#if proxy is None:
#return EmptyProxy()
#return proxy
| jeroendierckx/Camelot | camelot/core/dbprofiles.py | Python | gpl-2.0 | 9,414 | [
"VisIt"
] | d14d9062b8d39308b4d2afbf661a27e0cd15836df33ac49878810869baff7785 |
"""
.. _sfm-track:
==================================================
Tracking with the Sparse Fascicle Model
==================================================
Tracking requires a per-voxel model. Here, the model is the Sparse Fascicle
Model, described in [Rokem2014]_. This model reconstructs the diffusion signal
as a combination of the signals from different fascicles (see also
:ref:`sfm-reconst`).
To begin, we read the Stanford HARDI data-set into memory:
"""
from dipy.data import read_stanford_labels
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.get_affine()
"""
This dataset provides a label map (generated using Freesurfer), in which the
white matter voxels are labeled as either 1 or 2:
"""
white_matter = (labels == 1) | (labels == 2)
"""
The first step in tracking is generating a model from which tracking directions
can be extracted in every voxel.
For the SFM, this requires first that we define a canonical response function
that will be used to deconvolve the signal in every voxel
"""
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
"""
We initialize an SFM model object, using this response function and using the
default sphere (362 vertices, symmetrically distributed on the surface of the
sphere):
"""
from dipy.data import get_sphere
sphere = get_sphere()
from dipy.reconst import sfm
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
l1_ratio=0.5, alpha=0.001,
response=response[0])
"""
We fit this model to the data in each voxel in the white-matter mask, so that
we can use these directions in tracking:
"""
from dipy.reconst.peaks import peaks_from_model
pnm = peaks_from_model(sf_model, data, sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=white_matter,
parallel=True
)
"""
A ThresholdTissueClassifier object is used to segment the data to track only
through areas in which the Generalized Fractional Anisotropy (GFA) is
sufficiently high.
"""
from dipy.tracking.local import ThresholdTissueClassifier
classifier = ThresholdTissueClassifier(pnm.gfa, .25)
"""
Tracking will be started from a set of seeds evenly distributed in the white
matter:
"""
from dipy.tracking import utils
seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)
"""
For the sake of brevity, we will take only the first 1000 seeds, generating
only 1000 streamlines. Remove this line to track from many more points in all of
the white matter
"""
seeds = seeds[:1000]
"""
We now have the necessary components to construct a tracking pipeline and
execute the tracking
"""
from dipy.tracking.local import LocalTracking
streamlines = LocalTracking(pnm, classifier, seeds, affine, step_size=.5)
streamlines = list(streamlines)
"""
Next, we will create a visualization of these streamlines, relative to this
subject's T1-weighted anatomy:
"""
from dipy.viz import fvtk
from dipy.viz.colormap import line_colors
from dipy.data import read_stanford_t1
from dipy.tracking.utils import move_streamlines
from numpy.linalg import inv
t1 = read_stanford_t1()
t1_data = t1.get_data()
t1_aff = t1.get_affine()
color = line_colors(streamlines)
"""
To speed up visualization, we will select a random sub-set of streamlines to
display. This is particularly important, if you track from seeds throughout the
entire white matter, generating many streamlines. In this case, for
demonstration purposes, we subselect 900 streamlines.
"""
from dipy.tracking.streamline import select_random_set_of_streamlines
plot_streamlines = select_random_set_of_streamlines(streamlines, 900)
streamlines_actor = fvtk.streamtube(
list(move_streamlines(plot_streamlines, inv(t1_aff))),
line_colors(streamlines), linewidth=0.1)
vol_actor = fvtk.slicer(t1_data)
vol_actor.display(40, None, None)
vol_actor2 = vol_actor.copy()
vol_actor2.display(None, None, 35)
ren = fvtk.ren()
fvtk.add(ren, streamlines_actor)
fvtk.add(ren, vol_actor)
fvtk.add(ren, vol_actor2)
fvtk.record(ren, n_frames=1, out_path='sfm_streamlines.png',
size=(800, 800))
"""
.. figure:: sfm_streamlines.png
:align: center
**Sparse Fascicle Model tracks**
Finally, we can save these streamlines to a 'trk' file, for use in other
software, or for further analysis.
"""
from dipy.io.trackvis import save_trk
save_trk("sfm_detr.trk", streamlines, affine, labels.shape)
"""
References
----------
.. [Rokem2014] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
(2014). Evaluating the accuracy of diffusion MRI models in white
matter. http://arxiv.org/abs/1411.0721
"""
| JohnGriffiths/dipy | doc/examples/sfm_tracking.py | Python | bsd-3-clause | 4,924 | [
"Brian"
] | 74363ae87c45b8a76f040a4c2627bf93090e8e7f5a3363e8ceaa18a446a67886 |
#!/usr/bin/env python
import vtk
def get_program_parameters():
import argparse
description = 'Read and display ExodusII data.'
epilogue = '''
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue)
parser.add_argument('filename', help='A required filename e.g mug.e.')
parser.add_argument('nodal_var', help='The nodal variable e,g, convected.')
args = parser.parse_args()
return args.filename, args.nodal_var
def main():
colors = vtk.vtkNamedColors()
# Input file and variable
filename, nodal_var = get_program_parameters()
# Read Exodus Data
reader = vtk.vtkExodusIIReader()
reader.SetFileName(filename)
reader.UpdateInformation()
reader.SetTimeStep(10)
reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1) # enables all NODAL variables
reader.Update()
# print(reader) # uncomment this to show the file information
# Create Geometry
geometry = vtk.vtkCompositeDataGeometryFilter()
geometry.SetInputConnection(0, reader.GetOutputPort(0))
geometry.Update()
# Mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(geometry.GetOutputPort())
mapper.SelectColorArray(nodal_var)
mapper.SetScalarModeToUsePointFieldData()
mapper.InterpolateScalarsBeforeMappingOn()
# Actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# Renderer
renderer = vtk.vtkRenderer()
renderer.AddViewProp(actor)
renderer.SetBackground(colors.GetColor3d("DimGray"))
renderer.GetActiveCamera().SetPosition(9.0, 9.0, 7.0)
renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
renderer.GetActiveCamera().SetViewUp(0.2, -0.7, 0.7)
renderer.GetActiveCamera().SetDistance(14.5)
# Window and Interactor
window = vtk.vtkRenderWindow()
window.AddRenderer(renderer)
window.SetSize(600, 600)
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(window)
interactor.Initialize()
# Show the result
window.Render()
interactor.Start()
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/IO/ReadExodusData.py | Python | apache-2.0 | 2,101 | [
"VTK"
] | 7acc88e4f0b5cdbc2b51436e0f926f651c63042ed3cb1f927408fc0283219ef3 |
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class emp_test(SimpleVTKClassModuleBase):
"""This is the minimum you need to wrap a single VTK object. This
__doc__ string will be replaced by the __doc__ string of the encapsulated
VTK object, i.e. vtkStripper in this case.
With these few lines, we have error handling, progress reporting, module
help and also: the complete state of the underlying VTK object is also
pickled, i.e. when you save and restore a network, any changes you've
made to the vtkObject will be restored.
"""
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkStripper(), 'Stripping polydata.',
('vtkPolyData',), ('Stripped vtkPolyData',))
| chrisidefix/devide | testing/emp_test/emp_test.py | Python | bsd-3-clause | 889 | [
"VTK"
] | ddd335883a4cd0e3b91b7560113b5c48439ebcb0d5059b95da3413065ffad3ec |
"""
Copyright (C) 2011 N.D. Price Lab
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from AUREA.learner import dirac
import pyBabel.Extensions
class dataPackager:
"""
This class takes a set of data tables and generates data vectors and class vectors (and geneNet vectors if wanted) suitable for processing by the algorithms.
"""
def __init__(self, merge_cache='data'):
self.data_tables = []
self.classifications = []
self.gene_networks = {}
self.synonyms = None
self.class_vector = None
self.gene_data_vector = None
self.probe_data_vector = None
self.gene_net_vector = None
self.gene_net_size_vector = None
self.gene_index = None
self.probe_index = None
self.genes = None
self.probes = None
self.unclassified = None
self.unclassified_data_vector = None
self.merge_cache = merge_cache
self.gene_net_to_genes_map = None #maps a geneNetwork name to a list of gene names that are in the dataset
def setMergeCache(self, cache_dir):
"""
This is for setting the directory that stores cached merge-tables.
This value is passed to pyBabel.
"""
self.merge_cache = cache_dir
def getDataVector(self, type):
"""
This generates a tuple containing (a vector of floats, the size of the columns(number of genes/probes in this context.
It uses the dirac library to form the floatVector.
"""
numSamples = 0
class_vector = self.getClassVector()
for i in xrange(len(class_vector)):
numSamples += class_vector[i]
if self.probe_data_vector == None:
self.createDataVector()
if type == 'probe':
dv = self.probe_data_vector
elif type == 'gene':
dv = self.gene_data_vector
else:
raise Exception, "Invalid row key given"
if dv:
return (dv, len(dv)/numSamples)
else:
self.createDataVector()
if len(dv) % numSamples:
#check if something screwy went on with our data table
Exception, "The data vector is malformed: length: " + str(len(dv)) + " number of samples: " + numSamples
return (dv, len(dv)/numSamples)
def getGeneNetVector(self, minNumberGenes = 3):
"""
This returns a tuple containing (genelocations relative to data vector in intVector, numgenes in each set in intVector format).
"""
if self.gene_net_vector:
return (self.gene_net_vector,self.gene_net_size_vector)
else:
self.createGeneNetVector(minNumberGenes)
return (self.gene_net_vector, self.gene_net_size_vector)
def getGeneNetName(self, geneNetIndex):
"""
Given the index into the geneNetwork array, return the human readable name of the network.
"""
return self.gene_net_map[geneNetIndex]
def getGeneName(self, index, type='gene'):
"""
Given an index into either the probe or gene array(determined by the type parameter,) return the human readable name of the probe/gene.
"""
if type=='gene':
return self.genes[index]
else:
return self.getGeneNameFromProbeName(self.getProbeName(index)) + '-'+ self.getProbeName(index)
def getProbeName(self, probe_index, tbl_indx=0):
if isinstance(self.probes[probe_index], tuple):
#merged probe, so join names
return self.probes[probe_index][tbl_indx]
return self.probes[probe_index]
def getGeneNameFromProbeName(self, probe_name, dt_indx=0):
"""
Given a probe name
return the gene name
"""
#not storing it here so we have to go back to the data tables
dt1 = self.data_tables[dt_indx]
return dt1.genes[dt1.probe_index[probe_name]]
def getGeneNamesFromNetwork(self, network_name):
"""
This returns a list of the genes that were used by dirac (i.e. are available on the microarray) for classifying
this network.
Throws error if networks were never built
"""
if self.gene_net_to_genes_map is None:
raise Exception("Attempt to look up network genes without building networks")
return self.gene_net_to_genes_map[network_name]
def getClassVector(self):
"""
This returns an intVector with the number of genes in each class. This should map to columns in the dataVector, for example a vector containing (4,5,8) means that the first class is the first 4 columns, the second class is the 5th-8th column, etc.) Bear in mind I'm a programmer and count from zero.
"""
if self.class_vector:
return self.class_vector
else:
self.createClassVector()
return self.class_vector
def addDataTable(self, dataTable):
"""
This adds a set of tables from which we will create our data vector.
The tables should come from DataTable class.
Data table names are accessible through dataTable.dt_id
"""
self.data_tables.append( dataTable )
self.genes = None
def clearUnclassified(self):
"""
Clears out data relating to an
unclassified sample (sample to be classified)
"""
self.unclassified = None
self.unclassified_data_vector = None
def clearClassification(self):
"""
Clears the classification settings
"""
self.classifications = []
self.class_vector = None
self.clearData()
def clearClassSamples(self):
"""
Removes the samples from the classifications list
"""
for className, samples in self.classifications:
samples = []
def clearData(self):
"""
Clears the gene and probe data vectors
"""
self.probe_data_vector = None
self.gene_data_vector = None
def createClassification(self, className):
"""
This creates a new class we can add samples to that is identified by the string provided as className
"""
self.classifications.append((className, []))
def addToClassification(self, classToAddTo, data_table, sample):
"""
Adds a sample to a class.
classToAddTo (string) is a string that was added with createClassification.
data_table (string) is the dt_id from a DataTable object
sample (string) is the name of the sample to be added.
"""
for className, samples in self.classifications:
if classToAddTo == className:
samples.append( (data_table, sample) )
def getClassifications(self):
"""
Returns the set of classifications we are training on
organized as
[(className1, [ (table, sample_id), (table, sample_id) ...]),
(className2, [ (table, sample_id), (table, sample_id) ...])]
returns [] if classifications have not been set yet
"""
return self.classifications
def createUnclassifiedDataVector(self, type):
"""
This takes the unclassified data list and converts it to a vector
of doubles.
type is gene/probe
"""
self.unclassified_data_vector = None
self.unclassified_data_vector = dirac.DoubleVector()
prevTable = None
t_obj = self.getTable(self.unclassified[0])
for i, t in enumerate(self.data_tables):
if t==t_obj:
t_indx = i
sample = self.unclassified[1]
if type=='probe':
for p in self.probes:
if isinstance(p, tuple):
p = p[t_indx]
self.unclassified_data_vector.push_back( t_obj.getData(sample, probe_name=p) )
else:
for g in self.genes:
self.unclassified_data_vector.push_back( t_obj.getData(sample, gene_name=g) )
def getUnclassifiedDataVector(self, type):
self.createUnclassifiedDataVector(type)
return self.unclassified_data_vector
def setUnclassified(self, data_table, sample):
"""
Create unclassified tuple containing table and sample name
"""
self.unclassified = (data_table, sample)
def addGeneNetwork(self, geneNetwork):
"""
Adds a gene network object.
"""
for name, genes in geneNetwork.iteritems():
if name not in self.gene_networks:
self.gene_networks[name] = genes
else:
for gene in genes:
self.gene_networks[name].append(gene)
def createDataVector(self):
"""
Builds the probe and gene data vectors
"""
if len(self.data_tables) == 0:
raise Exception("Attempt to create a data vector without a data table")
if self.genes == None:
self.mergeTables()
self.createProbeDataVector()
self.createGeneDataVector()
def buildDataVector(self, type):
"""
This builds the data vector from the given information
"""
if type == 'gene':
dv = self.gene_data_vector = dirac.DoubleVector()
row_list = self.genes
parameter_index = 1
else:#probe
dv = self.probe_data_vector = dirac.DoubleVector()
row_list = self.probes
parameter_index = 2
prevTable = None
#for each class
for classification, samples in self.classifications:
#for each sample in class
for table, sample in samples:
#for each gene in sample in class
if prevTable != table:
t_obj = self.getTable(table)
for i, t in enumerate(self.data_tables):
if t==t_obj:
t_indx = i
prevTable = table
#base parameter template
param = [sample, None, None]
for name in row_list:
if isinstance(name,tuple):#merged data, find probe name for this table
param[parameter_index] = name[t_indx]
else:
param[parameter_index] = name
T = tuple(param)
dv.push_back( t_obj.getData(*T ) )#passing in parameter tuple
def createProbeDataVector(self):
"""
creates self.probe_data_vector
"""
self.buildDataVector('probe')
def createGeneDataVector(self):
"""
Creates self.gene_data_vector
"""
self.buildDataVector('gene')
def createClassVector(self):
"""
Goes through the classification vector and puts the classSize in the order they
will be presented in the data vector
"""
self.class_vector = dirac.IntVector()
for className, samples in self.classifications:
self.class_vector.push_back( len(samples) )
def createGeneNetVector(self, minNetSize = 10):
"""
This builds the geneNet Vector from the provided information. It uses gene synonyms if available
"""
self.gene_net_vector = dirac.IntVector()
self.gene_net_size_vector = dirac.IntVector()
self.gene_net_map = []#a list of the geneNetNames in the order they are sent to Dirac
self.gene_net_data_matrix_start = []
dmstart_counter = 0
#creating a map between gene network names and the gene names they contain
gene_net_to_genes_map = {}
for net_name, network in self.gene_networks.iteritems():#4each net
gene_net_to_genes_map[net_name] = []
network_size_counter = 0#since not all genes in network are in data
for gene in network:#4each gene in net
if gene in self.gene_index:#if the gene is in our data
gene_net_to_genes_map[net_name].append(gene)
row_number = self.gene_index[gene]
self.gene_net_vector.push_back(row_number)
network_size_counter += 1
elif self.synonyms:
#look for synonym
ourSyn = self.synonyms.getSynonyms(gene)
if ourSyn:
for syngene in ourSyn:#go through the synonyms for the gene
if syngene in self.gene_index:
#we found a match in the index so add it
gene_net_to_genes_map[net_name].append(gene)
row_number = self.gene_index[syngene]
self.gene_net_vector.push_back(row_number)
network_size_counter += 1
break
if network_size_counter > minNetSize:
self.gene_net_size_vector.push_back( network_size_counter )
self.gene_net_map.append(net_name)
self.gene_net_data_matrix_start.append(dmstart_counter)
dmstart_counter += (network_size_counter*(network_size_counter -1))/2
else:
#the network is too small so remove the genes we added
while network_size_counter > 0:
self.gene_net_vector.pop_back()
network_size_counter -= 1
self.gene_net_to_genes_map = gene_net_to_genes_map
self.gene_net_data_matrix_start.append(dmstart_counter)
def mergeTables(self):
"""
This function merges the gene lookup tables
"""
pB = pyBabel.Extensions.ext(cache_dir=self.merge_cache)
try:
idLists = []
for table in self.data_tables:
idLists.append([probe for probe in table.probes if probe not in ['MAX', 'MIN', 'AVE']])
self.probes = pB.mergeProbes(idLists=idLists)
except pyBabel.Extensions.pyBabelError, E:#unable to merge on probes
print E.value
self.probes = []
self.probe_index = {}
for i, probeset in enumerate(self.probes):
self.probe_index[probeset] = i
geneset = set(self.data_tables[0].genes)
if len(self.data_tables) > 1:
for table in self.data_tables[1:]:
geneset.intersection_update(table.genes)
self.genes = [x for x in geneset]
self.gene_index = {}
for i, gene in enumerate(self.genes):
self.gene_index[gene] = i
def getDataCount(self):
"""
Returns a tuple with
(num genes in merge, numprobes in merge)
"""
numgenes = len(self.genes)
numprobes = len(self.probes)
return (numgenes, numprobes)
def getGeneNetCount(self):
if self.gene_networks is not None and len(self.gene_networks) > 0:
self.createGeneNetVector(1)
numnetworks = len(self.gene_net_size_vector)
if numnetworks > 0:
min = 10000
max = 0
sum = 0
for geneset in self.gene_net_size_vector:
if geneset > max:
max = geneset
if geneset < min:
min = geneset
sum += geneset
ave = float(sum)/numnetworks
else:
min = None
max = None
ave = None
return (numnetworks, ave, max, min)
return None
def getGeneNetVectorRange(self, net_number):
"""
Returns the start and number of elements in the
"""
s = self.gene_net_data_matrix_start[net_number]
e = self.gene_net_data_matrix_start[net_number+1]
return (s,e)
def getGeneNetDataMatrixStart(self):
"""
Returns a list that details where in the data matrices a given
gene network starts and stops (based on size nchoose2)
"""
return self.gene_net_data_matrix_start
def addSynonyms(self, file):
"""
Adds a table of synonyms to allow cross referencing between geneNets and datasets.
"""
import AUREA.parser.SynonymParser as sp
self.synonyms = sp.SynonymParser()
self.synonyms.importgene_info(file)
def getTables(self):
"""
Returns list of data_table objects
"""
return self.data_tables
def getTable(self, table_id):
"""
Returns table object that has that table_id
"""
for table in self.data_tables:
if table.dt_id == table_id:
return table
def writeToCSV(self, filename, key='gene'):
"""
Writes the current data from your chosen classes to csv format.
With given key (gene/probe)
Note: you must provide a valid path and filename to this function
Thats on you
"""
csv_file = open(filename, 'w')
csv_file.write(self._getCSVHeader(key) + '\n')
dv, numGenes = self.getDataVector(type=key)
numSamples = len(dv) /numGenes
for i in range(numGenes):
str_buff = "'"
if key == 'probe':
tmp = self.getGeneName(index=i, type=key).split('-')
str_buff += tmp[-1] + "','" + " ".join(tmp[:-1])
else:
str_buff += self.getGeneName(index = i, type=key)
str_buff += "'"
for j in range(numSamples):
dv_index = j*numGenes + i
str_buff += ","
str_buff += str(dv[dv_index])
csv_file.write( str_buff + '\n')
csv_file.close()
def _getCSVHeader(self, key='gene'):
    """
    Creates the header row for writeToCSV

    key: 'gene' or 'probe'; 'probe' emits two label columns.
    Raises an Exception when fewer than two classifications hold data,
    since the header needs samples from both classes.
    """
    classifications = self.getClassifications()
    # Require both classes up front (the original raised only on zero and
    # then crashed with IndexError when exactly one class had data).
    if len(classifications) < 2:
        # Parenthesized raise: the old "raise Exception, msg" form is
        # Python-2-only syntax.
        raise Exception("You must add data to classes to print a CSV")
    c1name, c1samples = classifications[0]
    c2name, c2samples = classifications[1]
    header = "'"
    if key == 'probe':
        header += "probe','gene"
    else:
        header += key
    # One "<table>.<sample>" column per sample, class 1 then class 2.
    for s1heading in [table + "." + sample for table, sample in c1samples]:
        header += "','" + s1heading
    for s2heading in [table + "." + sample for table, sample in c2samples]:
        header += "','" + s2heading
    return header + "'"
| JohnCEarls/AUREA | src/AUREA/packager/DataPackager.py | Python | agpl-3.0 | 19,414 | [
"DIRAC"
] | fc8181e4b9dd9dd2a3aa817fb0ca4cf3190cbf41d5b2776ebce7f13993984b45 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import math
from collections import namedtuple, defaultdict
import six
import ruamel.yaml as yaml
import os
import json
from copy import deepcopy
from pymatgen.analysis.molecule_structure_comparator import CovalentRadius
from pymatgen.core.sites import PeriodicSite
"""
This module provides classes to perform analyses of
the local environments (e.g., finding near neighbors)
of single sites in molecules and structures.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman,"+\
" Nils E. R. Zimmermann, Bharat Medasani"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Nils E. R. Zimmermann"
__email__ = "nils.e.r.zimmermann@gmail.com"
__status__ = "Production"
__date__ = "August 17, 2017"
from math import pow, pi, asin, atan, sqrt, exp, sin, cos, acos, fabs
import numpy as np
from bisect import bisect_left
from scipy.spatial import Voronoi
from pymatgen import Element
from pymatgen.core.structure import Structure
from pymatgen.analysis.bond_valence import BV_PARAMS, BVAnalyzer
# Module-level parameter tables, loaded once at import time from data files
# that ship alongside this module.
default_op_params = {}
with open(os.path.join(os.path.dirname(
        __file__), 'op_params.yaml'), "rt") as f:
    default_op_params = yaml.safe_load(f)
    # (explicit f.close() removed: the "with" block already closes the file)

cn_opt_params = {}
with open(os.path.join(os.path.dirname(
        __file__), 'cn_opt_params.yaml'), 'r') as f:
    cn_opt_params = yaml.safe_load(f)

file_dir = os.path.dirname(__file__)
rad_file = os.path.join(file_dir, 'ionic_radii.json')
with open(rad_file, 'r') as fp:
    _ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator(object):
    """
    Computes site valences and ionic radii for a structure using bond valence
    analyzer

    Args:
        structure: pymatgen.core.structure.Structure
    """

    def __init__(self, structure):
        # Operate on a copy so the caller's structure is never mutated;
        # _get_valences() re-decorates self._structure with oxidation states.
        self._structure = structure.copy()
        self._valences = self._get_valences()
        self._ionic_radii = self._get_ionic_radii()

    @property
    def radii(self):
        """
        List of ionic radii of elements in the order of sites.
        """
        # NOTE(review): keyed by species string, so sites sharing a species
        # collapse to a single (last-seen) entry in the returned dict.
        el = [site.species_string for site in self._structure.sites]
        radii_dict = dict(zip(el, self._ionic_radii))
        #print radii_dict
        return radii_dict

    @property
    def valences(self):
        """
        List of oxidation states of elements in the order of sites.
        """
        el = [site.species_string for site in self._structure.sites]
        valence_dict = dict(zip(el, self._valences))
        return valence_dict

    @property
    def structure(self):
        """
        Returns oxidation state decorated structure.
        """
        return self._structure.copy()

    def _get_ionic_radii(self):
        """
        Computes ionic radii of elements for all sites in the structure.
        If valence is zero, atomic radius is used.
        """
        radii = []
        vnn = VoronoiNN()

        def nearest_key(sorted_vals, key):
            # Return the entry of sorted_vals numerically closest to key.
            i = bisect_left(sorted_vals, key)
            if i == len(sorted_vals):
                return sorted_vals[-1]
            if i == 0:
                return sorted_vals[0]
            before = sorted_vals[i-1]
            after = sorted_vals[i]
            if after-key < key-before:
                return after
            else:
                return before

        for i in range(len(self._structure.sites)):
            site = self._structure.sites[i]
            # Bare Element (no oxidation state): use the atomic radius.
            if isinstance(site.specie,Element):
                radius = site.specie.atomic_radius
                # Handle elements with no atomic_radius
                # by using calculated values instead.
                if radius is None:
                    radius = site.specie.atomic_radius_calculated
                if radius is None:
                    raise ValueError(
                        "cannot assign radius to element {}".format(
                            site.specie))
                radii.append(radius)
                continue

            el = site.specie.symbol
            oxi_state = int(round(site.specie.oxi_state))
            coord_no = int(round(vnn.get_cn(self._structure, i)))
            try:
                # Snap the oxidation state to the nearest tabulated one,
                # then look up the radius for this coordination number.
                tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
                oxi_state = nearest_key(tab_oxi_states, oxi_state)
                radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
            except KeyError:
                # No tabulated radius at this CN: first try the adjacent
                # coordination number on the side the raw CN leans toward.
                if vnn.get_cn(self._structure, i)-coord_no > 0:
                    new_coord_no = coord_no + 1
                else:
                    new_coord_no = coord_no - 1
                try:
                    radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
                    coord_no = new_coord_no
                except:
                    # Fall back to the nearest tabulated CNs, averaging the
                    # two bracketing radii when the CN falls between them.
                    tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
                    new_coord_no = nearest_key(tab_coords, coord_no)
                    # NOTE(review): this rebinds the outer loop index ``i``;
                    # safe only because the for-loop rebinds it next pass.
                    i = 0
                    for val in tab_coords:
                        if val > coord_no:
                            break
                        i = i + 1
                    if i == len(tab_coords):
                        key = str(tab_coords[-1])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    elif i == 0:
                        key = str(tab_coords[0])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    else:
                        key = str(tab_coords[i-1])
                        radius1 = _ion_radii[el][str(oxi_state)][key]
                        key = str(tab_coords[i])
                        radius2 = _ion_radii[el][str(oxi_state)][key]
                        radius = (radius1+radius2)/2
                    #implement complex checks later
            radii.append(radius)
        return radii

    def _get_valences(self):
        """
        Computes ionic valences of elements for all sites in the structure.
        """
        try:
            bv = BVAnalyzer()
            self._structure = bv.get_oxi_state_decorated_structure(self._structure)
            valences = bv.get_valences(self._structure)
        except:
            try:
                # Retry with symmetry tolerance disabled before giving up.
                bv = BVAnalyzer(symm_tol=0.0)
                self._structure = bv.get_oxi_state_decorated_structure(self._structure)
                valences = bv.get_valences(self._structure)
            except:
                # Last resort: most common oxidation state per element,
                # accepted only if the result is charge neutral.
                valences = []
                for site in self._structure.sites:
                    if len(site.specie.common_oxidation_states) > 0:
                        valences.append(site.specie.common_oxidation_states[0])
                    # Handle noble gas species
                    # which have no entries in common_oxidation_states.
                    else:
                        valences.append(0)
                if sum(valences):
                    # Not charge balanced: treat every site as neutral.
                    valences = [0]*self._structure.num_sites
                else:
                    self._structure.add_oxidation_state_by_site(valences)
                #raise

        #el = [site.specie.symbol for site in self._structure.sites]
        #el = [site.species_string for site in self._structure.sites]
        #el = [site.specie for site in self._structure.sites]
        #valence_dict = dict(zip(el, valences))
        #print valence_dict
        return valences
class NearNeighbors(object):
    """
    Base class to determine near neighbors that typically include nearest
    neighbors and others that are within some tolerable distance.
    """

    def __eq__(self, other):
        # Equal when the concrete type and every configured attribute match.
        if type(other) is type(self):
            return self.__dict__ == other.__dict__
        return False

    def __hash__(self):
        # NOTE(review): hashes only the attribute count, so differently
        # configured instances collide; still consistent with __eq__.
        return len(self.__dict__.items())

    def get_cn(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (integer or float): coordination number.
        """
        siw = self.get_nn_info(structure, n)
        return sum([e['weight'] for e in siw]) if use_weights else len(siw)

    def get_cn_dict(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of each element bonded to site with index n in structure

        Args:
            structure (Structure): input structure
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (dict): dictionary of CN of each element bonded to site
        """
        siw = self.get_nn_info(structure, n)

        # Accumulate per-element counts (or weights) over the neighbor list.
        cn_dict = {}
        for i in siw:
            site_element = i['site'].species_string
            if site_element not in cn_dict:
                if use_weights:
                    cn_dict[site_element] = i['weight']
                else:
                    cn_dict[site_element] = 1
            else:
                if use_weights:
                    cn_dict[site_element] += i['weight']
                else:
                    cn_dict[site_element] += 1
        return cn_dict

    def get_nn(self, structure, n):
        """
        Get near neighbors of site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site in structure for which to determine
                neighbors.
        Returns:
            sites (list of Site objects): near neighbors.
        """
        return [e['site'] for e in self.get_nn_info(structure, n)]

    def get_weights_of_nn_sites(self, structure, n):
        """
        Get weight associated with each near neighbor of site with
        index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine the weights.
        Returns:
            weights (list of floats): near-neighbor weights.
        """
        return [e['weight'] for e in self.get_nn_info(structure, n)]

    def get_nn_images(self, structure, n):
        """
        Get image location of all near neighbors of site with index n in
        structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine the image
                location of near neighbors.
        Returns:
            images (list of 3D integer array): image locations of
                near neighbors.
        """
        return [e['image'] for e in self.get_nn_info(structure, n)]

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                information.
        Returns:
            siw (list of dicts): each dictionary provides information
                about a single near neighbor, where key 'site' gives
                access to the corresponding Site object, 'image' gives
                the image location, and 'weight' provides the weight
                that a given near-neighbor site contributes
                to the coordination number (1 or smaller), 'site_index'
                gives index of the corresponding site in
                the original structure.
        """
        # Abstract: every concrete strategy subclass must implement this.
        raise NotImplementedError("get_nn_info(structure, n)"
                                  " is not defined!")

    def get_all_nn_info(self, structure):
        """Get a listing of all neighbors for all sites in a structure

        Args:
            structure (Structure): Input structure
        Return:
            List of NN site information for each site in the structure. Each
            entry has the same format as `get_nn_info`
        """
        return [self.get_nn_info(structure, n) for n in range(len(structure))]

    def get_nn_shell_info(self, structure, site_idx, shell):
        """Get a certain nearest neighbor shell for a certain site.

        Determines all non-backtracking paths through the neighbor network
        computed by `get_nn_info`. The weight is determined by multiplying
        the weight of the neighbor at each hop through the network. For
        example, a 2nd-nearest-neighbor that has a weight of 1 from its
        1st-nearest-neighbor and weight 0.5 from the original site will
        be assigned a weight of 0.5.

        As this calculation may involve computing the nearest neighbors of
        atoms multiple times, the calculation starts by computing all of the
        neighbor info and then calling `_get_nn_shell_info`. If you are likely
        to call this method for more than one site, consider calling `get_all_nn`
        first and then calling this protected method yourself.

        Args:
            structure (Structure): Input structure
            site_idx (int): index of site for which to determine neighbor
                information.
            shell (int): Which neighbor shell to retrieve (1 == 1st NN shell)
        Returns:
            list of dictionaries. Each entry in the list is information about
                a certain neighbor in the structure, in the same format as
                `get_nn_info`.
        """
        all_nn_info = self.get_all_nn_info(structure)
        sites = self._get_nn_shell_info(structure, all_nn_info, site_idx, shell)

        # Update the site positions
        #   Did not do this during NN options because that can be slower
        output = []
        for info in sites:
            orig_site = structure[info['site_index']]
            info['site'] = PeriodicSite(orig_site.species_and_occu,
                                        np.add(orig_site._fcoords,
                                               info['image']),
                                        structure.lattice,
                                        properties=orig_site.properties)
            output.append(info)
        return output

    def _get_nn_shell_info(self, structure, all_nn_info, site_idx, shell,
                           _previous_steps=frozenset(), _cur_image=(0,0,0)):
        """Private method for computing the neighbor shell information

        Args:
            structure (Structure) - Structure being assessed
            all_nn_info ([[dict]]) - Results from `get_all_nn_info`
            site_idx (int) - index of site for which to determine neighbor
                information.
            shell (int) - Which neighbor shell to retrieve (1 == 1st NN shell)
            _previous_step ({(site_idx, image}) - Internal use only: Set of
                sites that have already been traversed.
            _cur_image (tuple) - Internal use only Image coordinates of current atom
        Returns:
            list of dictionaries. Each entry in the list is information about
                a certain neighbor in the structure, in the same format as
                `get_nn_info`. Does not update the site positions
        """
        if shell <= 0:
            raise ValueError('Shell must be positive')

        # Append this site to the list of previously-visited sites
        # (frozenset default is immutable, so union() is safe here).
        _previous_steps = _previous_steps.union({(site_idx, _cur_image)})

        # Get all the neighbors of this site
        possible_steps = list(all_nn_info[site_idx])
        for i, step in enumerate(possible_steps):
            # Update the image information
            # Note: We do not update the site position yet, as making a
            # PeriodicSite for each intermediate step is too costly
            step = dict(step)
            step['image'] = tuple(np.add(step['image'], _cur_image).tolist())
            possible_steps[i] = step

        # Get only the non-backtracking steps
        allowed_steps = [x for x in possible_steps if
                         (x['site_index'], x['image']) not in _previous_steps]

        # If we are the last step (i.e., shell == 1), done!
        if shell == 1:
            # No further work needed, just package these results
            return allowed_steps
        else:
            # If not, Get the N-1 NNs of these allowed steps
            terminal_neighbors = [self._get_nn_shell_info(structure,
                                                          all_nn_info,
                                                          x['site_index'],
                                                          shell - 1,
                                                          _previous_steps,
                                                          x['image'])
                                  for x in allowed_steps]

            # Each allowed step results in many terminal neighbors
            # And, different first steps might results in the same neighbor
            # Now, we condense those neighbors into a single entry per neighbor
            all_sites = dict()
            for first_site, term_sites in zip(allowed_steps, terminal_neighbors):
                for term_site in term_sites:
                    key = (term_site['site_index'], tuple(term_site['image']))

                    # The weight for this site is equal to the weight of the
                    # first step multiplied by the weight of the terminal neighbor
                    term_site['weight'] *= first_site['weight']

                    # Check if this site is already known
                    value = all_sites.get(key)
                    if value is not None:
                        # If so, add to its weight
                        value['weight'] += term_site['weight']
                    else:
                        # If not, prepare to add it
                        value = term_site
                    all_sites[key] = value
        return list(all_sites.values())

    @staticmethod
    def _get_image(frac_coords):
        """Private convenience method for get_nn_info,
        gives lattice image from provided PeriodicSite."""
        images = [0,0,0]
        for j, f in enumerate(frac_coords):
            if f >= 0:
                images[j] = int(f)
            else:
                # Negative fractional coordinates round toward -inf ...
                images[j] = int(f - 1)
                # ... except exact integers, which belong to the next cell.
                if f % 1 == 0:
                    images[j] += 1
        return images

    @staticmethod
    def _get_original_site(structure, site):
        """Private convenience method for get_nn_info,
        gives original site index from ProvidedPeriodicSite."""
        # Linear scan for the site this periodic image maps back to.
        for i, s in enumerate(structure):
            if site.is_periodic_image(s):
                return i
        raise Exception('Site not found!')

    def get_bonded_structure(self, structure, decorate=False):
        """
        Obtain a StructureGraph object using this NearNeighbor
        class. Requires the optional dependency networkx
        (pip install networkx).

        Args:
            structure: Structure object.
            decorate (bool): whether to annotate site properties
                with order parameters using neighbors determined by
                this NearNeighbor class

        Returns: a pymatgen.analysis.graphs.BondedStructure object
        """
        # requires optional dependency which is why it's not a top-level import
        from pymatgen.analysis.graphs import StructureGraph

        if decorate:
            # Decorate all sites in the underlying structure
            # with site properties that provides information on the
            # coordination number and coordination pattern based
            # on the (current) structure of this graph.
            order_parameters = [self.get_local_order_parameters(structure, n)
                                for n in range(len(structure))]
            structure.add_site_property('order_parameters', order_parameters)

        sg = StructureGraph.with_local_env_strategy(structure, self)
        return sg

    def get_local_order_parameters(self, structure, n):
        """
        Calculate those local structure order parameters for
        the given site whose ideal CN corresponds to the
        underlying motif (e.g., CN=4, then calculate the
        square planar, tetrahedral, see-saw-like,
        rectangular see-saw-like order paramters).

        Args:
            structure: Structure object
            n (int): site index.

        Returns (Dict[str, float]):
            A dict of order parameters (values) and the
                underlying motif type (keys; for example, tetrahedral).
        """
        # code from @nisse3000, moved here from graphs to avoid circular
        # import, also makes sense to have this as a general NN method
        cn = self.get_cn(structure, n)
        # Only CNs with tabulated order-parameter settings are evaluated.
        if cn in [int(k_cn) for k_cn in cn_opt_params.keys()]:
            names = [k for k in cn_opt_params[cn].keys()]
            types = []
            params = []
            for name in names:
                types.append(cn_opt_params[cn][name][0])
                tmp = cn_opt_params[cn][name][1] \
                    if len(cn_opt_params[cn][name]) > 1 else None
                params.append(tmp)
            lostops = LocalStructOrderParams(types, parameters=params)
            # Site n first, then its neighbors, as expected by the evaluator.
            sites = [structure[n]] + self.get_nn(structure, n)
            lostop_vals = lostops.get_order_parameters(
                sites, 0, indices_neighs=[i for i in range(1, cn+1)])
            d = {}
            for i, lostop in enumerate(lostop_vals):
                d[names[i]] = lostop
            return d
        else:
            return None
class VoronoiNN(NearNeighbors):
    """
    Uses a Voronoi algorithm to determine near neighbors for each site in a
    structure.

    Args:
        tol (float): tolerance parameter for near-neighbor finding
            (default: 0).
        targets (Element or list of Elements): target element(s).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 10.0.
        allow_pathological (bool): whether to allow infinite vertices in
            determination of Voronoi coordination.
        weight (string) - Statistic used to weigh neighbors (see the statistics
            available in get_voronoi_polyhedra)
        extra_nn_info (bool) - Add all polyhedron info to `get_nn_info`
    """

    def __init__(self, tol=0, targets=None, cutoff=10.0,
                 allow_pathological=False, weight='solid_angle',
                 extra_nn_info=True):
        super(VoronoiNN, self).__init__()
        self.tol = tol
        self.cutoff = cutoff
        self.allow_pathological = allow_pathological
        self.targets = targets
        self.weight = weight
        self.extra_nn_info = extra_nn_info

    def get_voronoi_polyhedra(self, structure, n):
        """
        Gives a weighted polyhedra around a site.

        See ref: A Proposed Rigorous Definition of Coordination Number,
        M. O'Keeffe, Acta Cryst. (1979). A35, 772-775

        Args:
            structure (Structure): structure for which to evaluate the
                coordination environment.
            n (integer): site index.

        Returns:
            A dict of sites sharing a common Voronoi facet with the site
            n mapped to a directory containing statistics about the facet:
                - solid_angle - Solid angle subtended by face
                - angle_normalized - Solid angle normalized such that the
                    faces with the largest
                - area - Area of the facet
                - face_dist - Distance between site n and the facet
                - volume - Volume of Voronoi cell for this face
                - n_verts - Number of vertices on the facet
        """
        # Assemble the list of neighbors used in the tessellation
        # Gets all atoms within a certain radius
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets
        center = structure[n]
        neighbors = structure.get_sites_in_sphere(
            center.coords, self.cutoff)
        # Sorting by distance puts the central site (distance ~0) first, so
        # it can be addressed as index 0 in _extract_cell_info below.
        neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]

        # Run the Voronoi tessellation
        qvoronoi_input = [s.coords for s in neighbors]
        voro = Voronoi(qvoronoi_input)  # can give a seg fault if cutoff is too small

        # Extract data about the site in question
        return self._extract_cell_info(structure, 0, neighbors, targets, voro)

    def _get_elements(self, site):
        # Return the element(s) on a site; the except arm handles sites
        # whose specie attribute is not a simple Element/symbol
        # (e.g. disordered sites).
        try:
            if isinstance(site.specie, Element):
                return [site.specie]
            return [Element(site.specie)]
        except:
            return site.species_and_occu.elements

    def _is_in_targets(self, site, targets):
        # True only when every element on the site is a target element.
        elems = self._get_elements(site)
        for elem in elems:
            if elem not in targets:
                return False
        return True

    def _extract_cell_info(self, structure, site_idx, sites, targets, voro):
        """Get the information about a certain atom from the results of a tessellation

        Args:
            structure (Structure) - Structure being assessed
            site_idx (int) - Index of the atom in question
            sites ([Site]) - List of all sites in the tessellation
            targets ([Element]) - Target elements
            voro - Output of qvoronoi
        Returns:
            A dict of sites sharing a common Voronoi facet. Key is facet id
             (not useful) and values are dictionaries containing statistics
             about the facet:
                - site: Pymatgen site
                - solid_angle - Solid angle subtended by face
                - angle_normalized - Solid angle normalized such that the
                    faces with the largest
                - area - Area of the facet
                - face_dist - Distance between site n and the facet
                - volume - Volume of Voronoi cell for this face
                - n_verts - Number of vertices on the facet
        """
        # Get the coordinates of every vertex
        all_vertices = voro.vertices

        # Get the coordinates of the central site
        center_coords = sites[site_idx].coords

        # Iterate through all the faces in the tessellation
        results = {}
        for nn, vind in voro.ridge_dict.items():
            # Get only those that include the cite in question
            if site_idx in nn:
                other_site = nn[0] if nn[1] == site_idx else nn[1]
                if -1 in vind:
                    # -1 indices correspond to the Voronoi cell
                    # missing a face
                    if self.allow_pathological:
                        continue
                    else:
                        raise RuntimeError("This structure is pathological,"
                                           " infinite vertex in the voronoi "
                                           "construction")

                # Get the solid angle of the face
                facets = [all_vertices[i] for i in vind]
                angle = solid_angle(center_coords, facets)

                # Compute the volume of associated with this face
                volume = 0
                # qvoronoi returns vertices in CCW order, so I can break
                # the face up in to segments (0,1,2), (0,2,3), ... to compute
                # its area where each number is a vertex size
                for j, k in zip(vind[1:], vind[2:]):
                    volume += vol_tetra(center_coords,
                                        all_vertices[vind[0]],
                                        all_vertices[j],
                                        all_vertices[k])

                # Compute the distance of the site to the face
                face_dist = np.linalg.norm(
                    center_coords - sites[other_site].coords) / 2

                # Compute the area of the face (knowing V=Ad/3)
                face_area = 3 * volume / face_dist

                # Store by face index
                results[other_site] = {
                    'site': sites[other_site],
                    'solid_angle': angle,
                    'volume': volume,
                    'face_dist': face_dist,
                    'area': face_area,
                    'n_verts': len(vind)
                }

        # Get only target elements
        resultweighted = {}
        for nn_index, nstats in results.items():
            # Check if this is a target site
            nn = nstats['site']
            if nn.is_ordered:
                if nn.specie in targets:
                    resultweighted[nn_index] = nstats
            else:  # is nn site is disordered
                for disordered_sp in nn.species_and_occu.keys():
                    if disordered_sp in targets:
                        resultweighted[nn_index] = nstats
        return resultweighted

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure
        using Voronoi decomposition.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets
        siw = []

        # Run the tessellation
        nns = self.get_voronoi_polyhedra(structure, n)

        # Determine the maximum weight
        max_weight = max(nn[self.weight] for nn in nns.values())
        for nstats in nns.values():
            site = nstats['site']
            if nstats[self.weight] > self.tol * max_weight \
                    and self._is_in_targets(site, targets):
                nn_info = {'site': site,
                           'image': self._get_image(site.frac_coords),
                           'weight': nstats[self.weight] / max_weight,
                           'site_index': self._get_original_site(
                               structure, site)}

                if self.extra_nn_info:
                    # Add all the information about the site
                    # NOTE(review): poly_info aliases nstats, so the del
                    # below also removes 'site' from the nns dict entry;
                    # harmless here because nns is rebuilt per call.
                    poly_info = nstats
                    del poly_info['site']
                    nn_info['poly_info'] = poly_info
                siw.append(nn_info)
        return siw
class VoronoiNN_modified(VoronoiNN):
    """
    Variant of VoronoiNN that keeps only neighbors whose weight exceeds
    half of the largest neighbor weight.
    """

    def get_nn_info(self, structure, n):
        # Delegate the full Voronoi analysis to the parent class, then
        # prune entries below the 50%-of-maximum weight threshold.
        candidates = super(VoronoiNN_modified, self).get_nn_info(structure, n)
        threshold = 0.5 * max(entry['weight'] for entry in candidates)
        return [entry for entry in candidates if entry['weight'] > threshold]
class JMolNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using an emulation
    of JMol's default autoBond() algorithm. This version of the algorithm
    does not take into account any information regarding known charge
    states.

    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1E-3).
        el_radius_updates: (dict) symbol->float to override default atomic
            radii table values
    """

    def __init__(self, tol=1E-3, el_radius_updates=None):
        self.tol = tol

        # Load elemental radii table bundled next to this module.
        bonds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  "bonds_jmol_ob.yaml")
        with open(bonds_file, 'r') as f:
            self.el_radius = yaml.safe_load(f)

        # Update any user preference elemental radii
        if el_radius_updates:
            self.el_radius.update(el_radius_updates)

    def get_max_bond_distance(self, el1_sym, el2_sym, constant=0.56):
        """
        Use JMol algorithm to determine bond length from atomic parameters

        Args:
            el1_sym: (str) symbol of atom 1
            el2_sym: (str) symbol of atom 2
            constant: (float) factor to tune model

        Returns: (float) max bond length
        """
        # The radii and the constant are positive, so the previous
        # sqrt((r1 + r2 + c) ** 2) was an identity; return the sum directly.
        return self.el_radius[el1_sym] + self.el_radius[el2_sym] + constant

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the bond identification
        algorithm underlying JMol.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        site = structure[n]

        # Determine relevant bond lengths based on atomic radii table
        bonds = {}
        for el in structure.composition.elements:
            bonds[site.specie, el] = self.get_max_bond_distance(
                site.specie.symbol, el.symbol)

        # Search for neighbors up to max bond length + tolerance
        max_rad = max(bonds.values()) + self.tol
        min_rad = min(bonds.values())

        siw = []
        for neighb, dist in structure.get_neighbors(site, max_rad):
            # Confirm neighbor based on bond length specific to atom pair
            if dist <= bonds[(site.specie, neighb.specie)] + self.tol:
                # Weight decays with distance relative to the shortest
                # tabulated bond among this structure's element pairs.
                weight = min_rad / dist
                siw.append({'site': neighb,
                            'image': self._get_image(neighb.frac_coords),
                            'weight': weight,
                            'site_index': self._get_original_site(structure, neighb)})
        return siw
class MinimumDistanceNN(NearNeighbors):
    """
    Identify near neighbors as the closest site plus every other site
    lying within (1 + tol) times that minimum distance, where tol is a
    relative distance tolerance.

    Args:
        tol (float): relative tolerance on the minimum distance used to
            accept additional neighbors (default: 0.1).
        cutoff (float): search radius in Angstrom for candidate
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Collect near-neighbor sites, their image locations and weights for
        the site with index n using the closest-neighbor distance criterion.

        Args:
            structure (Structure): input structure.
            n (integer): index of the site whose neighbors are sought.

        Returns:
            siw (list of dicts): one entry per accepted neighbor carrying
                'site', 'image', 'weight' and 'site_index'.
        """
        center = structure[n]
        candidates = structure.get_neighbors(center, self.cutoff)
        shortest = min(d for _, d in candidates)
        acceptance = (1.0 + self.tol) * shortest

        neighbor_info = []
        for neighbor, d in candidates:
            if d < acceptance:
                neighbor_info.append({
                    'site': neighbor,
                    'image': self._get_image(neighbor.frac_coords),
                    'weight': shortest / d,
                    'site_index': self._get_original_site(structure, neighbor)})
        return neighbor_info
class MinimumOKeeffeNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    neighbor(s) at closest relative distance, d_min_OKeffee, plus some
    relative tolerance, where bond valence parameters from O'Keeffe's
    bond valence method (J. Am. Chem. Soc. 1991, 3226-3229) are used
    to calculate relative distances.

    Args:
        tol (float): tolerance parameter for neighbor identification
            (default: 0.1).
        cutoff (float): cutoff radius in Angstrom to look for trial
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the closest relative
        neighbor distance-based method with O'Keeffe parameters.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        site = structure[n]
        neighs_dists = structure.get_neighbors(site, self.cutoff)
        try:
            eln = site.specie.element
        except AttributeError:
            # Narrowed from a bare except: a plain Element (or disordered
            # site) has no .element attribute; fall back to the string.
            eln = site.species_string

        # Scale each distance by the O'Keeffe-predicted bond length for
        # the element pair, so different chemistries compare fairly.
        reldists_neighs = []
        for neigh, dist in neighs_dists:
            try:
                el2 = neigh.specie.element
            except AttributeError:
                el2 = neigh.species_string
            reldists_neighs.append([dist / get_okeeffe_distance_prediction(
                eln, el2), neigh])

        siw = []
        min_reldist = min(reldist for reldist, neigh in reldists_neighs)
        for reldist, s in reldists_neighs:
            if reldist < (1.0 + self.tol) * min_reldist:
                w = min_reldist / reldist
                siw.append({'site': s,
                            'image': self._get_image(s.frac_coords),
                            'weight': w,
                            'site_index': self._get_original_site(structure, s)})
        return siw
class MinimumVIRENN(NearNeighbors):
    """
    Pick near neighbors as the site(s) at the smallest relative distance,
    plus all sites within a relative tolerance of that minimum, where the
    relative distance divides the raw distance by the sum of the two
    sites' radii from the ValenceIonicRadiusEvaluator (VIRE).

    Args:
        tol (float): relative tolerance for accepting additional
            neighbors (default: 0.1).
        cutoff (float): search radius in Angstrom for candidate
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Collect near-neighbor sites, image locations and weights for the
        site with index n using VIRE-scaled relative distances.

        Args:
            structure (Structure): input structure.
            n (integer): index of the site whose neighbors are sought.

        Returns:
            siw (list of dicts): one entry per accepted neighbor carrying
                'site', 'image', 'weight' and 'site_index'.
        """
        # VIRE decorates a copy of the structure with oxidation states and
        # provides per-species radii used for distance scaling.
        vire = ValenceIonicRadiusEvaluator(structure)
        center = vire.structure[n]
        center_radius = vire.radii[vire.structure[n].species_string]

        scaled = []
        for neighbor, dist in vire.structure.get_neighbors(center, self.cutoff):
            combined_radius = vire.radii[neighbor.species_string] + center_radius
            scaled.append((dist / combined_radius, neighbor))

        best = min(rd for rd, _ in scaled)
        limit = (1.0 + self.tol) * best

        siw = []
        for rd, neighbor in scaled:
            if rd < limit:
                siw.append({'site': neighbor,
                            'image': self._get_image(neighbor.frac_coords),
                            'weight': best / rd,
                            'site_index': self._get_original_site(vire.structure, neighbor)})
        return siw
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    # Displacement vectors from the center to every facet vertex,
    # plus their magnitudes.
    disp = [np.subtract(c, center) for c in coords]
    mags = [np.linalg.norm(v) for v in disp]

    # Fan-triangulate the facet from vertex 0 and accumulate each
    # tetrahedron's contribution via the Van Oosterom-Strackee formula:
    # tan(omega/2) = |r0.(ri x rj)| / (|r0||ri||rj| + pairwise dot terms).
    total = 0
    for k in range(1, len(disp) - 1):
        m = k + 1
        numer = np.abs(np.dot(disp[0], np.cross(disp[k], disp[m])))
        denom = (mags[0] * mags[k] * mags[m]
                 + mags[m] * np.dot(disp[0], disp[k])
                 + mags[k] * np.dot(disp[0], disp[m])
                 + mags[0] * np.dot(disp[k], disp[m]))
        if denom == 0:
            # Degenerate denominator: the half-angle is exactly +/- pi/2.
            half = 0.5 * pi if numer > 0 else -0.5 * pi
        else:
            half = np.arctan(numer / denom)
        # Wrap negative half-angles into (0, pi) before doubling.
        total += (half if half > 0 else half + np.pi) * 2
    return total
def vol_tetra(vt1, vt2, vt3, vt4):
    """
    Volume of the tetrahedron spanned by four vertices.
    Args:
        vt1 (array-like): coordinates of vertex 1.
        vt2 (array-like): coordinates of vertex 2.
        vt3 (array-like): coordinates of vertex 3.
        vt4 (array-like): coordinates of vertex 4.
    Returns:
        (float): volume of the tetrahedron.
    """
    # Edges emanating from vertex 4.
    edge_a = vt1 - vt4
    edge_b = vt2 - vt4
    edge_c = vt3 - vt4
    # Standard scalar-triple-product formula: |a . (b x c)| / 6.
    return np.abs(np.dot(edge_a, np.cross(edge_b, edge_c))) / 6
def get_okeeffe_params(el_symbol):
    """
    Returns the elemental parameters related to atom size and
    electronegativity which are used for estimating bond-valence
    parameters (bond length) of pairs of atoms on the basis of data
    provided in 'Atoms Sizes and Bond Lengths in Molecules and Crystals'
    (O'Keeffe & Brese, 1991).
    Args:
        el_symbol (str): element symbol.
    Returns:
        (dict): atom-size ('r') and electronegativity-related ('c')
            parameter.
    Raises:
        RuntimeError: if no O'Keeffe parameters are tabulated for the
            element.
    """
    el = Element(el_symbol)
    # Membership test directly against the dict; no need to materialize
    # the key list.
    if el not in BV_PARAMS:
        # Error message fixed: previously read "\"BV_PARAMS\"dictonary"
        # (typo and missing space).
        raise RuntimeError("Could not find O'Keeffe parameters for element"
                           " \"{}\" in \"BV_PARAMS\" dictionary"
                           " provided by pymatgen".format(el_symbol))
    return BV_PARAMS[el]
def get_okeeffe_distance_prediction(el1, el2):
    """
    Returns an estimate of the bond valence parameter (bond length) using
    the derived parameters from 'Atoms Sizes and Bond Lengths in Molecules
    and Crystals' (O'Keeffe & Brese, 1991). The estimate is based on two
    experimental parameters: r and c. The value for r is based off radius,
    while c is (usually) the Allred-Rochow electronegativity. Values used
    are *not* generated from pymatgen, and are found in
    'okeeffe_params.json'.
    Args:
        el1, el2 (Element): two Element objects
    Returns:
        a float value of the predicted bond length
    """
    params_a = get_okeeffe_params(el1)
    params_b = get_okeeffe_params(el2)
    r_a, c_a = params_a['r'], params_a['c']
    r_b, c_b = params_b['r'], params_b['c']
    # O'Keeffe & Brese correction term to the plain radius sum.
    correction = r_a * r_b * (sqrt(c_a) - sqrt(c_b)) ** 2 / (c_a * r_a + c_b * r_b)
    return r_a + r_b - correction
def get_neighbors_of_site_with_index(struct, n, approach="min_dist", delta=0.1,
                                     cutoff=10.0):
    """
    Returns the neighbors of a given site using a specific neighbor-finding
    method.
    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
    Returns: neighbor sites.
    Raises:
        RuntimeError: if the approach string is not recognized.
    """
    # Dispatch table mapping approach names to neighbor-finder classes.
    finders = {
        "min_dist": MinimumDistanceNN,
        "voronoi": VoronoiNN,
        "min_OKeeffe": MinimumOKeeffeNN,
        "min_VIRE": MinimumVIRENN,
    }
    if approach not in finders:
        raise RuntimeError("unsupported neighbor-finding method ({}).".format(
            approach))
    return finders[approach](tol=delta, cutoff=cutoff).get_nn(struct, n)
def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1,
                          cutoff=10.0, thresh=None):
    """
    Returns the motif type of the site with index n in structure struct;
    currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
    (close-packed: fcc and hcp) as well as "square pyramidal" and
    "trigonal bipyramidal". If the site is not recognized,
    "unrecognized" is returned. If a site should be assigned to two
    different motifs, "multiple assignments" is returned.
    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
        thresh (dict): thresholds for motif criteria (currently, required
            keys and their default values are "qtet": 0.5,
            "qoct": 0.5, "qbcc": 0.5, "q6": 0.4).
    Returns: motif type (str).
    """
    if thresh is None:
        thresh = {"qtet": 0.5, "qoct": 0.5, "qbcc": 0.5, "q6": 0.4,
                  "qtribipyr": 0.8, "qsqpyr": 0.8}
    # Order parameters evaluated around site n; index 0 is the
    # coordination number, the rest are motif-specific OPs.
    ops = LocalStructOrderParams([
        "cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])
    neighs_cent = get_neighbors_of_site_with_index(
        struct, n, approach=approach, delta=delta, cutoff=cutoff)
    neighs_cent.append(struct.sites[n])
    opvals = ops.get_order_parameters(
        neighs_cent, len(neighs_cent) - 1,
        indices_neighs=list(range(len(neighs_cent) - 1)))
    cn = int(opvals[0] + 0.5)
    # Collect every motif whose coordination number and OP threshold match.
    matches = []
    if cn == 4 and opvals[1] > thresh["qtet"]:
        matches.append("tetrahedral")
    if cn == 5 and opvals[5] > thresh["qsqpyr"]:
        matches.append("square pyramidal")
    if cn == 5 and opvals[6] > thresh["qtribipyr"]:
        matches.append("trigonal bipyramidal")
    if cn == 6 and opvals[2] > thresh["qoct"]:
        matches.append("octahedral")
    if cn == 8 and opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]:
        matches.append("bcc")
    if cn == 12 and opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] \
            and opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]:
        matches.append("cp")
    if not matches:
        return "unrecognized"
    if len(matches) > 1:
        return "multiple assignments"
    return matches[0]
def gramschmidt(vin, uin):
    """
    Returns the component of the first input vector that is orthogonal
    to the second input vector (one Gram-Schmidt step). The output
    vector is not normalized.
    Args:
        vin (numpy array):
            first input vector
        uin (numpy array):
            second input vector
    Raises:
        ValueError: if uin has zero or negative self inner product.
    """
    denom = np.inner(uin, uin)
    if denom <= 0.0:
        raise ValueError("Zero or negative inner product!")
    # Subtract the projection of vin onto uin.
    return vin - (np.inner(vin, uin) / denom) * uin
class LocalStructOrderParams(object):
    """
    This class permits the calculation of various types of local
    structure order parameters.
    """
    # Complete set of order-parameter type strings accepted by the
    # constructor; any other string raises a ValueError there.
    __supported_types = (
        "cn", "sgl_bd", "bent", "tri_plan", "tri_plan_max", "reg_tri", "sq_plan", \
        "sq_plan_max", "pent_plan", "pent_plan_max", "sq", "tet", "tet_max", "tri_pyr", \
        "sq_pyr", "sq_pyr_legacy", "tri_bipyr", "sq_bipyr", "oct", \
        "oct_legacy", "pent_pyr", "hex_pyr", "pent_bipyr", "hex_bipyr", \
        "T", "cuboct", "cuboct_max", "see_saw_rect", "bcc", "q2", "q4", "q6", "oct_max", "hex_plan_max")
    def __init__(self, types, parameters=None, cutoff=-10.0):
        """
        Args:
            types ([string]): list of strings representing the types of
                order parameters to be calculated. Note that multiple
                mentions of the same type may occur. Currently available
                types recognize following environments:
                  "cn": simple coordination number---normalized
                        if desired;
                  "sgl_bd": single bonds;
                  "bent": bent (angular) coordinations
                          (Zimmermann & Jain, in progress, 2017);
                  "T": T-shape coordinations;
                  "see_saw_rect": see saw-like coordinations;
                  "tet": tetrahedra
                         (Zimmermann et al., submitted, 2017);
                  "oct": octahedra
                         (Zimmermann et al., submitted, 2017);
                  "bcc": body-centered cubic environments (Peters,
                         J. Chem. Phys., 131, 244103, 2009);
                  "tri_plan": trigonal planar environments;
                  "sq_plan": square planar environments;
                  "pent_plan": pentagonal planar environments;
                  "tri_pyr": trigonal pyramids (coordinated atom is in
                             the center of the basal plane);
                  "sq_pyr": square pyramids;
                  "pent_pyr": pentagonal pyramids;
                  "hex_pyr": hexagonal pyramids;
                  "tri_bipyr": trigonal bipyramids;
                  "sq_bipyr": square bipyramids;
                  "pent_bipyr": pentagonal bipyramids;
                  "hex_bipyr": hexagonal bipyramids;
                  "cuboct": cuboctahedra;
                  "q2": motif-unspecific bond orientational order
                        parameter (BOOP) of weight l=2 (Steinhardt
                        et al., Phys. Rev. B, 28, 784-805, 1983);
                  "q4": BOOP of weight l=4;
                  "q6": BOOP of weight l=6.
                  "reg_tri": regular triangle with varying height
                             to basal plane;
                  "sq": square coordination (cf., "reg_tri");
                  "oct_legacy": original Peters-style OP recognizing
                                octahedral coordination environments
                                (Zimmermann et al., J. Am. Chem. Soc.,
                                137, 13352-13361, 2015) that can, however,
                                produce small negative values sometimes.
                  "sq_pyr_legacy": square pyramids (legacy);
            parameters ([dict]): list of dictionaries
                that store float-type parameters associated with the
                definitions of the different order parameters
                (length of list = number of OPs). If an entry
                is None, default values are used that are read from
                the op_params.yaml file. With few exceptions, 9 different
                parameters are used across all OPs:
                  "norm": normalizing constant (used in "cn"
                      (default value: 1)).
                  "TA": target angle (TA) in fraction of 180 degrees
                      ("bent" (1), "tet" (0.6081734479693927),
                      "tri_plan" (0.66666666667), "pent_plan" (0.6),
                      "sq_pyr_legacy" (0.5)).
                  "IGW_TA": inverse Gaussian width (IGW) for penalizing
                      angles away from the target angle in inverse
                      fractions of 180 degrees to ("bent" and "tet" (15),
                      "tri_plan" (13.5), "pent_plan" (18),
                      "sq_pyr_legacy" (30)).
                  "IGW_EP": IGW for penalizing angles away from the
                      equatorial plane (EP) at 90 degrees ("T", "see_saw_rect",
                      "oct", "sq_plan", "tri_pyr", "sq_pyr", "pent_pyr",
                      "hex_pyr", "tri_bipyr", "sq_bipyr", "pent_bipyr",
                      "hex_bipyr", and "oct_legacy" (18)).
                  "fac_AA": factor applied to azimuth angle (AA) in cosine
                      term ("T", "tri_plan", and "sq_plan" (1), "tet",
                      "tri_pyr", and "tri_bipyr" (1.5), "oct", "sq_pyr",
                      "sq_bipyr", and "oct_legacy" (2), "pent_pyr"
                      and "pent_bipyr" (2.5), "hex_pyr" and
                      "hex_bipyr" (3)).
                  "exp_cos_AA": exponent applied to cosine term of AA
                      ("T", "tet", "oct", "tri_plan", "sq_plan",
                      "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr",
                      "tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
                      and "oct_legacy" (2)).
                  "min_SPP": smallest angle (in radians) to consider
                      a neighbor to be
                      at South pole position ("see_saw_rect", "oct", "bcc",
                      "sq_plan", "tri_bipyr", "sq_bipyr", "pent_bipyr",
                      "hex_bipyr", "cuboct", and "oct_legacy"
                      (2.792526803190927)).
                  "IGW_SPP": IGW for penalizing angles away from South
                      pole position ("see_saw_rect", "oct", "bcc", "sq_plan",
                      "tri_bipyr", "sq_bipyr", "pent_bipyr", "hex_bipyr",
                      "cuboct", and "oct_legacy" (15)).
                  "w_SPP": weight for South pole position relative to
                      equatorial positions ("see_saw_rect" and "sq_plan" (1),
                      "cuboct" (1.8), "tri_bipyr" (2), "oct",
                      "sq_bipyr", and "oct_legacy" (3), "pent_bipyr" (4),
                      "hex_bipyr" (5), "bcc" (6)).
            cutoff (float): Cutoff radius to determine which nearest
                neighbors are supposed to contribute to the order
                parameters. If the value is negative the neighboring
                sites found by distance and cutoff radius are further
                pruned using the get_nn method from the
                VoronoiNN class.
        """
        # Reject any unknown type string up front.
        for t in types:
            if t not in LocalStructOrderParams.__supported_types:
                raise ValueError("Unknown order parameter type (" + \
                    t + ")!")
        self._types = tuple(types)
        # Per-OP parameter dicts; user-supplied entries override the
        # defaults from op_params.yaml (copied so callers' dicts are
        # never mutated).
        self._params = []
        for i, t in enumerate(self._types):
            d = deepcopy(default_op_params[t]) if default_op_params[t] is not None \
                else None
            if parameters is None:
                self._params.append(d)
            elif parameters[i] is None:
                self._params.append(d)
            else:
                self._params.append(deepcopy(parameters[i]))
        # Flags selecting which intermediate quantities (neighbor vectors,
        # neighbor-neighbor vectors, geometric OPs, BOOPs) must be computed.
        self._computerijs = self._computerjks = self._geomops = False
        self._geomops2 = self._boops = False
        self._max_trig_order = -1
        # Add here any additional flags to be used during calculation.
        if "sgl_bd" in self._types:
            self._computerijs = True
        if not set(self._types).isdisjoint(
                ["tet", "oct", "bcc", "sq_pyr", "sq_pyr_legacy",
                 "tri_bipyr", "sq_bipyr", "oct_legacy", "tri_plan",
                 "sq_plan", "pent_plan", "tri_pyr", "pent_pyr", "hex_pyr",
                 "pent_bipyr", "hex_bipyr", "T", "cuboct", "oct_max", "tet_max",
                 "tri_plan_max", "sq_plan_max", "pent_plan_max", "cuboct_max",
                 "bent", "see_saw_rect", "hex_plan_max"]):
            self._computerijs = self._geomops = True
        if not set(self._types).isdisjoint(["reg_tri", "sq"]):
            self._computerijs = self._computerjks = self._geomops2 = True
        if not set(self._types).isdisjoint(["q2", "q4", "q6"]):
            self._computerijs = self._boops = True
        # Highest spherical-harmonic weight needed (later checks override
        # earlier ones, so the largest requested l wins).
        if "q2" in self._types:
            self._max_trig_order = 2
        if "q4" in self._types:
            self._max_trig_order = 4
        if "q6" in self._types:
            self._max_trig_order = 6
        # Finish parameter treatment.
        if cutoff < 0.0:
            # Negative cutoff: use its magnitude and prune neighbors with
            # the Voronoi-based finder.
            self._cutoff = -cutoff
            self._voroneigh = True
        elif cutoff > 0.0:
            self._cutoff = cutoff
            self._voroneigh = False
        else:
            raise ValueError("Cutoff radius is zero!")
        # Further variable definitions.
        # -1 flags that no OP calculation has been performed yet.
        self._last_nneigh = -1
        # Caches for trigonometric terms filled by
        # compute_trigonometric_terms.
        self._pow_sin_t = {}
        self._pow_cos_t = {}
        self._sin_n_p = {}
        self._cos_n_p = {}
    @property
    def num_ops(self):
        """
        Returns:
            int: the number of different order parameters that are targeted
                to be calculated.
        """
        return len(self._types)
@property
def last_nneigh(self):
""""
Returns:
int: the number of neighbors encountered during the most
recent order parameter calculation. A value of -1 indicates
that no such calculation has yet been performed for this
instance.
"""
return len(self._last_nneigh)
def compute_trigonometric_terms(self, thetas, phis):
""""
Computes trigonometric terms that are required to
calculate bond orientational order parameters using
internal variables.
Args:
thetas ([float]): polar angles of all neighbors in radians.
phis ([float]): azimuth angles of all neighbors in radians.
The list of
azimuth angles of all neighbors in radians. The list of
azimuth angles is expected to have the same size as the
list of polar angles; otherwise, a ValueError is raised.
Also, the two lists of angles have to be coherent in
order. That is, it is expected that the order in the list
of azimuth angles corresponds to a distinct sequence of
neighbors. And, this sequence has to equal the sequence
of neighbors in the list of polar angles.
"""
if len(thetas) != len(phis):
raise ValueError("List of polar and azimuthal angles have to be"
" equal!")
self._pow_sin_t.clear()
self._pow_cos_t.clear()
self._sin_n_p.clear()
self._cos_n_p.clear()
self._pow_sin_t[1] = [sin(float(t)) for t in thetas]
self._pow_cos_t[1] = [cos(float(t)) for t in thetas]
self._sin_n_p[1] = [sin(float(p)) for p in phis]
self._cos_n_p[1] = [cos(float(p)) for p in phis]
for i in range(2, self._max_trig_order + 1):
self._pow_sin_t[i] = [e[0] * e[1] for e in zip(
self._pow_sin_t[i - 1], self._pow_sin_t[1])]
self._pow_cos_t[i] = [e[0] * e[1] for e in zip(
self._pow_cos_t[i - 1], self._pow_cos_t[1])]
self._sin_n_p[i] = [sin(float(i) * float(p)) \
for p in phis]
self._cos_n_p[i] = [cos(float(i) * float(p)) \
for p in phis]
    def get_q2(self, thetas=None, phis=None):
        """
        Calculates the value of the bond orientational order parameter of
        weight l=2. If the function is called with non-empty lists of
        polar and azimuthal angles the corresponding trigonometric terms
        are computed afresh. Otherwise, it is expected that the
        compute_trigonometric_terms function has been just called.
        Args:
            thetas ([float]): polar angles of all neighbors in radians.
            phis ([float]): azimuth angles of all neighbors in radians.
        Returns:
            float: bond orientational order parameter of weight l=2
                corresponding to the input angles thetas and phis.
        """
        if thetas is not None and phis is not None:
            self.compute_trigonometric_terms(thetas, phis)
        # Number of neighbors, taken from the cached sin(theta) list.
        nnn = len(self._pow_sin_t[1])
        nnn_range = range(nnn)
        # Constant prefactors of the l=2 spherical harmonics.
        sqrt_15_2pi = sqrt(15.0 / (2.0 * pi))
        sqrt_5_pi = sqrt(5.0 / pi)
        pre_y_2_2 = [0.25 * sqrt_15_2pi * val for val in self._pow_sin_t[2]]
        pre_y_2_1 = [0.5 * sqrt_15_2pi * val[0] * val[1]
                     for val in zip(self._pow_sin_t[1], self._pow_cos_t[1])]
        # Accumulate |sum_j Y_2^m(theta_j, phi_j)|^2 for each m = -2..2.
        acc = 0.0
        # Y_2_-2
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_2_2[i] * self._cos_n_p[2][i]
            imag -= pre_y_2_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        # Y_2_-1
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_2_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_2_0 (purely real)
        real = imag = 0.0
        for i in nnn_range:
            real += 0.25 * sqrt_5_pi * (3.0 * self._pow_cos_t[2][i] - 1.0)
        acc += (real * real)
        # Y_2_1
        real = imag = 0.0
        for i in nnn_range:
            real -= pre_y_2_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_2_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_2_2
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_2_2[i] * self._cos_n_p[2][i]
            imag += pre_y_2_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        q2 = sqrt(4.0 * pi * acc / (5.0 * float(nnn * nnn)))
        return q2
    def get_q4(self, thetas=None, phis=None):
        """
        Calculates the value of the bond orientational order parameter of
        weight l=4. If the function is called with non-empty lists of
        polar and azimuthal angles the corresponding trigonometric terms
        are computed afresh. Otherwise, it is expected that the
        compute_trigonometric_terms function has been just called.
        Args:
            thetas ([float]): polar angles of all neighbors in radians.
            phis ([float]): azimuth angles of all neighbors in radians.
        Returns:
            float: bond orientational order parameter of weight l=4
                corresponding to the input angles thetas and phis.
        """
        if thetas is not None and phis is not None:
            self.compute_trigonometric_terms(thetas, phis)
        # Number of neighbors, taken from the cached sin(theta) list.
        nnn = len(self._pow_sin_t[1])
        nnn_range = range(nnn)
        i16_3 = 3.0 / 16.0
        i8_3 = 3.0 / 8.0
        sqrt_35_pi = sqrt(35.0 / pi)
        sqrt_35_2pi = sqrt(35.0 / (2.0 * pi))
        sqrt_5_pi = sqrt(5.0 / pi)
        sqrt_5_2pi = sqrt(5.0 / (2.0 * pi))
        sqrt_1_pi = sqrt(1.0 / pi)
        # Prefactors of the l=4 spherical harmonics, built from the cached
        # powers of sin(theta) and cos(theta).
        pre_y_4_4 = [i16_3 * sqrt_35_2pi * val for val in self._pow_sin_t[4]]
        pre_y_4_3 = [i8_3 * sqrt_35_pi * val[0] * val[1] \
                     for val in zip(self._pow_sin_t[3], self._pow_cos_t[1])]
        pre_y_4_2 = [i8_3 * sqrt_5_2pi * val[0] * (7.0 * val[1] - 1.0) \
                     for val in zip(self._pow_sin_t[2], self._pow_cos_t[2])]
        pre_y_4_1 = [i8_3 * sqrt_5_pi * val[0] * (7.0 * val[1] - 3.0 * val[2]) \
                     for val in zip(self._pow_sin_t[1], self._pow_cos_t[3], \
                                    self._pow_cos_t[1])]
        # Accumulate |sum_j Y_4^m(theta_j, phi_j)|^2 for each m = -4..4.
        acc = 0.0
        # Y_4_-4
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_4[i] * self._cos_n_p[4][i]
            imag -= pre_y_4_4[i] * self._sin_n_p[4][i]
        acc += (real * real + imag * imag)
        # Y_4_-3
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_3[i] * self._cos_n_p[3][i]
            imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
        acc += (real * real + imag * imag)
        # Y_4_-2
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_2[i] * self._cos_n_p[2][i]
            imag -= pre_y_4_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        # Y_4_-1
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_4_0 (purely real)
        real = imag = 0.0
        for i in nnn_range:
            real += i16_3 * sqrt_1_pi * (35.0 * self._pow_cos_t[4][i] - \
                                         30.0 * self._pow_cos_t[2][i] + 3.0)
        acc += (real * real)
        # Y_4_1
        real = imag = 0.0
        for i in nnn_range:
            real -= pre_y_4_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_4_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_4_2
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_2[i] * self._cos_n_p[2][i]
            imag += pre_y_4_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        # Y_4_3
        real = imag = 0.0
        for i in nnn_range:
            real -= pre_y_4_3[i] * self._cos_n_p[3][i]
            imag -= pre_y_4_3[i] * self._sin_n_p[3][i]
        acc += (real * real + imag * imag)
        # Y_4_4
        real = imag = 0.0
        for i in nnn_range:
            real += pre_y_4_4[i] * self._cos_n_p[4][i]
            imag += pre_y_4_4[i] * self._sin_n_p[4][i]
        acc += (real * real + imag * imag)
        q4 = sqrt(4.0 * pi * acc / (9.0 * float(nnn * nnn)))
        return q4
    def get_q6(self, thetas=None, phis=None):
        """
        Calculates the value of the bond orientational order parameter of
        weight l=6. If the function is called with non-empty lists of
        polar and azimuthal angles the corresponding trigonometric terms
        are computed afresh. Otherwise, it is expected that the
        compute_trigonometric_terms function has been just called.
        Args:
            thetas ([float]): polar angles of all neighbors in radians.
            phis ([float]): azimuth angles of all neighbors in radians.
        Returns:
            float: bond orientational order parameter of weight l=6
                corresponding to the input angles thetas and phis.
        """
        if thetas is not None and phis is not None:
            self.compute_trigonometric_terms(thetas, phis)
        # Number of neighbors, taken from the cached sin(theta) list.
        nnn = len(self._pow_sin_t[1])
        nnn_range = range(nnn)
        i64 = 1.0 / 64.0
        i32 = 1.0 / 32.0
        i32_3 = 3.0 / 32.0
        i16 = 1.0 / 16.0
        sqrt_3003_pi = sqrt(3003.0 / pi)
        sqrt_1001_pi = sqrt(1001.0 / pi)
        sqrt_91_2pi = sqrt(91.0 / (2.0 * pi))
        sqrt_1365_pi = sqrt(1365.0 / pi)
        sqrt_273_2pi = sqrt(273.0 / (2.0 * pi))
        sqrt_13_pi = sqrt(13.0 / pi)
        # Prefactors of the l=6 spherical harmonics, built from the cached
        # powers of sin(theta) and cos(theta).
        pre_y_6_6 = [i64 * sqrt_3003_pi * val for val in self._pow_sin_t[6]]
        pre_y_6_5 = [i32_3 * sqrt_1001_pi * val[0] * val[1]
                     for val in zip(self._pow_sin_t[5], self._pow_cos_t[1])]
        pre_y_6_4 = [i32_3 * sqrt_91_2pi * val[0] * (11.0 * val[1] - 1.0)
                     for val in zip(self._pow_sin_t[4], self._pow_cos_t[2])]
        pre_y_6_3 = [
            i32 * sqrt_1365_pi * val[0] * (11.0 * val[1] - 3.0 * val[2])
            for val in zip(self._pow_sin_t[3], self._pow_cos_t[3],
                           self._pow_cos_t[1])]
        pre_y_6_2 = [i64 * sqrt_1365_pi * val[0] * (33.0 * val[1] -
                                                    18.0 * val[2] + 1.0) for val
                     in zip(self._pow_sin_t[2],
                            self._pow_cos_t[4], self._pow_cos_t[2])]
        pre_y_6_1 = [i16 * sqrt_273_2pi * val[0] * (33.0 * val[1] -
                                                    30.0 * val[2] + 5.0 * val[
                                                        3]) for val in
                     zip(self._pow_sin_t[1],
                         self._pow_cos_t[5], self._pow_cos_t[3],
                         self._pow_cos_t[1])]
        # Accumulate |sum_j Y_6^m(theta_j, phi_j)|^2 for each m = -6..6.
        acc = 0.0
        # Y_6_-6
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_6[i] * self._cos_n_p[6][i]  # cos(x) = cos(-x)
            imag -= pre_y_6_6[i] * self._sin_n_p[6][i]  # sin(x) = -sin(-x)
        acc += (real * real + imag * imag)
        # Y_6_-5
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_5[i] * self._cos_n_p[5][i]
            imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
        acc += (real * real + imag * imag)
        # Y_6_-4
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_4[i] * self._cos_n_p[4][i]
            imag -= pre_y_6_4[i] * self._sin_n_p[4][i]
        acc += (real * real + imag * imag)
        # Y_6_-3
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_3[i] * self._cos_n_p[3][i]
            imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
        acc += (real * real + imag * imag)
        # Y_6_-2
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_2[i] * self._cos_n_p[2][i]
            imag -= pre_y_6_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        # Y_6_-1
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_6_0 (purely real)
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += i32 * sqrt_13_pi * (231.0 * self._pow_cos_t[6][i] -
                                        315.0 * self._pow_cos_t[4][i] + 105.0 *
                                        self._pow_cos_t[2][i] - 5.0)
        acc += (real * real)
        # Y_6_1
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real -= pre_y_6_1[i] * self._cos_n_p[1][i]
            imag -= pre_y_6_1[i] * self._sin_n_p[1][i]
        acc += (real * real + imag * imag)
        # Y_6_2
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_2[i] * self._cos_n_p[2][i]
            imag += pre_y_6_2[i] * self._sin_n_p[2][i]
        acc += (real * real + imag * imag)
        # Y_6_3
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real -= pre_y_6_3[i] * self._cos_n_p[3][i]
            imag -= pre_y_6_3[i] * self._sin_n_p[3][i]
        acc += (real * real + imag * imag)
        # Y_6_4
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_4[i] * self._cos_n_p[4][i]
            imag += pre_y_6_4[i] * self._sin_n_p[4][i]
        acc += (real * real + imag * imag)
        # Y_6_5
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real -= pre_y_6_5[i] * self._cos_n_p[5][i]
            imag -= pre_y_6_5[i] * self._sin_n_p[5][i]
        acc += (real * real + imag * imag)
        # Y_6_6
        real = 0.0
        imag = 0.0
        for i in nnn_range:
            real += pre_y_6_6[i] * self._cos_n_p[6][i]
            imag += pre_y_6_6[i] * self._sin_n_p[6][i]
        acc += (real * real + imag * imag)
        q6 = sqrt(4.0 * pi * acc / (13.0 * float(nnn * nnn)))
        return q6
def get_type(self, index):
"""
Return type of order parameter at the index provided and
represented by a short string.
Args:
index (int): index of order parameter for which type is
to be returned.
Returns:
str: OP type.
"""
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting order parameter type"
" out-of-bounds!")
return self._types[index]
def get_parameters(self, index):
"""
Returns list of floats that represents
the parameters associated
with calculation of the order
parameter that was defined at the index provided.
Attention: the parameters do not need to equal those originally
inputted because of processing out of efficiency reasons.
Args:
index (int):
index of order parameter for which associated parameters
are to be returned.
Returns:
[float]: parameters of a given OP.
"""
if index < 0 or index >= len(self._types):
raise ValueError("Index for getting parameters associated with"
" order parameter calculation out-of-bounds!")
return self._params[index]
def get_order_parameters(self, structure, n, indices_neighs=None, \
tol=0.0, target_spec=None):
"""
Compute all order parameters of site n.
Args:
structure (Structure): input structure.
n (int): index of site in input structure,
for which OPs are to be
calculated. Note that we do not use the sites iterator
here, but directly access sites via struct[index].
indices_neighs ([int]): list of indices of those neighbors
in Structure object
structure that are to be considered for OP computation.
This optional argument overwrites the way neighbors are
to be determined as defined in the constructor (i.e.,
Voronoi coordination finder via negative cutoff radius
vs constant cutoff radius if cutoff was positive).
We do not use information about the underlying
structure lattice if the neighbor indices are explicitly
provided. This has two important consequences. First,
the input Structure object can, in fact, be a
simple list of Site objects. Second, no nearest images
of neighbors are determined when providing an index list.
Note furthermore that this neighbor
determination type ignores the optional target_spec
argument.
tol (float): threshold of weight
(= solid angle / maximal solid angle)
to determine if a particular pair is
considered neighbors; this is relevant only in the case
when Voronoi polyhedra are used to determine coordination
target_spec (Specie): target species to be considered
when calculating the order
parameters of site n; None includes all species of input
structure.
Returns:
[floats]: representing order parameters. Should it not be
possible to compute a given OP for a conceptual reason, the
corresponding entry is None instead of a float. For Steinhardt
et al.'s bond orientational OPs and the other geometric OPs
("tet", "oct", "bcc", etc.),
this can happen if there is a single
neighbor around site n in the structure because that
does not permit calculation of angles between multiple
neighbors.
"""
# Do error-checking and initialization.
if n < 0:
raise ValueError("Site index smaller zero!")
if n >= len(structure):
raise ValueError("Site index beyond maximum!")
if indices_neighs is not None:
for index in indices_neighs:
if index >= len(structure):
raise ValueError("Neighbor site index beyond maximum!")
if tol < 0.0:
raise ValueError("Negative tolerance for weighted solid angle!")
left_of_unity = 1.0 - 1.0e-12
# The following threshold has to be adapted to non-Angstrom units.
very_small = 1.0e-12
fac_bcc = 1.0 / exp(-0.5)
# Find central site and its neighbors.
# Note that we adopt the same way of accessing sites here as in
# VoronoiNN; that is, not via the sites iterator.
centsite = structure[n]
if indices_neighs is not None:
neighsites = [structure[index] for index in indices_neighs]
elif self._voroneigh:
vnn = VoronoiNN(tol=tol, targets=target_spec)
neighsites = vnn.get_nn(structure, n)
else:
# Structure.get_sites_in_sphere --> also other periodic images
neighsitestmp = [i[0] for i in structure.get_sites_in_sphere(
centsite.coords, self._cutoff)]
neighsites = []
if centsite not in neighsitestmp:
raise ValueError("Could not find center site!")
else:
neighsitestmp.remove(centsite)
if target_spec is None:
neighsites = list(neighsitestmp)
else:
neighsites[:] = [site for site in neighsitestmp \
if site.specie.symbol == target_spec]
nneigh = len(neighsites)
self._last_nneigh = nneigh
# Prepare angle calculations, if applicable.
rij = []
rjk = []
rijnorm = []
rjknorm = []
dist = []
distjk_unique = []
distjk = []
centvec = centsite.coords
if self._computerijs:
for j, neigh in enumerate(neighsites):
rij.append((neigh.coords - centvec))
dist.append(np.linalg.norm(rij[j]))
rijnorm.append((rij[j] / dist[j]))
if self._computerjks:
for j, neigh in enumerate(neighsites):
rjk.append([])
rjknorm.append([])
distjk.append([])
kk = 0
for k in range(len(neighsites)):
if j != k:
rjk[j].append(neighsites[k].coords - neigh.coords)
distjk[j].append(np.linalg.norm(rjk[j][kk]))
if k > j:
distjk_unique.append(distjk[j][kk])
rjknorm[j].append(rjk[j][kk] / distjk[j][kk])
kk = kk + 1
# Initialize OP list and, then, calculate OPs.
ops = [0.0 for t in self._types]
#norms = [[[] for j in range(nneigh)] for t in self._types]
# First, coordination number and distance-based OPs.
for i, t in enumerate(self._types):
if t == "cn":
ops[i] = nneigh / self._params[i]['norm']
elif t == "sgl_bd":
dist_sorted = sorted(dist)
if len(dist_sorted) == 1:
ops[i] = 1.0
elif len(dist_sorted) > 1:
ops[i] = 1.0 - dist_sorted[0] / dist_sorted[1]
# Then, bond orientational OPs based on spherical harmonics
# according to Steinhardt et al., Phys. Rev. B, 28, 784-805, 1983.
if self._boops:
thetas = []
phis = []
for j, vec in enumerate(rijnorm):
# z is North pole --> theta between vec and (0, 0, 1)^T.
# Because vec is normalized, dot product is simply vec[2].
thetas.append(acos(max(-1.0, min(vec[2], 1.0))))
tmpphi = 0.0
# Compute phi only if it is not (almost) perfectly
# aligned with z-axis.
if -left_of_unity < vec[2] < left_of_unity:
# x is prime meridian --> phi between projection of vec
# into x-y plane and (1, 0, 0)^T
tmpphi = acos(max(-1.0, min(vec[0] / (sqrt(
vec[0] * vec[0] + vec[1] * vec[1])), 1.0)))
if vec[1] < 0.0:
tmpphi = -tmpphi
phis.append(tmpphi)
# Note that None flags that we have too few neighbors
# for calculating BOOPS.
for i, t in enumerate(self._types):
if t == "q2":
ops[i] = self.get_q2(thetas, phis) if len(
thetas) > 0 else None
elif t == "q4":
ops[i] = self.get_q4(thetas, phis) if len(
thetas) > 0 else None
elif t == "q6":
ops[i] = self.get_q6(thetas, phis) if len(
thetas) > 0 else None
# Then, deal with the Peters-style OPs that are tailor-made
# to recognize common structural motifs
# (Peters, J. Chem. Phys., 131, 244103, 2009;
# Zimmermann et al., J. Am. Chem. Soc., under revision, 2015).
if self._geomops:
gaussthetak = [0.0 for t in self._types] # not used by all OPs
qsptheta = [[[] for j in range(nneigh)] for t in self._types]
norms = [[[] for j in range(nneigh)] for t in self._types]
ipi = 1.0 / pi
piover2 = pi / 2.0
tetangoverpi = acos(-1.0 / 3.0) * ipi # xxx: delete
itetangminuspihalfoverpi = 1.0 / (tetangoverpi - 0.5)
onethird = 1.0 / 3.0
twothird = 2.0 / 3.0
for j in range(nneigh): # Neighbor j is put to the North pole.
zaxis = rijnorm[j]
kc = 0
for k in range(nneigh): # From neighbor k, we construct
if j != k: # the prime meridian.
for i in range(len(self._types)):
qsptheta[i][j].append(0.0)
norms[i][j].append(0)
tmp = max(
-1.0, min(np.inner(zaxis, rijnorm[k]), 1.0))
thetak = acos(tmp)
xaxistmp = gramschmidt(rijnorm[k], zaxis)
if np.linalg.norm(xaxistmp) < very_small:
flag_xaxis = True
else:
xaxis = xaxistmp / np.linalg.norm(xaxistmp)
flag_xaxis = False
# Contributions of j-i-k angles, where i represents the
# central atom and j and k two of the neighbors.
for i, t in enumerate(self._types):
if t in ["bent", "sq_pyr_legacy"]:
tmp = self._params[i]['IGW_TA'] * (
thetak * ipi - self._params[i]['TA'])
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in ["tri_plan", "tri_plan_max", "tet", "tet_max"]:
tmp = self._params[i]['IGW_TA'] * (
thetak * ipi - self._params[i]['TA'])
gaussthetak[i] = exp(-0.5 * tmp * tmp)
if t in ["tri_plan_max", "tet_max"]:
qsptheta[i][j][kc] += gaussthetak[i]
norms[i][j][kc] += 1
elif t in ["T", "tri_pyr", "sq_pyr", "pent_pyr", "hex_pyr"]:
tmp = self._params[i]['IGW_EP'] * (thetak * ipi - 0.5)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in ["sq_plan", "oct", "oct_legacy",
"cuboct", "cuboct_max"]:
if thetak >= self._params[i]['min_SPP']:
tmp = self._params[i]['IGW_SPP'] * (
thetak * ipi - 1.0)
qsptheta[i][j][kc] += (
self._params[i]['w_SPP'] *
exp(-0.5 * tmp * tmp))
norms[i][j][kc] += self._params[i]['w_SPP']
elif t in ["see_saw_rect", "tri_bipyr", "sq_bipyr",
"pent_bipyr", "hex_bipyr", "oct_max",
"sq_plan_max", "hex_plan_max"]:
if thetak < self._params[i]['min_SPP']:
tmp = self._params[i]['IGW_EP'] * (
thetak * ipi - 0.5) if t != "hex_plan_max" else \
self._params[i]['IGW_TA'] * (
fabs(thetak * ipi - 0.5) - self._params[i]['TA'])
qsptheta[i][j][kc] += exp(
-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t in ["pent_plan", "pent_plan_max"]:
tmp = 0.4 if thetak <= self._params[i]['TA'] * pi \
else 0.8
tmp2 = self._params[i]['IGW_TA'] * (
thetak * ipi - tmp)
gaussthetak[i] = exp(-0.5 * tmp2 * tmp2)
if t == "pent_plan_max":
qsptheta[i][j][kc] += gaussthetak[i]
norms[i][j][kc] += 1
elif t == "bcc" and j < k:
if thetak >= self._params[i]['min_SPP']:
tmp = self._params[i]['IGW_SPP'] * (
thetak * ipi - 1.0)
qsptheta[i][j][kc] += (self._params[i]['w_SPP'] *
exp(-0.5 * tmp * tmp))
norms[i][j][kc] += self._params[i]['w_SPP']
for m in range(nneigh):
if (m != j) and (m != k) and (not flag_xaxis):
tmp = max(
-1.0, min(np.inner(zaxis, rijnorm[m]), 1.0))
thetam = acos(tmp)
xtwoaxistmp = gramschmidt(rijnorm[m], zaxis)
l = np.linalg.norm(xtwoaxistmp)
if l < very_small:
flag_xtwoaxis = True
else:
xtwoaxis = xtwoaxistmp / l
phi = acos(max(
-1.0,
min(np.inner(xtwoaxis, xaxis), 1.0)))
flag_xtwoaxis = False
# South pole contributions of m.
if t in ["tri_bipyr", "sq_bipyr", "pent_bipyr",
"hex_bipyr", "oct_max", "sq_plan_max",
"hex_plan_max", "see_saw_rect"]:
if thetam >= self._params[i]['min_SPP']:
tmp = self._params[i]['IGW_SPP'] * (
thetam * ipi - 1.0)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
# Contributions of j-i-m angle and
# angles between plane j-i-k and i-m vector.
if not flag_xaxis and not flag_xtwoaxis:
for i, t in enumerate(self._types):
if t in ["tri_plan", "tri_plan_max", \
"tet", "tet_max"]:
tmp = self._params[i]['IGW_TA'] * (
thetam * ipi -
self._params[i]['TA'])
tmp2 = cos(
self._params[i]['fac_AA'] *
phi) ** self._params[i][
'exp_cos_AA']
tmp3 = 1 if t in ["tri_plan_max", "tet_max"] \
else gaussthetak[i]
qsptheta[i][j][kc] += tmp3 * exp(
-0.5 * tmp * tmp) * tmp2
norms[i][j][kc] += 1
elif t in ["pent_plan", "pent_plan_max"]:
tmp = 0.4 if thetam <= self._params[i]['TA'] * pi \
else 0.8
tmp2 = self._params[i]['IGW_TA'] * (
thetam * ipi - tmp)
tmp3 = cos(phi)
tmp4 = 1 if t == "pent_plan_max" \
else gaussthetak[i]
qsptheta[i][j][kc] += tmp4 * exp(
-0.5 * tmp2 * tmp2) * tmp3 * tmp3
norms[i][j][kc] += 1
elif t in ["T", "tri_pyr", "sq_pyr",
"pent_pyr", "hex_pyr"]:
tmp = cos(self._params[i]['fac_AA'] *
phi) ** self._params[i][
'exp_cos_AA']
tmp3 = self._params[i]['IGW_EP'] * (
thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * exp(
-0.5 * tmp3 * tmp3)
norms[i][j][kc] += 1
elif t in ["sq_plan", "oct", "oct_legacy"]:
if thetak < self._params[i]['min_SPP'] and \
thetam < self._params[i]['min_SPP']:
tmp = cos(self._params[i]['fac_AA'] *
phi) ** self._params[i]['exp_cos_AA']
tmp2 = self._params[i]['IGW_EP'] * (
thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
if t == "oct_legacy":
qsptheta[i][j][kc] -= tmp * self._params[i][6] * self._params[i][7]
norms[i][j][kc] += 1
elif t in ["tri_bipyr", "sq_bipyr", "pent_bipyr",
"hex_bipyr", "oct_max", "sq_plan_max",
"hex_plan_max"]:
if thetam < self._params[i]['min_SPP']:
if thetak < self._params[i]['min_SPP']:
tmp = cos(self._params[i]['fac_AA'] *
phi) ** self._params[i]['exp_cos_AA']
tmp2 = self._params[i]['IGW_EP'] * (
thetam * ipi - 0.5) if t != "hex_plan_max" else \
self._params[i]['IGW_TA'] * (
fabs(thetam * ipi - 0.5) - self._params[i]['TA'])
qsptheta[i][j][kc] += tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1
elif t == "bcc" and j < k:
if thetak < self._params[i]['min_SPP']:
if thetak > piover2:
fac = 1.0
else:
fac = -1.0
tmp = (thetam - piover2) / asin(1/3)
qsptheta[i][j][kc] += fac * cos(
3.0 * phi) * fac_bcc * \
tmp * exp(-0.5 * tmp * tmp)
norms[i][j][kc] += 1
elif t == "see_saw_rect":
if thetam < self._params[i]['min_SPP']:
if thetak < self._params[i]['min_SPP'] and phi < 0.75 * pi:
tmp = cos(self._params[i]['fac_AA'] *
phi) ** self._params[i]['exp_cos_AA']
tmp2 = self._params[i]['IGW_EP'] * (
thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * \
exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
elif t in ["cuboct", "cuboct_max"]:
if thetam < self._params[i]['min_SPP'] and \
thetak > self._params[i][4] and \
thetak < self._params[i][2]:
if thetam > self._params[i][4] and \
thetam < self._params[i][2]:
tmp = cos(phi)
tmp2 = self._params[i][5] * (thetam * ipi - 0.5)
qsptheta[i][j][kc] += tmp * tmp * exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
elif thetam < self._params[i][4]:
tmp = 0.0556 * (cos(
phi - 0.5 * pi) - 0.81649658)
tmp2 = self._params[i][6] * (
thetam * ipi - onethird)
qsptheta[i][j][kc] += exp(
-0.5 * tmp * tmp) * \
exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
elif thetam > self._params[i][2]:
tmp = 0.0556 * (cos(phi - 0.5 * pi) - \
0.81649658)
tmp2 = self._params[i][6] * (thetam * ipi - \
twothird)
qsptheta[i][j][kc] += exp(-0.5 * tmp * tmp) * \
exp(-0.5 * tmp2 * tmp2)
norms[i][j][kc] += 1.0
kc += 1
# Normalize Peters-style OPs.
for i, t in enumerate(self._types):
#if t == "pent_plan":
# ops[i] = ops[i] / sum(norms[i]) \
# if sum(norms[i]) > 1.0e-12 else None
if t in ["tri_plan", "tet", "bent", "sq_plan",
"oct", "oct_legacy", "cuboct", "pent_plan"]:
ops[i] = tmp_norm = 0.0
for j in range(nneigh):
ops[i] += sum(qsptheta[i][j])
tmp_norm += float(sum(norms[i][j]))
ops[i] = ops[i] / tmp_norm if tmp_norm > 1.0e-12 else None
elif t in ["T", "tri_pyr", "see_saw_rect", "sq_pyr", "tri_bipyr",
"sq_bipyr", "pent_pyr", "hex_pyr", "pent_bipyr",
"hex_bipyr", "oct_max", "tri_plan_max", "tet_max",
"sq_plan_max", "pent_plan_max", "cuboct_max", "hex_plan_max"]:
ops[i] = None
if nneigh > 1:
for j in range(nneigh):
for k in range(len(qsptheta[i][j])):
qsptheta[i][j][k] = qsptheta[i][j][k] / norms[i][j][k] \
if norms[i][j][k] > 1.0e-12 else 0.0
ops[i] = max(qsptheta[i][j]) if j == 0 \
else max(ops[i], max(qsptheta[i][j]))
#ops[i] = max(qsptheta[i]) if len(qsptheta[i]) > 0 else None
elif t == "bcc":
ops[i] = 0.0
for j in range(nneigh):
ops[i] += sum(qsptheta[i][j])
ops[i] = ops[i] / float(0.5 * float(
nneigh * (6 + (nneigh - 2) * (nneigh - 3)))) \
if nneigh > 3 else None
elif t == "sq_pyr_legacy":
if nneigh > 1:
dmean = np.mean(dist)
acc = 0.0
for d in dist:
tmp = self._params[i][2] * (d - dmean)
acc = acc + exp(-0.5 * tmp * tmp)
for j in range(nneigh):
ops[i] = max(qsptheta[i][j]) if j == 0 \
else max(ops[i], max(qsptheta[i][j]))
ops[i] = acc * ops[i] / float(nneigh)
#nneigh * (nneigh - 1))
else:
ops[i] = None
# Then, deal with the new-style OPs that require vectors between
# neighbors.
if self._geomops2:
# Compute all (unique) angles and sort the resulting list.
aij = []
for ir, r in enumerate(rijnorm):
for j in range(ir + 1, len(rijnorm)):
aij.append(acos(max(-1.0, min(np.inner(r, rijnorm[j]), 1.0))))
aijs = sorted(aij)
# Compute height, side and diagonal length estimates.
neighscent = np.array([0.0, 0.0, 0.0])
for j, neigh in enumerate(neighsites):
neighscent = neighscent + neigh.coords
if nneigh > 0:
neighscent = (neighscent / float(nneigh))
h = np.linalg.norm(neighscent - centvec)
b = min(distjk_unique) if len(distjk_unique) > 0 else 0
dhalf = max(distjk_unique) / 2.0 if len(distjk_unique) > 0 else 0
for i, t in enumerate(self._types):
if t == "reg_tri" or t == "sq":
if nneigh < 3:
ops[i] = None
else:
ops[i] = 1.0
if t == "reg_tri":
a = 2.0 * asin(b / (2.0 * sqrt(h * h + (b / (
2.0 * cos(3.0 * pi / 18.0))) ** 2.0)))
nmax = 3
elif t == "sq":
a = 2.0 * asin(
b / (2.0 * sqrt(h * h + dhalf * dhalf)))
nmax = 4
for j in range(min([nneigh, nmax])):
ops[i] = ops[i] * exp(-0.5 * ((
aijs[j] - a) *
self._params[i][
0]) ** 2)
return ops
class BrunnerNN_reciprocal(NearNeighbors):
    """
    Coordination number via Brunner's method: neighbors are the atoms that
    lie inside the largest gap in the (sorted) interatomic distances. This
    variant measures the gap in *reciprocal* distances (1/d).
    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1E-4).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 8.0.
    """
    def __init__(self, tol=1.0e-4, cutoff=8.0):
        self.tol = tol
        self.cutoff = cutoff
    def get_nn_info(self, structure, n):
        center = structure[n]
        neighs_dists = structure.get_neighbors(center, self.cutoff)
        # Sorted neighbor distances within the search cutoff.
        ds = sorted(nd[-1] for nd in neighs_dists)
        # Gap between consecutive reciprocal distances; the bond cutoff is
        # the distance just below the widest such gap.
        gaps = [1.0 / ds[k] - 1.0 / ds[k + 1] for k in range(len(ds) - 1)]
        d_max = ds[gaps.index(max(gaps))]
        siw = []
        for nsite, dist in neighs_dists:
            if dist < d_max + self.tol:
                # Weight is the shortest bond length over this bond length.
                siw.append({'site': nsite,
                            'image': self._get_image(nsite.frac_coords),
                            'weight': ds[0] / dist,
                            'site_index': self._get_original_site(structure, nsite)})
        return siw
class BrunnerNN_relative(NearNeighbors):
    """
    Coordination number via Brunner's method: neighbors are the atoms that
    lie inside the largest gap in the (sorted) interatomic distances. This
    variant measures the largest *relative* gap (d_i / d_{i+1}).
    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1E-4).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 8.0.
    """
    def __init__(self, tol=1.0e-4, cutoff=8.0):
        self.tol = tol
        self.cutoff = cutoff
    def get_nn_info(self, structure, n):
        center = structure[n]
        neighs_dists = structure.get_neighbors(center, self.cutoff)
        # Sorted neighbor distances within the search cutoff.
        ds = sorted(nd[-1] for nd in neighs_dists)
        # Ratio of consecutive distances; the bond cutoff is the distance
        # just below the largest relative gap.
        gaps = [ds[k] / ds[k + 1] for k in range(len(ds) - 1)]
        d_max = ds[gaps.index(max(gaps))]
        siw = []
        for nsite, dist in neighs_dists:
            if dist < d_max + self.tol:
                # Weight is the shortest bond length over this bond length.
                siw.append({'site': nsite,
                            'image': self._get_image(nsite.frac_coords),
                            'weight': ds[0] / dist,
                            'site_index': self._get_original_site(structure, nsite)})
        return siw
class BrunnerNN_real(NearNeighbors):
    """
    Coordination number via Brunner's method: neighbors are the atoms that
    lie inside the largest gap in the (sorted) interatomic distances. This
    variant measures the largest *absolute* gap in real-space distances.
    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1E-4).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 8.0.
    """
    def __init__(self, tol=1.0e-4, cutoff=8.0):
        self.tol = tol
        self.cutoff = cutoff
    def get_nn_info(self, structure, n):
        center = structure[n]
        neighs_dists = structure.get_neighbors(center, self.cutoff)
        # Sorted neighbor distances within the search cutoff.
        ds = sorted(nd[-1] for nd in neighs_dists)
        # Difference of consecutive distances (negative values, since ds is
        # ascending); max() picks the smallest absolute gap exactly as the
        # original expression does.
        gaps = [ds[k] - ds[k + 1] for k in range(len(ds) - 1)]
        d_max = ds[gaps.index(max(gaps))]
        siw = []
        for nsite, dist in neighs_dists:
            if dist < d_max + self.tol:
                # Weight is the shortest bond length over this bond length.
                siw.append({'site': nsite,
                            'image': self._get_image(nsite.frac_coords),
                            'weight': ds[0] / dist,
                            'site_index': self._get_original_site(structure, nsite)})
        return siw
class EconNN(NearNeighbors):
    """
    Determines the average effective coordination number (ECoN) for each
    cation in a given structure using Hoppe's algorithm.
    For every cation-centered polyhedron the bond weight of each peripheral
    ion is computed from the weighted-average bond length, and the weights
    are summed to give the effective coordination number. Polyhedra sharing
    the same central cation are then averaged.
    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1e-4).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 10.0.
    """
    def __init__(self, tol=1.0e-4, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff
    def get_nn_info(self, structure, n):
        center = structure[n]
        neighs_dists = structure.get_neighbors(center, self.cutoff)
        # Hoppe's self-consistent weighted-average bond length.
        weighted_avg = calculate_weighted_avg([nd[-1] for nd in neighs_dists])
        siw = []
        for nsite, dist in neighs_dists:
            if dist < self.cutoff:
                # Bond weight decays rapidly with distance relative to the
                # weighted-average bond length.
                w = exp(1 - (dist / weighted_avg) ** 6)
                if w > self.tol:
                    siw.append({'site': nsite,
                                'image': self._get_image(nsite.frac_coords),
                                'weight': w,
                                'site_index': self._get_original_site(structure, nsite)})
        return siw
class CrystalNN(NearNeighbors):
    """
    This is custom near neighbor method intended for use in all kinds of
    periodic structures (metals, minerals, porous structures, etc). It is based
    on a Voronoi algorithm and uses the solid angle weights to determine the
    probability of various coordination environments. The algorithm can also
    modify probability using smooth distance cutoffs as well as Pauling
    electronegativity differences. The output can either be the most probable
    coordination environment or a weighted list of coordination environments.
    """

    # Lightweight container for the results of get_nn_data().
    NNData = namedtuple("nn_data", ["all_nninfo", "cn_weights", "cn_nninfo"])

    def __init__(self, weighted_cn=False, cation_anion=False,
                 distance_cutoffs=(0.5, 1.0), x_diff_weight=3.0,
                 porous_adjustment=True, search_cutoff=7,
                 fingerprint_length=None):
        """
        Initialize CrystalNN with desired parameters. Default parameters assume
        "chemical bond" type behavior is desired. For geometric neighbor
        finding (e.g., structural framework), set (i) distance_cutoffs=None,
        (ii) x_diff_weight=0.0 and (optionally) (iii) porous_adjustment=False
        which will disregard the atomic identities and perform best for a purely
        geometric match.
        Args:
            weighted_cn: (bool) if set to True, will return fractional weights
                for each potential near neighbor.
            cation_anion: (bool) if set True, will restrict bonding targets to
                sites with opposite or zero charge. Requires an oxidation states
                on all sites in the structure.
            distance_cutoffs: ([float, float]) - if not None, penalizes neighbor
                distances greater than sum of covalent radii plus
                distance_cutoffs[0]. Distances greater than covalent radii sum
                plus distance_cutoffs[1] are enforced to have zero weight.
            x_diff_weight: (float) - if multiple types of neighbor elements are
                possible, this sets preferences for targets with higher
                electronegativity difference.
            porous_adjustment: (bool) - if True, readjusts Voronoi weights to
                better describe layered / porous structures
            search_cutoff: (float) cutoff in Angstroms for initial neighbor
                search; this will be adjusted if needed internally
            fingerprint_length: (int) if a fixed_length CN "fingerprint" is
                desired from get_nn_data(), set this parameter
        """
        self.weighted_cn = weighted_cn
        self.cation_anion = cation_anion
        self.distance_cutoffs = distance_cutoffs
        # None is treated the same as 0 (no electronegativity adjustment).
        self.x_diff_weight = x_diff_weight if x_diff_weight is not None else 0
        self.search_cutoff = search_cutoff
        self.porous_adjustment = porous_adjustment
        self.fingerprint_length = fingerprint_length

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor information.
        Args:
            structure: (Structure) pymatgen Structure
            n: (int) index of target site
        Returns:
            siw (list of dicts): each dictionary provides information
                about a single near neighbor, where key 'site' gives
                access to the corresponding Site object, 'image' gives
                the image location, and 'weight' provides the weight
                that a given near-neighbor site contributes
                to the coordination number (1 or smaller), 'site_index'
                gives index of the corresponding site in
                the original structure.
        """
        nndata = self.get_nn_data(structure, n)

        if not self.weighted_cn:
            # Return the neighbors of the single most probable coordination
            # environment, each with unit weight.
            max_key = max(nndata.cn_weights, key=lambda k: nndata.cn_weights[k])
            nn = nndata.cn_nninfo[max_key]
            for entry in nn:
                entry["weight"] = 1
            return nn
        else:
            # Each neighbor's weight is the summed probability of all CN
            # environments that contain it.
            for entry in nndata.all_nninfo:
                weight = 0
                for cn in nndata.cn_nninfo:
                    for cn_entry in nndata.cn_nninfo[cn]:
                        if entry["site"] == cn_entry["site"]:
                            weight += nndata.cn_weights[cn]
                entry["weight"] = weight
            return nndata.all_nninfo

    def get_nn_data(self, structure, n, length=None):
        """
        The main logic of the method to compute near neighbor.
        Args:
            structure: (Structure) enclosing structure object
            n: (int) index of target site to get NN info for
            length: (int) if set, will return a fixed range of CN numbers
        Returns:
            a namedtuple (NNData) object that contains:
                - all near neighbor sites with weights
                - a dict of CN -> weight
                - a dict of CN -> associated near neighbor sites
        """
        length = length or self.fingerprint_length

        # determine possible bond targets
        target = None
        if self.cation_anion:
            target = []
            m_oxi = structure[n].specie.oxi_state
            for site in structure:
                if site.specie.oxi_state * m_oxi <= 0:  # opposite charge
                    target.append(site.specie)
            if not target:
                raise ValueError(
                    "No valid targets for site within cation_anion constraint!")

        # get base VoronoiNN targets; double the cutoff until the Voronoi
        # tessellation succeeds, bounded by the largest lattice vector length
        cutoff = self.search_cutoff
        max_cutoff = np.linalg.norm(structure.lattice.lengths_and_angles[0])
        while True:
            try:
                vnn = VoronoiNN(weight="solid_angle", targets=target,
                                cutoff=cutoff)
                nn = vnn.get_nn_info(structure, n)
                break
            except RuntimeError:
                if cutoff > max_cutoff:
                    raise RuntimeError("CrystalNN error in Voronoi finding.")
                cutoff = cutoff * 2

        # solid angle weights can be misleading in open / porous structures
        # adjust weights to correct for this behavior
        if self.porous_adjustment:
            for x in nn:
                x["weight"] *= x["poly_info"][
                    "solid_angle"] / x["poly_info"]["area"]

        # adjust solid angle weight based on electronegativity difference
        if self.x_diff_weight > 0:
            for entry in nn:
                X1 = structure[n].specie.X
                X2 = entry["site"].specie.X
                if math.isnan(X1) or math.isnan(X2):
                    chemical_weight = 1
                else:
                    # note: 3.3 is max deltaX between 2 elements
                    chemical_weight = 1 + self.x_diff_weight * \
                                      math.sqrt(abs(X1 - X2) / 3.3)
                entry["weight"] = entry["weight"] * chemical_weight

        # sort nearest neighbors from highest to lowest weight
        nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
        if nn[0]["weight"] == 0:
            return self.transform_to_length(
                self.NNData([], {0: 1.0}, {0: []}), length)

        # renormalize weights so the highest weight is 1.0
        highest_weight = nn[0]["weight"]
        for entry in nn:
            entry["weight"] = entry["weight"] / highest_weight

        # adjust solid angle weights based on distance: weight falls
        # smoothly (half-cosine) from 1 to 0 between the two cutoffs
        if self.distance_cutoffs:
            r1 = self._get_radius(structure[n])
            for entry in nn:
                r2 = self._get_radius(entry["site"])
                dist = np.linalg.norm(
                    structure[n].coords - entry["site"].coords)
                dist_weight = 0
                cutoff_low = (r1 + r2) + self.distance_cutoffs[0]
                cutoff_high = (r1 + r2) + self.distance_cutoffs[1]
                if dist <= cutoff_low:
                    dist_weight = 1
                elif dist < cutoff_high:
                    dist_weight = (math.cos((dist - cutoff_low) / (
                        cutoff_high - cutoff_low) * math.pi) + 1) * 0.5
                entry["weight"] = entry["weight"] * dist_weight

        # sort nearest neighbors from highest to lowest weight
        nn = sorted(nn, key=lambda x: x["weight"], reverse=True)
        if nn[0]["weight"] == 0:
            return self.transform_to_length(
                self.NNData([], {0: 1.0}, {0: []}), length)

        for entry in nn:
            entry["weight"] = round(entry["weight"], 3)
            del entry["poly_info"]  # trim

        # remove entries with no weight
        nn = [x for x in nn if x["weight"] > 0]

        # get the transition distances, i.e. all distinct weights
        dist_bins = []
        for entry in nn:
            if not dist_bins or dist_bins[-1] != entry["weight"]:
                dist_bins.append(entry["weight"])
        dist_bins.append(0)

        # main algorithm to determine fingerprint from bond weights
        cn_weights = {}  # CN -> score for that CN
        cn_nninfo = {}  # CN -> list of nearneighbor info for that CN
        for idx, val in enumerate(dist_bins):
            if val != 0:
                nn_info = []
                for entry in nn:
                    if entry["weight"] >= val:
                        nn_info.append(entry)
                cn = len(nn_info)
                cn_nninfo[cn] = nn_info
                cn_weights[cn] = self._semicircle_integral(dist_bins, idx)

        # add zero coord
        cn0_weight = 1.0 - sum(cn_weights.values())
        if cn0_weight > 0:
            cn_nninfo[0] = []
            cn_weights[0] = cn0_weight

        return self.transform_to_length(
            self.NNData(nn, cn_weights, cn_nninfo), length)

    def get_cn(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of site with index n in structure.
        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (integer or float): coordination number.
        """
        if self.weighted_cn != use_weights:
            raise ValueError("The weighted_cn parameter and use_weights "
                             "parameter should match!")
        return super(CrystalNN, self).get_cn(structure, n, use_weights)

    def get_cn_dict(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of each element bonded to site with index n in structure
        Args:
            structure (Structure): input structure
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (dict): dictionary of CN of each element bonded to site
        """
        if self.weighted_cn != use_weights:
            raise ValueError("The weighted_cn parameter and use_weights "
                             "parameter should match!")
        return super(CrystalNN, self).get_cn_dict(structure, n, use_weights)

    @staticmethod
    def _semicircle_integral(dist_bins, idx):
        """
        An internal method to get an integral between two bounds of a unit
        semicircle. Used in algorithm to determine bond probabilities.
        Args:
            dist_bins: (float) list of all possible bond weights
            idx: (float) index of starting bond weight
        Returns:
            (float) integral of portion of unit semicircle
        """
        r = 1
        x1 = dist_bins[idx]
        x2 = dist_bins[idx + 1]
        if dist_bins[idx] == 1:
            # x1 == r would make sqrt(r^2 - x1^2) zero and atan blow up;
            # use the closed-form quarter-circle area instead.
            area1 = 0.25 * math.pi * r ** 2
        else:
            area1 = 0.5 * ((x1 * math.sqrt(r ** 2 - x1 ** 2)) + (
                r ** 2 * math.atan(x1 / math.sqrt(r ** 2 - x1 ** 2))))
        area2 = 0.5 * ((x2 * math.sqrt(r ** 2 - x2 ** 2)) + (
            r ** 2 * math.atan(x2 / math.sqrt(r ** 2 - x2 ** 2))))
        # Normalize by the quarter-circle area so the integrals over all
        # bins sum to at most 1.
        return (area1 - area2) / (0.25 * math.pi * r ** 2)

    @staticmethod
    def _get_radius(site):
        """
        An internal method to get the expected radius for a site.
        Args:
            site: (Site)
        Returns:
            Covalent radius of element on site, or Atomic radius if unavailable
        """
        try:
            return CovalentRadius.radius[site.specie.symbol]
        except KeyError:
            # Element not tabulated in the covalent radius table; fall back
            # to the atomic radius. (Was a bare ``except``, which could mask
            # unrelated errors such as AttributeError/KeyboardInterrupt.)
            return site.specie.atomic_radius

    @staticmethod
    def transform_to_length(nndata, length):
        """
        Given NNData, transforms data to the specified fingerprint length
        Args:
            nndata: (NNData)
            length: (int) desired length of NNData
        """
        if length is None:
            return nndata
        # Pad missing coordination numbers with zero weight so the
        # fingerprint always spans CN = 0 .. length-1. (A redundant inner
        # ``if length:`` check was removed; for length == 0 the loop body
        # never executes, so behavior is unchanged.)
        for cn in range(length):
            if cn not in nndata.cn_weights:
                nndata.cn_weights[cn] = 0
                nndata.cn_nninfo[cn] = []
        return nndata
def calculate_weighted_avg(bonds):
    """
    Return the weighted average bond length given by Hoppe's effective
    coordination number formula.
    Args:
        bonds (list): list of floats that are the bond distances between
            a cation and its peripheral ions
    """
    d_min = min(bonds)
    # Each bond is weighted by exp(1 - (d / d_min)^6), so the shortest
    # bond carries weight 1 and longer bonds decay rapidly.
    weights = [exp(1 - (d / d_min) ** 6) for d in bonds]
    return sum(d * w for d, w in zip(bonds, weights)) / sum(weights)
class CutOffDictNN(NearNeighbors):
    """
    A very basic NN class using a dictionary of fixed cut-off distances.
    Can also be used with no dictionary defined for a Null/Empty NN class.
    """
    def __init__(self, cut_off_dict=None):
        """
        Args:
            cut_off_dict (Dict[str, float]): a dictionary
                of cut-off distances, e.g. {('Fe','O'): 2.0} for
                a maximum Fe-O bond length of 2.0 Angstroms.
                Note that if your structure is oxidation state
                decorated, the cut-off distances will have to
                explicitly include the oxidation state, e.g.
                {('Fe2+', 'O2-'): 2.0}
        """
        self.cut_off_dict = cut_off_dict or {}
        # Build a symmetric species -> {species: cutoff} table, tracking
        # the largest cutoff for the initial neighbor search radius.
        self._max_dist = 0.0
        lookup = defaultdict(dict)
        for (sp1, sp2), cut_off in self.cut_off_dict.items():
            lookup[sp1][sp2] = cut_off
            lookup[sp2][sp1] = cut_off
            if cut_off > self._max_dist:
                self._max_dist = cut_off
        self._lookup_dict = lookup
    def get_nn_info(self, structure, n):
        center = structure[n]
        nn_info = []
        for nsite, dist in structure.get_neighbors(center, self._max_dist):
            # Unlisted species pairs get a zero cutoff, i.e. never bonded.
            pair_cut_off = self._lookup_dict.get(
                center.species_string, {}).get(nsite.species_string, 0.0)
            if dist < pair_cut_off:
                nn_info.append({
                    'site': nsite,
                    'image': self._get_image(nsite.frac_coords),
                    'weight': dist,
                    'site_index': self._get_original_site(structure, nsite),
                })
        return nn_info
class Critic2NN(NearNeighbors):
    """
    Performs a topological analysis using critic2 to obtain
    neighbor information, using a sum of atomic charge
    densities. If an actual charge density is available
    (e.g. from a VASP CHGCAR), see Critic2Caller directly
    instead.
    """
    def __init__(self):
        # Cache the most recently analyzed structure so repeated
        # get_nn_info() calls on different sites of the same structure
        # do not re-run the expensive critic2 analysis.
        self.__last_structure = None
        self.__last_bonded_structure = None
    def get_bonded_structure(self, structure, decorate=False):
        # Imported lazily: critic2 is an optional dependency, so an
        # ImportError should only surface if Critic2NN() is actually used.
        from pymatgen.command_line.critic2_caller import Critic2Caller
        if structure == self.__last_structure:
            sg = self.__last_bonded_structure
        else:
            sg = Critic2Caller(structure).output.structure_graph()
            self.__last_structure = structure
            self.__last_bonded_structure = sg
        if decorate:
            order_parameters = [self.get_local_order_parameters(structure, n)
                                for n in range(len(structure))]
            sg.structure.add_site_property('order_parameters', order_parameters)
        return sg
    def get_nn_info(self, structure, n):
        bonded = self.get_bonded_structure(structure)
        return [{'site': cs.site,
                 'image': cs.jimage,
                 'weight': cs.weight,
                 'site_index': cs.index}
                for cs in bonded.get_connected_sites(n)]
| nisse3000/pymatgen | pymatgen/analysis/local_env.py | Python | mit | 125,931 | [
"Gaussian",
"Jmol",
"VASP",
"pymatgen"
] | 693060442095191cdf7925b16ec4ad0bff07e0d73491df4ff707ea696ed2072a |
from setuptools import setup
setup(
name='trefoil',
version='0.3.2',
packages=['trefoil',
'trefoil.analysis', 'trefoil.cli',
'trefoil.geometry', 'trefoil.geometry.tests',
'trefoil.netcdf', 'trefoil.render',
'trefoil.render.renderers', 'trefoil.render.renderers.tests',
'trefoil.utilities', 'trefoil.utilities.tests',
# for temporary backward compatibility only! Will be removed in near future
'clover'],
url='https://github.com/consbio/trefoil',
license='see LICENSE',
author='databasin',
author_email='databasinadmin@consbio.org',
description='Useful tools for spatial analysis using numpy and NetCDF',
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
install_requires=[
'affine>=1.0',
'click',
'jinja2',
'palettable',
'pytz',
'six',
'fiona>=1.6.0',
'netCDF4>=1.1.1',
'Numpy',
'Pillow>=2.9.0',
'pyproj',
'rasterio>=1.0a12',
],
entry_points='''
[console_scripts]
trefoil=trefoil.cli.main:cli
'''
)
| consbio/clover | setup.py | Python | bsd-3-clause | 1,246 | [
"NetCDF"
] | 71bbd49b891680eb87c631d21b5a90e5aa89d80f1d161434d9b008748e8b6215 |
#!/usr/local/bin/python-2.5/bin/python
Info="""
Module name: cfg2lammps.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@wag.caltech.edu
Project: pEFF
Version: August 2009
Reads in an eff .cfg file and produces the corresponding lammps data and input files
NOTE: Unsupported functions will be reported in the output log
12/2010: Added support for fixed-core and pseudo-core structures
"""
# import essentials:
import sys, os
from math import log10
from shutil import rmtree
from getopt import gnu_getopt as getopt
import numpy
def printHelp():
    """Print the module banner (Info) and the command-line usage string."""
    # Parenthesized single-argument print is valid under both Python 2
    # (where it is an expression) and Python 3 (where it is a call); the
    # original bare ``print`` statements were Python-2 only.
    print(Info)
    print("Usage: python cfg2lammps cfgfile\n")
    return
# LAMMPS input-script template for the common header: units, boundary,
# eff/cut pair style, per-atom stress and eFF temperature/pressure computes,
# and thermo output. Filled with (date, name, boundary, cutoff, period).
general="""
# Created %s
# General parameters
variable sname index %s
log ${sname}.log
units electron
newton on
boundary %s
atom_style electron
read_data data.${sname}
pair_style eff/cut %s
pair_coeff * *
compute energies all pair eff/cut
variable eke equal c_energies[1]
variable epauli equal c_energies[2]
variable estatics equal c_energies[3]
variable errestrain equal c_energies[4]
communicate single vel yes
compute peratom all stress/atom
compute p all reduce sum c_peratom[1] c_peratom[2] c_peratom[3]
variable press equal -(c_p[1]+c_p[2]+c_p[3])/(3*vol)
compute effTemp all temp/eff
compute effPress all pressure effTemp
thermo %s
thermo_style custom step etotal pe ke v_eke v_epauli v_estatics v_errestrain temp press v_press
thermo_modify temp effTemp press effPress
"""
#%(date,name,boundary,cutoff,period)
# Conjugate-gradient minimization section; filled with
# (group, period, group, period, iterations, fcalls).
minimize="""
# Minimization
min_style cg
dump 1 %s xyz %s ${sname}.min.xyz
dump 2 %s custom %s ${sname}.min.lammpstrj id type q spin eradius x y z fx fy fz erforce
min_modify line quadratic
minimize 0 1.0e-5 %s %s
undump 1
undump 2
"""
#%(group,period,group,period,iterations,fcalls)
# Single-point energy evaluation (zero-step run).
single_pt="""
# Single point energy
run 0
"""
# Molecular-dynamics section: timestep, fix (ensemble), two dumps, run.
# NOTE(review): dump 2 uses style 'custom' but writes a .xyz file and lists
# no dump attributes in the template -- confirm against the code that fills
# this template before changing anything.
dynamics="""
# %s Dynamics
timestep %s
fix %s
dump 1 %s custom %s ${sname}.%s.lammpstrj id type q spin eradius x y z
dump 2 %s custom %s ${sname}.%s.xyz
run %s
unfix 1
undump 1
undump 2
"""
# Dispatch table from the cfg 'calc' keyword to the matching template.
task={'single_pt':single_pt,'minimize':minimize,'dynamics':dynamics}
# Atomic number (nuclear charge) -> standard atomic mass (amu), H through Ar.
q2m={1:'1.007940',2:'4.002602',3:'6.941000',4:'9.012182',5:'10.811000',6:'12.010700',7:'14.006700',8:'15.999400',
9:'18.9984032',10:'20.179700',11:'22.98976928',12:'24.305000',13:'26.9815386',14:'28.085500',15:'30.973762',
16:'32.065000',17:'35.453000',18:'39.948000'}
def generate_lammps_input(infile):
# Defaults values
ensemble={"nve":"1 %s nve/eff",'nvt':"1 %s nvt/eff %s %s %s %s",'npt':"1 %s npt/eff %s %s %s %s %s %s"}
boundary="f f f"
xbound="-1000.000 1000.0 xlo xhi\n"
ybound="-1000.000 1000.0 ylo yhi\n"
zbound="-1000.000 1000.0 zlo zhi\n"
cutoff=1000.0
period="1"
emass=0
vels=""
datafile=open("data."+infile[:-4],'w')
scriptfile=open("in."+infile[:-4],'w')
print "Reading %s ... [WAIT]"%infile,
fin = open(infile,'r')
lines = fin.xreadlines()
print 7*"\b"+"[DONE]"
numcores=0
numnuclei=0
numelec=0
cores={}
nuclei={}
electrons={}
masses=[]
massstr="Masses\n\n"
types=1
q2type={}
Tflag=False # Default ensemble is NVE
steps='1000'
print "Extracting run parameters from %s ... "%(infile),
for line in lines:
# 1st level keywords
if line.find("@params")==0:
flag='params'
continue
elif line.find("@cores")==0:
flag='cores'
continue
elif line.find("@nuclei")==0:
flag='nuclei'
continue
elif line.find("@electrons")==0:
flag='electrons'
continue
elif line.find("@nuc_velocities")==0:
flag='n_vels'
continue
elif line.find("@elec_velocities")==0:
flag='e_vels'
continue
elif line.find("@nuc_masses")==0:
flag='n_mass'
continue
elif line.find("@elec_masses")==0:
flag='e_mass'
continue
elif line.find("@restraints")==0:
flag='restraints'
continue
# 2nd level keywords
if flag=='params':
if line.find("calc")>=0:
op=line.split()[2]
if line.find("print_every")>=0:
period=line.split()[2]
if line.find("num_steps")>=0:
steps=line.split()[2]
if line.find("min_freeze")>=0:
setforce="velocity\t% set 0.0 0.0 0.0\nfix\tfreeze %s setforce 0.0 0.0 0.0"%(line.split()[2],line.split()[2])
if line.find("thermostat")>=0:
tflag=True
#ensemble="fix\t1 all nvt/eff "
if line.find("start_temperature")>=0:
Tstart=line.split()[2]
#ensemble+=Tstart
if line.find("end_temperature")>=0:
Tstop=line.split()[2]
#ensemble+=Tstop
if line.find("andersen_coupling")>=0 or line.find("nose_hoover_coupling")>=0:
Tdamp=line.split()[2]
#ensemble+=Tdamp
if line.find("dt")>=0:
dt=line.split()[2]
if line.find("electron_mass")>=0:
emass=line.split()[2]
if line.find("adaptive_step_size")>=0:
continue
if line.find("adaptive_energy")>=0:
continue
if line.find("e_field_freq")>=0:
continue
if line.find("e_field_packet_duration")>=0:
continue
if line.find("e_field")>=0:
field=line.split()[2:5]
efield="fix\field all efield %s %s %s"%(field[0],field[1],field[2])
if line.find("e_field_packet_duration")>=0:
continue
if line.find("set_limit")>=0:
continue # need to add this constraint
if line.find("set_limit_stiffness")>=0:
continue
if line.find("output_position")>=0:
dump_pos="dump\t1 all custom %s ${sname}.lammpstrj id type q spin eradius x y z "%(period)
if line.find("output_velocities")>=0:
dump_pos+="vx vy vz "
if line.find("output_energy_forces")>=0:
dump_pos="compute\tenergy all pe/atom\n"+dump_pos
dump_pos+="c_energy fx fy fz\n"
if line.find("output_restart")>=0:
restart="restart\t%s ${sname}.restart1 ${sname}.restart2"%(period)
if line.find("output_restraints")>=0:
continue
if line.find("ewald_re_cutoff")>=0 or line.find("ewald_autoset")>=0 or line.find("ewald_log_precision")>=0 or line.find("ewald_max_re")>=0 or \
line.find("ewald_r_cutoff")>=0 or line.find("ewald_k_cutoff")>=0 or line.find("ewald_nuc_r")>=0:
continue
if line.find("periodic")>=0:
bounds=line.split()[2]
if bounds=="True": boundary="p p p"
elif bounds=="minimage_x": boundary="p f f"
elif bounds=="minimage_xy": boundary="p p f"
elif bounds=="minimage_y": boundary="f p f"
elif bounds=="minimage_xyz": boundary="p p p"
elif bounds=="minimage_z": boundary="f f p"
if line.find("x_bound")>=0:
xbnds=line.split()[2:4]
xbound="%s %s xlo xhi\n"%(xbnds[0],xbnds[1])
if line.find("y_bound")>=0:
ybnds=line.split()[2:4]
ybound="%s %s ylo yhi\n"%(ybnds[0],ybnds[1])
if line.find("z_bound")>=0:
zbnds=line.split()[2:4]
zbound="%s %s zlo zhi\n"%(zbnds[0],zbnds[1])
if line.find("taper_cutoff")>=0:
cutoff=line.split()[2]
continue
if flag=='cores' and len(line)>1:
numcores+=1
ln=line.split()
nc=' '.join(ln[0:3])
q=ln[3]
spin='3'
radius=ln[4]
m=q2m[int(float(q))]
if m not in masses:
masses.append(m)
massstr+="%d %s\n"%(types,m)
q2type[q]=types
types+=1
cores[numcores]=[nc,q,spin,radius]
continue
if flag=='nuclei' and len(line)>1:
numnuclei+=1
ln=line.split()
np=' '.join(ln[0:3])
q=ln[3]
m=q2m[int(float(q))]
if m not in masses:
masses.append(m)
massstr+="%d %s\n"%(types,m)
q2type[q]=types
types+=1
nuclei[numnuclei]=[np,q]
continue
if flag=='electrons' and len(line)>1:
numelec+=1
ln=line.split()
ep=' '.join(ln[0:3])
spin=ln[3]
radius=ln[4]
electrons[numelec]=[ep,spin,radius]
if numelec==1:
if emass!=0: massstr+="%d %s\n\n"%(types,emass) # electron mass=1
else: massstr+="%d 1.000000\n\n"%(types)
continue
if flag=='n_vels' and len(line)>1:
vels+=line+" 0.0"
continue
if flag=='e_vels' and len(line)>1:
ln=line.split()
ln[0]=ln[0]+numnuclei
vels+=ln[0]+" "+ln[1]+" "+ln[2]+" "+ln[3]+" "+ln[4]+"\n"
continue
if flag=='n_mass' and len(line)>1:
print "Setting nuclear masses is unsupported\n"
continue
if flag=='e_mass' and len(line)>1:
print "Setting electron masses is unsupported\n"
continue
print "\bDone"
# Build data file
print "Writing datafile to %s ... "%('data.'+infile),
sys.stdout.flush()
print "\b"*19+"General section ",
datafile.writelines("Created using cfg2lammps (c) AJB-2009\n\n%d atoms\n%d atom types\n\n%s%s%s\n"%(numcores+numnuclei+numelec,types,xbound,ybound,zbound))
print "\b"*19+"Masses section ",
datafile.writelines(massstr)
print "\b"*19+"Atoms section ",
datafile.writelines("Atoms\n\n")
for n in range(numcores):
datafile.writelines("%d %d %2.2f %s %s %s\n"%(n+1,q2type[cores[n+1][1]],float(cores[n+1][1]),cores[n+1][2],cores[n+1][3],cores[n+1][0]))
for n in range(numnuclei):
datafile.writelines("%d %d %2.2f 0 0.0 %s\n"%(n+numcores+1,q2type[nuclei[n+1][1]],float(nuclei[n+1][1]),nuclei[n+1][0]))
for e in range(numelec):
datafile.write("%d %d 0.0 %s %s %s\n"%(e+numnuclei+numcores+1,types,electrons[e+1][1],electrons[e+1][2],electrons[e+1][0]))
print "\b"*19+"Velocities section\n",
datafile.writelines(vels)
datafile.writelines("\n")
print "DONE .... GOODBYE !!"
datafile.close()
# Build input script
import datetime
scriptfile.writelines(general%(datetime.date.today(),infile[:-4],boundary,cutoff,period))
if op=='minimize':
scriptfile.writelines(minimize%('all',period,'all',period,steps,'10000'))
#%(group,period,group,period,iterations,fcalls)
elif op=='single_pt':
scriptfile.writelines(single_pt%())
elif op=='dynamics':
if Tflag==True:
scriptfile.writelines(dynamics%('NVT',dt,ensemble['nvt']%('all',Tstart,Tstop,Tdamp,''),'all',period,'nvt','all',period,'nve',steps))
#%(ensemble,dt,group,ensemble%(group,tstart,tstop,tdamp,options))
else:
scriptfile.writelines(dynamics%('NVE',dt,ensemble['nve']%('all'),'all',period,'nve','all',period,'nve',steps))
#%(ensemble,dt,group,ensemble%(group))
scriptfile.writelines("\n")
if __name__ == '__main__':
    # set defaults
    # check for input:
    opts, argv = getopt(sys.argv[1:], 'h')
    # if no input, print help and exit
    if len(argv) != 1:
        printHelp()
        sys.exit(1)
    else:
        infile=argv[0]
    # read options
    for opt, arg in opts:
        if opt == '-h': # -h: print help
            printHelp()
            # FIX: previously fell through after printing help and still ran
            # the conversion; -h should only print usage and stop.
            sys.exit(0)
    generate_lammps_input(infile)
| quang-ha/lammps | tools/eff/cfg2lammps.py | Python | gpl-2.0 | 11,564 | [
"LAMMPS"
] | 1754ffc6bc523a1c0656d77384dc1f46bfcc315ad04bd52f6f7d9d851cb63306 |
##############################################################################
# adaptiveMD: A Python Framework to Run Adaptive Molecular Dynamics (MD)
# Simulations on HPC Resources
# Copyright 2017 FU Berlin and the Authors
#
# Authors: Jan-Hendrik Prinz
# Contributors:
#
# `adaptiveMD` is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import pyemma.coordinates as coor
import pyemma.msm as msm
import argparse
from sys import exit
from pyemma import config
import ujson
import logging
logging.disable(logging.CRITICAL)
##############################################################################
# this is only a stub and not used. If you want to create a pyemma script
# start here
##############################################################################
if __name__ == '__main__':
    # Command-line driver: featurize trajectories, run TICA + k-means,
    # estimate an MSM, then emit the results as a JSON blob on stdout.
    parser = argparse.ArgumentParser(
        description='Analyze a number of files and compute an MSM')
    parser.add_argument(
        'file',
        metavar='input.dcd',
        help='the output .dcd file',
        type=str, nargs='+')
    parser.add_argument(
        '-c', '--tica-lagtime', dest='tica_lag',
        type=int, default=2, nargs='?',
        help='the lagtime used for tica')
    # NOTE(review): the help strings for -d and -s below look copy-pasted
    # from -c; they actually control the TICA dimension and the clustering
    # stride -- confirm before relying on --help output.
    parser.add_argument(
        '-d', '--tica-dimensions', dest='tica_dim',
        type=int, default=2, nargs='?',
        help='the lagtime used for tica')
    parser.add_argument(
        '-s', '--stride', dest='stride',
        type=int, default=1, nargs='?',
        help='the lagtime used for tica')
    parser.add_argument(
        '-l', '--msm-lagtime', dest='msm_lag',
        type=int, default=2, nargs='?',
        help='the lagtime used for the final msm')
    parser.add_argument(
        '-k', '--msm-states', dest='msm_states',
        type=int, default=5, nargs='?',
        help='number of k means centers and number of msm states')
    parser.add_argument(
        '-t', '--topology', dest='topology_pdb',
        type=str, default='topology.pdb', nargs='?',
        help='the path to the topology.pdb file')
    parser.add_argument(
        '-v', '--verbose',
        dest='verbose', action='store_true',
        default=False,
        help='if set then text output is send to the ' +
             'console.')
    args = parser.parse_args()
    # Load files / replace by linked files
    trajfiles = args.file
    topfile = args.topology_pdb
    # Choose parameters to be used in the task
    config.show_progress_bars = False
    lag = args.tica_lag
    # Backbone torsions are the only features used for dimension reduction.
    feat = coor.featurizer(topfile)
    feat.add_backbone_torsions()
    inp = coor.source(trajfiles, feat)
    dim = args.tica_dim
    tica_obj = coor.tica(inp, lag=lag, dim=dim, kinetic_map=False)
    Y = tica_obj.get_output()
    # Discretize the TICA projection, then estimate the MSM on the
    # resulting discrete trajectories.
    cl = coor.cluster_kmeans(data=Y, k=args.msm_states, stride=args.stride)
    M = msm.estimate_markov_model(cl.dtrajs, args.msm_lag)
    # with open("model.dtraj", "w") as f:
    #     f.write("\n".join(" ".join(map(str, x)) for x in cl.dtrajs))
    #
    # # np.savetxt("model.dtraj", cl.dtrajs, delimiter=" ", fmt='%d')
    # np.savetxt("model.msm", M.P, delimiter=",")
    # Collect everything a caller needs into one JSON-serializable dict;
    # numpy arrays are converted with .tolist() so ujson can encode them.
    data = {
        'input': {
            'frames': inp.n_frames_total(),
            'dimension': inp.dimension(),
            'trajectories': inp.number_of_trajectories(),
            'lengths': inp.trajectory_lengths().tolist(),
        },
        'tica': {
            'dimension': tica_obj.dimension()
        },
        'clustering': {
            'dtrajs': [
                t.tolist() for t in cl.dtrajs
            ]
        },
        'msm': {
            'P': M.P.tolist()
        }
    }
    # Results go to stdout; the consumer parses this JSON.
    print ujson.dumps(data)
    exit(0)
| thempel/adaptivemd | adaptivemd/analysis/pyemma/msmanalyze.py | Python | lgpl-2.1 | 4,318 | [
"MDTraj"
] | 490ac1235c4032014626189bf910e7887f8788ea19776bbd90863ba5a2878b11 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import sys
import os
import socket
import glob
import shutil
import llnl.util.tty as tty
from os import environ as env
def cmake_cache_entry(name, value, vtype=None):
    """
    Build one CMake cache ``set()`` line for a 'host-config' file.

    When no explicit cache type is given, values that are exactly
    "ON"/"OFF" are written as BOOL entries and everything else as PATH.
    """
    if vtype is None:
        vtype = "BOOL" if value in ("ON", "OFF") else "PATH"
    return 'set({0} "{1}" CACHE {2} "")\n\n'.format(name, value, vtype)
class Ascent(Package, CudaPackage):
    """Ascent is an open source many-core capable lightweight in situ
    visualization and analysis infrastructure for multi-physics HPC
    simulations."""

    homepage = "https://github.com/Alpine-DAV/ascent"
    git = "https://github.com/Alpine-DAV/ascent.git"
    url = "https://github.com/Alpine-DAV/ascent/releases/download/v0.5.1/ascent-v0.5.1-src-with-blt.tar.gz"

    maintainers = ['cyrush']

    version('develop',
            branch='develop',
            submodules=True,
            preferred=True)

    ###########################################################################
    # package variants
    ###########################################################################
    variant("shared", default=True, description="Build Ascent as shared libs")
    variant('test', default=True, description='Enable Ascent unit tests')
    variant("mpi", default=True, description="Build Ascent MPI Support")
    variant("serial", default=True, description="build serial (non-mpi) libraries")
    # variants for language support
    variant("python", default=False, description="Build Ascent Python support")
    variant("fortran", default=True, description="Build Ascent Fortran support")
    # variants for runtime features
    variant("vtkh", default=True,
            description="Build VTK-h filter and rendering support")
    variant("openmp", default=(sys.platform != 'darwin'),
            description="build openmp support")
    variant("cuda", default=False, description="Build cuda support")
    variant("mfem", default=False, description="Build MFEM filter support")
    variant("adios", default=False, description="Build Adios filter support")
    variant("dray", default=False, description="Build with Devil Ray support")
    # variants for dev-tools (docs, etc)
    variant("doc", default=False, description="Build Ascent's documentation")
    # variant for BabelFlow runtime
    variant("babelflow", default=False, description="Build with BabelFlow")

    ##########################################################################
    # package dependencies
    ###########################################################################
    # use cmake 3.14, newest that provides proper cuda support
    # and we have seen errors with cuda in 3.15
    depends_on("cmake@3.14.1:3.14.99", type='build')
    depends_on("conduit~python", when="~python")
    depends_on("conduit+python", when="+python+shared")
    depends_on("conduit~shared~python", when="~shared")
    depends_on("conduit~python~mpi", when="~python~mpi")
    depends_on("conduit+python~mpi", when="+python+shared~mpi")
    depends_on("conduit~shared~python~mpi", when="~shared~mpi")

    #######################
    # Python
    #######################
    # we need a shared version of python b/c linking with static python lib
    # causes duplicate state issues when running compiled python modules.
    depends_on("python+shared", when="+python+shared")
    extends("python", when="+python+shared")
    depends_on("py-numpy", when="+python+shared", type=('build', 'run'))
    depends_on("py-pip", when="+python+shared", type=('build', 'run'))

    #######################
    # MPI
    #######################
    depends_on("mpi", when="+mpi")
    depends_on("py-mpi4py", when="+mpi+python+shared")

    #######################
    # BabelFlow
    #######################
    depends_on('babelflow@develop', when='+babelflow+mpi')
    depends_on('parallelmergetree@develop', when='+babelflow+mpi')

    #############################
    # TPLs for Runtime Features
    #############################
    depends_on("vtk-h", when="+vtkh")
    depends_on("vtk-h~openmp", when="+vtkh~openmp")
    depends_on("vtk-h+cuda+openmp", when="+vtkh+cuda+openmp")
    depends_on("vtk-h+cuda~openmp", when="+vtkh+cuda~openmp")
    depends_on("vtk-h~shared", when="~shared+vtkh")
    depends_on("vtk-h~shared~openmp", when="~shared+vtkh~openmp")
    depends_on("vtk-h~shared+cuda", when="~shared+vtkh+cuda")
    depends_on("vtk-h~shared+cuda~openmp", when="~shared+vtkh+cuda~openmp")
    # mfem
    depends_on("mfem~threadsafe~openmp+shared+mpi+conduit", when="+shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp~shared+mpi+conduit", when="~shared+mfem+mpi")
    depends_on("mfem~threadsafe~openmp+shared~mpi+conduit", when="+shared+mfem~mpi")
    depends_on("mfem~threadsafe~openmp~shared~mpi+conduit", when="~shared+mfem~mpi")
    depends_on("adios", when="+adios")
    # devil ray variants with mpi
    # we have to specify both because mfem makes us
    depends_on("dray@develop+mpi~test~utils+shared+cuda", when="+dray+mpi+cuda+shared")
    depends_on("dray@develop+mpi~test~utils+shared+openmp", when="+dray+mpi+openmp+shared")
    depends_on("dray@develop+mpi~test~utils+shared~openmp~cuda", when="+dray+mpi~openmp~cuda+shared")
    depends_on("dray@develop+mpi~test~utils~shared+cuda", when="+dray+mpi+cuda~shared")
    depends_on("dray@develop+mpi~test~utils~shared+openmp", when="+dray+mpi+openmp~shared")
    depends_on("dray@develop+mpi~test~utils~shared~openmp~cuda", when="+dray+mpi~openmp~cuda~shared")
    # devil ray variants without mpi
    depends_on("dray@develop~mpi~test~utils+shared+cuda", when="+dray~mpi+cuda+shared")
    depends_on("dray@develop~mpi~test~utils+shared+openmp", when="+dray~mpi+openmp+shared")
    depends_on("dray@develop~mpi~test~utils+shared~openmp~cuda", when="+dray~mpi~openmp~cuda+shared")
    depends_on("dray@develop~mpi~test~utils~shared+cuda", when="+dray~mpi+cuda~shared")
    depends_on("dray@develop~mpi~test~utils~shared+openmp", when="+dray~mpi+openmp~shared")
    depends_on("dray@develop~mpi~test~utils~shared~openmp~cuda", when="+dray~mpi~openmp~cuda~shared")

    #######################
    # Documentation related
    #######################
    depends_on("py-sphinx", when="+python+doc", type='build')
    depends_on("py-sphinx-rtd-theme", when="+python+doc", type='build')

    def setup_build_environment(self, env):
        """Make ctest print failing test output in the build log."""
        env.set('CTEST_OUTPUT_ON_FAILURE', '1')

    def install(self, spec, prefix):
        """
        Build and install Ascent.
        """
        with working_dir('spack-build', create=True):
            py_site_pkgs_dir = None
            if "+python" in spec:
                py_site_pkgs_dir = site_packages_dir

            host_cfg_fname = self.create_host_config(spec,
                                                     prefix,
                                                     py_site_pkgs_dir)
            cmake_args = []
            # if we have a static build, we need to avoid any of
            # spack's default cmake settings related to rpaths
            # (see: https://github.com/LLNL/spack/issues/2658)
            if "+shared" in spec:
                cmake_args.extend(std_cmake_args)
            else:
                for arg in std_cmake_args:
                    if arg.count("RPATH") == 0:
                        cmake_args.append(arg)
            cmake_args.extend(["-C", host_cfg_fname, "../src"])
            print("Configuring Ascent...")
            cmake(*cmake_args)
            print("Building Ascent...")
            make()
            # run unit tests if requested
            if "+test" in spec and self.run_tests:
                print("Running Ascent Unit Tests...")
                make("test")
            print("Installing Ascent...")
            make("install")
            # install copy of host config for provenance
            install(host_cfg_fname, prefix)

    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_install(self):
        """
        Checks the spack install of ascent using ascent's
        using-with-cmake example
        """
        print("Checking Ascent installation...")
        spec = self.spec
        install_prefix = spec.prefix
        example_src_dir = join_path(install_prefix,
                                    "examples",
                                    "ascent",
                                    "using-with-cmake")
        print("Checking using-with-cmake example...")
        with working_dir("check-ascent-using-with-cmake-example",
                         create=True):
            cmake_args = ["-DASCENT_DIR={0}".format(install_prefix),
                          "-DCONDUIT_DIR={0}".format(spec['conduit'].prefix),
                          "-DVTKM_DIR={0}".format(spec['vtk-m'].prefix),
                          "-DVTKH_DIR={0}".format(spec['vtk-h'].prefix),
                          example_src_dir]
            cmake(*cmake_args)
            make()
            example = Executable('./ascent_render_example')
            example()
        print("Checking using-with-make example...")
        example_src_dir = join_path(install_prefix,
                                    "examples",
                                    "ascent",
                                    "using-with-make")
        example_files = glob.glob(join_path(example_src_dir, "*"))
        with working_dir("check-ascent-using-with-make-example",
                         create=True):
            for example_file in example_files:
                shutil.copy(example_file, ".")
            make("ASCENT_DIR={0}".format(install_prefix))
            example = Executable('./ascent_render_example')
            example()

    def create_host_config(self, spec, prefix, py_site_pkgs_dir=None):
        """
        This method creates a 'host-config' file that specifies
        all of the options used to configure and build ascent.

        For more details about 'host-config' files see:
            http://ascent.readthedocs.io/en/latest/BuildingAscent.html

        Note:
          The `py_site_pkgs_dir` arg exists to allow a package that
          subclasses this package provide a specific site packages
          dir when calling this function. `py_site_pkgs_dir` should
          be an absolute path or `None`.

          This is necessary because the spack `site_packages_dir`
          var will not exist in the base class. For more details
          on this issue see: https://github.com/spack/spack/issues/6261
        """

        #######################
        # Compiler Info
        #######################
        c_compiler = env["SPACK_CC"]
        cpp_compiler = env["SPACK_CXX"]
        f_compiler = None
        if self.compiler.fc:
            # even if this is set, it may not exist so do one more sanity check
            f_compiler = env["SPACK_FC"]

        #######################################################################
        # By directly fetching the names of the actual compilers we appear
        # to doing something evil here, but this is necessary to create a
        # 'host config' file that works outside of the spack install env.
        #######################################################################
        sys_type = spec.architecture
        # if on llnl systems, we can use the SYS_TYPE
        if "SYS_TYPE" in env:
            sys_type = env["SYS_TYPE"]

        ##############################################
        # Find and record what CMake is used
        ##############################################
        if "+cmake" in spec:
            cmake_exe = spec['cmake'].command.path
        else:
            cmake_exe = which("cmake")
            if cmake_exe is None:
                msg = 'failed to find CMake (and cmake variant is off)'
                raise RuntimeError(msg)
            cmake_exe = cmake_exe.path

        host_cfg_fname = "%s-%s-%s-ascent.cmake" % (socket.gethostname(),
                                                    sys_type,
                                                    spec.compiler)

        cfg = open(host_cfg_fname, "w")
        cfg.write("##################################\n")
        cfg.write("# spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.write("# {0}-{1}\n".format(sys_type, spec.compiler))
        cfg.write("##################################\n\n")

        # Include path to cmake for reference
        cfg.write("# cmake from spack \n")
        cfg.write("# cmake executable path: %s\n\n" % cmake_exe)

        #######################
        # Compiler Settings
        #######################
        cfg.write("#######\n")
        cfg.write("# using %s compiler spec\n" % spec.compiler)
        cfg.write("#######\n\n")
        cfg.write("# c compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_C_COMPILER", c_compiler))
        cfg.write("# cpp compiler used by spack\n")
        cfg.write(cmake_cache_entry("CMAKE_CXX_COMPILER", cpp_compiler))
        cfg.write("# fortran compiler used by spack\n")
        if "+fortran" in spec and f_compiler is not None:
            cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "ON"))
            cfg.write(cmake_cache_entry("CMAKE_Fortran_COMPILER",
                                        f_compiler))
        else:
            cfg.write("# no fortran compiler found\n\n")
            cfg.write(cmake_cache_entry("ENABLE_FORTRAN", "OFF"))

        # shared vs static libs
        if "+shared" in spec:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "ON"))
        else:
            cfg.write(cmake_cache_entry("BUILD_SHARED_LIBS", "OFF"))

        #######################
        # Unit Tests
        #######################
        if "+test" in spec:
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_TESTS", "OFF"))

        #######################################################################
        # Core Dependencies
        #######################################################################

        #######################
        # Conduit
        #######################
        cfg.write("# conduit from spack \n")
        cfg.write(cmake_cache_entry("CONDUIT_DIR", spec['conduit'].prefix))

        #######################################################################
        # Optional Dependencies
        #######################################################################

        #######################
        # Python
        #######################
        cfg.write("# Python Support\n")
        if "+python" in spec and "+shared" in spec:
            cfg.write("# Enable python module builds\n")
            cfg.write(cmake_cache_entry("ENABLE_PYTHON", "ON"))
            cfg.write("# python from spack \n")
            cfg.write(cmake_cache_entry("PYTHON_EXECUTABLE",
                                        spec['python'].command.path))
            # only set dest python site packages dir if passed
            if py_site_pkgs_dir:
                cfg.write(cmake_cache_entry("PYTHON_MODULE_INSTALL_PREFIX",
                                            py_site_pkgs_dir))
        else:
            cfg.write(cmake_cache_entry("ENABLE_PYTHON", "OFF"))

        if "+doc" in spec and "+python" in spec:
            cfg.write(cmake_cache_entry("ENABLE_DOCS", "ON"))
            cfg.write("# sphinx from spack \n")
            sphinx_build_exe = join_path(spec['py-sphinx'].prefix.bin,
                                         "sphinx-build")
            cfg.write(cmake_cache_entry("SPHINX_EXECUTABLE", sphinx_build_exe))
        else:
            cfg.write(cmake_cache_entry("ENABLE_DOCS", "OFF"))

        #######################
        # Serial
        #######################
        if "+serial" in spec:
            cfg.write(cmake_cache_entry("ENABLE_SERIAL", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_SERIAL", "OFF"))

        #######################
        # MPI
        #######################
        cfg.write("# MPI Support\n")
        if "+mpi" in spec:
            mpicc_path = spec['mpi'].mpicc
            mpicxx_path = spec['mpi'].mpicxx
            mpifc_path = spec['mpi'].mpifc
            # if we are using compiler wrappers on cray systems
            # use those for mpi wrappers, b/c spec['mpi'].mpicxx
            # etc make return the spack compiler wrappers
            # which can trip up mpi detection in CMake 3.14
            if cpp_compiler == "CC":
                mpicc_path = "cc"
                mpicxx_path = "CC"
                mpifc_path = "ftn"
            cfg.write(cmake_cache_entry("ENABLE_MPI", "ON"))
            cfg.write(cmake_cache_entry("MPI_C_COMPILER", mpicc_path))
            cfg.write(cmake_cache_entry("MPI_CXX_COMPILER", mpicxx_path))
            cfg.write(cmake_cache_entry("MPI_Fortran_COMPILER", mpifc_path))
            mpiexe_bin = join_path(spec['mpi'].prefix.bin, 'mpiexec')
            if os.path.isfile(mpiexe_bin):
                # starting with cmake 3.10, FindMPI expects MPIEXEC_EXECUTABLE
                # vs the older versions which expect MPIEXEC
                if self.spec["cmake"].satisfies('@3.10:'):
                    cfg.write(cmake_cache_entry("MPIEXEC_EXECUTABLE",
                                                mpiexe_bin))
                else:
                    cfg.write(cmake_cache_entry("MPIEXEC",
                                                mpiexe_bin))

            ###################################
            # BABELFLOW (also depends on mpi)
            ###################################
            if "+babelflow" in spec:
                cfg.write(cmake_cache_entry("ENABLE_BABELFLOW", "ON"))
                cfg.write(cmake_cache_entry("BabelFlow_DIR",
                                            spec['babelflow'].prefix))
                cfg.write(cmake_cache_entry("PMT_DIR",
                                            spec['parallelmergetree'].prefix))
        else:
            cfg.write(cmake_cache_entry("ENABLE_MPI", "OFF"))

        #######################
        # CUDA
        #######################
        cfg.write("# CUDA Support\n")
        if "+cuda" in spec:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_CUDA", "OFF"))

        if "+openmp" in spec:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "ON"))
        else:
            cfg.write(cmake_cache_entry("ENABLE_OPENMP", "OFF"))

        #######################
        # VTK-h (and deps)
        #######################
        cfg.write("# vtk-h support \n")
        if "+vtkh" in spec:
            cfg.write("# vtk-m from spack\n")
            cfg.write(cmake_cache_entry("VTKM_DIR", spec['vtk-m'].prefix))
            cfg.write("# vtk-h from spack\n")
            cfg.write(cmake_cache_entry("VTKH_DIR", spec['vtk-h'].prefix))
            if "+cuda" in spec:
                cfg.write(cmake_cache_entry("VTKm_ENABLE_CUDA", "ON"))
                cfg.write(cmake_cache_entry("CMAKE_CUDA_HOST_COMPILER",
                                            env["SPACK_CXX"]))
            else:
                cfg.write(cmake_cache_entry("VTKm_ENABLE_CUDA", "OFF"))
        else:
            cfg.write("# vtk-h not built by spack \n")

        #######################
        # MFEM
        #######################
        if "+mfem" in spec:
            cfg.write("# mfem from spack \n")
            cfg.write(cmake_cache_entry("MFEM_DIR", spec['mfem'].prefix))
        else:
            cfg.write("# mfem not built by spack \n")

        #######################
        # Devil Ray
        #######################
        if "+dray" in spec:
            cfg.write("# devil ray from spack \n")
            cfg.write(cmake_cache_entry("DRAY_DIR", spec['dray'].prefix))
        else:
            cfg.write("# devil ray not built by spack \n")

        #######################
        # Adios
        #######################
        cfg.write("# adios support\n")
        if "+adios" in spec:
            cfg.write(cmake_cache_entry("ADIOS_DIR", spec['adios'].prefix))
        else:
            cfg.write("# adios not built by spack \n")

        cfg.write("##################################\n")
        cfg.write("# end spack generated host-config\n")
        cfg.write("##################################\n")
        cfg.close()

        host_cfg_fname = os.path.abspath(host_cfg_fname)
        # FIX: message previously said "conduit host-config" (copy-paste
        # from the conduit package); this file is the *ascent* host-config.
        tty.info("spack generated ascent host-config file: " + host_cfg_fname)
        return host_cfg_fname
| iulian787/spack | var/spack/repos/builtin/packages/ascent/package.py | Python | lgpl-2.1 | 21,094 | [
"VTK"
] | 1c029d7e06c08ae2723115cbb5765ecea3c6a164805f665bb174dc4512f71d2d |
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
import json
from datetime import datetime, timedelta
import ddt
from flaky import flaky
from nose.plugins.attrib import attr
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ...pages.common.auto_auth import AutoAuthPage
from ...pages.common.logout import LogoutPage
from ...pages.lms.course_home import CourseHomePage
from ...pages.lms.courseware import CoursewarePage, CoursewareSequentialTabPage
from ...pages.lms.create_mode import ModeCreationPage
from ...pages.lms.dashboard import DashboardPage
from ...pages.lms.pay_and_verify import FakePaymentPage, FakeSoftwareSecureVerificationPage, PaymentAndVerificationFlow
from ...pages.lms.problem import ProblemPage
from ...pages.lms.progress import ProgressPage
from ...pages.lms.staff_view import StaffCoursewarePage
from ...pages.lms.track_selection import TrackSelectionPage
from ...pages.studio.overview import CourseOutlinePage as StudioCourseOutlinePage
from ..helpers import EventsTestMixin, UniqueCourseTest, auto_auth, create_multiple_choice_problem
@attr(shard=9)
class CoursewareTest(UniqueCourseTest):
    """
    Test courseware.
    """
    # Fixed credentials auto-auth registers for every test in this class.
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        # Build page objects, install a two-section course (one problem per
        # subsection), and register USERNAME for it via auto-auth.
        super(CoursewareTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )
        # Install a course with sections/problems, tabs, updates, and handouts
        self.course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        self.course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 2')
                )
            )
        ).install()
        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

    def _goto_problem_page(self):
        """
        Open problem page with assertion.
        """
        self.courseware_page.visit()
        # Cached so the calling test can keep interacting with the problem.
        self.problem_page = ProblemPage(self.browser)  # pylint: disable=attribute-defined-outside-init
        self.assertEqual(self.problem_page.problem_name, 'Test Problem 1')

    def test_courseware(self):
        """
        Test courseware if recent visited subsection become unpublished.
        """
        # Visit problem page as a student.
        self._goto_problem_page()
        # Logout and login as a staff user.
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        # Visit course outline page in studio.
        self.studio_course_outline.visit()
        # Set release date for subsection in future.
        self.studio_course_outline.change_problem_release_date()
        # Logout and login as a student.
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        # Visit courseware as a student.
        self.courseware_page.visit()
        # Problem name should be "Test Problem 2" because Subsection 1 is
        # now unreleased, so the courseware falls through to Section 2.
        self.assertEqual(self.problem_page.problem_name, 'Test Problem 2')

    def test_course_tree_breadcrumb(self):
        """
        Scenario: Correct course tree breadcrumb is shown.
        Given that I am a registered user
        And I visit my courseware page
        Then I should see correct course tree breadcrumb
        """
        xblocks = self.course_fix.get_nested_xblocks(category="problem")
        # One iteration per installed problem; fixture names are
        # "Test Section N" / "Test Subsection N" / "Test Problem N".
        for index in range(1, len(xblocks) + 1):
            test_section_title = 'Test Section {}'.format(index)
            test_subsection_title = 'Test Subsection {}'.format(index)
            test_unit_title = 'Test Problem {}'.format(index)
            self.course_home_page.visit()
            self.course_home_page.outline.go_to_section(test_section_title, test_subsection_title)
            course_nav = self.courseware_page.nav
            self.assertEqual(course_nav.breadcrumb_section_title, test_section_title)
            self.assertEqual(course_nav.breadcrumb_subsection_title, test_subsection_title)
            self.assertEqual(course_nav.breadcrumb_unit_title, test_unit_title)
@attr(shard=9)
@ddt.ddt
class ProctoredExamTest(UniqueCourseTest):
    """
    Tests for proctored and timed exams: studio configuration, the student
    verification flow, and visibility of exam content after the due date.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(ProctoredExamTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Proctoring must be enabled on the course before the exam settings
        # appear in studio.
        course_fix.add_advanced_settings({
            "enable_proctored_exams": {"value": "true"}
        })
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1')
                )
            )
        ).install()

        # Page objects for the verified-track purchase/verification flow.
        self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
        self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
        self.immediate_verification_page = PaymentAndVerificationFlow(
            self.browser, self.course_id, entry_point='verify-now'
        )
        self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
        self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
        self.dashboard_page = DashboardPage(self.browser)
        self.problem_page = ProblemPage(self.browser)

        # Add a verified mode to the course
        ModeCreationPage(
            self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate',
            min_price=10, suggested_prices='10,20'
        ).visit()

        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

    def _login_as_a_verified_user(self):
        """
        Log in as the student and enroll in the verified track, paying via
        the fake payment page.
        """
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

        # the track selection page cannot be visited. see the other tests to see if any prereq is there.
        # Navigate to the track selection page
        self.track_selection_page.visit()

        # Enter the payment and verification flow by choosing to enroll as verified
        self.track_selection_page.enroll('verified')

        # Proceed to the fake payment page
        self.payment_and_verification_flow.proceed_to_payment()

        # Submit payment
        self.fake_payment_page.submit_payment()

    def _verify_user(self):
        """
        Takes user through the verification flow and then marks the verification as 'approved'.
        """
        # Immediately verify the user
        self.immediate_verification_page.immediate_verification()

        # Take face photo and proceed to the ID photo step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Take ID photo and proceed to the review photos step
        self.payment_and_verification_flow.webcam_capture()
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Submit photos and proceed to the enrollment confirmation step
        self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)

        # Mark the verification as passing.
        verification = FakeSoftwareSecureVerificationPage(self.browser).visit()
        verification.mark_approved()

    def test_can_create_proctored_exam_in_studio(self):
        """
        Given that I am a staff member
        When I visit the course outline page in studio.
        And open the subsection edit dialog
        Then I can view all settings related to Proctored and timed exams
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.assertTrue(self.studio_course_outline.proctoring_items_are_displayed())

    def test_proctored_exam_flow(self):
        """
        Given that I am a staff member on the exam settings section
        select advanced settings tab
        When I Make the exam proctored.
        And I login as a verified student.
        And I verify the user's ID.
        And visit the courseware as a verified student.
        Then I can see an option to take the exam as a proctored exam.
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_advanced_tab()
        self.studio_course_outline.make_exam_proctored()
        LogoutPage(self.browser).visit()
        self._login_as_a_verified_user()
        self._verify_user()
        self.courseware_page.visit()
        self.assertTrue(self.courseware_page.can_start_proctored_exam)

    def _setup_and_take_timed_exam(self, hide_after_due=False):
        """
        Helper to perform the common action "set up a timed exam as staff,
        then take it as student"
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_advanced_tab()
        self.studio_course_outline.make_exam_timed(hide_after_due=hide_after_due)
        LogoutPage(self.browser).visit()
        self._login_as_a_verified_user()
        self.courseware_page.visit()
        self.courseware_page.start_timed_exam()
        self.assertTrue(self.courseware_page.is_timer_bar_present)
        self.courseware_page.stop_timed_exam()
        self.assertTrue(self.courseware_page.has_submitted_exam_message())
        # Leave the session logged out so each test controls who logs in next.
        LogoutPage(self.browser).visit()

    @ddt.data(True, False)
    def test_timed_exam_flow(self, hide_after_due):
        """
        Given that I am a staff member on the exam settings section
        select advanced settings tab
        When I Make the exam timed.
        And I login as a verified student.
        And visit the courseware as a verified student.
        And I start the timed exam
        Then I am taken to the exam with a timer bar showing
        When I finish the exam
        Then I see the exam submitted dialog in place of the exam
        When I log back into studio as a staff member
        And change the problem's due date to be in the past
        And log back in as the original verified student
        Then I see the exam or message in accordance with the hide_after_due setting
        """
        self._setup_and_take_timed_exam(hide_after_due)
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        last_week = (datetime.today() - timedelta(days=7)).strftime("%m/%d/%Y")
        self.studio_course_outline.change_problem_due_date(last_week)
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        # After the due date, the "submitted" message is shown iff the exam
        # was configured to hide after due.
        self.assertEqual(self.courseware_page.has_submitted_exam_message(), hide_after_due)

    def test_masquerade_visibility_override(self):
        """
        Given that a timed exam problem exists in the course
        And a student has taken that exam
        And that exam is hidden to the student
        And I am a staff user masquerading as the student
        Then I should be able to see the exam content
        """
        self._setup_and_take_timed_exam()
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.courseware_page.visit()
        staff_page = StaffCoursewarePage(self.browser, self.course_id)
        self.assertEqual(staff_page.staff_view_mode, 'Staff')
        staff_page.set_staff_view_mode_specific_student(self.USERNAME)
        # Masquerading staff should see the content, not the hidden message.
        self.assertFalse(self.courseware_page.has_submitted_exam_message())

    def test_field_visiblity_with_all_exam_types(self):
        """
        Given that I am a staff member
        And I have visited the course outline page in studio.
        And the subsection edit dialog is open
        select advanced settings tab
        For each of None, Timed, Proctored, and Practice exam types
        The time allotted and review rules fields have proper visibility
        None: False, False
        Timed: True, False
        Proctored: True, True
        Practice: True, False
        """
        LogoutPage(self.browser).visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_advanced_tab()

        self.studio_course_outline.select_none_exam()
        self.assertFalse(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_timed_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_proctored_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertTrue(self.studio_course_outline.exam_review_rules_field_visible())

        self.studio_course_outline.select_practice_exam()
        self.assertTrue(self.studio_course_outline.time_allotted_field_visible())
        self.assertFalse(self.studio_course_outline.exam_review_rules_field_visible())
class CoursewareMultipleVerticalsTestBase(UniqueCourseTest, EventsTestMixin):
    """
    Base class with setup for testing courseware with multiple verticals.

    Installs two visible chapters (plus one staff-only chapter) containing a
    mix of problem and html blocks, and registers a student.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(CoursewareMultipleVerticalsTestBase, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.course_home_page = CourseHomePage(self.browser, self.course_id)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                # Subsection 1,1 has four units so navigation tests can step
                # through multiple tabs within one sequential.
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 1', data='<problem>problem 1 dummy body</problem>'),
                    XBlockFixtureDesc('html', 'html 1', data="<html>html 1 dummy body</html>"),
                    XBlockFixtureDesc('problem', 'Test Problem 2', data="<problem>problem 2 dummy body</problem>"),
                    XBlockFixtureDesc('html', 'html 2', data="<html>html 2 dummy body</html>"),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 1,2').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 3', data='<problem>problem 3 dummy body</problem>'),
                ),
                # Hidden content should never be reachable by the student.
                XBlockFixtureDesc(
                    'sequential', 'Test HIDDEN Subsection', metadata={'visible_to_staff_only': True}
                ).add_children(
                    XBlockFixtureDesc('problem', 'Test HIDDEN Problem', data='<problem>hidden problem</problem>'),
                ),
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2,1').add_children(
                    XBlockFixtureDesc('problem', 'Test Problem 4', data='<problem>problem 4 dummy body</problem>'),
                ),
            ),
            XBlockFixtureDesc('chapter', 'Test HIDDEN Section', metadata={'visible_to_staff_only': True}).add_children(
                XBlockFixtureDesc('sequential', 'Test HIDDEN Subsection'),
            ),
        ).install()

        # Auto-auth register for the course.
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=False).visit()
@attr(shard=9)
class CoursewareMultipleVerticalsTest(CoursewareMultipleVerticalsTestBase):
    """
    Test courseware navigation across multiple verticals, including the
    analytics events emitted while navigating.
    """

    @flaky  # PLAT-1198; should be fixed, but verify that failures stop before removing
    def test_navigation_buttons(self):
        """Step through the sequence with next/previous and check emitted events."""
        self.courseware_page.visit()

        # start in first section
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 0, next_enabled=True, prev_enabled=False)

        # next takes us to next tab in sequential
        self.courseware_page.click_next_button_on_top()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 1, next_enabled=True, prev_enabled=True)

        # go to last sequential position
        self.courseware_page.go_to_sequential_position(4)
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)

        # next takes us to next sequential
        self.courseware_page.click_next_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)

        # next takes us to next chapter
        self.courseware_page.click_next_button_on_top()
        self.assert_navigation_state('Test Section 2', 'Test Subsection 2,1', 0, next_enabled=False, prev_enabled=True)

        # previous takes us to previous chapter
        self.courseware_page.click_previous_button_on_top()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,2', 0, next_enabled=True, prev_enabled=True)

        # previous takes us to last tab in previous sequential
        self.courseware_page.click_previous_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 3, next_enabled=True, prev_enabled=True)

        # previous takes us to previous tab in sequential
        self.courseware_page.click_previous_button_on_bottom()
        self.assert_navigation_state('Test Section 1', 'Test Subsection 1,1', 2, next_enabled=True, prev_enabled=True)

        # test UI events emitted by navigation
        def filter_sequence_ui_event(event):
            """Keep only the edx.ui.lms.sequence.* events."""
            return event.get('name', '').startswith('edx.ui.lms.sequence.')

        sequence_ui_events = self.wait_for_events(event_filter=filter_sequence_ui_event, timeout=2)
        legacy_events = [ev for ev in sequence_ui_events if ev['event_type'] in {'seq_next', 'seq_prev', 'seq_goto'}]
        nonlegacy_events = [ev for ev in sequence_ui_events if ev not in legacy_events]

        # Legacy events carry 'old'/'new' position fields; the new-style
        # events must not.
        self.assertTrue(all('old' in json.loads(ev['event']) for ev in legacy_events))
        self.assertTrue(all('new' in json.loads(ev['event']) for ev in legacy_events))
        self.assertFalse(any('old' in json.loads(ev['event']) for ev in nonlegacy_events))
        self.assertFalse(any('new' in json.loads(ev['event']) for ev in nonlegacy_events))

        self.assert_events_match(
            [
                {
                    'event_type': 'seq_next',
                    'event': {
                        'old': 1,
                        'new': 2,
                        'current_tab': 1,
                        'tab_count': 4,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'seq_goto',
                    'event': {
                        'old': 2,
                        'new': 4,
                        'current_tab': 2,
                        'target_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.next_selected',
                    'event': {
                        'current_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'bottom',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.next_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.previous_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'top',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.sequence.previous_selected',
                    'event': {
                        'current_tab': 1,
                        'tab_count': 1,
                        'widget_placement': 'bottom',
                    }
                },
                {
                    'event_type': 'seq_prev',
                    'event': {
                        'old': 4,
                        'new': 3,
                        'current_tab': 4,
                        'tab_count': 4,
                        'widget_placement': 'bottom',
                    }
                },
            ],
            sequence_ui_events
        )

    # TODO: TNL-6546: Delete this whole test if these events are going away(?)
    def test_outline_selected_events(self):
        """Navigating via the course outline emits outline.selected events."""
        self.courseware_page.visit()
        self.courseware_page.nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
        self.courseware_page.nav.go_to_section('Test Section 2', 'Test Subsection 2,1')

        # test UI events emitted by navigating via the course outline
        def filter_selected_events(event):
            """Keep only the outline.selected events."""
            return event.get('name', '') == 'edx.ui.lms.outline.selected'

        selected_events = self.wait_for_events(event_filter=filter_selected_events, timeout=2)

        # note: target_url is tested in unit tests, as the url changes here with every test (it includes GUIDs).
        self.assert_events_match(
            [
                {
                    'event_type': 'edx.ui.lms.outline.selected',
                    'name': 'edx.ui.lms.outline.selected',
                    'event': {
                        'target_name': 'Test Subsection 1,2 ',
                        'widget_placement': 'accordion',
                    }
                },
                {
                    'event_type': 'edx.ui.lms.outline.selected',
                    'name': 'edx.ui.lms.outline.selected',
                    'event': {
                        'target_name': 'Test Subsection 2,1 ',
                        'widget_placement': 'accordion',
                    }
                },
            ],
            selected_events
        )

    # TODO: Delete as part of TNL-6546 / LEARNER-71
    def test_link_clicked_events(self):
        """
        Given that I am a user in the courseware
        When I navigate via the left-hand nav
        Then a link clicked event is logged
        """
        self.courseware_page.visit()
        self.courseware_page.nav.go_to_section('Test Section 1', 'Test Subsection 1,2')
        self.courseware_page.nav.go_to_section('Test Section 2', 'Test Subsection 2,1')

        def filter_link_clicked(event):
            """Keep only the link_clicked events."""
            return event.get('name', '') == 'edx.ui.lms.link_clicked'

        link_clicked_events = self.wait_for_events(event_filter=filter_link_clicked, timeout=2)
        self.assertEqual(len(link_clicked_events), 2)

    def assert_navigation_state(
            self, section_title, subsection_title, subsection_position, next_enabled, prev_enabled
    ):
        """
        Verifies that the navigation state is as expected.

        ``subsection_position`` is the zero-based tab index within the
        current sequential.
        """
        self.assertTrue(self.courseware_page.nav.is_on_section(section_title, subsection_title))
        # assertEquals is a deprecated unittest alias (removed in Python 3.12);
        # use assertEqual.
        self.assertEqual(self.courseware_page.sequential_position, subsection_position)
        self.assertEqual(self.courseware_page.is_next_button_enabled, next_enabled)
        self.assertEqual(self.courseware_page.is_previous_button_enabled, prev_enabled)

    def test_tab_position(self):
        """Test that using the position in the url directs to the correct tab in courseware."""
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1,1')

        # Extract the course/chapter/subsection ids from the current url.
        subsection_url = self.browser.current_url
        url_part_list = subsection_url.split('/')
        course_id = url_part_list[-5]
        chapter_id = url_part_list[-3]
        subsection_id = url_part_list[-2]

        # Positions in the url are 1-based.
        problem1_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=1
        ).visit()
        self.assertIn('problem 1 dummy body', problem1_page.get_selected_tab_content())

        html1_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=2
        ).visit()
        self.assertIn('html 1 dummy body', html1_page.get_selected_tab_content())

        problem2_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=3
        ).visit()
        self.assertIn('problem 2 dummy body', problem2_page.get_selected_tab_content())

        html2_page = CoursewareSequentialTabPage(
            self.browser,
            course_id=course_id,
            chapter=chapter_id,
            subsection=subsection_id,
            position=4
        ).visit()
        self.assertIn('html 2 dummy body', html2_page.get_selected_tab_content())
@attr('a11y')
class CoursewareMultipleVerticalsA11YTest(CoursewareMultipleVerticalsTestBase):
    """
    Test a11y for courseware with multiple verticals
    """

    def test_courseware_a11y(self):
        """
        Run accessibility audit for the problem type.
        """
        self.course_home_page.visit()
        self.course_home_page.outline.go_to_section('Test Section 1', 'Test Subsection 1,1')

        # Set the scope to the sequence navigation
        self.courseware_page.a11y_audit.config.set_scope(
            include=['div.sequence-nav'])

        self.courseware_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=9)
class ProblemStateOnNavigationTest(UniqueCourseTest):
    """
    Test that problem state (submitted/saved/reset answers) survives
    navigating away from and back to a unit in the sequence.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"
    problem1_name = 'MULTIPLE CHOICE TEST PROBLEM 1'
    problem2_name = 'MULTIPLE CHOICE TEST PROBLEM 2'

    def setUp(self):
        super(ProblemStateOnNavigationTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)

        # Install a course with section, tabs and multiple choice problems.
        course_fix = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1,1').add_children(
                    create_multiple_choice_problem(self.problem1_name),
                    create_multiple_choice_problem(self.problem2_name),
                ),
            ),
        ).install()

        # Auto-auth register for the course.
        AutoAuthPage(
            self.browser, username=self.USERNAME, email=self.EMAIL,
            course_id=self.course_id, staff=False
        ).visit()

        self.courseware_page.visit()
        self.problem_page = ProblemPage(self.browser)

    def go_to_tab_and_assert_problem(self, position, problem_name):
        """
        Go to sequential tab and assert that we are on problem whose name is given as a parameter.
        Args:
            position: Position of the sequential tab
            problem_name: Name of the problem
        """
        self.courseware_page.go_to_sequential_position(position)
        self.problem_page.wait_for_element_presence(
            self.problem_page.CSS_PROBLEM_HEADER,
            'wait for problem header'
        )
        self.assertEqual(self.problem_page.problem_name, problem_name)

    def test_perform_problem_submit_and_navigate(self):
        """
        Scenario:
        I go to sequential position 1
        Facing problem1, I select 'choice_1'
        Then I click submit button
        Then I go to sequential position 2
        Then I came back to sequential position 1 again
        Facing problem1, I observe the problem1 content is not
        outdated before and after sequence navigation
        """
        # Go to sequential position 1 and assert that we are on problem 1.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)

        # Update problem 1's content state by clicking check button.
        self.problem_page.click_choice('choice_choice_1')
        self.problem_page.click_submit()
        self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')

        # Save problem 1's content state as we're about to switch units in the sequence.
        problem1_content_before_switch = self.problem_page.problem_content
        before_meta = self.problem_page.problem_meta

        # Go to sequential position 2 and assert that we are on problem 2.
        self.go_to_tab_and_assert_problem(2, self.problem2_name)

        # Come back to our original unit in the sequence and assert that the content hasn't changed.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        problem1_content_after_coming_back = self.problem_page.problem_content
        after_meta = self.problem_page.problem_meta

        self.assertEqual(problem1_content_before_switch, problem1_content_after_coming_back)
        self.assertEqual(before_meta, after_meta)

    def test_perform_problem_save_and_navigate(self):
        """
        Scenario:
        I go to sequential position 1
        Facing problem1, I select 'choice_1'
        Then I click save button
        Then I go to sequential position 2
        Then I came back to sequential position 1 again
        Facing problem1, I observe the problem1 content is not
        outdated before and after sequence navigation
        """
        # Go to sequential position 1 and assert that we are on problem 1.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)

        # Update problem 1's content state by clicking save button.
        self.problem_page.click_choice('choice_choice_1')
        self.problem_page.click_save()
        self.problem_page.wait_for_save_notification()

        # Save problem 1's content state as we're about to switch units in the sequence.
        problem1_content_before_switch = self.problem_page.problem_input_content
        before_meta = self.problem_page.problem_meta

        # Go to sequential position 2 and assert that we are on problem 2.
        self.go_to_tab_and_assert_problem(2, self.problem2_name)
        self.problem_page.wait_for_expected_status('span.unanswered', 'unanswered')

        # Come back to our original unit in the sequence and assert that the content hasn't changed.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        problem1_content_after_coming_back = self.problem_page.problem_input_content
        after_meta = self.problem_page.problem_meta

        # Input content is compared with assertIn because the rendered saved
        # state may include extra markup around the original input.
        self.assertIn(problem1_content_after_coming_back, problem1_content_before_switch)
        self.assertEqual(before_meta, after_meta)

    def test_perform_problem_reset_and_navigate(self):
        """
        Scenario:
        I go to sequential position 1
        Facing problem1, I select 'choice_1'
        Then perform the action – check and reset
        Then I go to sequential position 2
        Then I came back to sequential position 1 again
        Facing problem1, I observe the problem1 content is not
        outdated before and after sequence navigation
        """
        # Go to sequential position 1 and assert that we are on problem 1.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)

        # Update problem 1's content state – by performing reset operation.
        self.problem_page.click_choice('choice_choice_1')
        self.problem_page.click_submit()
        self.problem_page.wait_for_expected_status('label.choicegroup_incorrect', 'incorrect')
        self.problem_page.click_reset()
        self.problem_page.wait_for_expected_status('span.unanswered', 'unanswered')

        # Save problem 1's content state as we're about to switch units in the sequence.
        problem1_content_before_switch = self.problem_page.problem_content
        before_meta = self.problem_page.problem_meta

        # Go to sequential position 2 and assert that we are on problem 2.
        self.go_to_tab_and_assert_problem(2, self.problem2_name)

        # Come back to our original unit in the sequence and assert that the content hasn't changed.
        self.go_to_tab_and_assert_problem(1, self.problem1_name)
        problem1_content_after_coming_back = self.problem_page.problem_content
        after_meta = self.problem_page.problem_meta

        self.assertEqual(problem1_content_before_switch, problem1_content_after_coming_back)
        self.assertEqual(before_meta, after_meta)
@attr(shard=9)
class SubsectionHiddenAfterDueDateTest(UniqueCourseTest):
    """
    Tests the "hide after due date" setting for
    subsections.
    """
    USERNAME = "STUDENT_TESTER"
    EMAIL = "student101@example.com"

    def setUp(self):
        super(SubsectionHiddenAfterDueDateTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        self.logout_page = LogoutPage(self.browser)
        self.studio_course_outline = StudioCourseOutlinePage(
            self.browser,
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run']
        )

        # Install a course with sections/problems, tabs, updates, and handouts
        course_fix = CourseFixture(
            self.course_info['org'],
            self.course_info['number'],
            self.course_info['run'],
            self.course_info['display_name']
        )
        course_fix.add_children(
            XBlockFixtureDesc('chapter', 'Test Section 1').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 1').add_children(
                    create_multiple_choice_problem('Test Problem 1')
                )
            )
        ).install()

        self.progress_page = ProgressPage(self.browser, self.course_id)
        # Configure the subsection as staff, then visit it once as the
        # student so a grade exists before the due date passes.
        self._setup_subsection()

        # Auto-auth register for the course.
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)

    def _setup_subsection(self):
        """
        Helper to set up a problem subsection as staff, then take
        it as a student.
        """
        self.logout_page.visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        self.studio_course_outline.open_subsection_settings_dialog()
        self.studio_course_outline.select_visibility_tab()
        self.studio_course_outline.make_subsection_hidden_after_due_date()

        self.logout_page.visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.logout_page.visit()

    def test_subsecton_hidden_after_due_date(self):
        """
        Given that I am a staff member on the subsection settings section
        And I select the advanced settings tab
        When I Make the subsection hidden after its due date.
        And I login as a student.
        And visit the subsection in the courseware as a verified student.
        Then I am able to see the subsection
        And when I visit the progress page
        Then I should be able to see my grade on the progress page
        When I log in as staff
        And I make the subsection due in the past so that the current date is past its due date
        And I log in as a student
        And I visit the subsection in the courseware
        Then the subsection should be hidden with a message that its due date has passed
        And when I visit the progress page
        Then I should be able to see my grade on the progress page
        """
        # Before the due date: content visible, grade visible.
        self.logout_page.visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.assertFalse(self.courseware_page.content_hidden_past_due_date())

        self.progress_page.visit()
        self.assertEqual(self.progress_page.scores('Test Section 1', 'Test Subsection 1'), [(0, 1)])

        # Move the due date into the past as staff.
        self.logout_page.visit()
        auto_auth(self.browser, "STAFF_TESTER", "staff101@example.com", True, self.course_id)
        self.studio_course_outline.visit()
        last_week = (datetime.today() - timedelta(days=7)).strftime("%m/%d/%Y")
        self.studio_course_outline.change_problem_due_date(last_week)

        # After the due date: content hidden, but the grade is preserved.
        self.logout_page.visit()
        auto_auth(self.browser, self.USERNAME, self.EMAIL, False, self.course_id)
        self.courseware_page.visit()
        self.assertTrue(self.courseware_page.content_hidden_past_due_date())

        self.progress_page.visit()
        self.assertEqual(self.progress_page.scores('Test Section 1', 'Test Subsection 1'), [(0, 1)])
| fintech-circle/edx-platform | common/test/acceptance/tests/lms/test_lms_courseware.py | Python | agpl-3.0 | 40,066 | [
"VisIt"
] | 15d7a8557a3f1387c20696a440e975c8dcbe74345bf9fc466e5b59a2ac084b3c |
"""
Demonstration application for range search using kd tree.
Left mouse adds point.
Right mouse click begins drag of rectangle.
"""
import tkinter
from adk.kd import KDTree, X, Y, VERTICAL
from adk.region import Region, minValue, maxValue
RectangleSize = 4
class KDTreeApp:
    """
    Interactive demo of a kd-tree: left-click adds points, right-click-drag
    selects a rectangular region and highlights the points (and fully-drained
    subtrees) returned by the tree's range query.
    """

    def __init__(self):
        """App for creating KD tree dynamically and executing range queries."""
        self.tree = KDTree()
        self.static = False

        # for range query
        self.selectedRegion = None
        self.queryRect = None

        # "Click To Add Points" placeholder; created lazily by paint() and
        # destroyed by prepare(). Initialized here so prepare()/paint() can
        # safely test it before the first paint.
        self.label = None

        self.master = tkinter.Tk()
        self.master.title('KD Tree Range Query Application')
        self.w = tkinter.Frame(self.master, width=410, height=410)
        self.canvas = tkinter.Canvas(self.w, width=400, height=400)
        self.paint()

        self.canvas.bind("<Button-1>", self.click)
        self.canvas.bind("<Motion>", self.moved)
        self.canvas.bind("<Button-3>", self.range)       # when right mouse clicked
        self.canvas.bind("<ButtonRelease-3>", self.clear)
        self.canvas.bind("<B3-Motion>", self.range)      # only when right mouse dragged
        self.w.pack()

    def toCartesian(self, y):
        """Convert tkinter point into Cartesian (y axis grows upward)."""
        return self.w.winfo_height() - y

    def toTk(self, y):
        """Convert Cartesian into tkinter point, clamping infinite bounds."""
        if y == maxValue:
            return 0
        tk_y = self.w.winfo_height()
        if y != minValue:
            tk_y -= y
        return tk_y

    def clear(self, event):
        """End of range search: drop the selection and repaint."""
        self.selectedRegion = None
        self.paint()

    def range(self, event):
        """Initiate a range search using a selected rectangular region."""
        p = (event.x, self.toCartesian(event.y))

        # Anchor the selection at the first right-click; each drag event
        # extends the region from that anchor to the current pointer.
        if self.selectedRegion is None:
            self.selectedStart = Region(p[X], p[Y], p[X], p[Y])
        self.selectedRegion = self.selectedStart.unionPoint(p)

        self.paint()

        # return (node,status) where status is True if draining entire tree rooted at node. Draw these
        # as shaded red rectangle to identify whole sub-tree is selected.
        for pair in self.tree.range(self.selectedRegion):
            p = pair[0].point
            if pair[1]:
                self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min),
                                             pair[0].region.x_max, self.toTk(pair[0].region.y_max),
                                             fill='Red', stipple='gray12')
            else:
                self.canvas.create_rectangle(p[X] - RectangleSize, self.toTk(p[Y]) - RectangleSize,
                                             p[X] + RectangleSize, self.toTk(p[Y]) + RectangleSize, fill='Red')

        # Dashed outline showing the query rectangle itself.
        self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min),
                                                      self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max),
                                                      outline='Red', dash=(2, 4))

    def moved(self, event):
        """Only here for static option."""
        if self.static:
            self.paint()

    def click(self, event):
        """Add point to KDtree and repaint."""
        p = (event.x, self.toCartesian(event.y))
        self.tree.add(p)
        self.paint()

    def drawPartition(self, r, p, orient):
        """Draw partitioning line and the point itself as a small square."""
        if orient == VERTICAL:
            self.canvas.create_line(p[X], self.toTk(r.y_min), p[X], self.toTk(r.y_max))
        else:
            # Clamp horizontal extent to the visible window for unbounded regions.
            xlow = r.x_min
            if r.x_min <= minValue:
                xlow = 0
            xhigh = r.x_max
            if r.x_max >= maxValue:
                xhigh = self.w.winfo_width()
            self.canvas.create_line(xlow, self.toTk(p[Y]), xhigh, self.toTk(p[Y]))
        self.canvas.create_rectangle(p[X] - RectangleSize, self.toTk(p[Y]) - RectangleSize,
                                     p[X] + RectangleSize, self.toTk(p[Y]) + RectangleSize, fill='Black')

    def visit(self, n):
        """Recursively draw the subtree rooted at n."""
        # BUGFIX: identity comparison (n is None) instead of n == None.
        if n is None:
            return
        self.drawPartition(n.region, n.point, n.orient)
        self.visit(n.below)
        self.visit(n.above)

    def prepare(self, event):
        """Prepare to add points: replace the placeholder label with the canvas."""
        if self.label:
            self.label.destroy()
            self.label = None
        self.canvas.pack()

    def paint(self):
        """Paint KD tree by visiting all nodes, or show introductory message."""
        if self.tree.root:
            self.canvas.delete(tkinter.ALL)
            self.visit(self.tree.root)
        elif self.label is None:
            # BUGFIX: create the placeholder only once; repeated paints with
            # an empty tree previously stacked a new Label on every call.
            self.label = tkinter.Label(self.w, width=100, height=40, text="Click To Add Points")
            self.label.bind("<Button-1>", self.prepare)
            self.label.pack()
if __name__ == "__main__":
app = KDTreeApp()
app.w.mainloop()
| heineman/algorithms-nutshell-2ed | PythonCode/demo/app_kd_range.py | Python | mit | 5,297 | [
"VisIt"
] | b313ebe01dd8a84cfc1090ce0976414859b8ac00bff566bb693fcde61a83cb24 |
from numpy import array_split, concatenate
from pandas import DataFrame
from ._match_randomly_sampled_target_and_data_to_compute_margin_of_errors import (
_match_randomly_sampled_target_and_data_to_compute_margin_of_errors,
)
from ._match_target_and_data import _match_target_and_data
from ._permute_target_and_match_target_and_data import (
_permute_target_and_match_target_and_data,
)
from .compute_empirical_p_values_and_fdrs import compute_empirical_p_values_and_fdrs
from .multiprocess import multiprocess
from .select_series_indices import select_series_indices
def _match(
target,
data,
n_job,
match_function,
n_required_for_match_function,
raise_for_n_less_than_required,
n_extreme,
fraction_extreme,
random_seed,
n_sampling,
n_permutation,
):
score_moe_p_value_fdr = DataFrame(columns=("Score", "0.95 MoE", "P-Value", "FDR"))
n_job = min(data.shape[0], n_job)
print(
"Computing score using {} with {} process ...".format(
match_function.__name__, n_job
)
)
data_split = array_split(data, n_job)
score_moe_p_value_fdr["Score"] = concatenate(
multiprocess(
_match_target_and_data,
(
(
target,
data_,
match_function,
n_required_for_match_function,
raise_for_n_less_than_required,
)
for data_ in data_split
),
n_job,
)
)
if n_extreme is None and fraction_extreme is None:
indices = select_series_indices(
score_moe_p_value_fdr["Score"],
"<>",
n=n_extreme,
fraction=fraction_extreme,
plot=False,
)
score_moe_p_value_fdr.loc[
indices, "0.95 MoE"
] = _match_randomly_sampled_target_and_data_to_compute_margin_of_errors(
target,
data[indices],
random_seed,
n_sampling,
match_function,
n_required_for_match_function,
raise_for_n_less_than_required,
)
score_moe_p_value_fdr[["P-Value", "FDR"]] = compute_empirical_p_values_and_fdrs(
score_moe_p_value_fdr["Score"],
concatenate(
multiprocess(
_permute_target_and_match_target_and_data,
(
(
target,
data_,
random_seed,
n_permutation,
match_function,
n_required_for_match_function,
raise_for_n_less_than_required,
)
for data_ in data_split
),
n_job,
)
).flatten(),
"less_or_great",
raise_for_bad=False,
)
return score_moe_p_value_fdr
| UCSD-CCAL/ccal | ccal/_match.py | Python | mit | 3,073 | [
"MOE"
] | 2117e9089d34a63cd5bf80305b60ed030486f83c13cc2227e8a33bf195647dde |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Matcher classes to be used inside of the testtools assertThat framework."""
import pprint
from lxml import etree
import six
from testtools import content
import testtools.matchers
class DictKeysMismatch(object):
def __init__(self, d1only, d2only):
self.d1only = d1only
self.d2only = d2only
def describe(self):
return ('Keys in d1 and not d2: %(d1only)s.'
' Keys in d2 and not d1: %(d2only)s' %
{'d1only': self.d1only, 'd2only': self.d2only})
def get_details(self):
return {}
class DictMismatch(object):
def __init__(self, key, d1_value, d2_value):
self.key = key
self.d1_value = d1_value
self.d2_value = d2_value
def describe(self):
return ("Dictionaries do not match at %(key)s."
" d1: %(d1_value)s d2: %(d2_value)s" %
{'key': self.key, 'd1_value': self.d1_value,
'd2_value': self.d2_value})
def get_details(self):
return {}
class DictMatches(object):
def __init__(self, d1, approx_equal=False, tolerance=0.001):
self.d1 = d1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictMatches(%s)' % (pprint.pformat(self.d1))
# Useful assertions
def match(self, d2):
"""Assert two dicts are equivalent.
This is a 'deep' match in the sense that it handles nested
dictionaries appropriately.
NOTE:
If you don't care (or don't know) a given value, you can specify
the string DONTCARE as the value. This will cause that dict-item
to be skipped.
"""
d1keys = set(self.d1.keys())
d2keys = set(d2.keys())
if d1keys != d2keys:
d1only = sorted(d1keys - d2keys)
d2only = sorted(d2keys - d1keys)
return DictKeysMismatch(d1only, d2only)
for key in d1keys:
d1value = self.d1[key]
d2value = d2[key]
try:
error = abs(float(d1value) - float(d2value))
within_tolerance = error <= self.tolerance
except (ValueError, TypeError):
# If both values aren't convertible to float, just ignore
# ValueError if arg is a str, TypeError if it's something else
# (like None)
within_tolerance = False
if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
matcher = DictMatches(d1value)
did_match = matcher.match(d2value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (d1value, d2value):
continue
elif self.approx_equal and within_tolerance:
continue
elif d1value != d2value:
return DictMismatch(key, d1value, d2value)
class ListLengthMismatch(object):
def __init__(self, len1, len2):
self.len1 = len1
self.len2 = len2
def describe(self):
return ('Length mismatch: len(L1)=%(len1)d != '
'len(L2)=%(len2)d' % {'len1': self.len1, 'len2': self.len2})
def get_details(self):
return {}
class DictListMatches(object):
def __init__(self, l1, approx_equal=False, tolerance=0.001):
self.l1 = l1
self.approx_equal = approx_equal
self.tolerance = tolerance
def __str__(self):
return 'DictListMatches(%s)' % (pprint.pformat(self.l1))
# Useful assertions
def match(self, l2):
"""Assert a list of dicts are equivalent."""
l1count = len(self.l1)
l2count = len(l2)
if l1count != l2count:
return ListLengthMismatch(l1count, l2count)
for d1, d2 in zip(self.l1, l2):
matcher = DictMatches(d2,
approx_equal=self.approx_equal,
tolerance=self.tolerance)
did_match = matcher.match(d1)
if did_match:
return did_match
class SubDictMismatch(object):
def __init__(self,
key=None,
sub_value=None,
super_value=None,
keys=False):
self.key = key
self.sub_value = sub_value
self.super_value = super_value
self.keys = keys
def describe(self):
if self.keys:
return "Keys between dictionaries did not match"
else:
return("Dictionaries do not match at %s. d1: %s d2: %s"
% (self.key,
self.super_value,
self.sub_value))
def get_details(self):
return {}
class IsSubDictOf(object):
def __init__(self, super_dict):
self.super_dict = super_dict
def __str__(self):
return 'IsSubDictOf(%s)' % (self.super_dict)
def match(self, sub_dict):
"""Assert a sub_dict is subset of super_dict."""
if not set(sub_dict.keys()).issubset(set(self.super_dict.keys())):
return SubDictMismatch(keys=True)
for k, sub_value in sub_dict.items():
super_value = self.super_dict[k]
if isinstance(sub_value, dict):
matcher = IsSubDictOf(super_value)
did_match = matcher.match(sub_value)
if did_match is not None:
return did_match
elif 'DONTCARE' in (sub_value, super_value):
continue
else:
if sub_value != super_value:
return SubDictMismatch(k, sub_value, super_value)
class FunctionCallMatcher(object):
def __init__(self, expected_func_calls):
self.expected_func_calls = expected_func_calls
self.actual_func_calls = []
def call(self, *args, **kwargs):
func_call = {'args': args, 'kwargs': kwargs}
self.actual_func_calls.append(func_call)
def match(self):
dict_list_matcher = DictListMatches(self.expected_func_calls)
return dict_list_matcher.match(self.actual_func_calls)
class XMLMismatch(object):
"""Superclass for XML mismatch."""
def __init__(self, state):
self.path = str(state)
self.expected = state.expected
self.actual = state.actual
def describe(self):
return "%(path)s: XML does not match" % {'path': self.path}
def get_details(self):
return {
'expected': content.text_content(self.expected),
'actual': content.text_content(self.actual),
}
class XMLDocInfoMismatch(XMLMismatch):
"""XML version or encoding doesn't match."""
def __init__(self, state, expected_doc_info, actual_doc_info):
super(XMLDocInfoMismatch, self).__init__(state)
self.expected_doc_info = expected_doc_info
self.actual_doc_info = actual_doc_info
def describe(self):
return ("%(path)s: XML information mismatch(version, encoding) "
"expected version %(expected_version)s, "
"expected encoding %(expected_encoding)s; "
"actual version %(actual_version)s, "
"actual encoding %(actual_encoding)s" %
{'path': self.path,
'expected_version': self.expected_doc_info['version'],
'expected_encoding': self.expected_doc_info['encoding'],
'actual_version': self.actual_doc_info['version'],
'actual_encoding': self.actual_doc_info['encoding']})
class XMLTagMismatch(XMLMismatch):
"""XML tags don't match."""
def __init__(self, state, idx, expected_tag, actual_tag):
super(XMLTagMismatch, self).__init__(state)
self.idx = idx
self.expected_tag = expected_tag
self.actual_tag = actual_tag
def describe(self):
return ("%(path)s: XML tag mismatch at index %(idx)d: "
"expected tag <%(expected_tag)s>; "
"actual tag <%(actual_tag)s>" %
{'path': self.path, 'idx': self.idx,
'expected_tag': self.expected_tag,
'actual_tag': self.actual_tag})
class XMLAttrKeysMismatch(XMLMismatch):
"""XML attribute keys don't match."""
def __init__(self, state, expected_only, actual_only):
super(XMLAttrKeysMismatch, self).__init__(state)
self.expected_only = ', '.join(sorted(expected_only))
self.actual_only = ', '.join(sorted(actual_only))
def describe(self):
return ("%(path)s: XML attributes mismatch: "
"keys only in expected: %(expected_only)s; "
"keys only in actual: %(actual_only)s" %
{'path': self.path, 'expected_only': self.expected_only,
'actual_only': self.actual_only})
class XMLAttrValueMismatch(XMLMismatch):
"""XML attribute values don't match."""
def __init__(self, state, key, expected_value, actual_value):
super(XMLAttrValueMismatch, self).__init__(state)
self.key = key
self.expected_value = expected_value
self.actual_value = actual_value
def describe(self):
return ("%(path)s: XML attribute value mismatch: "
"expected value of attribute %(key)s: %(expected_value)r; "
"actual value: %(actual_value)r" %
{'path': self.path, 'key': self.key,
'expected_value': self.expected_value,
'actual_value': self.actual_value})
class XMLTextValueMismatch(XMLMismatch):
"""XML text values don't match."""
def __init__(self, state, expected_text, actual_text):
super(XMLTextValueMismatch, self).__init__(state)
self.expected_text = expected_text
self.actual_text = actual_text
def describe(self):
return ("%(path)s: XML text value mismatch: "
"expected text value: %(expected_text)r; "
"actual value: %(actual_text)r" %
{'path': self.path, 'expected_text': self.expected_text,
'actual_text': self.actual_text})
class XMLUnexpectedChild(XMLMismatch):
"""Unexpected child present in XML."""
def __init__(self, state, tag, idx):
super(XMLUnexpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML unexpected child element <%(tag)s> "
"present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLExpectedChild(XMLMismatch):
"""Expected child not present in XML."""
def __init__(self, state, tag, idx):
super(XMLExpectedChild, self).__init__(state)
self.tag = tag
self.idx = idx
def describe(self):
return ("%(path)s: XML expected child element <%(tag)s> "
"not present at index %(idx)d" %
{'path': self.path, 'tag': self.tag, 'idx': self.idx})
class XMLMatchState(object):
"""Maintain some state for matching.
Tracks the XML node path and saves the expected and actual full
XML text, for use by the XMLMismatch subclasses.
"""
def __init__(self, expected, actual):
self.path = []
self.expected = expected
self.actual = actual
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, exc_tb):
self.path.pop()
return False
def __str__(self):
return '/' + '/'.join(self.path)
def node(self, tag, idx):
"""Adds tag and index to the path; they will be popped off when
the corresponding 'with' statement exits.
:param tag: The element tag
:param idx: If not None, the integer index of the element
within its parent. Not included in the path
element if None.
"""
if idx is not None:
self.path.append("%s[%d]" % (tag, idx))
else:
self.path.append(tag)
return self
class XMLMatches(object):
"""Compare XML strings. More complete than string comparison."""
SKIP_TAGS = (etree.Comment, etree.ProcessingInstruction)
def __init__(self, expected, allow_mixed_nodes=False,
skip_empty_text_nodes=True, skip_values=('DONTCARE',)):
self.expected_xml = expected
self.expected = etree.parse(six.StringIO(expected))
self.allow_mixed_nodes = allow_mixed_nodes
self.skip_empty_text_nodes = skip_empty_text_nodes
self.skip_values = set(skip_values)
def __str__(self):
return 'XMLMatches(%r)' % self.expected_xml
def match(self, actual_xml):
actual = etree.parse(six.StringIO(actual_xml))
state = XMLMatchState(self.expected_xml, actual_xml)
expected_doc_info = self._get_xml_docinfo(self.expected)
actual_doc_info = self._get_xml_docinfo(actual)
if expected_doc_info != actual_doc_info:
return XMLDocInfoMismatch(state, expected_doc_info,
actual_doc_info)
result = self._compare_node(self.expected.getroot(),
actual.getroot(), state, None)
if result is False:
return XMLMismatch(state)
elif result is not True:
return result
@staticmethod
def _get_xml_docinfo(xml_document):
return {'version': xml_document.docinfo.xml_version,
'encoding': xml_document.docinfo.encoding}
def _compare_text_nodes(self, expected, actual, state):
expected_text = [expected.text]
expected_text.extend(child.tail for child in expected)
actual_text = [actual.text]
actual_text.extend(child.tail for child in actual)
if self.skip_empty_text_nodes:
expected_text = [text for text in expected_text
if text and not text.isspace()]
actual_text = [text for text in actual_text
if text and not text.isspace()]
if self.skip_values.intersection(
expected_text + actual_text):
return
if self.allow_mixed_nodes:
# lets sort text nodes because they can be mixed
expected_text = sorted(expected_text)
actual_text = sorted(actual_text)
if expected_text != actual_text:
return XMLTextValueMismatch(state, expected_text, actual_text)
def _compare_node(self, expected, actual, state, idx):
"""Recursively compares nodes within the XML tree."""
# Start by comparing the tags
if expected.tag != actual.tag:
return XMLTagMismatch(state, idx, expected.tag, actual.tag)
with state.node(expected.tag, idx):
# Compare the attribute keys
expected_attrs = set(expected.attrib.keys())
actual_attrs = set(actual.attrib.keys())
if expected_attrs != actual_attrs:
expected_only = expected_attrs - actual_attrs
actual_only = actual_attrs - expected_attrs
return XMLAttrKeysMismatch(state, expected_only, actual_only)
# Compare the attribute values
for key in expected_attrs:
expected_value = expected.attrib[key]
actual_value = actual.attrib[key]
if self.skip_values.intersection(
[expected_value, actual_value]):
continue
elif expected_value != actual_value:
return XMLAttrValueMismatch(state, key, expected_value,
actual_value)
# Compare text nodes
text_nodes_mismatch = self._compare_text_nodes(
expected, actual, state)
if text_nodes_mismatch:
return text_nodes_mismatch
# Compare the contents of the node
matched_actual_child_idxs = set()
# first_actual_child_idx - pointer to next actual child
# used with allow_mixed_nodes=False ONLY
# prevent to visit actual child nodes twice
first_actual_child_idx = 0
for expected_child in expected:
if expected_child.tag in self.SKIP_TAGS:
continue
related_actual_child_idx = None
if self.allow_mixed_nodes:
first_actual_child_idx = 0
for actual_child_idx in range(
first_actual_child_idx, len(actual)):
if actual[actual_child_idx].tag in self.SKIP_TAGS:
first_actual_child_idx += 1
continue
if actual_child_idx in matched_actual_child_idxs:
continue
# Compare the nodes
result = self._compare_node(expected_child,
actual[actual_child_idx],
state, actual_child_idx)
first_actual_child_idx += 1
if result is not True:
if self.allow_mixed_nodes:
continue
else:
return result
else: # nodes match
related_actual_child_idx = actual_child_idx
break
if related_actual_child_idx is not None:
matched_actual_child_idxs.add(actual_child_idx)
else:
return XMLExpectedChild(state, expected_child.tag,
actual_child_idx + 1)
# Make sure we consumed all nodes in actual
for actual_child_idx, actual_child in enumerate(actual):
if (actual_child.tag not in self.SKIP_TAGS and
actual_child_idx not in matched_actual_child_idxs):
return XMLUnexpectedChild(state, actual_child.tag,
actual_child_idx)
# The nodes match
return True
class EncodedByUTF8(object):
def match(self, obj):
if isinstance(obj, six.binary_type):
if hasattr(obj, "decode"):
try:
obj.decode("utf-8")
except UnicodeDecodeError:
return testtools.matchers.Mismatch(
"%s is not encoded in UTF-8." % obj)
else:
reason = ("Type of '%(obj)s' is '%(obj_type)s', "
"should be '%(correct_type)s'."
% {
"obj": obj,
"obj_type": type(obj).__name__,
"correct_type": six.binary_type.__name__
})
return testtools.matchers.Mismatch(reason)
| HybridF5/nova | nova/tests/unit/matchers.py | Python | apache-2.0 | 19,775 | [
"VisIt"
] | 005aa1f93d3777c7ee47476571af2276bf5aedb54611f299c3ea0de719bc7b7b |
#!/bin/python2
if __name__ == "__main__": #if run in standalone
from sys import exit
import argparse
from libs.InAndOut import importer, saveKey
parser = argparse.ArgumentParser(description='Offline analysis for\
spiking neural networks')
#Files to open
parser.add_argument('--store-dir',
help = 'Path of the store dir',
dest = 'path'
)
parser.add_argument('--general-hash',
help = 'General hash file. Always needed',
dest = 'general'
)
parser.add_argument('--config-hash',
help = 'Hash of the neural net file that needs to be opened.',
dest = 'config_hash'
)
#Analysis to run
parser.add_argument('--all',
help = 'Run all available analysis (all the possible for the input)',
action = 'store_true'
)
parser.add_argument('--phase',
action = 'store_true',
help = 'Check the phase distance. Needs membrane data'
# FIXME: enable phase distance on spikes
)
parser.add_argument('--frequency',
action = 'store_true',
help = 'Return the fundamental frequency for each neuron. Needs membrane data')
# FIXME: enable this on spikes
parser.add_argument('--duty',
action = 'store_true',
help = 'Return the duty cycle for each neurons. Needs membrane data')
# FIXME: enable this on spikes
parser.add_argument('--update-json',
action = 'store_true',
help = 'Save data to json (for historyGUI)',
default = False)
parser.add_argument('--batch',
action = 'store_true',
help = 'Suppress output, save to file',
default = False)
args = parser.parse_args()
batch = args.batch
if not args.path:
print "WARNING:\tYou must define the store dir! Using general_config default"
try:
import general_config
args.path = general_config._history_dir
except:
raise #debug file not found etc.
exit("FATAL:\t\tCannot import general config. Define a store dir, please")
#Check required
if not args.general:
exit("FATAL:\t\tYou need to define a general config file hash")
#TODO: copy this for other analysis. Make a function if it get boring
if args.phase or args.duty or args.frequency or args.all:
if not args.config_hash:
exit("ERROR:\t\tto run this analysis, you must provide the config hash")
else:
membrane_file = args.path + '/.' + args.general + args.config_hash + '_membrane'
Vm_dict = importer(membrane_file)
if args.config_hash:
if not batch:
print not batch, "INFO: Opened file %s" % membrane_file
print not batch, "INFO: This is a membrane file."
print batch, "INFO: There are %s neurons in this net, and %s samples (ms)" % (len(Vm_dict), len(Vm_dict[0]))
if args.phase or args.duty or args.frequency or args.all:
#Run import only if needed
import plugins.analysis.membrane as membrane_analysis
results = membrane_analysis.analyzeMembrane(Vm_dict)
if args.all:
print(not batch, results)
else:
if args.phase:
print(not batch, results["phase"])
if args.fundamental:
print(not batch, results["fundamental"])
if args.duty_cycle:
print(not batch, results["duty_cycle"])
saveKey(args.general + args.config_hash + "_analysis", results, out_dir = args.path)
if args.update_json:
import json
data_to_json = []
for n in Vm_dict:
x = 0
neuron = []
for y in Vm_dict[n]:
point = {"x":x,"y":y}
x+=1
neuron.append(point)
data_to_json.append(neuron)
with open('./historyGUI/data.json', 'w') as outfile:
json.dump(data_to_json, outfile)
| nico202/pyNeMo | Analyze.py | Python | gpl-2.0 | 4,060 | [
"NEURON"
] | da4690941be05e906557a8cb554b9724687d368f5eeb905be6fe410c8f9d5462 |
# petclaw to vtk
import os
import numpy as np
from petsc4py import PETSc
import pickle
import glob
import shutil
def post_calculation():
pass
class IO(object):
def read_petsc(self):
if hasattr(self, 'frame'): frame = self.frame
if hasattr(self,'file_prefix'): file_prefix = self.file_prefix
if hasattr(self, 'path'): path = self.path
if hasattr(self, 'write_aux'): write_aux = self.write_aux
if hasattr(self, 'write_aux'): read_aux = self.read_aux
if hasattr(self, 'write_p'): write_p = self.write_p
pickle_filename = os.path.join(path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
viewer_filename = os.path.join(path, '%s.ptc' % file_prefix) + str(frame).zfill(4)
aux_viewer_filename1 = os.path.join(path, '%s_aux.ptc' % file_prefix) + str(frame).zfill(4)
aux_viewer_filename2 = os.path.join(path, '%s_aux.ptc' % file_prefix) + str(0).zfill(4)
if os.path.exists(aux_viewer_filename1):
aux_viewer_filename = aux_viewer_filename1
else:
aux_viewer_filename = aux_viewer_filename2
pickle_file = open(pickle_filename,'rb')
# this dictionary is mostly holding debugging information, only nstates is needed
# most of this information is explicitly saved in the individual patches
value_dict = pickle.load(pickle_file)
nstates = value_dict['nstates']
num_dim = value_dict['num_dim']
num_aux = value_dict['num_aux']
num_eqn = value_dict['num_eqn']
self.__setattr__('num_dim',num_dim)
self.__setattr__('num_aux',num_aux)
self.__setattr__('num_eqn',num_eqn)
# now set up the PETSc viewer (assuming binary)
viewer = PETSc.Viewer().createBinary(viewer_filename, PETSc.Viewer.Mode.READ)
if read_aux:
aux_viewer = PETSc.Viewer().createBinary(aux_viewer_filename, PETSc.Viewer.Mode.READ)
patches = []
for m in xrange(nstates):
patch_dict = pickle.load(pickle_file)
level = patch_dict['level']
names = patch_dict['names']
lower = patch_dict['lower']
n = patch_dict['num_cells']
d = patch_dict['delta']
from clawpack import petclaw
dimensions = []
for i in xrange(num_dim):
dimensions.append(
petclaw.Dimension(names[i],lower[i],lower[i] + n[i]*d[i],n[i]))
patch = petclaw.Patch(dimensions)
self.__setattr__('_patch',patch)
if num_dim==1:
self.__setattr__('x',patch.x)
elif num_dim==2:
self.__setattr__('x',patch.x)
self.__setattr__('y',patch.y)
elif num_dim == 3:
self.__setattr__('y',patch.y)
self.__setattr__('z',path.z)
self.__setattr__('num_cells',patch.num_cells_global)
claw = petclaw.State(patch,num_eqn,num_aux) ##
self.__setattr__('_claw',claw)
self.t = value_dict['t']
self.problem_data = value_dict['problem_data']
self.nstates = value_dict['nstates']
self._claw.gqVec.load(viewer)
if read_aux:
self._claw.gauxVec.load(aux_viewer)
self.__setattr__('aux',self._claw.aux)
self.__setattr__('q',self._claw.q)
self.__setattr__('frame',frame)
self.__setattr__('file_prefix',file_prefix)
self.__setattr__('read_aux', read_aux)
self.__setattr__('write_aux', write_aux)
self.__setattr__('write_p', write_p)
self.__setattr__('path', path)
return self
def write_vtk(self):
if hasattr(self, 'frame'): frame = self.frame
if hasattr(self,'file_prefix'): file_prefix = self.file_prefix
if hasattr(self, 'path'): path = self.path
if hasattr(self, 'write_aux'): write_aux = self.write_aux
if hasattr(self, 'write_p'): write_p = self.write_p
if hasattr(self, 'q'):
viewer_filename = os.path.join(path, file_prefix+str(frame).zfill(4)+'.vtk')
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.WRITE, format=PETSc.Viewer.Format.ASCII_VTK)
self.gqVec.view(viewer)
if write_aux:
self.gauxVec.view(aux_viewer)
viewer.flush()
viewer.destroy()
if write_aux:
aux_viewer.flush()
aux_viewer.destroy()
def q_to_vtk(self):
nx,ny = self.num_cells
coordinates = [self.x.centers,
self.y.centers,
np.ones(1),
]
dimensions = (nx, ny, 1)
if hasattr(self, 'frame'): frame = self.frame
if hasattr(self,'file_prefix'): file_prefix = self.file_prefix
if hasattr(self, 'path'): path = self.path
scalars = [("Q1", self.q[0]),
("Q2", self.q[1]),
("Q3", self.q[2])]
vectors = []
title = 'VTK Data'
filename = os.path.join(path, file_prefix+str(frame).zfill(4)+'.vtk')
fh = open(filename, 'wb')
fh_write = lambda s: fh.write(s.encode('ascii'))
header = '# vtk DataFile Version %d.%d'
version = (2, 0)
fh_write(header % version)
fh_write('\n')
title = title
fh_write(title[:255])
fh_write('\n')
format = 'BINARY'
fh_write(format)
fh_write('\n')
dataset_type = 'RECTILINEAR_GRID'
fh_write('DATASET %s' % dataset_type);
fh_write('\n')
fh_write('DIMENSIONS %d %d %d' % dimensions)
fh_write('\n')
for X, array in zip("XYZ", coordinates):
label = X+'_COORDINATES'
fh_write('%s %s %s' % (label, len(array), 'double'))
fh_write('\n')
array.astype('>d').tofile(fh)
fh_write('\n')
data_type = 'POINT_DATA'
fh_write('%s %d' % (data_type, np.prod(dimensions)))
fh_write('\n')
for i, (name, array) in enumerate(scalars):
attr_type = 'SCALARS'
attr_name = name or (attr_type.lower() + str(i))
attr_name = attr_name.replace(' ', '_')
fh_write('%s %s %s' %(attr_type, attr_name, 'double'))
fh_write('\n')
lookup_table = 'default'
lookup_table = lookup_table.replace(' ', '_')
fh_write('LOOKUP_TABLE %s' % lookup_table)
fh_write('\n')
array.astype('>d').tofile(fh)
fh_write('\n')
for i, (name, array) in enumerate(vectors):
attr_type = 'VECTORS'
attr_name = name or (attr_type.lower() + str(i))
attr_name = attr_name.replace(' ', '_')
fh_write('%s %s %s' %(attr_type, attr_name, 'double'))
fh_write('\n')
array.astype('>d').tofile(fh)
fh_write('\n')
fh.flush()
fh.close()
def copy_files(self,src_file,dst_file):
pass
def __init__(self,frame=0,file_prefix='claw',path='./',write_aux=False,write_p=False,read_aux=False):
self.frame=frame
self.file_prefix=file_prefix
self.path=path
self.read_aux = read_aux
self.write_aux = write_aux
self.write_p = write_p
self.write_postprocess = False
self.postprocess = post_calculation()
| nthakkar/emclaw | utils/inout.py | Python | gpl-2.0 | 7,567 | [
"VTK"
] | a3ab295e6bba4cfb2df785c2a03958ac9aa2984edc04466532455a9f6b747d60 |
#!/usr/bin/env python
# -*- coding: iso-8859-1 -*-
###########################################################################
# ESPResSo++ #
# Test script for Converting GROMACS tabulated file #
# #
###########################################################################
import sys
import time
import os
import espressopp
import mpi4py.MPI as MPI
import math
import logging
import os
from espressopp import Real3D, Int3D
from espressopp.tools.convert import gromacs
# Input values for system
N = 10 # box size
size = (float(N), float(N), float(N))
numParticles = N**3 # number of particles
nsteps = 1000 # number of steps
cutoff = 2.5 # cutoff for LJ potential
tabfile = "pot-lj-esp.tab" # filename for tabulated potential
skin = 0.3 # skin for Verlet lists
spline = 2 # interpolation spline type
# parameters to convert GROMACS tabulated potential file
filein = "table6-12.xvg" # gromacs tabulated file to be converted
fileout = "pot-lj-gro.tab" # filename of output file
sigma = 1.0
epsilon = 1.0
c6 = 4.0
c12 = 4.0
files = [tabfile, fileout] # run simulation on these files
######################################################################
## IT SHOULD BE UNNECESSARY TO MAKE MODIFICATIONS BELOW THIS LINE ##
######################################################################
print '\n-- GROMACS Tabulated File Conversion Test -- \n'
print 'Steps: %3s' % nsteps
print 'Particles: %3s' % numParticles
print 'Cutoff: %3s' % cutoff
# writes the tabulated potential file
def writeTabFile(pot, name, N, low=0.0, high=2.5, body=2):
outfile = open(name, "w")
delta = (high - low) / (N - 1)
for i in range(N):
r = low + i * delta
energy = pot.computeEnergy(r)
if body == 2:# this is for 2-body potentials
force = pot.computeForce(Real3D(r, 0.0, 0.0))[0]
else: # this is for 3- and 4-body potentials
force = pot.computeForce(r)
outfile.write("%15.8g %15.8g %15.8g\n"%(r, energy, force))
outfile.close()
# write the espressopp++ tabulated file for a LJ potential
print 'Generating potential file ... (%2s)' % tabfile
potLJ = espressopp.interaction.LennardJones(epsilon=1.0, sigma=1.0, shift=0.0, cutoff=cutoff)
writeTabFile(potLJ, tabfile, N=1500, low=0.01, high=potLJ.cutoff)
# convert gromacs tabulated file to espressopp++ format
print 'Converting GROMACS file to ESPResSo++ file ... (%2s -> %2s)' % (filein, fileout)
gromacs.convertTable(filein, fileout, sigma, epsilon, c6, c12)
#exit() # exit if you just want to convert a file
# compute the number of cells on each node
def calcNumberCells(size, nodes, cutoff):
ncells = 1
while size / (ncells * nodes) >= cutoff:
ncells = ncells + 1
return ncells - 1
#start_time = time.clock()
# run simulation for all tabulated potential files
for potfile in files:
print '\nUsing file: %0s'% potfile
# set up system
system = espressopp.System()
system.rng = espressopp.esutil.RNG()
system.bc = espressopp.bc.OrthorhombicBC(system.rng, size)
system.skin = skin
comm = MPI.COMM_WORLD
nodeGrid = Int3D(1, 1, comm.size)
cellGrid = Int3D(
calcNumberCells(size[0], nodeGrid[0], cutoff),
calcNumberCells(size[1], nodeGrid[1], cutoff),
calcNumberCells(size[2], nodeGrid[2], cutoff)
)
system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)
pid = 0
for i in range(N):
for j in range(N):
for k in range(N):
m = (i + 2*j + 3*k) % 11
r = 0.45 + m * 0.01
x = (i + r) / N * size[0]
y = (j + r) / N * size[1]
z = (k + r) / N * size[2]
x = 1.0 * i
y = 1.0 * j
z = 1.0 * k
system.storage.addParticle(pid, Real3D(x, y, z))
# not yet: dd.setVelocity(id, (1.0, 0.0, 0.0))
pid = pid + 1
system.storage.decompose()
# integrator
integrator = espressopp.integrator.VelocityVerlet(system)
integrator.dt = 0.005
# now build Verlet List
# ATTENTION: you must not add the skin explicitly here
logging.getLogger("Interpolation").setLevel(logging.INFO)
vl = espressopp.VerletList(system, cutoff = cutoff + system.skin)
potTab = espressopp.interaction.Tabulated(itype=spline, filename=potfile, cutoff=cutoff)
# ATTENTION: auto shift was enabled
interTab = espressopp.interaction.VerletListTabulated(vl)
interTab.setPotential(type1=0, type2=0, potential=potTab)
system.addInteraction(interTab)
temp = espressopp.analysis.Temperature(system)
press = espressopp.analysis.Pressure(system)
temperature = temp.compute()
p = press.compute()
Ek = 0.5 * temperature * (3 * numParticles)
Ep = interTab.computeEnergy()
print 'Start %5s: tot energy = %10.3f pot = %10.3f kin = %10.3f temp = %10.3f p = %10.3f' \
% ("", Ek + Ep, Ep, Ek, temperature, p)
# langevin thermostat
langevin = espressopp.integrator.LangevinThermostat(system)
integrator.addExtension(langevin)
langevin.gamma = 1.0
langevin.temperature = 1.0
integrator.run(nsteps)
temperature = temp.compute()
p = press.compute()
Ek = 0.5 * temperature * (3 * numParticles)
Ep = interTab.computeEnergy()
print 'Step %6d: tot energy = %10.3f pot = %10.3f kin = %10.3f temp = %10.3f p = %10.3f' % \
(nsteps, Ek + Ep, Ep, Ek, temperature, p)
os.system('rm '+potfile) # remove file
print '\nDone.'
| junghans/espressopp | examples/convert_gromacs_tables/convert_gromacs_table.py | Python | gpl-3.0 | 6,166 | [
"ESPResSo",
"Gromacs"
] | dcd8f15e6d2e174ddc7f03a4b8144258fe7138c76d29c89dbd0b38a784163af1 |
from __future__ import absolute_import, division, print_function
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
| JWDebelius/scikit-bio | skbio/util/_exception.py | Python | bsd-3-clause | 419 | [
"scikit-bio"
] | 995eabacd5eb14d720e370bd6d2608b3f47d6daeb9f2daafbe06b65ccd909406 |
#pylint: disable=missing-docstring
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import math
import vtk
from .ImageAnnotationSource import ImageAnnotationSource
from .. import base
class ImageAnnotation(base.ChiggerResult):
    """
    Chigger result that places a 2D image inside the 3D render window.

    Inputs:
        key,value pairs set the options for this object.
    """

    @staticmethod
    def getOptions():
        """
        Build the default option set for image annotations.
        """
        opt = base.ChiggerResult.getOptions()
        opt.add('position', (0.5, 0.5),
                "The position of the image center within the viewport, in relative coordinates.",
                vtype=tuple)
        opt.add('width', 0.25,
                "The logo width as a fraction of the window width, this is ignored if 'scale' option is set.")
        opt.add('horizontal_alignment', 'center',
                "The position horizontal position alignment.",
                allow=['left', 'center', 'right'])
        opt.add('vertical_alignment', 'center',
                "The position vertical position alignment.",
                allow=['bottom', 'center', 'top'])
        opt.add('scale', None,
                "The scale of the image. (By default the image is scaled by the width.)",
                vtype=float)
        opt += ImageAnnotationSource.getOptions()
        return opt

    def __init__(self, **kwargs):
        super(ImageAnnotation, self).__init__(ImageAnnotationSource(), **kwargs)
        # The image gets its own, non-interactive camera so that navigating
        # the main scene does not move the annotation.
        self._vtkcamera = self._vtkrenderer.MakeCamera()
        self._vtkrenderer.SetInteractive(False)

    def update(self, **kwargs):
        """
        Re-aim the dedicated camera so the image lands at the configured
        viewport position with the configured size.
        """
        super(ImageAnnotation, self).update(**kwargs)
        renderer = self.getVTKRenderer()

        # Converter from normalized-viewport to display (pixel) coordinates.
        coord = vtk.vtkCoordinate()
        coord.SetCoordinateSystemToNormalizedViewport()

        window_size = renderer.GetRenderWindow().GetSize()
        image_size = self._sources[-1].getVTKSource().GetOutput().GetDimensions()

        # Explicit 'scale' wins; otherwise derive it from the requested
        # width fraction of the window.
        if self.isOptionValid('scale'):
            scale = self.getOption('scale')
        else:
            scale = float(window_size[0]) / float(image_size[0]) * self.getOption('width')

        # Camera distance for a 1:1 world-unit to pixel mapping.
        view_angle = self._vtkcamera.GetViewAngle()
        distance = window_size[1] * 0.5 / math.tan(math.radians(view_angle * 0.5))

        # Target location of the image center, in display coordinates.
        if self.isOptionValid('position'):
            pos_option = self.getOption('position')
            coord.SetValue(pos_option[0], pos_option[1], 0)
            position = list(coord.GetComputedDisplayValue(renderer))

            # Shift away from the center for off-center alignments.
            h_align = self.getOption('horizontal_alignment')
            if h_align == 'left':
                position[0] += image_size[0] * 0.5 * scale
            elif h_align == 'right':
                position[0] -= image_size[0] * 0.5 * scale

            v_align = self.getOption('vertical_alignment')
            if v_align == 'top':
                position[1] -= image_size[1] * 0.5 * scale
            elif v_align == 'bottom':
                position[1] += image_size[1] * 0.5 * scale

        # Display coordinates of the window center act as the reference.
        coord.SetValue(0.5, 0.5, 0)
        reference = coord.GetComputedDisplayValue(renderer)

        # Camera offsets (world units) from the reference to the target.
        dx = (reference[0] - position[0]) / scale
        dy = (reference[1] - position[1]) / scale

        self._vtkcamera.SetViewUp(0, 1, 0)
        self._vtkcamera.SetPosition(image_size[0]/2. + dx, image_size[1]/2. + dy, distance / scale)
        self._vtkcamera.SetFocalPoint(image_size[0]/2. + dx, image_size[1]/2. + dy, 0)

        # Install the annotation camera on the renderer.
        renderer.SetActiveCamera(self._vtkcamera)
        renderer.ResetCameraClippingRange()
| nuclear-wizard/moose | python/chigger/annotations/ImageAnnotation.py | Python | lgpl-2.1 | 4,106 | [
"MOOSE",
"VTK"
] | 59adcce62770a55349f021e5e6dbce2d55c27b9ff0fae72c82bd127197a9bf0a |
def extractLolitiveMoe(item):
	'''
	DISABLED
	Parser for 'lolitive.moe'
	'''
	# Disabled parser stub: always signals "no match" for this feed item.
	return None | fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractLolitiveMoe.py | Python | bsd-3-clause | 92 | [
"MOE"
] | e2590105d7dbfaf0a2eb4ee0e8f4f531fb0da85e7b2491f818fe50300b8b7fef |
# -*- coding: utf-8 -*-
"""
Created on 20/10/2016
@author: Charlie Bourigault
@contact: bourigault.charlie@gmail.com
Please report issues and request on the GitHub project from ChrisEberl (Python_DIC)
More details regarding the project on the GitHub Wiki : https://github.com/ChrisEberl/Python_DIC/wiki
Current File: Contains functions used by the filterWidget classes and associated to image filtering
"""
import numpy as np, cv2
from functions import getData
def applyFilterListToImage(filterList, image):
    """Run every filter of *filterList* over *image*, in list order.

    Each entry is a filter record whose field [1] (filter name) and fields
    [2:5] (parameters) are forwarded to applyFilterToImage.  A missing
    (None) or empty list leaves the image untouched.
    """
    if filterList is None:
        return image
    for record in np.atleast_1d(filterList):
        name = record[1]
        params = [record[2], record[3], record[4]]
        image = applyFilterToImage(name, params, image)
    return image
def applyFilterToImage(filterName, filterParameters, image):
    """
    Apply one named display filter to an image array and return the result.

    filterName: 'Zoom', 'Blur', 'Gaussian', 'Brightness', 'Darkness' or
        'Contrast'; any other value returns the image unchanged.
    filterParameters: the three stored values [p0, p1, p2]; p2 is an 'a,b'
        string for 'Zoom' (offsets) and 'Gaussian' (sigmas).
    image: uint8 numpy array (as produced by cv2 image loading).

    On malformed 'Zoom'/'Gaussian' parameters the original image is returned
    unchanged (best-effort behaviour expected by the GUI).
    """
    backupImage = image
    if filterName == 'Zoom':
        try:
            minY = int(filterParameters[2].split(',')[0])
            maxY = minY + int(filterParameters[0])
            minX = int(filterParameters[2].split(',')[1])
            maxX = minX + int(filterParameters[1])
            image = image[minX:maxX, minY:maxY]
        except Exception:  # malformed offsets/sizes: keep unfiltered image
            image = backupImage
    elif filterName == 'Blur':
        image = cv2.blur(image, (int(filterParameters[0]), int(filterParameters[1])))
    elif filterName == 'Gaussian':
        try:
            image = cv2.GaussianBlur(image, (int(filterParameters[0]), int(filterParameters[1])), int(filterParameters[2].split(',')[0]), int(filterParameters[2].split(',')[1]))
        except Exception:  # malformed kernel/sigma values: keep unfiltered image
            image = backupImage
    elif filterName == 'Brightness':
        # Power-law remap of intensities; phi/theta are given in percent,
        # degree is the exponent of the mapping.
        maxValue = np.max(image)
        phi = float(filterParameters[0])/100
        theta = float(filterParameters[1])/100
        degree = float(filterParameters[2])
        # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
        image = image.astype(np.float64)
        image = maxValue*(1+theta)*(image/maxValue/(1-phi))**(1/degree)
        image[image > 255] = 255
        image[image < 0] = 0
        image = image.astype(np.uint8)
    elif filterName == 'Darkness':
        # Inverse transform of 'Brightness'.
        maxValue = np.max(image)
        phi = float(filterParameters[0])/100
        theta = float(filterParameters[1])/100
        degree = float(filterParameters[2])
        image = image.astype(np.float64)
        image = maxValue*(1-theta)*(image/maxValue/(1+phi))**(degree)
        image[image > 255] = 255
        image[image < 0] = 0
        image = image.astype(np.uint8)
    elif filterName == 'Contrast':
        # Brighten pixels above the mid level, darken those below it.
        maxValue = np.max(image)
        phi = float(filterParameters[0])/100
        theta = float(filterParameters[1])/100
        degree = float(filterParameters[2])
        medium = (float(maxValue)+np.min(image))/2
        image = image.astype(np.float64)
        image[image > medium] = medium*(1+theta)*(image[image > medium]/medium/(1-phi))**(1/degree)
        image[image < medium] = medium*(1-theta)*(image[image < medium]/medium/(1+phi))**(degree)
        image[image > 255] = 255
        image[image < 0] = 0
        image = image.astype(np.uint8)
    return image
def saveOpenFilter(filePath, filterList=None):
    """Load or persist the per-project 'filter.dat' file.

    With filterList=None the file is read (via the project's tolerant
    reader) and its contents returned; otherwise *filterList* is written,
    one filter record per row with space-separated string fields.
    """
    target = filePath + '/filter.dat'
    if filterList is None:
        # Load mode.
        return getData.testReadFile(target)
    # Save mode.
    np.savetxt(target, np.array(filterList), fmt="%s")
| ChrisEberl/Python_DIC | functions/filterFunctions.py | Python | apache-2.0 | 3,537 | [
"Gaussian"
] | f244e5f0650dab88528f1226bb9739ca54575806cdaa94e5bf44e1e769be04df |
import numpy as np
from ase.data import atomic_numbers, chemical_symbols
from ase.units import Bohr
from gpaw.setup import Setups
from gpaw.xc import XC
from gpaw.mpi import world
# Empirical van der Waals radii of the rare-gas atoms; the dict name points
# to A. Bondi, J. Phys. Chem. 68 (1964) as the source of these values.
Bondi64jpc_vdWradii = { # units Angstrom
    'He' : 1.40,
    'Ne' : 1.54,
    'Ar' : 1.88,
    'Kr' : 2.02,
    'Xe' : 2.16
    }
def vdWradii(symbols, xc):
    """Return a van der Waals radius (Angstrom) for each symbol in *symbols*.

    Uses the scheme of Tkatchenko and Scheffler, PRL 102 (2009) 073005:
    the all-electron density of the row's rare-gas atom is evaluated at
    Bondi's empirical vdW radius, and an element's radius is the distance
    at which its own atomic density drops to that same cutoff value.
    """
    rare_gas_numbers = sorted(atomic_numbers[s] for s in Bondi64jpc_vdWradii)

    if isinstance(xc, str):
        xc = XC(xc)

    def atomic_density(Z):
        """Radial all-electron density and radial grid from the PAW setup."""
        setups = Setups([Z], 'paw', {}, 2,
                        xc, world)
        data = setups[0].data
        # Core density plus the occupied partial waves.
        density = data.nc_g.copy()
        for f, phi_g in zip(data.f_j, data.phi_jg):
            density += f * phi_g**2
        # Rational radial grid r(g) = beta * g / (ng - g).
        g = np.arange(data.ng, dtype=float)
        r_g = data.beta * g / (data.ng - g)
        return density, r_g

    cache = {}
    radii = []
    for symbol in symbols:
        if symbol not in cache:
            Z = atomic_numbers[symbol]

            # Rare gas terminating this element's row of the periodic table.
            noble = None
            for candidate in rare_gas_numbers:
                if noble is None and Z <= candidate:
                    noble = candidate

            n_g, r_g = atomic_density(noble)

            # Density of the rare gas at its Bondi vdW radius (Bohr).
            R = Bondi64jpc_vdWradii[chemical_symbols[noble]] / Bohr
            idx = 0
            while r_g[idx] < R:
                idx += 1
            # Linear interpolation of the density at r = R.
            ncut = (n_g[idx-1] + (n_g[idx] - n_g[idx-1]) *
                    (R - r_g[idx-1]) / (r_g[idx] - r_g[idx-1]))

            # Radius at which this element's own density falls to ncut.
            n_g, r_g = atomic_density(Z)
            idx = 0
            while n_g[idx] > ncut:
                idx += 1
            # Linear interpolation of the radius at n = ncut.
            R = (r_g[idx-1] + (r_g[idx] - r_g[idx-1]) *
                 (ncut - n_g[idx-1]) / (n_g[idx] - n_g[idx-1]))
            cache[symbol] = R * Bohr
        radii.append(cache[symbol])
    return radii
| qsnake/gpaw | gpaw/analyse/vdwradii.py | Python | gpl-3.0 | 2,296 | [
"ASE",
"GPAW"
] | 8c856dfb4c3110c7f6fa62affab538ad42b472837c3b1984b693490711c45935 |
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Reuse the README as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata; field semantics are defined by the core-metadata spec:
# https://packaging.python.org/specifications/core-metadata/
setup(
    name='glm',  # distribution name registered on PyPI
    version='0.0.1',  # PEP 440 version string
    description='OpenGL Mathematics (GLM) for Python',
    long_description=long_description,  # rendered on the PyPI project page
    long_description_content_type='text/markdown',  # the README is Markdown
    url='https://github.com/mackst/glm',
    author='Shi Chi(Mack)',
    author_email='schistone@gmail.com',
    classifiers=[  # see https://pypi.org/classifiers/ for valid values
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    keywords='opengl glm vulkan',  # PyPI search keywords
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),  # auto-discover
    project_urls={  # extra links rendered on the PyPI project sidebar
        'Bug Reports': 'https://github.com/mackst/glm/issues',
        'Say Thanks!': 'https://github.com/mackst/glm',
        'Source': 'https://github.com/mackst/glm',
    },
) | mackst/glm | setup.py | Python | mit | 8,032 | [
"VisIt"
] | 1def791d2c18b9341595bb73cf0c87e79fa20ac6c801311c94fee301e834bcf3 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2014, Dennis Drescher
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
# Import modules
from distutils.core import setup
from glob import glob
# Distutils metadata: importable packages live under 'lib', and the
# top-level shrinkypic* files are installed as executable scripts.
setup(name = 'shrinkypic',
      version = '0.1.r26',
      description = "Image Processing Application",
      long_description = "ShrinkyPic is a simple image processing application that provides a (very) small interface for Imagemagick.",
      maintainer = "Dennis Drescher",
      maintainer_email = "dennis_drescher@sil.org",
      package_dir = {'':'lib'},
      packages = ["shrinkypic", 'shrinkypic.dialog', 'shrinkypic.icon', 'shrinkypic.process'],
      scripts = glob("shrinkypic*"),
      license = 'LGPL',
      )
| thresherdj/shrinkypic | setup.py | Python | mit | 1,604 | [
"VisIt"
] | ceaffe1c6d6852f9a70e2c405fb23df0c8f9fd0dc227fe1b4afcc9b19ef9f97e |
"""Use PyMOl to create templates for bomeba"""
import numpy as np
np.set_printoptions(precision=2)
import __main__
__main__.pymol_argv = ['pymol','-qck']
import pymol
from pymol import cmd, stored
pymol.finish_launching()
import openbabel as ob
# set hydrogen names to PDB compliant
cmd.set('pdb_reformat_names_mode', 2)
#cmd.set('retain_order', 1)
sel = 'all'
#aa = ['BDP', 'NGA']
#aa = ['AFL', 'NAG', 'MAG']
aa = ['B6D', 'A2G', 'BGC']
def minimize(selection='all', forcefield='GAFF', method='cg',
             nsteps=2000, conv=1E-8, cutoff=False, cut_vdw=6.0, cut_elec=8.0):
    """Geometry-optimize *selection* in place via Open Babel and return the
    final force-field energy.

    The selection is exported as PDB text, minimized with the requested
    force field ('cg' = conjugate gradients, anything else = steepest
    descent), then re-imported into PyMOL under the same object name.
    """
    pdb_in = cmd.get_pdbstr(selection)
    obj_name = cmd.get_legal_name(selection)

    converter = ob.OBConversion()
    converter.SetInAndOutFormats('pdb', 'pdb')
    molecule = ob.OBMol()
    converter.ReadString(molecule, pdb_in)

    field = ob.OBForceField.FindForceField(forcefield)
    field.Setup(molecule)
    if cutoff == True:  # deliberately matches only the literal True
        field.EnableCutOff(True)
        field.SetVDWCutOff(cut_vdw)
        field.SetElectrostaticCutOff(cut_elec)

    if method == 'cg':
        field.ConjugateGradients(nsteps, conv)
    else:
        field.SteepestDescent(nsteps, conv)
    field.GetCoordinates(molecule)
    energy = field.Energy()

    # Replace the PyMOL object with the minimized structure; 'all' is a
    # reserved selection keyword, so rename it on the way back in.
    pdb_out = converter.WriteString(molecule)
    cmd.delete(obj_name)
    if obj_name == 'all':
        obj_name = 'all_'
    cmd.read_pdbstr(pdb_out, obj_name)
    return energy
#aa = ['W']
# For each template residue: load its PDB, minimize it, then print a python
# snippet (an AA_info constructor call with coords, atom names and bonds)
# intended to be pasted into bomeba's template tables.
for res_name in aa:
    ## Get coordinates and offset
    cmd.load('templates/glycan/{}.pdb'.format(res_name))
    stored.IDs = []
    cmd.iterate('all','stored.IDs.append((ID))')
    # Re-base atom IDs relative to the first loaded atom's ID.
    cmd.alter('all', 'ID = ID - stored.IDs[0] - 1')
    #cmd.fab(res_name)
    nrg = minimize(selection=sel, forcefield='GAFF', method='cg', nsteps=2000)
    #print(nrg)
    xyz = cmd.get_coords(sel)
    offset = len(xyz)
    ## get atom names
    stored.atom_names = []
    cmd.iterate(sel, 'stored.atom_names.append(name)')
    ## get bonds
    stored.bonds = []
    model = cmd.get_model(sel)
    for at in model.atom:
        # Collect every (atom, neighbour) pair as zero-based index tuples.
        cmd.iterate('neighbor ID %s' % at.id,
                    'stored.bonds.append((%s-1, ID-1))' % at.id)
    # Deduplicate (a,b)/(b,a) duplicates and sort for stable output.
    bonds = list(set([tuple(sorted(i)) for i in stored.bonds]))
    bonds.sort()
    bb = []
    sc = []
    ## small check before returning the results
    if len(stored.atom_names) == offset:
        res = """{}_info = AA_info(coords=np.{},
                    atom_names = {},
                    bb = {},
                    sc = {},
                    bonds = {},
                    offset = {})\n""".format(res_name, repr(xyz), stored.atom_names, bb, sc, bonds, offset)
        print(res)
    else:
        # Atom-name count must match the coordinate count.
        print('Something funny is going on here!')
    cmd.delete('all')
| BIOS-IMASL/bomeba0 | bomeba0/scaffold/gen_templates_gl.py | Python | apache-2.0 | 2,639 | [
"PyMOL"
] | 8873fc30150e1f1e3b4ccceb4fe93c903aaabf9e96526359efa2afff55994730 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.simulation.neuron.simulationdatacontainers.mhocfile import MHOCSections, MHocFileData, MHocFile
from morphforge.simulation.neuron.simulationdatacontainers.mmodfileset import MModFileSet
# Explicit public API of this package (mirrors the re-imports above).
__all__ = ['MHOCSections', 'MHocFileData', 'MHocFile', 'MModFileSet']
| mikehulluk/morphforge | src/morphforge/simulation/neuron/simulationdatacontainers/__init__.py | Python | bsd-2-clause | 1,813 | [
"NEURON"
] | 174d40a14762037a6756411164dc5483e87058a1bd31fe6a385f62551ee409f0 |
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import sys
from pyscf import gto, scf
from pyscf import lo
from pyscf.tools import molden
'''
Write orbitals in molden format
'''
# Benzene (C6H6) in a cc-pVDZ basis with point-group symmetry enabled.
mol = gto.M(
    atom = '''
C 3.2883 3.3891 0.2345
C 1.9047 3.5333 0.2237
C 3.8560 2.1213 0.1612
C 1.0888 2.4099 0.1396
C 3.0401 0.9977 0.0771
C 1.6565 1.1421 0.0663
H 3.9303 4.2734 0.3007
H 1.4582 4.5312 0.2815
H 4.9448 2.0077 0.1699
H 0.0000 2.5234 0.1311
H 3.4870 0.0000 0.0197
H 1.0145 0.2578 0.0000
''',
    basis = 'cc-pvdz',
    symmetry = 1)
mf = scf.RHF(mol)
mf.kernel()
#
# First method is to explicitly call the functions provided by molden.py
#
with open('C6H6mo.molden', 'w') as f1:
    molden.header(mol, f1)
    molden.orbital_coeff(mol, f1, mf.mo_coeff, ene=mf.mo_energy, occ=mf.mo_occ)
#
# Second method is to simply call the from_mo function to write the orbitals
# (here: symmetrically orthogonalized AOs as an example of localized orbitals).
#
c_loc_orth = lo.orth.orth_ao(mol)
molden.from_mo(mol, 'C6H6loc.molden', c_loc_orth)
#
# Molden format does not support high angular momentum basis. To handle the
# orbitals which have l>=5 functions, a hacky way is to drop them via the
# ignore_h flag. However, the resultant orbitals may not be orthonormal.
#
mol = gto.M(
    atom = 'He 0 0 0',
    basis = {'He': gto.expand_etbs(((0, 3, 1., 2.), (5, 2, 1., 2.)))})
mf = scf.RHF(mol).run()
try:
    molden.from_mo(mol, 'He_without_h.molden', mf.mo_coeff)
except RuntimeError:
    print(' Found l=5 in basis.')
    molden.from_mo(mol, 'He_without_h.molden', mf.mo_coeff, ignore_h=True)
| gkc1000/pyscf | examples/tools/02-molden.py | Python | apache-2.0 | 1,649 | [
"PySCF"
] | 2d41297b5a7bf973e7e7bb771aaf152941d956796fd4ca06abdafb79930efcaa |
#!/usr/bin/env python3
"""Use this if the antenna timeing callibration changes"""
import numpy as np
from scipy.signal import resample
import h5py
from LoLIM.IO.raw_tbb_IO import MultiFile_Dal1, filePaths_by_stationName, read_antenna_pol_flips, read_bad_antennas
from LoLIM.utilities import processed_data_dir, even_antName_to_odd
from LoLIM.signal_processing import parabolic_fit
from LoLIM import utilities
# Machine-specific LoLIM data locations; must be set before any file lookups.
utilities.default_raw_data_loc = "/home/brian/KAP_data_link/lightning_data"
utilities.default_processed_data_loc = "/home/brian/processed_files"
def recalibrate_pulse(timeID, input_fname, output_fname, set_polarization_delay=True, upsample_factor=4, polarization_flips="polarization_flips.txt"):
    """Copy a pulse HDF5 file while replacing the antenna timing calibration.

    Reads `input_fname` under the processed-data folder of `timeID`, rewrites
    each antenna's even/odd-polarization time offsets from the current TBB
    calibration, and re-locates the pulse peak times on the stored HE traces
    (presumably Hilbert envelopes -- confirm with the file writer), optionally
    upsampled by `upsample_factor`.  Results go to `output_fname`.
    """
    processed_data_folder = processed_data_dir(timeID)
    raw_fpaths = filePaths_by_stationName(timeID)
    polarization_flips = read_antenna_pol_flips( processed_data_folder + '/' + polarization_flips )
    input_file = h5py.File(processed_data_folder+'/'+input_fname, "r")
    # Open for in-place update if the output exists, otherwise create it.
    # NOTE(review): the bare except also hides permission errors etc.
    try:
        output_file = h5py.File(processed_data_folder+'/'+output_fname, "r+")
    except:
        output_file = h5py.File(processed_data_folder+'/'+output_fname, "w")
    # Effective sample period of the (possibly upsampled) trace.
    sample_time = 5.0e-9
    if upsample_factor>1:
        sample_time /= upsample_factor
    for sname, h5_statGroup in input_file.items():
        out_statGroup = output_file.require_group(sname)
        print()
        print()
        print(sname)
        datafile = MultiFile_Dal1(raw_fpaths[sname], polarization_flips=polarization_flips)
        if set_polarization_delay:
            datafile.find_and_set_polarization_delay()
        # Fresh total delay per antenna name, taken from the raw TBB data.
        antenna_calibrations = { antname:calibration for antname,calibration in zip(datafile.get_antenna_names(), datafile.get_total_delays()) }
        for antname, in_antData in h5_statGroup.items():
            # Copy the dataset, then overwrite its calibration attributes.
            out_statGroup.copy(in_antData, out_statGroup, name= antname)
            out_antData = out_statGroup[antname]
            old_even = out_antData.attrs['PolE_timeOffset_CS']
            old_odd = out_antData.attrs['PolO_timeOffset_CS']
            new_even_delay = antenna_calibrations[antname]
            new_odd_delay = antenna_calibrations[ even_antName_to_odd(antname) ]
            out_antData.attrs['PolE_timeOffset'] = -new_even_delay ## NOTE: due to historical reasons, there is a sign flip here
            out_antData.attrs['PolO_timeOffset'] = -new_odd_delay ## NOTE: due to historical reasons, there is a sign flip here
            out_antData.attrs['PolE_timeOffset_CS'] = new_even_delay
            out_antData.attrs['PolO_timeOffset_CS'] = new_odd_delay
            # Report the calibration change (old minus new) per polarization.
            print(antname, old_even-new_even_delay, old_odd-new_odd_delay)
            starting_index = out_antData.attrs['starting_index']
            # Re-find the pulse peaks with sub-sample (parabolic) precision.
            # NOTE(review): assumes row 1 holds the even- and row 3 the
            # odd-polarization HE trace -- confirm against the file writer.
            if np.isfinite( out_antData.attrs['PolE_peakTime'] ):
                polE_HE = out_antData[1]
                if upsample_factor>1:
                    polE_HE = resample(polE_HE, len(polE_HE)*upsample_factor )
                PolE_peak_finder = parabolic_fit( polE_HE )
                out_antData.attrs['PolE_peakTime'] = starting_index*5.0E-9 - new_even_delay + PolE_peak_finder.peak_index*sample_time
            if np.isfinite( out_antData.attrs['PolO_peakTime'] ):
                polO_HE = out_antData[3]
                if upsample_factor>1:
                    polO_HE = resample(polO_HE, len(polO_HE)*upsample_factor )
                PolO_peak_finder = parabolic_fit( polO_HE )
                out_antData.attrs['PolO_peakTime'] = starting_index*5.0E-9 - new_odd_delay + PolO_peak_finder.peak_index*sample_time
| Bhare8972/LOFAR-LIM | LIM_scripts/stationTimings/reCallibrate_pulses.py | Python | mit | 3,845 | [
"Brian"
] | f763c6ce8621472c7fcc8553f1ab170c51e2b515f6c0432707299fc313e4dfe7 |
from __future__ import print_function, division, unicode_literals
import math
import os
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.vasp.inputs import Incar
from mpinterfaces import VASP_STD_BIN, VDW_KERNEL, QUEUE_SYSTEM
import mpinterfaces.utils as utl
__author__ = "Michael Ashton"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Michael Ashton"
__email__ = "ashtonmv@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
# TODO: the run_* functions in mat2d subpackages can be merged and simplified, a lot
# of code duplcations
def run_gamma_calculations(submit=True, step_size=0.5):
    """
    Setup a 2D grid of static energy calculations to plot the Gamma
    surface between two layers of the 2D material. These calculations
    are run and stored in subdirectories under 'friction/lateral'.

    Args:
        submit (bool): Whether or not to submit the jobs.
        step_size (float): the distance between grid points in
            Angstroms.
    """
    if not os.path.isdir('friction'):
        os.mkdir('friction')
    os.chdir('friction')
    if not os.path.isdir('lateral'):
        os.mkdir('lateral')
    os.chdir('lateral')
    # Start from the relaxed structure of the parent calculation.
    os.system('cp ../../CONTCAR POSCAR')

    # Pad the bottom layer with 20 Angstroms of vacuum.
    utl.ensure_vacuum(Structure.from_file('POSCAR'), 20)
    structure = Structure.from_file('POSCAR')
    n_sites_per_layer = structure.num_sites
    # Grid resolution: one subdirectory per in-plane shift of ~step_size.
    n_divs_x = int(math.ceil(structure.lattice.a / step_size))
    n_divs_y = int(math.ceil(structure.lattice.b / step_size))

    # Get the thickness of the material.
    max_height = max([site.coords[2] for site in structure.sites])
    min_height = min([site.coords[2] for site in structure.sites])
    thickness = max_height - min_height

    # Make a new layer (duplicate of the first, stacked 3.5 A above it).
    species, coords = [], []
    for site in structure.sites:
        # Original site
        species.append(site.specie)
        coords.append(site.coords)
        # New layer site
        species.append(site.specie)
        coords.append([site.coords[0], site.coords[1],
                       site.coords[2] + thickness + 3.5])

    Structure(structure.lattice, species, coords,
              coords_are_cartesian=True).to('POSCAR', 'POSCAR')

    for x in range(n_divs_x):
        for y in range(n_divs_y):
            # NOTE(review): 'dir' shadows the builtin of the same name.
            dir = '{}x{}'.format(x, y)
            if not os.path.isdir(dir):
                os.mkdir(dir)

            # Copy input files
            os.chdir(dir)
            os.system('cp ../../../INCAR .')
            os.system('cp ../../../KPOINTS .')
            os.system('cp ../POSCAR .')
            if VDW_KERNEL:
                os.system('cp {} .'.format(VDW_KERNEL))

            # Shift the top layer
            structure = Structure.from_file("POSCAR")
            all_z_coords = [s.coords[2] for s in structure.sites]
            top_layer = [s for s in structure.sites if s.coords[2] > np.mean(all_z_coords)]
            structure.remove_sites([i for i, s in enumerate(structure.sites) if s in top_layer])
            # NOTE(review): a fractional amount (x/n_divs_x) is added to a
            # cartesian coordinate with coords_are_cartesian=True; looks
            # like it is meant to be a fractional-lattice shift -- confirm.
            for site in top_layer:
                structure.append(
                    site.specie,
                    [site.coords[0]+float(x)/float(n_divs_x),
                     site.coords[1]+float(y)/float(n_divs_y),
                     site.coords[2]], coords_are_cartesian=True
                )
            structure = structure.get_sorted_structure()
            structure.to("POSCAR", "POSCAR")
            utl.write_potcar()
            # Static run: no ionic steps, no charge/wavefunction output.
            incar_dict = Incar.from_file('INCAR').as_dict()
            incar_dict.update({'NSW': 0, 'LAECHG': False, 'LCHARG': False,
                               'LWAVE': False, 'LVTOT': False,
                               'MAGMOM': utl.get_magmom_string(structure)})
            incar_dict.pop('NPAR', None)
            Incar.from_dict(incar_dict).write_file('INCAR')

            # NOTE(review): submission_command is unbound when QUEUE_SYSTEM
            # is neither 'pbs' nor 'slurm' and submit is True.
            if QUEUE_SYSTEM == 'pbs':
                utl.write_pbs_runjob(dir, 1, 8, '1000mb', '2:00:00', VASP_STD_BIN)
                submission_command = 'qsub runjob'

            elif QUEUE_SYSTEM == 'slurm':
                utl.write_slurm_runjob(dir, 8, '1000mb', '2:00:00', VASP_STD_BIN)
                submission_command = 'sbatch runjob'

            if submit:
                os.system(submission_command)

            os.chdir('../')
    os.chdir('../../')
def run_normal_force_calculations(basin_and_saddle_dirs,
                                  spacings=np.arange(1.5, 4.25, 0.25),
                                  submit=True):
    """
    Set up and run static calculations of the basin directory and
    saddle directory at specified interlayer spacings to get f_N and
    f_F.

    Args:
        basin_and_saddle_dirs (tuple): Can be obtained by the
            get_basin_and_peak_locations() function under
            friction.analysis. For example,
            run_normal_force_calculations(('0x0', '3x6'))
            or
            run_normal_force_calculations(get_basin_and_peak_locations())
            will both work.
        spacings (tuple): list of interlayer spacings (in Angstroms, as floats)
            at which to run the calculations. (The default np.arange is
            evaluated once at import time but is only read, never mutated.)
        submit (bool): Whether or not to submit the jobs.
    """
    spacings = [str(spc) for spc in spacings]
    os.chdir('friction')
    if not os.path.isdir('normal'):
        os.mkdir('normal')
    os.chdir('normal')
    for spacing in spacings:
        if not os.path.isdir(spacing):
            os.mkdir(spacing)
        for subdirectory in basin_and_saddle_dirs:
            # Start each spacing/subdirectory job from the relaxed lateral run.
            os.system('cp -r ../lateral/{} {}/'.format(subdirectory, spacing))
            os.chdir('{}/{}'.format(spacing, subdirectory))
            structure = Structure.from_file('POSCAR')
            n_sites = len(structure.sites)
            all_z_coords = [s.coords[2] for s in structure.sites]
            # Split the slab at the mean z coordinate: sites above it form
            # the "top" layer that will be re-inserted at the new spacing.
            top_layer = [s for s in structure.sites if s.coords[2] >
                         np.mean(all_z_coords)]
            bottom_of_top_layer = min([site.coords[2] for site in top_layer])
            remove_indices = [i for i, s in enumerate(structure.sites) if s in
                              top_layer]
            structure.remove_sites(remove_indices)
            top_of_bottom_layer = max(
                [site.coords[2] for site in structure.sites]
            )
            # Re-add the top layer so that the gap between the layers is
            # exactly `spacing` Angstroms.
            for site in top_layer:
                structure.append(
                    site.specie,
                    [site.coords[0],
                     site.coords[1],
                     site.coords[2] - bottom_of_top_layer
                     + top_of_bottom_layer + float(spacing)],
                    coords_are_cartesian=True)
            structure = structure.get_sorted_structure()
            structure.to('POSCAR', 'POSCAR')
            utl.write_potcar()
            incar_dict = Incar.from_file('INCAR').as_dict()
            incar_dict.update({"MAGMOM": utl.get_magmom_string(structure)})
            Incar.from_dict(incar_dict).write_file("INCAR")
            # BUG FIX: previously, an unrecognized QUEUE_SYSTEM left
            # `submission_command` undefined and submitting raised NameError.
            submission_command = None
            if QUEUE_SYSTEM == 'pbs':
                utl.write_pbs_runjob('{}_{}'.format(
                    subdirectory, spacing), 1, 8, '1000mb', '2:00:00',
                    VASP_STD_BIN)
                submission_command = 'qsub runjob'
            elif QUEUE_SYSTEM == 'slurm':
                utl.write_slurm_runjob('{}_{}'.format(
                    subdirectory, spacing), 8, '1000mb', '2:00:00',
                    VASP_STD_BIN)
                submission_command = 'sbatch runjob'
            if submit and submission_command:
                os.system(submission_command)
            os.chdir('../../')
    os.chdir('../../')
| henniggroup/MPInterfaces | mpinterfaces/mat2d/friction/startup.py | Python | mit | 7,609 | [
"VASP",
"pymatgen"
] | 16298399584c51367d6f7f3c27d6a4811a1f99dcef2b254d75d4ebfb5b68a59f |
"""
DIRAC API Class
All DIRAC functionality is exposed through the DIRAC API and this
serves as a source of documentation for the project via EpyDoc.
The DIRAC API provides the following functionality:
- A transparent and secure way for users
to submit jobs to the Grid, monitor them and
retrieve outputs
- Interaction with Grid storage and file catalogues
via the DataManagement public interfaces (more to be added)
- Local execution of workflows for testing purposes.
"""
__RCSID__ = "$Id$"
import re, os, sys, time, shutil, types, tempfile, glob, tarfile, urllib
import DIRAC
from DIRAC.Core.Base.API import API
from DIRAC.Interfaces.API.JobRepository import JobRepository
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.Core.Utilities.Subprocess import shellCall
from DIRAC.Core.Utilities.ModuleFactory import ModuleFactory
from DIRAC.WorkloadManagementSystem.Client.WMSClient import WMSClient
from DIRAC.WorkloadManagementSystem.Client.SandboxStoreClient import SandboxStoreClient
# from DIRAC.DataManagementSystem.Client.ReplicaManager import ReplicaManager
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client.PathFinder import getSystemSection, getServiceURL
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC.Core.Utilities.Time import toString
from DIRAC.Core.Utilities.List import breakListIntoChunks
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite
from DIRAC.ConfigurationSystem.Client.LocalConfiguration import LocalConfiguration
from DIRAC.Core.Base.AgentReactor import AgentReactor
from DIRAC.Core.Security.X509Chain import X509Chain
from DIRAC.Core.Security import Locations
from DIRAC.Core.Utilities import Time
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
COMPONENT_NAME = 'DiracAPI'
class Dirac( API ):
"""
DIRAC API Class
"""
#############################################################################
def __init__( self, withRepo = False, repoLocation = '', jobManagerClient = False,
sbRPCClient = False, sbTransferClient = False, useCertificates = False ):
"""Internal initialization of the DIRAC API.
"""
super( Dirac, self ).__init__()
self.section = '/LocalSite/'
self.jobRepo = False
if withRepo:
self.jobRepo = JobRepository( repoLocation )
if not self.jobRepo.isOK():
gLogger.error( "Unable to write to supplied repository location" )
self.jobRepo = False
self.scratchDir = gConfig.getValue( self.section + 'ScratchDir', '/tmp' )
self.__clients = {'JobManager':jobManagerClient, 'SBRPC':sbRPCClient, 'SBTransfer':sbTransferClient}
self.__useCertificates = useCertificates
# Determine the default file catalog
self.defaultFileCatalog = gConfig.getValue( self.section + '/FileCatalog', None )
#############################################
# Client instantiation
#############################################
def _wmsClient( self ):
return self.__clients.setdefault( 'WMS', WMSClient( self.__clients[ 'JobManager' ],
self.__clients[ 'SBRPC' ],
self.__clients[ 'SBTransfer' ],
self.__useCertificates ) )
def _sbClient( self ):
return self.__clients.setdefault( 'SandboxClient',
SandboxStoreClient( rpcClient = self.__clients[ 'SBRPC' ],
transferClient = self.__clients[ 'SBTransfer' ],
useCertificates = self.__useCertificates ) )
#############################################################################
# Repository specific methods
#############################################################################
def getRepositoryJobs( self, printOutput = False ):
""" Retireve all the jobs in the repository
Example Usage:
>>> print dirac.getRepositoryJobs()
{'OK': True, 'Value': [1,2,3,4]}
:return S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
jobs = self.jobRepo.readRepository()['Value']
jobIDs = jobs.keys()
if printOutput:
print self.pPrint.pformat( jobIDs )
return S_OK( jobIDs )
def monitorRepository( self, printOutput = False ):
"""Monitor the jobs present in the repository
Example Usage:
>>> print dirac.monitorRepository()
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
jobs = self.jobRepo.readRepository()['Value']
jobIDs = jobs.keys()
res = self.status( jobIDs )
if not res['OK']:
return self._errorReport( res['Message'], 'Failed to get status of jobs from WMS' )
jobs = self.jobRepo.readRepository()['Value']
statusDict = {}
for jobDict in jobs.values():
state = 'Unknown'
if jobDict.has_key( 'State' ):
state = jobDict['State']
if not statusDict.has_key( state ):
statusDict[state] = 0
statusDict[state] += 1
if printOutput:
print self.pPrint.pformat( statusDict )
return S_OK( statusDict )
def retrieveRepositorySandboxes( self, requestedStates = None, destinationDirectory = '' ):
""" Obtain the output sandbox for the jobs in requested states in the repository
Example Usage:
>>> print dirac.retrieveRepositorySandboxes(requestedStates=['Done','Failed'],destinationDirectory='sandboxes')
{'OK': True, 'Value': ''}
:param requestedStates: List of jobs states to be considered
:type requestedStates: list of strings
:param destinationDirectory: The target directory to place sandboxes (each jobID will have a directory created beneath this)
:type destinationDirectory: string
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
if requestedStates == None:
requestedStates = ['Done', 'Failed', 'Completed'] # because users dont care about completed
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted( jobs ):
jobDict = jobs[jobID]
if jobDict.has_key( 'State' ) and ( jobDict['State'] in requestedStates ):
if ( jobDict.has_key( 'Retrieved' ) and ( not int( jobDict['Retrieved'] ) ) ) \
or ( not jobDict.has_key( 'Retrieved' ) ):
self.getOutputSandbox( jobID, destinationDirectory )
return S_OK()
def retrieveRepositoryData( self, requestedStates = None, destinationDirectory = '' ):
""" Obtain the output data for the jobs in requested states in the repository
Example Usage:
>>> print dirac.retrieveRepositoryData(requestedStates=['Done'],destinationDirectory='outputData')
{'OK': True, 'Value': ''}
:param requestedStates: List of jobs states to be considered
:type requestedStates: list of strings
:param destinationDirectory: The target directory to place sandboxes (a directory is created for each JobID)
:type destinationDirectory: string
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
if requestedStates == None:
requestedStates = ['Done']
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted( jobs ):
jobDict = jobs[jobID]
if jobDict.has_key( 'State' ) and ( jobDict['State'] in requestedStates ):
if ( jobDict.has_key( 'OutputData' ) and ( not int( jobDict['OutputData'] ) ) ) \
or ( not jobDict.has_key( 'OutputData' ) ):
destDir = jobID
if destinationDirectory:
destDir = "%s/%s" % ( destinationDirectory, jobID )
self.getJobOutputData( jobID, destinationDir = destDir )
return S_OK()
def removeRepository( self ):
""" Removes the job repository and all sandboxes and output data retrieved
Example Usage:
>>> print dirac.removeRepository()
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
jobs = self.jobRepo.readRepository()['Value']
for jobID in sorted( jobs ):
jobDict = jobs[jobID]
if jobDict.has_key( 'Sandbox' ) and os.path.exists( jobDict['Sandbox'] ):
shutil.rmtree( jobDict['Sandbox'], ignore_errors = True )
if jobDict.has_key( 'OutputFiles' ):
for fileName in eval( jobDict['OutputFiles'] ):
if os.path.exists( fileName ):
os.remove( fileName )
self.delete( sorted( jobs ) )
os.remove( self.jobRepo.getLocation()['Value'] )
self.jobRepo = False
return S_OK()
def resetRepository( self, jobIDs = None ):
""" Reset all the status of the (optionally supplied) jobs in the repository
Example Usage:
>>> print dirac.resetRepository(jobIDs = [1111,2222,'3333'])
{'OK': True, 'Value': ''}
:returns: S_OK,S_ERROR
"""
if not self.jobRepo:
gLogger.warn( "No repository is initialised" )
return S_OK()
if jobIDs == None:
jobIDs = []
if not type( jobIDs ) == types.ListType:
return self._errorReport( 'The jobIDs must be a list of (strings or ints).' )
self.jobRepo.resetRepository( jobIDs = jobIDs )
return S_OK()
#############################################################################
  def submit( self, job, mode = 'wms' ):
    """Submit jobs to DIRAC WMS.
       These can be either:

        - Instances of the Job Class
           - VO Application Jobs
           - Inline scripts
           - Scripts as executables
           - Scripts inside an application environment

        - JDL File
        - JDL String

       Example usage:

       >>> print dirac.submit(job)
       {'OK': True, 'Value': '12345'}

       :param job: Instance of Job class or JDL string
       :type job: Job() or string
       :param mode: Submit job locally with mode = 'wms' (default), 'local' to run workflow or 'agent' to run full Job Wrapper locally
       :type mode: string
       :returns: S_OK,S_ERROR
    """
    self.__printInfo()
    # cleanPath collects the temporary file/directory to remove at the end.
    cleanPath = ''
    jobDescription = ''
    # A plain string is either a path to an existing JDL file or an inline
    # JDL snippet (written to a temporary .jdl file).
    if type( job ) in types.StringTypes:
      if os.path.exists( job ):
        self.log.verbose( 'Found job JDL file %s' % ( job ) )
        jdl = job
      else:
        ( fd, jdl ) = tempfile.mkstemp( prefix = 'DIRAC_', suffix = '.jdl', text = True )
        self.log.verbose( 'Job is a JDL string' )
        os.write( fd, job )
        os.close( fd )
        cleanPath = jdl
    else:
      # Otherwise assume a Job API object; surface its formulation errors first.
      try:
        formulationErrors = job.errorDict
      except Exception, x:
        self.log.verbose( 'Could not obtain job errors:%s' % ( x ) )
        formulationErrors = {}
      if formulationErrors:
        for method, errorList in formulationErrors.items():
          self.log.error( '>>>> Error in %s() <<<<\n%s' % ( method, '\n'.join( errorList ) ) )
        return S_ERROR( formulationErrors )
      # Run any VO specific checks if desired prior to submission, this may or may not be overidden
      # in a derived class for example
      try:
        result = self.preSubmissionChecks( job, mode )
        if not result['OK']:
          self.log.error( 'Pre-submission checks failed for job with message: "%s"' % ( result['Message'] ) )
          return result
      except Exception, x:
        msg = 'Error in VO specific function preSubmissionChecks: "%s"' % ( x )
        self.log.error( msg )
        return S_ERROR( msg )
      # Serialise the Job object: XML workflow plus a JDL wrapper referencing it.
      tmpdir = tempfile.mkdtemp( prefix = 'DIRAC_' )
      self.log.verbose( 'Created temporary directory for submission %s' % ( tmpdir ) )
      jobDescription = tmpdir + '/jobDescription.xml'
      fd = os.open( jobDescription, os.O_RDWR | os.O_CREAT )
      os.write( fd, job._toXML() )
      os.close( fd )
      jdl = tmpdir + '/job.jdl'
      fd = os.open( jdl, os.O_RDWR | os.O_CREAT )
      os.write( fd, job._toJDL( xmlFile = jobDescription ) )
      os.close( fd )
      cleanPath = tmpdir
    # NOTE(review): if mode is empty/falsy, or not one of 'local'/'agent'/'wms',
    # 'result' is never assigned and the final 'return result' raises
    # NameError - confirm callers always pass a valid mode.
    if mode:
      if mode.lower() == 'local':
        self.log.info( 'Executing workflow locally without WMS submission' )
        curDir = os.getcwd()
        stopCopies = False
        if gConfig.getValue( '/LocalSite/DisableLocalJobDirectory', '' ):
          stopCopies = True
        else:
          # Run inside a scratch Local_*_JobDir subdirectory of the cwd.
          jobDir = tempfile.mkdtemp( suffix = '_JobDir', prefix = 'Local_', dir = curDir )
          os.chdir( jobDir )
        stopCallback = False
        if gConfig.getValue( '/LocalSite/DisableLocalModeCallback', '' ):
          stopCallback = True
        self.log.info( 'Executing at', os.getcwd() )
        result = self.runLocal( jdl, jobDescription, curDir,
                                disableCopies = stopCopies,
                                disableCallback = stopCallback )
        os.chdir( curDir )
      if mode.lower() == 'agent':
        self.log.info( 'Executing workflow locally with full WMS submission and DIRAC Job Agent' )
        result = self.runLocalAgent( jdl )
      if mode.lower() == 'wms':
        self.log.verbose( 'Will submit job to WMS' ) # this will happen by default anyway
        result = self._sendJob( jdl )
        if not result['OK']:
          self.log.error( 'Job submission failure', result['Message'] )
        elif self.jobRepo:
          # Record each new job ID in the local repository as 'Submitted'.
          jobIDList = result['Value']
          if type( jobIDList ) != types.ListType:
            jobIDList = [ jobIDList ]
          for jobID in jobIDList:
            result = self.jobRepo.addJob( jobID, 'Submitted' )
    self.log.verbose( 'Cleaning up %s...' % cleanPath )
    self.__cleanTmp( cleanPath )
    return result
#############################################################################
def __cleanTmp( self, cleanPath ):
"""Remove tmp file or directory
"""
if not cleanPath:
return
if os.path.isfile( cleanPath ):
os.unlink( cleanPath )
return
if os.path.isdir( cleanPath ):
shutil.rmtree( cleanPath, ignore_errors = True )
return
self.__printOutput( sys.stdout, 'Could not remove %s' % str( cleanPath ) )
return
#############################################################################
def preSubmissionChecks( self, job, mode ):
"""Internal function. The pre-submission checks method allows VOs to
make their own checks before job submission. To make use of this the
method should be overridden in a derived VO-specific Dirac class.
"""
return S_OK( 'Nothing to do' )
#############################################################################
  def runLocalAgent( self, jdl ):
    """Internal function.  This method is equivalent to submit(job,mode='Agent').
       All output files are written to a <jobID> directory where <jobID> is the
       result of submission to the WMS.  Please note that the job must be eligible to the
       site it is submitted from.
    """
    # Pin the JDL to this site with private pilots so that only a locally run
    # job agent can match the job.
    jdl = self.__forceLocal( jdl )
    jobID = self._sendJob( jdl )
    if not jobID['OK']:
      self.log.error( 'Job submission failure', jobID['Message'] )
      return S_ERROR( 'Could not submit job to WMS' )
    jobID = int( jobID['Value'] )
    self.log.info( 'The job has been submitted to the WMS with jobID = %s, monitoring starts.' % jobID )
    # Block until the WMS marks the job 'Waiting' (or it fails / times out).
    result = self.__monitorSubmittedJob( jobID )
    if not result['OK']:
      self.log.info( result['Message'] )
      return result
    self.log.info( 'Job %s is now eligible to be picked up from the WMS by a local job agent' % jobID )
    # now run job agent targetted to pick up this job
    result = self.__runJobAgent( jobID )
    return result
@classmethod
def __forceLocal( self, job ):
"""Update Job description to avoid pilot submission by WMS
"""
if os.path.exists( job ):
jdlFile = open( job, 'r' )
jdl = jdlFile.read()
jdlFile.close()
else:
jdl = job
if not re.search( '\[', jdl ):
jdl = '[' + jdl + ']'
classAdJob = ClassAd( jdl )
classAdJob.insertAttributeString( 'Site', DIRAC.siteName() )
classAdJob.insertAttributeString( 'SubmitPools', 'Local' )
classAdJob.insertAttributeString( 'PilotTypes', 'private' )
return classAdJob.asJDL()
#############################################################################
  def __runJobAgent( self, jobID ):
    """ This internal method runs a tailored job agent for the local execution
        of a previously submitted WMS job. The type of CEUniqueID can be overidden
        via the configuration.

        Currently must unset CMTPROJECTPATH to get this to work.
    """
    agentName = 'WorkloadManagement/JobAgent'
    # The agent's LocalConfiguration re-parses sys.argv, so clear any
    # arguments left over from the calling DIRAC script.
    self.log.verbose( 'In case being booted from a DIRAC script,'
                      ' now resetting sys arguments to null from: \n%s' % ( sys.argv ) )
    sys.argv = []
    localCfg = LocalConfiguration()
    ceType = gConfig.getValue( '/LocalSite/LocalCE', 'InProcess' )
    # Make the agent behave as a one-shot, single-job local CE.
    localCfg.addDefaultEntry( 'CEUniqueID', ceType )
    localCfg.addDefaultEntry( 'ControlDirectory', os.getcwd() )
    localCfg.addDefaultEntry( 'MaxCycles', 1 )
    localCfg.addDefaultEntry( '/LocalSite/WorkingDirectory', os.getcwd() )
    localCfg.addDefaultEntry( '/LocalSite/TotalCPUs', 1 )
    localCfg.addDefaultEntry( '/LocalSite/MaxCPUTime', 300000 )
    localCfg.addDefaultEntry( '/LocalSite/CPUTime', 300000 )
    localCfg.addDefaultEntry( '/LocalSite/OwnerGroup', self.__getCurrentGroup() )
    localCfg.addDefaultEntry( '/LocalSite/MaxRunningJobs', 1 )
    localCfg.addDefaultEntry( '/LocalSite/MaxTotalJobs', 1 )
#    if os.environ.has_key('VO_LHCB_SW_DIR'):
#      localCfg.addDefaultEntry('/LocalSite/SharedArea',os.environ['VO_LHCB_SW_DIR'])
    # Running twice in the same process, the second time it use the initial JobID.
    # Target this specific job: load AgentJobRequirements/JobID through a
    # temporary cfg file so gConfig picks it up.
    ( fd, jobidCfg ) = tempfile.mkstemp( '.cfg', 'DIRAC_JobId', text = True )
    os.write( fd, 'AgentJobRequirements\n {\n JobID = %s\n }\n' % jobID )
    os.close( fd )
    gConfig.loadFile( jobidCfg )
    self.__cleanTmp( jobidCfg )
    # Restrict matching to this user's private job: the agent requirements and
    # the CE description must both advertise the owner DN/group.
    localCfg.addDefaultEntry( '/AgentJobRequirements/PilotType', 'private' )
    ownerDN = self.__getCurrentDN()
    ownerGroup = self.__getCurrentGroup()
#    localCfg.addDefaultEntry('OwnerDN',ownerDN)
#    localCfg.addDefaultEntry('OwnerGroup',ownerGroup)
#    localCfg.addDefaultEntry('JobID',jobID)
    localCfg.addDefaultEntry( '/AgentJobRequirements/OwnerDN', ownerDN )
    localCfg.addDefaultEntry( '/AgentJobRequirements/OwnerGroup', ownerGroup )
    localCfg.addDefaultEntry( '/Resources/Computing/%s/PilotType' % ceType, 'private' )
    localCfg.addDefaultEntry( '/Resources/Computing/%s/OwnerDN' % ceType, ownerDN )
    localCfg.addDefaultEntry( '/Resources/Computing/%s/OwnerGroup' % ceType, ownerGroup )
    # localCfg.addDefaultEntry('/Resources/Computing/%s/JobID' %ceType,jobID)
    # SKP can add compatible platforms here
    localCfg.setConfigurationForAgent( agentName )
    result = localCfg.loadUserData()
    if not result[ 'OK' ]:
      self.log.error( 'There were errors when loading configuration', result['Message'] )
      return S_ERROR( 'Could not start DIRAC Job Agent' )
    # Run exactly one agent cycle: it should pick up and execute our job.
    agent = AgentReactor( agentName )
    result = agent.runNumCycles( agentName, numCycles = 1 )
    if not result['OK']:
      self.log.error( 'Job Agent execution completed with errors', result['Message'] )
    return result
#############################################################################
def __getCurrentGroup( self ):
"""Simple function to return current DIRAC group.
"""
proxy = Locations.getProxyLocation()
if not proxy:
return S_ERROR( 'No proxy found in local environment' )
else:
self.log.verbose( 'Current proxy is %s' % proxy )
chain = X509Chain()
result = chain.loadProxyFromFile( proxy )
if not result[ 'OK' ]:
return result
result = chain.getDIRACGroup()
if not result[ 'OK' ]:
return result
group = result[ 'Value' ]
self.log.verbose( 'Current group is %s' % group )
return group
#############################################################################
def __getCurrentDN( self ):
"""Simple function to return current DN.
"""
proxy = Locations.getProxyLocation()
if not proxy:
return S_ERROR( 'No proxy found in local environment' )
else:
self.log.verbose( 'Current proxy is %s' % proxy )
chain = X509Chain()
result = chain.loadProxyFromFile( proxy )
if not result[ 'OK' ]:
return result
result = chain.getIssuerCert()
if not result[ 'OK' ]:
return result
issuerCert = result[ 'Value' ]
dn = issuerCert.getSubjectDN()[ 'Value' ]
return dn
#############################################################################
def _runLocalJobAgent( self, jobID ):
"""Developer function. In case something goes wrong with 'agent' submission, after
successful WMS submission, this takes the jobID and allows to retry the job agent
running.
"""
result = self.__monitorSubmittedJob( jobID )
if not result['OK']:
self.log.info( result['Message'] )
return result
self.log.info( 'Job %s is now eligible to be picked up from the WMS by a local job agent' % jobID )
# now run job agent targetted to pick up this job
result = self.__runJobAgent( jobID )
return result
#############################################################################
def __monitorSubmittedJob( self, jobID ):
"""Internal function. Monitors a submitted job until it is eligible to be
retrieved or enters a failed state.
"""
pollingTime = 10 # seconds
maxWaitingTime = 600 # seconds
start = time.time()
finalState = False
while not finalState:
jobStatus = self.status( jobID )
self.log.verbose( jobStatus )
if not jobStatus['OK']:
self.log.error( 'Could not monitor job status, will retry in %s seconds' % pollingTime, jobStatus['Message'] )
else:
jobStatus = jobStatus['Value'][jobID]['Status']
if jobStatus.lower() == 'waiting':
finalState = True
return S_OK( 'Job is eligible to be picked up' )
if jobStatus.lower() == 'failed':
finalState = True
return S_ERROR( 'Problem with job %s definition, WMS status is Failed' % jobID )
self.log.info( 'Current status for job %s is %s will retry in %s seconds' % ( jobID, jobStatus, pollingTime ) )
current = time.time()
if current - start > maxWaitingTime:
finalState = True
return S_ERROR( 'Exceeded max waiting time of %s seconds for job %s to enter Waiting state,'
' exiting.' % ( maxWaitingTime, jobID ) )
time.sleep( pollingTime )
#############################################################################
@classmethod
def __getVOPolicyModule( self, module ):
""" Utility to get the VO Policy module name
"""
moduleName = ''
setup = gConfig.getValue( '/DIRAC/Setup', '' )
vo = None
ret = getProxyInfo( disableVOMS = True )
if ret['OK'] and 'group' in ret['Value']:
vo = getVOForGroup( ret['Value']['group'] )
if setup and vo:
moduleName = gConfig.getValue( 'DIRAC/VOPolicy/%s/%s/%s' % ( vo, setup, module ), '' )
if not moduleName:
moduleName = gConfig.getValue( 'DIRAC/VOPolicy/%s' % module, '' )
return moduleName
#############################################################################
  def getInputDataCatalog( self, lfns, siteName = '', fileName = 'pool_xml_catalog.xml', ignoreMissing = False ):
    """This utility will create a pool xml catalogue slice for the specified LFNs using
       the full input data resolution policy plugins for the VO.

       If not specified the site is assumed to be the DIRAC.siteName() from the local
       configuration.  The fileName can be a full path.

       Example usage:

       >>> print print d.getInputDataCatalog('/lhcb/production/DC06/phys-v2-lumi5/00001680/DST/0000/00001680_00000490_5.dst',None,'myCat.xml')
       {'Successful': {'<LFN>': {'pfntype': 'ROOT_All', 'protocol': 'SRM2',
        'pfn': '<PFN>', 'turl': '<TURL>', 'guid': '3E3E097D-0AC0-DB11-9C0A-00188B770645',
        'se': 'CERN-disk'}}, 'Failed': [], 'OK': True, 'Value': ''}

       :param lfns: Logical File Name(s) to query
       :type lfns: LFN string or list []
       :param siteName: DIRAC site name
       :type siteName: string
       :param fileName: Catalogue name (can include path)
       :type fileName: string
       :returns: S_OK,S_ERROR
    """
    # Normalise input to a list of plain LFN strings (strip any 'LFN:' prefix).
    if type( lfns ) == type( " " ):
      lfns = [lfns.replace( 'LFN:', '' )]
    elif type( lfns ) == type( [] ):
      try:
        lfns = [str( lfn.replace( 'LFN:', '' ) ) for lfn in lfns]
      except Exception, x:
        return self._errorReport( str( x ), 'Expected strings for LFNs' )
    else:
      return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
    if not siteName:
      siteName = DIRAC.siteName()
    if ignoreMissing:
      self.log.verbose( 'Ignore missing flag is enabled' )
    localSEList = getSEsForSite( siteName )
    if not localSEList['OK']:
      return localSEList
    self.log.verbose( localSEList )
    # The resolution logic itself is delegated to the VO's InputDataModule plugin.
    inputDataPolicy = self.__getVOPolicyModule( 'InputDataModule' )
    if not inputDataPolicy:
      return self._errorReport( 'Could not retrieve DIRAC/VOPolicy/InputDataModule for VO' )
    catalogFailed = {}
    self.log.info( 'Attempting to resolve data for %s' % siteName )
    self.log.verbose( '%s' % ( '\n'.join( lfns ) ) )
    replicaDict = self.getReplicas( lfns )
    if not replicaDict['OK']:
      return replicaDict
    if replicaDict['Value'].has_key( 'Failed' ):
      catalogFailed = replicaDict['Value']['Failed']
    guidDict = self.getMetadata( lfns )
    if not guidDict['OK']:
      return guidDict
    # Merge replica information into the per-LFN metadata records.
    for lfn, reps in replicaDict['Value']['Successful'].items():
      guidDict['Value']['Successful'][lfn].update( reps )
    resolvedData = guidDict
    diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER', '-FREEZER'] )
    tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
    # Add catalog path / name here as well as site name to override the standard policy of resolving automatically
    configDict = { 'JobID':None,
                   'LocalSEList':localSEList['Value'],
                   'DiskSEList':diskSE,
                   'TapeSEList':tapeSE,
                   'SiteName':siteName,
                   'CatalogName':fileName
                 }
    self.log.verbose( configDict )
    argumentsDict = {'FileCatalog':resolvedData, 'Configuration':configDict, 'InputData':lfns}
    if ignoreMissing:
      argumentsDict['IgnoreMissing'] = True
    self.log.verbose( argumentsDict )
    moduleFactory = ModuleFactory()
    self.log.verbose( 'Input Data Policy Module: %s' % inputDataPolicy )
    moduleInstance = moduleFactory.getModule( inputDataPolicy, argumentsDict )
    if not moduleInstance['OK']:
      self.log.warn( 'Could not create InputDataModule' )
      return moduleInstance
    module = moduleInstance['Value']
    result = module.execute()
    self.log.debug( result )
    if not result['OK']:
      if result.has_key( 'Failed' ):
        self.log.error( 'Input data resolution failed for the following files:\n', '\n'.join( result['Failed'] ) )
    # NOTE(review): when replicas were missing, result['Failed'] is overwritten
    # with the catalog-failure keys even if the module reported OK - confirm
    # this is the intended precedence.
    if catalogFailed:
      self.log.error( 'Replicas not found for the following files:' )
      for key, value in catalogFailed.items():
        self.log.error( '%s %s' % ( key, value ) )
      if result.has_key( 'Failed' ):
        failedKeys = catalogFailed.keys()
        result['Failed'] = failedKeys
    return result
#############################################################################
  def _runInputDataResolution( self, inputData, site = None ):
    """ Run the VO plugin input data resolution mechanism.
    """
    # Local SEs come from the local configuration; a comma-separated string
    # is split into a list.
    localSEList = gConfig.getValue( '/LocalSite/LocalSE', '' )
    if not localSEList:
      return self._errorReport( 'LocalSite/LocalSE should be defined in your config file' )
    if re.search( ',', localSEList ):
      localSEList = localSEList.replace( ' ', '' ).split( ',' )
    else:
      localSEList = [localSEList.replace( ' ', '' )]
    self.log.verbose( 'Local SEs:', localSEList )
    # The resolution logic itself is delegated to the VO's InputDataModule plugin.
    inputDataModule = self.__getVOPolicyModule( 'InputDataModule' )
    if not inputDataModule:
      return self._errorReport( 'Could not retrieve DIRAC/VOPolicy/InputDataModule for VO' )
    self.log.info( 'Job has input data requirement, will attempt to resolve data for %s' % DIRAC.siteName() )
    self.log.verbose( '\n'.join( inputData ) )
    replicaDict = self.getReplicas( inputData )
    if not replicaDict['OK']:
      return replicaDict
    catalogFailed = {}
    if replicaDict['Value'].has_key( 'Failed' ):
      catalogFailed = replicaDict['Value']['Failed']
    guidDict = self.getMetadata( inputData )
    if not guidDict['OK']:
      return guidDict
    # Merge replica information into the per-LFN metadata records.
    for lfn, reps in replicaDict['Value']['Successful'].items():
      guidDict['Value']['Successful'][lfn].update( reps )
    resolvedData = guidDict
    diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER', '-FREEZER'] )
    tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
    configDict = {'JobID':None, 'LocalSEList':localSEList, 'DiskSEList':diskSE, 'TapeSEList':tapeSE}
    self.log.debug( configDict )
    if site:
      configDict.update( {'SiteName':site} )
    argumentsDict = {'FileCatalog':resolvedData, 'Configuration':configDict, 'InputData':inputData}
    self.log.debug( argumentsDict )
    moduleFactory = ModuleFactory()
    moduleInstance = moduleFactory.getModule( inputDataModule, argumentsDict )
    if not moduleInstance['OK']:
      self.log.warn( 'Could not create InputDataModule' )
      return moduleInstance
    module = moduleInstance['Value']
    result = module.execute()
    if not result['OK']:
      self.log.error( 'Input data resolution failed' )
    # NOTE(review): when replicas were missing, result['Failed'] is overwritten
    # with the catalog-failure keys even if the module reported OK - confirm
    # this is the intended precedence.
    if catalogFailed:
      self.log.error( 'Replicas not found for the following files:' )
      for key, value in catalogFailed.items():
        self.log.error( '%s %s' % ( key, value ) )
      if result.has_key( 'Failed' ):
        failedKeys = catalogFailed.keys()
        result['Failed'] = failedKeys
    return result
#############################################################################
def runLocal( self, jobJDL, jobXML, baseDir, disableCopies = False, disableCallback = False ):
"""Internal function. This method is equivalent to submit(job,mode='Local').
All output files are written to the local directory.
"""
# FIXME: Better create an unique local directory for this job
if disableCopies:
self.log.verbose( 'DisableLocalJobDirectory is set, leaving everything in local dir' )
shutil.copy( jobXML, '%s/%s' % ( os.getcwd(), os.path.basename( jobXML ) ) )
# If not set differently in the CS use the root from the current DIRAC installation
siteRoot = gConfig.getValue( '/LocalSite/Root', DIRAC.rootPath )
self.log.info( 'Preparing environment for site %s to execute job' % DIRAC.siteName() )
os.environ['DIRACROOT'] = siteRoot
self.log.verbose( 'DIRACROOT = %s' % ( siteRoot ) )
os.environ['DIRACPYTHON'] = sys.executable
self.log.verbose( 'DIRACPYTHON = %s' % ( sys.executable ) )
self.log.verbose( 'JDL file is: %s' % jobJDL )
self.log.verbose( 'Job XML file description is: %s' % jobXML )
parameters = self.__getJDLParameters( jobJDL )
if not parameters['OK']:
self.log.warn( 'Could not extract job parameters from JDL file %s' % ( jobJDL ) )
return parameters
self.log.verbose( parameters )
inputData = None
if parameters['Value'].has_key( 'InputData' ):
if parameters['Value']['InputData']:
inputData = parameters['Value']['InputData']
if type( inputData ) == type( " " ):
inputData = [inputData]
jobParamsDict = {'Job':parameters['Value']}
if inputData:
localSEList = gConfig.getValue( '/LocalSite/LocalSE', '' )
if not localSEList:
return self._errorReport( 'LocalSite/LocalSE should be defined in your config file' )
if re.search( ',', localSEList ):
localSEList = localSEList.replace( ' ', '' ).split( ',' )
else:
localSEList = [localSEList.replace( ' ', '' )]
self.log.verbose( localSEList )
inputDataPolicy = self.__getVOPolicyModule( 'InputDataModule' )
if not inputDataPolicy:
return self._errorReport( 'Could not retrieve DIRAC/VOPolicy/InputDataModule for VO' )
self.log.info( 'Job has input data requirement, will attempt to resolve data for %s' % DIRAC.siteName() )
self.log.verbose( '\n'.join( inputData ) )
replicaDict = self.getReplicas( inputData )
if not replicaDict['OK']:
return replicaDict
guidDict = self.getMetadata( inputData )
if not guidDict['OK']:
return guidDict
for lfn, reps in replicaDict['Value']['Successful'].items():
guidDict['Value']['Successful'][lfn].update( reps )
resolvedData = guidDict
diskSE = gConfig.getValue( self.section + '/DiskSE', ['-disk', '-DST', '-USER', '-FREEZER'] )
tapeSE = gConfig.getValue( self.section + '/TapeSE', ['-tape', '-RDST', '-RAW'] )
configDict = { 'JobID': None,
'LocalSEList': localSEList,
'DiskSEList': diskSE,
'TapeSEList': tapeSE
}
self.log.verbose( configDict )
argumentsDict = { 'FileCatalog': resolvedData,
'Configuration': configDict,
'InputData': inputData,
'Job': parameters['Value']
}
self.log.verbose( argumentsDict )
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule( inputDataPolicy, argumentsDict )
if not moduleInstance['OK']:
self.log.warn( 'Could not create InputDataModule' )
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn( 'Input data resolution failed' )
return result
localArch = None # If running locally assume the user chose correct platform (could check in principle)
if parameters['Value'].has_key( 'SystemConfig' ):
if parameters['Value']['SystemConfig']:
localArch = parameters['Value']['SystemConfig']
if localArch:
jobParamsDict['CE'] = {}
jobParamsDict['CE']['CompatiblePlatforms'] = localArch
softwarePolicy = self.__getVOPolicyModule( 'SoftwareDistModule' )
if softwarePolicy:
moduleFactory = ModuleFactory()
moduleInstance = moduleFactory.getModule( softwarePolicy, jobParamsDict )
if not moduleInstance['OK']:
self.log.warn( 'Could not create SoftwareDistModule' )
return moduleInstance
module = moduleInstance['Value']
result = module.execute()
if not result['OK']:
self.log.warn( 'Software installation failed with result:\n%s' % ( result ) )
return result
else:
self.log.verbose( 'Could not retrieve DIRAC/VOPolicy/SoftwareDistModule for VO' )
# return self._errorReport( 'Could not retrieve DIRAC/VOPolicy/SoftwareDistModule for VO' )
if parameters['Value'].has_key( 'InputSandbox' ):
sandbox = parameters['Value']['InputSandbox']
if type( sandbox ) in types.StringTypes:
sandbox = [sandbox]
for isFile in sandbox:
if disableCopies:
break
if not os.path.isabs( isFile ):
# if a relative path, it is relative to the user working directory
isFile = os.path.join( baseDir, isFile )
# Attempt to copy into job working directory
if os.path.isdir( isFile ):
shutil.copytree( isFile, os.path.basename( isFile ), symlinks = True )
elif os.path.exists( isFile ):
shutil.copy( isFile, os.getcwd() )
else:
# perhaps the file is in an LFN attempt to download it.
getFile = self.getFile( isFile )
if not getFile['OK']:
self.log.warn( 'Failed to download %s with error:%s' % ( isFile, getFile['Message'] ) )
return S_ERROR( 'Can not copy InputSandbox file %s' % isFile )
basefname = os.path.basename( isFile )
try:
if tarfile.is_tarfile( basefname ):
tarFile = tarfile.open( basefname, 'r' )
for member in tarFile.getmembers():
tarFile.extract( member, os.getcwd() )
except Exception, x :
return S_ERROR( 'Could not untar %s with exception %s' % ( basefname, str( x ) ) )
self.log.info( 'Attempting to submit job to local site: %s' % DIRAC.siteName() )
if parameters['Value'].has_key( 'Executable' ):
executable = os.path.expandvars( parameters['Value']['Executable'] )
else:
return self._errorReport( 'Missing job "Executable"' )
arguments = ''
if parameters['Value'].has_key( 'Arguments' ):
arguments = parameters['Value']['Arguments']
command = '%s %s' % ( executable, arguments )
self.log.info( 'Executing: %s' % command )
executionEnv = dict( os.environ )
if parameters['Value'].has_key( 'ExecutionEnvironment' ):
self.log.verbose( 'Adding variables to execution environment' )
variableList = parameters['Value']['ExecutionEnvironment']
if type( variableList ) == type( " " ):
variableList = [variableList]
for var in variableList:
nameEnv = var.split( '=' )[0]
valEnv = urllib.unquote( var.split( '=' )[1] ) # this is needed to make the value contain strange things
executionEnv[nameEnv] = valEnv
self.log.verbose( '%s = %s' % ( nameEnv, valEnv ) )
cbFunction = self.__printOutput
if disableCallback:
cbFunction = None
result = shellCall( 0, command, env = executionEnv, callbackFunction = cbFunction )
if not result['OK']:
return result
status = result['Value'][0]
self.log.verbose( 'Status after execution is %s' % ( status ) )
outputFileName = None
errorFileName = None
# FIXME: if there is an callbackFunction, StdOutput and StdError will be empty soon
if parameters['Value'].has_key( 'StdOutput' ):
outputFileName = parameters['Value']['StdOutput']
if parameters['Value'].has_key( 'StdError' ):
errorFileName = parameters['Value']['StdError']
if outputFileName:
stdout = result['Value'][1]
if os.path.exists( outputFileName ):
os.remove( outputFileName )
self.log.info( 'Standard output written to %s' % ( outputFileName ) )
outputFile = open( outputFileName, 'w' )
print >> outputFile, stdout
outputFile.close()
else:
self.log.warn( 'Job JDL has no StdOutput file parameter defined' )
if errorFileName:
stderr = result['Value'][2]
if os.path.exists( errorFileName ):
os.remove( errorFileName )
self.log.verbose( 'Standard error written to %s' % ( errorFileName ) )
errorFile = open( errorFileName, 'w' )
print >> errorFile, stderr
errorFile.close()
else:
self.log.warn( 'Job JDL has no StdError file parameter defined' )
if parameters['Value'].has_key( 'OutputSandbox' ):
sandbox = parameters['Value']['OutputSandbox']
if type( sandbox ) in types.StringTypes:
sandbox = [sandbox]
if parameters['Value'].has_key( 'OutputSandbox' ):
sandbox = parameters['Value']['OutputSandbox']
if type( sandbox ) in types.StringTypes:
sandbox = [sandbox]
for i in sandbox:
if disableCopies:
break
globList = glob.glob( i )
for isFile in globList:
if os.path.isabs( isFile ):
# if a relative path, it is relative to the user working directory
isFile = os.path.basename( isFile )
# Attempt to copy back from job working directory
if os.path.isdir( isFile ):
shutil.copytree( isFile, baseDir, symlinks = True )
elif os.path.exists( isFile ):
shutil.copy( isFile, baseDir )
else:
return S_ERROR( 'Can not copy OutputSandbox file %s' % isFile )
if status:
return S_ERROR( 'Execution completed with non-zero status %s' % ( status ) )
return S_OK( 'Execution completed successfully' )
#############################################################################
@classmethod
def __printOutput( self, fd = None, message = '' ):
  """Internal callback function to return standard output when running locally.

     Used as the callbackFunction for shellCall during local job execution.

     :param fd: stream selector — an int channel number from the shell-call
                callback, or an open file object to write to directly.
                NOTE(review): fd == 0 is routed to sys.stdout and fd == 1 to
                sys.stderr here, which does not match Unix fd numbering
                (1=stdout, 2=stderr); this presumably matches the shellCall
                callback convention — confirm before changing.
     :param message: text line to emit.
  """
  # NOTE(review): decorated @classmethod but the first parameter is named
  # 'self' — it actually receives the class object. Harmless since it is
  # never used, but confusing; left untouched to preserve byte identity.
  if fd:
    if type( fd ) == types.IntType:
      if fd == 0:
        print >> sys.stdout, message
      elif fd == 1:
        print >> sys.stderr, message
      else:
        # Unknown integer channel: fall back to plain stdout print
        print message
    elif type( fd ) == types.FileType:
      # Caller handed us an open file object: write the line to it
      print >> fd, message
    else:
      print message
#############################################################################
# def listCatalog( self, directory, printOutput = False ):
# """ Under development.
# Obtain listing of the specified directory.
# """
# rm = ReplicaManager()
# listing = rm.listCatalogDirectory( directory )
# if re.search( '\/$', directory ):
# directory = directory[:-1]
#
# if printOutput:
# for fileKey, metaDict in listing['Value']['Successful'][directory]['Files'].items():
# print '#' * len( fileKey )
# print fileKey
# print '#' * len( fileKey )
# print self.pPrint.pformat( metaDict )
#############################################################################
def getReplicas( self, lfns, active = True, printOutput = False ):
"""Obtain replica information from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getReplicas('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'CERN-RDST':
'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst'}},
'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN string or list []
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfns ) == type( " " ):
lfns = lfns.replace( 'LFN:', '' )
elif type( lfns ) == type( [] ):
try:
lfns = [str( lfn.replace( 'LFN:', '' ) ) for lfn in lfns]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for LFNs' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
start = time.time()
dm = DataManager()
if active:
repsResult = dm.getActiveReplicas( lfns )
else:
repsResult = dm.getReplicas( lfns )
timing = time.time() - start
self.log.info( 'Replica Lookup Time: %.2f seconds ' % ( timing ) )
self.log.debug( repsResult )
if not repsResult['OK']:
self.log.warn( repsResult['Message'] )
return repsResult
if printOutput:
print self.pPrint.pformat( repsResult['Value'] )
return repsResult
#############################################################################
def getAllReplicas( self, lfns, printOutput = False ):
"""Only differs from getReplicas method in the sense that replicas on banned SEs
will be included in the result.
Obtain replica information from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getAllReplicas('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'CERN-RDST':
'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst'}},
'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN string or list []
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfns ) == type( " " ):
lfns = lfns.replace( 'LFN:', '' )
elif type( lfns ) == type( [] ):
try:
lfns = [str( lfn.replace( 'LFN:', '' ) ) for lfn in lfns]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for LFNs' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
# rm = ReplicaManager()
# start = time.time()
# repsResult = rm.getCatalogReplicas( lfns )
# RF_NOTE : this method will return different values that api.getReplicas
fc = FileCatalog()
start = time.time()
repsResult = fc.getReplicas( lfns )
timing = time.time() - start
self.log.info( 'Replica Lookup Time: %.2f seconds ' % ( timing ) )
self.log.verbose( repsResult )
if not repsResult['OK']:
self.log.warn( repsResult['Message'] )
return repsResult
if printOutput:
print self.pPrint.pformat( repsResult['Value'] )
return repsResult
#############################################################################
def splitInputData( self, lfns, maxFilesPerJob = 20, printOutput = False ):
"""Split the supplied lfn list by the replicas present at the possible
destination sites. An S_OK object will be returned containing a list of
lists in order to create the jobs.
Example usage:
>>> d.splitInputData(lfns,10)
{'OK': True, 'Value': [['<LFN>'], ['<LFN>']]}
:param lfns: Logical File Name(s) to split
:type lfns: list
:param maxFilesPerJob: Number of files per bunch
:type maxFilesPerJob: integer
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
from DIRAC.Core.Utilities.SiteSEMapping import getSitesForSE
sitesForSE = {}
if type( lfns ) == type( " " ):
lfns = lfns.replace( 'LFN:', '' )
elif type( lfns ) == type( [] ):
try:
lfns = [str( lfn.replace( 'LFN:', '' ) ) for lfn in lfns]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for LFNs' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
if not type( maxFilesPerJob ) == types.IntType:
try:
maxFilesPerJob = int( maxFilesPerJob )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer for maxFilesPerJob' )
replicaDict = self.getReplicas( lfns, active = True )
if not replicaDict['OK']:
return replicaDict
if len( replicaDict['Value']['Successful'] ) == 0:
return self._errorReport( replicaDict['Value']['Failed'].items()[0], 'Failed to get replica information' )
siteLfns = {}
for lfn, reps in replicaDict['Value']['Successful'].items():
possibleSites = set( [site for se in reps for site in sitesForSE.setdefault( se, getSitesForSE( se ).get( 'Value', [] ) )] )
siteLfns.setdefault( ','.join( sorted( possibleSites ) ), [] ).append( lfn )
if '' in siteLfns:
# Some files don't have active replicas
return self._errorReport( 'No active replica found for', str( siteLfns[''] ) )
lfnGroups = []
for files in siteLfns.values():
lists = breakListIntoChunks( files, maxFilesPerJob )
lfnGroups += lists
if printOutput:
print self.pPrint.pformat( lfnGroups )
return S_OK( lfnGroups )
#############################################################################
def getMetadata( self, lfns, printOutput = False ):
"""Obtain replica metadata from file catalogue client. Input LFN(s) can be string or list.
Example usage:
>>> print dirac.getMetadata('/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst')
{'OK': True, 'Value': {'Successful': {'/lhcb/data/CCRC08/RDST/00000106/0000/00000106_00006321_1.rdst':
{'Status': '-', 'Size': 619475828L, 'GUID': 'E871FBA6-71EA-DC11-8F0C-000E0C4DEB4B', 'CheckSumType': 'AD',
'CheckSumValue': ''}}, 'Failed': {}}}
:param lfns: Logical File Name(s) to query
:type lfns: LFN string or list []
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfns ) == type( " " ):
lfns = lfns.replace( 'LFN:', '' )
elif type( lfns ) == type( [] ):
try:
lfns = [str( lfn.replace( 'LFN:', '' ) ) for lfn in lfns]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for LFNs' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
fc = FileCatalog()
start = time.time()
repsResult = fc.getFileMetadata( lfns )
timing = time.time() - start
self.log.info( 'Metadata Lookup Time: %.2f seconds ' % ( timing ) )
self.log.verbose( repsResult )
if not repsResult['OK']:
self.log.warn( 'Failed to retrieve file metadata from the catalogue' )
self.log.warn( repsResult['Message'] )
return repsResult
if printOutput:
print self.pPrint.pformat( repsResult['Value'] )
return repsResult
#############################################################################
def addFile( self, lfn, fullPath, diracSE, fileGuid = None, printOutput = False ):
"""Add a single file to Grid storage. lfn is the desired logical file name
for the file, fullPath is the local path to the file and diracSE is the
Storage Element name for the upload. The fileGuid is optional, if not
specified a GUID will be generated on the fly. If subsequent access
depends on the file GUID the correct one should
Example Usage:
>>> print dirac.addFile('/lhcb/user/p/paterson/myFile.tar.gz','myFile.tar.gz','CERN-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'put': 64.246301889419556,
'register': 1.1102778911590576}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param diracSE: DIRAC SE name e.g. CERN-USER
:type diracSE: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) == type( " " ):
lfn = lfn.replace( 'LFN:', '' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
if not os.path.exists( fullPath ):
return self._errorReport( 'File path %s must exist' % ( fullPath ) )
if not os.path.isfile( fullPath ):
return self._errorReport( 'Expected path to file not %s' % ( fullPath ) )
dm = DataManager( catalogs = self.defaultFileCatalog )
result = dm.putAndRegister( lfn, fullPath, diracSE, guid = fileGuid )
if not result['OK']:
return self._errorReport( 'Problem during putAndRegister call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def getFile( self, lfn, destDir = '', printOutput = False ):
"""Retrieve a single file or list of files from Grid storage to the current directory. lfn is the
desired logical file name for the file, fullPath is the local path to the file and diracSE is the
Storage Element name for the upload. The fileGuid is optional, if not specified a GUID will be
generated on the fly.
Example Usage:
>>> print dirac.getFile('/lhcb/user/p/paterson/myFile.tar.gz')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': '/afs/cern.ch/user/p/paterson/w1/DIRAC3/myFile.tar.gz'}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) == type( " " ):
lfn = lfn.replace( 'LFN:', '' )
elif type( lfn ) == type( [] ):
try:
lfn = [str( lfnName.replace( 'LFN:', '' ) ) for lfnName in lfn]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for LFN(s)' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
dm = DataManager()
result = dm.getFile( lfn, destinationDir = destDir )
if not result['OK']:
return self._errorReport( 'Problem during getFile call', result['Message'] )
if result['Value']['Failed']:
self.log.error( 'Failures occurred during rm.getFile' )
if printOutput:
print self.pPrint.pformat( result['Value'] )
return S_ERROR( result['Value'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def replicateFile( self, lfn, destinationSE, sourceSE = '', localCache = '', printOutput = False ):
"""Replicate an existing file to another Grid SE. lfn is the desired logical file name
for the file to be replicated, destinationSE is the DIRAC Storage Element to create a
replica of the file at. Optionally the source storage element and local cache for storing
the retrieved file for the new upload can be specified.
Example Usage:
>>> print dirac.replicateFile('/lhcb/user/p/paterson/myFile.tar.gz','CNAF-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'register': 0.44766902923583984,
'replicate': 56.42345404624939}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param destinationSE: Destination DIRAC SE name e.g. CERN-USER
:type destinationSE: string
:param sourceSE: Optional source SE
:type sourceSE: string
:param localCache: Optional path to local cache
:type localCache: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) in types.StringTypes:
lfn = lfn.replace( 'LFN:', '' )
elif type( lfn ) != types.ListType:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
if not sourceSE:
sourceSE = ''
if not localCache:
localCache = ''
if not type( sourceSE ) in types.StringTypes:
return self._errorReport( 'Expected string for source SE name' )
if not type( localCache ) == type( " " ):
return self._errorReport( 'Expected string for path to local cache' )
dm = DataManager()
result = dm.replicateAndRegister( lfn, destinationSE, sourceSE, '', localCache )
if not result['OK']:
return self._errorReport( 'Problem during replicateFile call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
def replicate( self, lfn, destinationSE, sourceSE = '', printOutput = False ):
"""Replicate an existing file to another Grid SE. lfn is the desired logical file name
for the file to be replicated, destinationSE is the DIRAC Storage Element to create a
replica of the file at. Optionally the source storage element and local cache for storing
the retrieved file for the new upload can be specified.
Example Usage:
>>> print dirac.replicate('/lhcb/user/p/paterson/myFile.tar.gz','CNAF-USER')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'/lhcb/user/p/paterson/test/myFile.tar.gz': {'register': 0.44766902923583984}}}}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param destinationSE: Destination DIRAC SE name e.g. CERN-USER
:type destinationSE: string
:param sourceSE: Optional source SE
:type sourceSE: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) == type( " " ):
lfn = lfn.replace( 'LFN:', '' )
else:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
if not sourceSE:
sourceSE = ''
if not type( sourceSE ) == type( " " ):
return self._errorReport( 'Expected string for source SE name' )
dm = DataManager()
result = dm.replicate( lfn, destinationSE, sourceSE, '' )
if not result['OK']:
return self._errorReport( 'Problem during replicate call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def getAccessURL( self, lfn, storageElement, printOutput = False ):
"""Allows to retrieve an access URL for an LFN replica given a valid DIRAC SE
name. Contacts the file catalog and contacts the site SRM endpoint behind
the scenes.
Example Usage:
>>> print dirac.getAccessURL('/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst','CERN-RAW')
{'OK': True, 'Value': {'Successful': {'srm://...': {'SRM2': 'rfio://...'}}, 'Failed': {}}}
:param lfn: Logical File Name (LFN)
:type lfn: string or list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) == type( " " ):
lfn = lfn.replace( 'LFN:', '' )
else:
return self._errorReport( 'Expected single string for LFN' )
dm = DataManager()
result = dm.getReplicaAccessUrl( [lfn], storageElement )
if not result['OK']:
return self._errorReport( 'Problem during getAccessURL call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def getPhysicalFileAccessURL( self, pfn, storageElement, printOutput = False ):
"""Allows to retrieve an access URL for an PFN given a valid DIRAC SE
name. The SE is contacted directly for this information.
Example Usage:
>>> print dirac.getPhysicalFileAccessURL('srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst','CERN_M-DST')
{'OK': True, 'Value':{'Failed': {},
'Successful': {'srm://srm-lhcb.cern.ch/castor/cern.ch/grid/lhcb/data/CCRC08/DST/00000151/0000/00000151_00004848_2.dst': {'RFIO': 'castor://...'}}}}
:param pfn: Physical File Name (PFN)
:type pfn: string or list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( pfn ) == type( " " ):
if re.search( 'LFN:', pfn ):
return self._errorReport( 'Expected PFN not LFN' )
pfn = pfn.replace( 'PFN:', '' )
elif type( pfn ) == type( [] ):
try:
pfn = [str( pfnName.replace( 'PFN:', '' ) ) for pfnName in pfn]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for PFN(s)' )
else:
return self._errorReport( 'Expected single string for PFN' )
result = StorageElement( storageElement ).getAccessUrl( [pfn] )
if not result['OK']:
return self._errorReport( 'Problem during getAccessURL call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def getPhysicalFileMetadata( self, pfn, storageElement, printOutput = False ):
"""Allows to retrieve metadata for physical file(s) on a supplied storage
element. Contacts the site SRM endpoint and performs a gfal_ls behind
the scenes.
Example Usage:
>>> print dirac.getPhysicalFileMetadata('srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data
/lhcb/data/CCRC08/RAW/LHCb/CCRC/23341/023341_0000039571.raw','NIKHEF-RAW')
{'OK': True, 'Value': {'Successful': {'srm://...': {'SRM2': 'rfio://...'}}, 'Failed': {}}}
:param pfn: Physical File Name (PFN)
:type pfn: string or list
:param storageElement: DIRAC SE name e.g. CERN-RAW
:type storageElement: string
:param printOutput: Optional flag to print result
:type printOutput: boolean
:returns: S_OK,S_ERROR
"""
if type( pfn ) == type( " " ):
if re.search( 'LFN:', pfn ):
return self._errorReport( 'Expected PFN not LFN' )
pfn = pfn.replace( 'PFN:', '' )
pfn = [pfn]
elif type( pfn ) == type( [] ):
try:
pfn = [str( pfile.replace( 'PFN:', '' ) ) for pfile in pfn]
except Exception, x:
return self._errorReport( str( x ), 'Expected list of strings for PFNs' )
else:
return self._errorReport( 'Expected single string or list of strings for PFN(s)' )
result = StorageElement( storageElement ).getFileMetadata( pfn )
if not result['OK']:
return self._errorReport( 'Problem during getStorageFileMetadata call', result['Message'] )
if not printOutput:
return result
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def removeFile( self, lfn, printOutput = False ):
"""Remove LFN and *all* associated replicas from Grid Storage Elements and
file catalogues.
Example Usage:
>>> print dirac.removeFile('LFN:/lhcb/data/CCRC08/RAW/LHCb/CCRC/22808/022808_0000018443.raw')
{'OK': True, 'Value':...}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
if type( lfn ) in types.StringTypes:
lfn = lfn.replace( 'LFN:', '' )
elif type( lfn ) != types.ListType:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
dm = DataManager()
result = dm.removeFile( lfn )
if printOutput and result['OK']:
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def removeReplica( self, lfn, storageElement, printOutput = False ):
"""Remove replica of LFN from specified Grid Storage Element and
file catalogues.
Example Usage:
>>> print dirac.removeReplica('LFN:/lhcb/user/p/paterson/myDST.dst','CERN-USER')
{'OK': True, 'Value':...}
:param lfn: Logical File Name (LFN)
:type lfn: string
:param storageElement: DIRAC SE Name
:type storageElement: string
:returns: S_OK,S_ERROR
"""
if type( lfn ) in types.StringTypes:
lfn = lfn.replace( 'LFN:', '' )
elif type( lfn ) != types.ListType:
return self._errorReport( 'Expected single string or list of strings for LFN(s)' )
dm = DataManager()
result = dm.removeReplica( storageElement, lfn )
if printOutput and result['OK']:
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def dataLoggingInfo( self, lfn, printOutput = False ):
  """Retrieve logging information for a given dataset.

     Example Usage:

     >>> print dirac.dataLoggingInfo('/lhcb/data/CCRC08/RAW/LHCb/CCRC/22808/022808_0000018443.raw')
     {'OK': True, 'Value': [('AddedToTransformation', 'Transformation 3', datetime.datetime(2008, 5, 18, 13, 54, 15)]}

     :param lfn: Logical File Name (LFN)
     :type lfn: string
     :param printOutput: Optional flag to print result as a formatted table
     :type printOutput: boolean
     :returns: S_OK,S_ERROR
  """
  # Only a single LFN string is accepted here.
  if type( lfn ) == type( " " ):
    lfn = lfn.replace( 'LFN:', '' )
  else:
    return self._errorReport( 'Expected single string for LFN' )
  dataLogging = RPCClient( 'DataManagement/DataLogging' )
  result = dataLogging.getFileLoggingInfo( lfn )
  if not result['OK']:
    return self._errorReport( 'Problem during getFileLoggingInfo call', result['Message'] )
  if not printOutput:
    return result
  # Pretty-print the logging tuples as a table of
  # (Status, MinorStatus, DateTime, Source) with dynamically sized columns.
  loggingTupleList = result['Value']
  headers = ( 'Status', 'MinorStatus', 'DateTime', 'Source' )
  line = ''
  # Column widths: DateTime is fixed at 25; the others grow with content.
  # NOTE(review): each width is only updated when the raw field length
  # exceeds the current padded width (len + 4), so a later field that is
  # longer than the first but shorter than first+4 does not widen the
  # column — preserved as-is since the exact alignment is order-sensitive.
  statAdj = 0
  mStatAdj = 0
  dtAdj = 25
  sourceAdj = 0
  for i in loggingTupleList:
    if len( str( i[0] ) ) > statAdj:
      statAdj = len( str( i[0] ) ) + 4
    if len( str( i[1] ) ) > mStatAdj:
      mStatAdj = len( str( i[1] ) ) + 4
    if len( str( i[3] ) ) > sourceAdj:
      sourceAdj = len( str( i[3] ) ) + 4
  # Header row, then one left-justified row per logging tuple.
  print '\n' + headers[0].ljust( statAdj ) + headers[1].ljust( mStatAdj ) + \
      headers[2].ljust( dtAdj ) + headers[3].ljust( sourceAdj ) + '\n'
  for i in loggingTupleList:
    line = i[0].ljust( statAdj ) + i[1].ljust( mStatAdj ) + \
        toString( i[2] ).ljust( dtAdj ) + i[3].ljust( sourceAdj )
    print line
  return result
#############################################################################
def _sendJob( self, jdl ):
"""Internal function.
This is an internal wrapper for submit() in order to
catch whether a user is authorized to submit to DIRAC or
does not have a valid proxy. This is not intended for
direct use.
"""
jobID = None
if gConfig.getValue( '/LocalSite/DisableSubmission', '' ):
return S_ERROR( 'Submission disabled by /LocalSite/DisableSubmission flag for debugging purposes' )
try:
jobID = self._wmcClient().submitJob( jdl )
# raise 'problem'
except Exception, x:
return S_ERROR( "Cannot submit job: %s" % str( x ) )
return jobID
#############################################################################
def getInputSandbox( self, jobID, outputDir = None ):
"""Retrieve input sandbox for existing JobID.
This method allows the retrieval of an existing job input sandbox for
debugging purposes. By default the sandbox is downloaded to the current
directory but this can be overidden via the outputDir parameter. All files
are extracted into a InputSandbox<JOBID> directory that is automatically created.
Example Usage:
>>> print dirac.getInputSandbox(12345)
{'OK': True, 'Value': ['Job__Sandbox__.tar.bz2']}
:param jobID: JobID
:type jobID: integer or string
:param outputDir: Optional directory for files
:type outputDir: string
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
# TODO: Do not check if dir already exists
dirPath = ''
if outputDir:
dirPath = '%s/InputSandbox%s' % ( outputDir, jobID )
if os.path.exists( dirPath ):
return self._errorReport( 'Job input sandbox directory %s already exists' % ( dirPath ) )
else:
dirPath = '%s/InputSandbox%s' % ( os.getcwd(), jobID )
if os.path.exists( dirPath ):
return self._errorReport( 'Job input sandbox directory %s already exists' % ( dirPath ) )
try:
os.mkdir( dirPath )
except Exception, x:
return self._errorReport( str( x ), 'Could not create directory in %s' % ( dirPath ) )
result = self._sbClient().downloadSandboxForJob( jobID, 'Input', dirPath )
if not result[ 'OK' ]:
self.log.warn( result[ 'Message' ] )
else:
self.log.info( 'Files retrieved and extracted in %s' % ( dirPath ) )
return result
#############################################################################
def getOutputSandbox( self, jobID, outputDir = None, oversized = True, noJobDir = False ):
  """Retrieve output sandbox for existing JobID.

     This method allows the retrieval of an existing job output sandbox.
     By default the sandbox is downloaded to the current directory but
     this can be overidden via the outputDir parameter. All files are
     extracted into a <JOBID> directory that is automatically created.
     If the sandbox was too large and was stored as an LFN instead, it is
     downloaded and unpacked as a fallback (unless oversized is False).

     Example Usage:

     >>> print dirac.getOutputSandbox(12345)
     {'OK': True, 'Value': ['Job__Sandbox__.tar.bz2']}

     :param jobID: JobID
     :type jobID: integer or string
     :param outputDir: Optional directory path
     :type outputDir: string
     :param oversized: Optionally disable oversized sandbox download
     :type oversized: boolean
     :param noJobDir: when True and outputDir is given, do not create the
                      per-job <JOBID> subdirectory
     :type noJobDir: boolean
     :returns: S_OK,S_ERROR
  """
  # Accept the job ID as a string or an integer.
  if type( jobID ) == type( " " ):
    try:
      jobID = int( jobID )
    except Exception, x:
      return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
  # TODO: Do not check if dir already exists
  # Work out the destination directory; note only the cwd branch refuses an
  # existing directory, the explicit-outputDir branch deliberately does not.
  dirPath = ''
  if outputDir:
    dirPath = outputDir
    if not noJobDir:
      dirPath = '%s/%s' % ( outputDir, jobID )
    # if os.path.exists( dirPath ):
    #   return self._errorReport( 'Job output directory %s already exists' % ( dirPath ) )
  else:
    dirPath = '%s/%s' % ( os.getcwd(), jobID )
    if os.path.exists( dirPath ):
      return self._errorReport( 'Job output directory %s already exists' % ( dirPath ) )
  try:
    if not os.path.exists( dirPath ):
      os.makedirs( dirPath )
  except Exception, x:
    return self._errorReport( str( x ), 'Could not create directory in %s' % ( dirPath ) )
  # New download
  result = self._sbClient().downloadSandboxForJob( jobID, 'Output', dirPath )
  if result['OK']:
    self.log.info( 'Files retrieved and extracted in %s' % ( dirPath ) )
    if self.jobRepo:
      # Record retrieval (and sandbox location) in the local job repository.
      self.jobRepo.updateJob( jobID, {'Retrieved':1, 'Sandbox':os.path.realpath( dirPath )} )
    return result
  self.log.warn( result[ 'Message' ] )
  if not oversized:
    # Oversized fallback disabled: mark as retrieved anyway and give up.
    if self.jobRepo:
      self.jobRepo.updateJob( jobID, {'Retrieved':1, 'Sandbox':os.path.realpath( dirPath )} )
    return result
  # The sandbox may have exceeded the size limit and been uploaded as an LFN
  # (stored in the job parameter 'OutputSandboxLFN'); try that route.
  params = self.parameters( int( jobID ) )
  if not params['OK']:
    self.log.verbose( 'Could not retrieve job parameters to check for oversized sandbox' )
    return params
  if not params['Value'].has_key( 'OutputSandboxLFN' ):
    self.log.verbose( 'No oversized output sandbox for job %s:\n%s' % ( jobID, params ) )
    return result
  oversizedSandbox = params['Value']['OutputSandboxLFN']
  if not oversizedSandbox:
    self.log.verbose( 'Null OutputSandboxLFN for job %s' % jobID )
    return result
  self.log.info( 'Attempting to retrieve %s' % oversizedSandbox )
  # Download into dirPath by temporarily changing the working directory;
  # the original cwd is restored on every exit path below.
  start = os.getcwd()
  os.chdir( dirPath )
  getFile = self.getFile( oversizedSandbox )
  if not getFile['OK']:
    self.log.warn( 'Failed to download %s with error:%s' % ( oversizedSandbox, getFile['Message'] ) )
    os.chdir( start )
    return getFile
  fileName = os.path.basename( oversizedSandbox )
  try:
    result = S_OK()
    if tarfile.is_tarfile( fileName ):
      tarFile = tarfile.open( fileName, 'r' )
      for member in tarFile.getmembers():
        tarFile.extract( member, dirPath )
  except Exception, x :
    # NOTE(review): cwd is restored here AND again after the cleanup below;
    # on this path the relative fileName is then checked from 'start', not
    # dirPath — presumably intentional best-effort cleanup, left unchanged.
    os.chdir( start )
    result = S_ERROR( str( x ) )
  # Remove the downloaded tarball once (or whether) it has been extracted.
  if os.path.exists( fileName ):
    os.unlink( fileName )
  os.chdir( start )
  if result['OK']:
    if self.jobRepo:
      self.jobRepo.updateJob( jobID, {'Retrieved':1, 'Sandbox':os.path.realpath( dirPath )} )
  return result
#############################################################################
def delete( self, jobID ):
"""Delete job or list of jobs from the WMS, if running these jobs will
also be killed.
Example Usage:
>>> print dirac.delete(12345)
{'OK': True, 'Value': [12345]}
:param jobID: JobID
:type jobID: int, string or list
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
result = self._wmcClient().deleteJob( jobID )
if result['OK']:
if self.jobRepo:
for jobID in result['Value']:
self.jobRepo.removeJob( jobID )
return result
#############################################################################
def reschedule( self, jobID ):
    """Reschedule a job or list of jobs in the WMS. This operation is the same
       as resubmitting the same job as new. The rescheduling operation may be
       performed to a configurable maximum number of times but the owner of a job
       can also reset this counter and reschedule jobs again by hand.

       Example Usage:

       >>> print dirac.reschedule(12345)
       {'OK': True, 'Value': [12345]}

       :param jobID: JobID
       :type jobID: int, string or list
       :returns: S_OK,S_ERROR
    """
    # Normalise the argument: a string becomes one int, a list becomes a
    # list of ints; a plain int passes through unchanged.
    if type( jobID ) == type( " " ):
      try:
        jobID = int( jobID )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    elif type( jobID ) == type( [] ):
      try:
        jobID = [int( job ) for job in jobID]
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    result = self._wmcClient().rescheduleJob( jobID )
    if result['OK']:
      if self.jobRepo:
        # Mark every rescheduled job as freshly submitted in the local repository.
        repoDict = {}
        for jobID in result['Value']:
          repoDict[jobID] = {'State':'Submitted'}
        self.jobRepo.updateJobs( repoDict )
    return result
def kill( self, jobID ):
    """Issue a kill signal to a running job. If a job has already completed this
       action is harmless but otherwise the process will be killed on the compute
       resource by the Watchdog.

       Example Usage:

       >>> print dirac.kill(12345)
       {'OK': True, 'Value': [12345]}

       :param jobID: JobID
       :type jobID: int, string or list
       :returns: S_OK,S_ERROR
    """
    # Normalise the argument: a string becomes one int, a list becomes a
    # list of ints.
    if type( jobID ) == type( " " ):
      try:
        jobID = int( jobID )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    elif type( jobID ) == type( [] ):
      try:
        jobID = [int( job ) for job in jobID]
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    result = self._wmcClient().killJob( jobID )
    if result['OK']:
      if self.jobRepo:
        # Killed jobs are removed from the local job repository as well.
        for jobID in result['Value']:
          self.jobRepo.removeJob( jobID )
    return result
#############################################################################
def status( self, jobID ):
"""Monitor the status of DIRAC Jobs.
Example Usage:
>>> print dirac.status(79241)
{79241: {'status': 'Done', 'site': 'LCG.CERN.ch'}}
:param jobID: JobID
:type jobID: int, string or list
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = [int( jobID )]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == types.IntType:
jobID = [jobID]
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
statusDict = monitoring.getJobsStatus( jobID )
minorStatusDict = monitoring.getJobsMinorStatus( jobID )
siteDict = monitoring.getJobsSites( jobID )
if not statusDict['OK']:
self.log.warn( 'Could not obtain job status information' )
return statusDict
if not siteDict['OK']:
self.log.warn( 'Could not obtain job site information' )
return siteDict
if not minorStatusDict['OK']:
self.log.warn( 'Could not obtain job minor status information' )
return minorStatusDict
result = {}
repoDict = {}
for job, vals in statusDict['Value'].items():
result[job] = vals
if self.jobRepo:
repoDict[job] = {'State':vals['Status']}
if self.jobRepo:
self.jobRepo.updateJobs( repoDict )
for job, vals in siteDict['Value'].items():
result[job].update( vals )
for job, vals in minorStatusDict['Value'].items():
result[job].update( vals )
for job, vals in result.items():
if result[job].has_key( 'JobID' ):
del result[job]['JobID']
return S_OK( result )
#############################################################################
def getJobInputData( self, jobID ):
"""Retrieve the input data requirement of any job existing in the workload management
system.
Example Usage:
>>> dirac.getJobInputData(1405)
{'OK': True, 'Value': {1405:
['LFN:/lhcb/production/DC06/phys-v2-lumi5/00001680/DST/0000/00001680_00000490_5.dst']}}
:param jobID: JobID
:type jobID: int, string or list
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = [int( jobID )]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( 1 ):
jobID = [jobID]
summary = {}
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
for job in jobID:
result = monitoring.getInputData( job )
if result['OK']:
summary[job] = result['Value']
else:
self.log.warn( 'Getting input data for job %s failed with message:\n%s' % ( job, result['Message'] ) )
summary[job] = []
return S_OK( summary )
#############################################################################
def getJobOutputLFNs( self, jobID ):
""" Retrieve the output data LFNs of a given job locally.
This does not download the output files but simply returns the LFN list
that a given job has produced.
Example Usage:
>>> dirac.getJobOutputLFNs(1405)
{'OK':True,'Value':[<LFN>]}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
result = self.parameters( int( jobID ) )
if not result['OK']:
return result
if not result['Value'].has_key( 'UploadedOutputData' ):
self.log.info( 'Parameters for job %s do not contain uploaded output data:\n%s' % ( jobID, result ) )
return S_ERROR( 'No output data found for job %s' % jobID )
outputData = result['Value']['UploadedOutputData']
outputData = outputData.replace( ' ', '' ).split( ',' )
if not outputData:
return S_ERROR( 'No output data files found' )
self.log.verbose( 'Found the following output data LFNs:\n', '\n'.join( outputData ) )
return S_OK( outputData )
#############################################################################
def getJobOutputData( self, jobID, outputFiles = '', destinationDir = '' ):
""" Retrieve the output data files of a given job locally.
Optionally restrict the download of output data to a given file name or
list of files using the outputFiles option, by default all job outputs
will be downloaded.
Example Usage:
>>> dirac.getJobOutputData(1405)
{'OK':True,'Value':[<LFN>]}
:param jobID: JobID
:type jobID: int or string
:param outputFiles: Optional files to download
:type outputFiles: string or list
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
result = self.parameters( int( jobID ) )
if not result['OK']:
return result
if not result['Value'].has_key( 'UploadedOutputData' ):
self.log.info( 'Parameters for job %s do not contain uploaded output data:\n%s' % ( jobID, result ) )
return S_ERROR( 'No output data found for job %s' % jobID )
outputData = result['Value']['UploadedOutputData']
outputData = outputData.replace( ' ', '' ).split( ',' )
if not outputData:
return S_ERROR( 'No output data files found to download' )
if outputFiles:
if type( outputFiles ) == type( " " ):
outputFiles = [os.path.basename( outputFiles )]
elif type( outputFiles ) == type( [] ):
try:
outputFiles = [os.path.basename( fname ) for fname in outputFiles]
except Exception, x:
return self._errorReport( str( x ), 'Expected strings for output file names' )
else:
return self._errorReport( 'Expected strings for output file names' )
self.log.info( 'Found specific outputFiles to download:', ', '.join( outputFiles ) )
newOutputData = []
for outputFile in outputData:
if os.path.basename( outputFile ) in outputFiles:
newOutputData.append( outputFile )
self.log.verbose( '%s will be downloaded' % outputFile )
else:
self.log.verbose( '%s will be ignored' % outputFile )
outputData = newOutputData
# These two lines will break backwards compatibility.
# if not destinationDir:
# destinationDir = jobID
obtainedFiles = []
for outputFile in outputData:
self.log.info( 'Attempting to retrieve %s' % outputFile )
result = self.getFile( outputFile, destDir = destinationDir )
if not result['OK']:
self.log.error( 'Failed to download %s' % outputFile )
return result
else:
localPath = "%s/%s" % ( destinationDir, os.path.basename( outputFile ) )
obtainedFiles.append( os.path.realpath( localPath ) )
if self.jobRepo:
self.jobRepo.updateJob( jobID, {'OutputData':1, 'OutputFiles':obtainedFiles} )
return S_OK( outputData )
#############################################################################
def selectJobs( self, status = None, minorStatus = None, applicationStatus = None,
                site = None, owner = None, ownerGroup = None, jobGroup = None, date = None ):
    """Options correspond to the web-page table columns. Returns the list of JobIDs for
       the specified conditions. A few notes on the formatting:

       - date must be specified as yyyy-mm-dd.  By default, the date is today.
       - jobGroup corresponds to the name associated to a group of jobs, e.g. productionID / job names.
       - site is the DIRAC site name, e.g. LCG.CERN.ch
       - owner is the immutable nickname, e.g. paterson

       Example Usage:

       >>> dirac.selectJobs( status='Failed', owner='paterson', site='LCG.CERN.ch')
       {'OK': True, 'Value': ['25020', '25023', '25026', '25027', '25040']}

       :param status: Job status
       :type status: string
       :param minorStatus: Job minor status
       :type minorStatus: string
       :param applicationStatus: Job application status
       :type applicationStatus: string
       :param site: Job execution site
       :type site: string
       :param owner: Job owner
       :type owner: string
       :param jobGroup: Job group
       :type jobGroup: string
       :param date: Selection date
       :type date: string
       :returns: S_OK,S_ERROR
    """
    options = {'Status':status, 'MinorStatus':minorStatus, 'ApplicationStatus':applicationStatus, 'Owner':owner,
               'Site':site, 'JobGroup':jobGroup, 'OwnerGroup':ownerGroup }
    # Keep only the selection criteria the caller actually supplied.
    conditions = {}
    for key, value in options.items():
      if value:
        try:
          conditions[key] = str( value )
        except Exception, x:
          return self._errorReport( str( x ), 'Expected string for %s field' % key )
    # The date may be passed as any object convertible to a string.
    if not type( date ) == type( " " ):
      try:
        if date:
          date = str( date )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected yyyy-mm-dd string for date' )
    if not date:
      # Default the selection date to today.
      date = '%s' % Time.date()
      self.log.verbose( 'Setting date to %s' % ( date ) )
    self.log.verbose( 'Will select jobs with last update %s and following conditions' % date )
    self.log.verbose( self.pPrint.pformat( conditions ) )
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobs( conditions, date )
    if not result['OK']:
      self.log.warn( result['Message'] )
      return result
    jobIDs = result['Value']
    self.log.verbose( '%s job(s) selected' % ( len( jobIDs ) ) )
    if not jobIDs:
      return S_ERROR( 'No jobs selected for conditions: %s' % conditions )
    else:
      return result
#############################################################################
def getJobsInHerd( self, jobID ):
    """Get all jobs in the same herd as the given one.

       Example Usage:

       >>> dirac.getJobsInHerd( 2342 )
       {'OK': True, 'Value': [ 2342, 2533, 2534, 2643, 2650 ] }

       :param jobID: JobID
       :type JobID: int
       :returns: S_OK,S_ERROR
    """
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobsInHerd( jobID )
    # Strip the internal RPC stub from the returned structure before handing
    # it to the caller.  Using pop() with a default replaces the previous
    # bare `except: pass`, which would also have hidden unrelated errors.
    if isinstance( result, dict ):
      result.pop( 'rpcStub', None )
    return result
#############################################################################
def getJobSummary( self, jobID, outputFile = None, printOutput = False ):
"""Output similar to the web page can be printed to the screen
or stored as a file or just returned as a dictionary for further usage.
Jobs can be specified individually or as a list.
Example Usage:
>>> dirac.getJobSummary(959209)
{'OK': True, 'Value': {959209: {'Status': 'Staging', 'LastUpdateTime': '2008-12-08 16:43:18',
'MinorStatus': '28 / 30', 'Site': 'Unknown', 'HeartBeatTime': 'None', 'ApplicationStatus': 'unknown',
'JobGroup': '00003403', 'Owner': 'joel', 'SubmissionTime': '2008-12-08 16:41:38'}}}
:param jobID: JobID
:type jobID: int or string
:param outputFile: Optional output file
:type outputFile: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = [int( jobID )]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
headers = ['Status', 'MinorStatus', 'ApplicationStatus', 'Site', 'JobGroup', 'LastUpdateTime',
'HeartBeatTime', 'SubmissionTime', 'Owner']
if type( jobID ) == type( 1 ):
jobID = [jobID]
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
result = monitoring.getJobsSummary( jobID )
if not result['OK']:
self.log.warn( result['Message'] )
return result
try:
jobSummary = eval( result['Value'] )
# self.log.info(self.pPrint.pformat(jobSummary))
except Exception, x:
self.log.warn( 'Problem interpreting result from job monitoring service' )
return S_ERROR( 'Problem while converting result from job monitoring' )
summary = {}
for job in jobID:
summary[job] = {}
for key in headers:
if not jobSummary.has_key( job ):
self.log.warn( 'No records for JobID %s' % job )
value = 'None'
elif jobSummary[job].has_key( key ):
value = jobSummary[job][key]
else:
value = 'None'
summary[job][key] = value
if outputFile:
if os.path.exists( outputFile ):
return self._errorReport( 'Output file %s already exists' % ( outputFile ) )
dirPath = os.path.basename( outputFile )
if re.search( '/', dirPath ) and not os.path.exists( dirPath ):
try:
os.mkdir( dirPath )
except Exception, x:
return self._errorReport( str( x ), 'Could not create directory %s' % ( dirPath ) )
fopen = open( outputFile, 'w' )
line = 'JobID'.ljust( 12 )
for i in headers:
line += i.ljust( 35 )
fopen.write( line + '\n' )
for jobID, params in summary.items():
line = str( jobID ).ljust( 12 )
for header in headers:
for key, value in params.items():
if header == key:
line += value.ljust( 35 )
fopen.write( line + '\n' )
fopen.close()
self.log.verbose( 'Output written to %s' % outputFile )
if printOutput:
print self.pPrint.pformat( summary )
return S_OK( summary )
#############################################################################
def getJobDebugOutput( self, jobID ):
"""Developer function. Try to retrieve all possible outputs including
logging information, job parameters, sandbox outputs, pilot outputs,
last heartbeat standard output, JDL and CPU profile.
Example Usage:
>>> dirac.getJobDebugOutput(959209)
{'OK': True, 'Value': '/afs/cern.ch/user/p/paterson/DEBUG_959209'}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
result = self.status( jobID )
if not result['OK']:
self.log.info( 'Could not obtain status information for jobID %s, please check this is valid.' % jobID )
return S_ERROR( 'JobID %s not found in WMS' % jobID )
else:
self.log.info( 'Job %s' % result['Value'] )
debugDir = '%s/DEBUG_%s' % ( os.getcwd(), jobID )
try:
os.mkdir( debugDir )
except Exception, x:
return self._errorReport( str( x ), 'Could not create directory in %s' % ( debugDir ) )
try:
result = self.getOutputSandbox( jobID, '%s' % ( debugDir ) )
msg = []
if not result['OK']:
msg.append( 'Output Sandbox: Retrieval Failed' )
else:
msg.append( 'Output Sandbox: Retrieved' )
except Exception, x:
msg.append( 'Output Sandbox: Not Available' )
try:
result = self.getInputSandbox( jobID, '%s' % ( debugDir ) )
if not result['OK']:
msg.append( 'Input Sandbox: Retrieval Failed' )
else:
msg.append( 'Input Sandbox: Retrieved' )
except Exception, x:
msg.append( 'Input Sandbox: Not Available' )
try:
result = self.parameters( jobID )
if not result['OK']:
msg.append( 'Job Parameters: Retrieval Failed' )
else:
self.__writeFile( result['Value'], '%s/JobParameters' % ( debugDir ) )
msg.append( 'Job Parameters: Retrieved' )
except Exception, x:
msg.append( 'Job Parameters: Not Available' )
try:
result = self.peek( jobID )
if not result['OK']:
msg.append( 'Last Heartbeat StdOut: Retrieval Failed' )
else:
self.__writeFile( result['Value'], '%s/LastHeartBeat' % ( debugDir ) )
msg.append( 'Last Heartbeat StdOut: Retrieved' )
except Exception, x:
msg.append( 'Last Heartbeat StdOut: Not Available' )
try:
result = self.loggingInfo( jobID )
if not result['OK']:
msg.append( 'Logging Info: Retrieval Failed' )
else:
self.__writeFile( result['Value'], '%s/LoggingInfo' % ( debugDir ) )
msg.append( 'Logging Info: Retrieved' )
except Exception, x:
msg.append( 'Logging Info: Not Available' )
try:
result = self.getJobJDL( jobID )
if not result['OK']:
msg.append( 'Job JDL: Retrieval Failed' )
else:
self.__writeFile( result['Value'], '%s/Job%s.jdl' % ( debugDir, jobID ) )
msg.append( 'Job JDL: Retrieved' )
except Exception, x:
msg.append( 'Job JDL: Not Available' )
try:
result = self.getJobCPUTime( jobID )
if not result['OK']:
msg.append( 'CPU Profile: Retrieval Failed' )
else:
self.__writeFile( result['Value'], '%s/JobCPUProfile' % ( debugDir ) )
msg.append( 'CPU Profile: Retrieved' )
except Exception, x:
msg.append( 'CPU Profile: Not Available' )
self.log.info( 'Summary of debugging outputs for job %s retrieved in directory:\n%s\n' % ( jobID, debugDir ),
'\n'.join( msg ) )
return S_OK( debugDir )
#############################################################################
def __writeFile( self, pObject, fileName ):
    """Internal helper: dump *pObject* to *fileName*.

    Strings are written verbatim; any other object is pretty-printed via
    self.pPrint with a trailing newline.
    """
    outFile = open( fileName, 'w' )
    if type( pObject ) == type( " " ):
      outFile.write( pObject )
    else:
      outFile.write( '%s\n' % self.pPrint.pformat( pObject ) )
    outFile.close()
#############################################################################
def getJobCPUTime( self, jobID, printOutput = False ):
"""Retrieve job CPU consumed heartbeat data from job monitoring
service. Jobs can be specified individually or as a list.
The time stamps and raw CPU consumed (s) are returned (if available).
Example Usage:
>>> d.getJobCPUTime(959209)
{'OK': True, 'Value': {959209: {}}}
:param jobID: JobID
:type jobID: int or string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = [int( jobID )]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
if type( jobID ) == type( 1 ):
jobID = [jobID]
summary = {}
for job in jobID:
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
result = monitoring.getJobHeartBeatData( job )
summary[job] = {}
if not result['OK']:
return self._errorReport( result['Message'], 'Could not get heartbeat data for job %s' % job )
if result['Value']:
tupleList = result['Value']
for tup in tupleList:
if tup[0] == 'CPUConsumed':
summary[job][tup[2]] = tup[1]
else:
self.log.warn( 'No heartbeat data for job %s' % job )
if printOutput:
print self.pPrint.pformat( summary )
return S_OK( summary )
#############################################################################
def attributes( self, jobID, printOutput = False ):
"""Return DIRAC attributes associated with the given job.
Each job will have certain attributes that affect the journey through the
workload management system, see example below. Attributes are optionally
printed to the screen.
Example Usage:
>>> print dirac.attributes(79241)
{'AccountedFlag': 'False','ApplicationNumStatus': '0',
'ApplicationStatus': 'Job Finished Successfully',
'CPUTime': '0.0','DIRACSetup': 'LHCb-Production'}
:param jobID: JobID
:type jobID: int, string or list
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = [int( jobID )]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
elif type( jobID ) == type( [] ):
try:
jobID = [int( job ) for job in jobID]
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
result = monitoring.getJobAttributes( jobID )
if not result['OK']:
return result
if printOutput:
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def parameters( self, jobID, printOutput = False ):
    """Return DIRAC parameters associated with the given job.

       DIRAC keeps track of several job parameters which are kept in the job monitoring
       service, see example below. Selected parameters also printed to screen.

       Example Usage:

       >>> print dirac.parameters(79241)
       {'OK': True, 'Value': {'JobPath': 'JobPath,JobSanity,JobPolicy,InputData,JobScheduling,TaskQueue',
       'JobSanityCheck': 'Job: 768 JDL: OK, InputData: 2 LFNs OK, ','LocalBatchID': 'dc768'}

       :param jobID: JobID
       :type jobID: int or string
       :param printOutput: Flag to print to stdOut
       :type printOutput: Boolean
       :returns: S_OK,S_ERROR
    """
    # Only a single job is supported here; lists are rejected explicitly.
    if type( jobID ) == type( " " ):
      try:
        jobID = int( jobID )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    elif type( jobID ) == type( [] ):
      return self._errorReport( 'Expected integer or string for jobID' )
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobParameters( jobID )
    if not result['OK']:
      return result
    # Drop the captured stdout from the parameter dict — presumably because
    # it can be large and is exposed separately via peek() (which reads the
    # StandardOutput parameter).
    if result['Value'].has_key( 'StandardOutput' ):
      del result['Value']['StandardOutput']
    if printOutput:
      print self.pPrint.pformat( result['Value'] )
    return result
#############################################################################
def loggingInfo( self, jobID, printOutput = False ):
    """DIRAC keeps track of job transitions which are kept in the job monitoring
       service, see example below. Logging summary also printed to screen at the
       INFO level.

       Example Usage:

       >>> print dirac.loggingInfo(79241)
       {'OK': True, 'Value': [('Received', 'JobPath', 'Unknown', '2008-01-29 15:37:09', 'JobPathAgent'),
       ('Checking', 'JobSanity', 'Unknown', '2008-01-29 15:37:14', 'JobSanityAgent')]}

       :param jobID: JobID
       :type jobID: int or string
       :param printOutput: Flag to print to stdOut
       :type printOutput: Boolean
       :returns: S_OK,S_ERROR
    """
    # Only a single job is supported here; lists are rejected explicitly.
    if type( jobID ) == type( " " ):
      try:
        jobID = int( jobID )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    elif type( jobID ) == type( [] ):
      return self._errorReport( 'Expected int or string, not list' )
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobLoggingInfo( jobID )
    if not result['OK']:
      self.log.warn( 'Could not retrieve logging information for job %s' % jobID )
      self.log.warn( result )
      return result
    if printOutput:
      loggingTupleList = result['Value']
      # source is removed for printing to control width
      headers = ( 'Status', 'MinorStatus', 'ApplicationStatus', 'DateTime' )
      line = ''
      for i in headers:
        line += i.ljust( 30 )
      print line
      # Each tuple's last element is the source agent, skipped by the -1 below.
      for i in loggingTupleList:
        line = ''
        for j in xrange( len( i ) - 1 ):
          line += i[j].ljust( 30 )
        print line
    return result
#############################################################################
def peek( self, jobID, printout = False ):
    """The peek function will attempt to return standard output from the WMS for
       a given job if this is available. The standard output is periodically
       updated from the compute resource via the application Watchdog. Available
       standard output is printed to screen at the INFO level.

       Example Usage:

       >>> print dirac.peek(1484)
       {'OK': True, 'Value': 'Job peek result'}

       :param jobID: JobID
       :type jobID: int or string
       :param printout: Flag to print the standard output to stdOut
       :type printout: Boolean
       :returns: S_OK,S_ERROR
    """
    # Only a single job is supported here; lists are rejected explicitly.
    if type( jobID ) == type( " " ):
      try:
        jobID = int( jobID )
      except Exception, x:
        return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
    elif type( jobID ) == type( [] ):
      return self._errorReport( 'Expected int or string, not list' )
    monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
    result = monitoring.getJobParameter( jobID, 'StandardOutput' )
    if not result['OK']:
      return self._errorReport( result, 'Could not retrieve job attributes' )
    # Default reported when the Watchdog has not yet uploaded any stdout.
    stdout = 'Not available yet.'
    if result['Value'].has_key( 'StandardOutput' ):
      self.log.verbose( result['Value']['StandardOutput'] )
      stdout = result['Value']['StandardOutput']
      if printout:
        print stdout
    else:
      self.log.info( 'No standard output available to print.' )
    return S_OK( stdout )
#############################################################################
def ping( self, system, service, printOutput = False ):
"""The ping function will attempt to return standard information from a system
service if this is available. If the ping() command is unsuccessful it could
indicate a period of service unavailability.
Example Usage:
>>> print dirac.ping('WorkloadManagement','JobManager')
{'OK': True, 'Value': 'Job ping result'}
:param system: system
:type system: string
:param service: service name
:type service: string
:param printOutput: Flag to print to stdOut
:type printOutput: Boolean
:returns: S_OK,S_ERROR
"""
if not type( system ) == type( " " ) and type( service ) == type( " " ):
return self._errorReport( 'Expected string for system and service to ping()' )
result = S_ERROR()
try:
systemSection = getSystemSection( system + '/' )
self.log.verbose( 'System section is: %s' % ( systemSection ) )
section = '%s/%s' % ( systemSection, service )
self.log.verbose( 'Requested service should have CS path: %s' % ( section ) )
serviceURL = getServiceURL( '%s/%s' % ( system, service ) )
self.log.verbose( 'Service URL is: %s' % ( serviceURL ) )
client = RPCClient( '%s/%s' % ( system, service ) )
result = client.ping()
if result['OK']:
result['Value']['service url'] = serviceURL
except Exception, x:
self.log.warn( 'ping for %s/%s failed with exception:\n%s' % ( system, service, str( x ) ) )
result['Message'] = str( x )
if printOutput:
print self.pPrint.pformat( result )
return result
#############################################################################
def getJobJDL( self, jobID, printOutput = False ):
"""Simple function to retrieve the current JDL of an existing job in the
workload management system. The job JDL is converted to a dictionary
and returned in the result structure.
Example Usage:
>>> print dirac.getJobJDL(12345)
{'Arguments': 'jobDescription.xml',...}
:param jobID: JobID
:type jobID: int or string
:returns: S_OK,S_ERROR
"""
if type( jobID ) == type( " " ):
try:
jobID = int( jobID )
except Exception, x:
return self._errorReport( str( x ), 'Expected integer or string for existing jobID' )
monitoring = RPCClient( 'WorkloadManagement/JobMonitoring' )
result = monitoring.getJobJDL( jobID )
if not result['OK']:
return result
result = self.__getJDLParameters( result['Value'] )
if printOutput:
print self.pPrint.pformat( result['Value'] )
return result
#############################################################################
def __getJDLParameters( self, jdl ):
    """Internal function. Returns a dictionary of JDL parameters.

    Accepts either a JDL string or a path to a file containing the JDL.
    """
    # If a file path was passed, read the JDL text from it.
    if os.path.exists( jdl ):
      jdlFile = open( jdl, 'r' )
      jdl = jdlFile.read()
      jdlFile.close()
    try:
      parameters = {}
      # ClassAd expects the outer brackets; add them when missing.
      if not re.search( '\[', jdl ):
        jdl = '[' + jdl + ']'
      classAdJob = ClassAd( jdl )
      paramsDict = classAdJob.contents
      for param, value in paramsDict.items():
        if re.search( '{', value ):
          # Brace-delimited values are lists: strip the ClassAd markup,
          # split on whitespace and remove trailing commas per element.
          self.log.debug( 'Found list type parameter %s' % ( param ) )
          rawValues = value.replace( '{', '' ).replace( '}', '' ).replace( '"', '' ).replace( 'LFN:', '' ).split()
          valueList = []
          for val in rawValues:
            if re.search( ',$', val ):
              valueList.append( val[:-1] )
            else:
              valueList.append( val )
          parameters[param] = valueList
        else:
          # Scalar value: only the surrounding quotes need stripping.
          self.log.debug( 'Found standard parameter %s' % ( param ) )
          parameters[param] = value.replace( '"', '' )
      return S_OK( parameters )
    except Exception, x:
      self.log.exception( lException = x )
      return S_ERROR( 'Exception while extracting JDL parameters for job' )
#############################################################################
def __printInfo( self ):
    """Internal helper: log the DIRAC API version banner (INFO) and the
    current site / setup (VERBOSE)."""
    banner = '<=====%s=====>' % ( self.diracInfo )
    self.log.info( banner )
    location = 'DIRAC is running at %s in setup %s' % ( DIRAC.siteName(), self.setup )
    self.log.verbose( location )
def getConfigurationValue( self, option, default ):
    """ Export the configuration client getValue() function.

    :param option: configuration option path to read
    :param default: value returned when the option is not defined
    """
    return gConfig.getValue( option, default )
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| avedaee/DIRAC | Interfaces/API/Dirac.py | Python | gpl-3.0 | 108,354 | [
"DIRAC"
] | a05aa6ba8fbb0f4147654669e445462e5ebe59acebb84f50e71162ac1d2fd531 |
#!/usr/bin/env python
## @package linesearch
## \author Ed Bueler, University of Alaska Fairbanks, USA
## \brief A script for doing a line search over stddev parameter of PDD.
## Copyright (C) 2010, 2012 Ed Bueler
##
## see README for role of this script
## This script uses NCO (http://nco.sourceforge.net/).
import commands
from numpy import array, double, int
from getopt import getopt, GetoptError
from sys import argv, exit
usage="""
LINESEARCH.PY Do line search (bisection and false position) on stddev parameter
of the PDD model for surface mass balance. Abort search if stddev=0.5 and
stddev=10.0 do not make pclimate output total smb bracket the target smb.
Imports computeobjective() method from stand-alone objective.py.
Examples:
1) do a linesearch:
./linesearch.py --thresh=268 --snow=0.001 --refreeze=0.4 --lapse=0.3 \
--diffsfile=diffs.txt --startfile=start.nc
2) reproduce a file from its name:
./linesearch.py --reproduce=clim_268_0.001_0.4_0.300_1.0.nc --startfile=start.nc
3) get help; print this message:
./linesearch.py --help
See: dotune.sh, boot.sh, objective.py, README.
"""
def usagefailure(message):
    """Print *message* followed by the usage text, then exit with status 2."""
    print message
    print
    print usage
    exit(2)
class Case:
    """Holds parameters for one pclimate case. Has functionality related to
    naming scheme for pclimate configuration and output NetCDF files, e.g.
    'clim_268.0_0.002_0.80_0.001_1.688.nc' and similar. For this name, the
    'nameroot' is '268.0_0.002_0.80_0.001_1.688'."""
    def __init__(self):
        # Default PDD parameters (note: 'threshhold' spelling is part of the
        # established attribute interface and is kept as-is).
        self.threshhold = 273.15
        self.ddfsnow = 0.003
        self.ddfice = 2.0 * self.ddfsnow   # ice factor is always 2x snow factor
        self.refreeze = 0.6
        self.stddev_lapse = 0.0
        self.stddev = 2.53
        self.annualizepdd = False
    def update_ddfice(self):
        # Re-derive the ice degree-day factor after ddfsnow changes.
        self.ddfice = 2.0 * self.ddfsnow
    def get_nameroot(self):
        """Return the underscore-separated nameroot encoding the five PDD
        parameters (threshold, ddfsnow, refreeze, stddev lapse, stddev)."""
        self.update_ddfice()
        return "%.1f_%.3f_%.2f_%.3f_%.4f" \
            % (self.threshhold, self.ddfsnow, self.refreeze, self.stddev_lapse, self.stddev)
    def put_nameroot(self,namerootIN):
        """Parse a nameroot string (inverse of get_nameroot) into the five
        PDD parameters; exits the program if fewer than 5 fields are found."""
        p = namerootIN.split('_')
        if len(p) < 5:
            print "ERROR: nameroot provided to Case.put_nameroot has too few params"
            exit(1)
        self.threshhold = float(p[0])
        self.ddfsnow = float(p[1])
        self.refreeze = float(p[2])
        self.stddev_lapse = float(p[3])
        self.stddev = float(p[4])
        self.update_ddfice()
    def put_pism_overrides(self,nc):
        """input nc is an open NetCDF file"""
        self.update_ddfice()
        # The parameters travel as attributes of a dummy 'pism_overrides'
        # variable, as expected by PISM's -config_override mechanism.
        # variable type is NC_BYTE
        overs = nc.createVariable("pism_overrides", 'b')
        overs.pdd_positive_threshold_temp = self.threshhold
        overs.pdd_factor_snow = self.ddfsnow
        overs.pdd_factor_ice = self.ddfice
        overs.pdd_refreeze = self.refreeze
        overs.pdd_std_dev_lapse_lat_rate = self.stddev_lapse
        overs.pdd_std_dev = self.stddev
        # The variable's own (byte) value is irrelevant; set it to zero.
        overs[:] = 0
class Files:
    """Holds the fixed file names for one tuning session and derives the
    per-case config/climate file names from a Case's nameroot."""
    def __init__(self):
        # it might be good to have this controllable at command line:
        # name of PISM file with Greenland geometry and precip,smb from Ettema et al.
        # and other needed info to run pclimate:
        DATANAME = "Greenland_5km_v1.1.nc"
        self.targetfile = "pism_" + DATANAME
        self.startfile = "start.nc" # effectively allow user to forget --startfile=
        self.diffsfile = "" # if user forgets --diffsfile=foo.txt then writes to stdout
    def copy(self):
        """Return a shallow copy (all attributes are strings)."""
        cpfn = Files()
        cpfn.targetfile = self.targetfile
        cpfn.startfile = self.startfile
        cpfn.diffsfile = self.diffsfile
        return cpfn
    def configfile(self,cp):
        "this is the configuration file for the -config_override mechanism"
        return "config_" + cp.get_nameroot() + ".nc"
    def climatefile(self,cp):
        "this is output of pclimate, which will be evaluated against PISMDATA"
        return "clim_" + cp.get_nameroot() + ".nc"
    def printme(self, cp):
        """Debug dump of all file names for case cp."""
        print "**PRINTING Files OBJECT:**"
        print " targetfile = " + self.targetfile
        print " startfile = " + self.startfile
        print " diffsfile = " + self.diffsfile
        print " configfile(cp) = " + self.configfile(cp)
        print " climatefile(cp) = " + self.climatefile(cp)
        print "**END**"
def evalcase(stddev, cp, fn, deletencfiles):
    """evaluates one pclimate run against smb target in a file"""
    # input cp is of type Case()
    # input fn is of type Filenames()
    # Returns avdiff, the spatially averaged signed smb difference; the sign
    # of this value drives the bracketing line search in __main__.
    cp.stddev = stddev
    # netCDF4 preferred; fall back to the older netCDF3 binding if missing
    try:
        from netCDF4 import Dataset as NC
    except:
        from netCDF3 import Dataset as NC
    from objective import computeobjective
    configopt = " -config_override " + fn.configfile(cp)
    print " creating -config_override file %s ..." % fn.configfile(cp)
    try:
        nc_config = NC(fn.configfile(cp), 'w')
    except:
        usagefailure("ERROR: NETCDF FILE '%s' CANNOT BE OPENED FOR WRITING" \
                     % fn.configfile(cp) )
    cp.put_pism_overrides(nc_config)
    nc_config.close()
    # change this to "mpiexec -n 8" or similar to run on multiple processes
    mpido=""
    # coupler settings: Fausto 2m air temp parameterization, but default PDD
    # (w/o Greve/Fausto settings of PDD parameters)
    coupleropts = " -atmosphere searise_greenland -surface pdd"
    if cp.annualizepdd:
        coupleropts += " -pdd_annualize"
    timeopts = " -times 1990:1.0:1991"
    #dt = 0.0833333333 # monthly = (1/12) of year
    if len(fn.diffsfile) > 0:
        print " will add lines of text to " + fn.diffsfile + " ..."
    if deletencfiles:
        print " will delete NetCDF files %s and %s when no longer needed ..." % \
              (fn.configfile(cp), fn.climatefile(cp))
    # run PISM:
    command = mpido + " pclimate -i " + fn.startfile + coupleropts + configopt \
              + timeopts + " -o " + fn.climatefile(cp)
    print " doing:"
    print " " + command
    try:
        (status,output) = commands.getstatusoutput(command)
    except KeyboardInterrupt:
        exit(2)
    # NOTE(review): a nonzero exit status is only reported, not fatal; the
    # objective is still computed from whatever output file exists
    if status:
        #exit(status)
        print status
        print output
    countvalid, avdiff, avL2, av2kL2 = computeobjective(
        fn.startfile, fn.climatefile(cp),
        fn.targetfile,
        "acab", "smb")
    print " result: "
    print " %d locations for valid (thk>0) comparison:" % countvalid
    print " average of signed differences (whole sheet) is %12.7f" % avdiff
    print " average of squared differences (whole sheet) is %12.7f" % avL2
    print " average of squared differences (H < 2000) is %12.7f" % av2kL2
    # FIXME: allow choice of weights
    weighted = 1.0 * avL2 + 3.0 * av2kL2
    print " weighted average of above quantities is %12.7f" % weighted
    # write results to file if a file name was given, otherwise stdout
    lineoftxt = "%s %12.7f %12.7f %12.7f %12.7f\n" % \
                (fn.climatefile(cp), avdiff, avL2, av2kL2, weighted)
    if len(fn.diffsfile) > 0:
        # append-mode so repeated calls accumulate one line per case
        diffs = file(fn.diffsfile,'a')
        diffs.write(lineoftxt)
        diffs.close()
    else:
        print " result in one line:"
        print lineoftxt
    # finalize: if option was given, clean up generated NetCDF file
    if deletencfiles:
        command = "rm -rf " + fn.configfile(cp) + " " + fn.climatefile(cp)
        print " doing:"
        print " " + command
        try:
            (status,output) = commands.getstatusoutput(command)
        except KeyboardInterrupt:
            exit(2)
        if status:
            #exit(status)
            print status
            print output
    print ""
    return avdiff
def reproduce(climfilename, fn):
    """parses a pclimate output file name, and runs evalcase() to reproduce that run"""
    # input fn is of type Filenames()
    if not(climfilename.endswith('.nc')):
        print "ERROR: filename needs to be a NetCDF file"
        exit(1)
    if not(climfilename.startswith('clim_')):
        print "WARNING: filename was expected to start with 'clim_'"
        print " (in any case, filename will be stripped through first '_' to"
        print " generate the nameroot)"
    # strip leading prefix (through first '_') and trailing '.nc'
    nr = climfilename[climfilename.find('_')+1:-3]
    cp = Case()
    cp.put_nameroot(nr)
    # fn.printme(cp)
    result = evalcase(cp.stddev, cp, fn, False)
    print "************ result at stddev=%.5f is %.5f ***********" \
          % (cp.stddev, result)
if __name__ == "__main__":
    cp = Case()
    fn = Files()
    try:
        opts, args = getopt(argv[1:], "",
                            ["thresh=", "snow=", "refreeze=", "lapse=",
                             "sd=", # <-- if this is an option then don't do linesearch
                             "tol=",
                             "reproduce=",
                             "diffsfile=", "startfile=",
                             "annualize", "deletenc", "help","usage"])
    except GetoptError:
        usagefailure('ERROR: INCORRECT COMMAND LINE ARGUMENTS FOR linesearch.py')
    dolinesearch = True
    cp.annualizepdd = False
    deletencfiles = False
    avdifftol = 0.001        # stop when |avdiff| falls below this
    merely_reproduce = False
    reproname = ""
    # NOTE(review): '("--thresh")' is a plain string, not a tuple, so 'in'
    # is a substring test; it works here only because opt equals the full
    # option string exactly
    for (opt, optarg) in opts:
        if opt in ("--thresh"):
            cp.threshhold = float(optarg)
        if opt in ("--snow"):
            cp.ddfsnow = float(optarg)
        if opt in ("--refreeze"):
            cp.refreeze = float(optarg)
        if opt in ("--lapse"):
            cp.stddev_lapse = float(optarg)
        if opt in ("--sd"):
            dolinesearch = False
            cp.stddev = float(optarg)
        if opt in ("--annualize"):
            cp.annualizepdd = True
        if opt in ("--tol"):
            avdifftol = float(optarg)
        if opt in ("--diffsfile"):
            fn.diffsfile = optarg
        if opt in ("--startfile"):
            fn.startfile = optarg
        if opt in ("--deletenc"):
            deletencfiles = True
        if opt in ("--reproduce"):
            merely_reproduce = True
            reproname = optarg
        if opt in ("--help", "--usage"):
            print usage
            exit(0)
    # --reproduce short-circuits everything else
    if merely_reproduce:
        if deletencfiles:
            print "WARNING: option --deletenc is ignored; output files will remain"
        reproduce(reproname, fn.copy())
        exit(0)
    print "LINESEARCH.PY CASE (threshhold=%.2f, ddfsnow=%.4f, refreeze=%.2f, sdlapse=%.3f)" \
          % (cp.threshhold, cp.ddfsnow, cp.refreeze, cp.stddev_lapse)
    print ""
    # --sd given: single evaluation, no search
    if not(dolinesearch):
        result = evalcase(cp.stddev, cp, fn, False)
        print "************ result at stddev=%.5f is %.5f ***********" \
              % (cp.stddev, result)
        exit(0)
    # line search: combines bisection with false position at end
    Asd = 0.5
    Bsd = 10.0
    stddevtol = 1.0e-5 * (Bsd - Asd);   # convergence tolerance on the bracket width
    F_Asd = evalcase(Asd, cp, fn, deletencfiles)
    F_Bsd = evalcase(Bsd, cp, fn, deletencfiles)
    # require a sign change over [Asd, Bsd]; otherwise no root is bracketed
    if F_Asd * F_Bsd > 0:
        print "************ at stddev in [%.5f,%.5f], NO BRACKET ***********" \
              % (Asd,Bsd)
    else:
        count = 2
        maxcount = 20 # max number of function evals per case
        while True:
            if count < 5:
                # bisection for a few steps
                Csd = 0.5 * (Asd + Bsd)
            else:
                # now false position
                if abs(Bsd - Asd) <= stddevtol:
                    break
                Csd = Asd - (F_Asd) * (Bsd - Asd) / (F_Bsd - F_Asd);
            result = evalcase(Csd, cp, fn, deletencfiles)
            print "************ result at stddev=%.5f is %.5f ***********" \
                  % (Csd, result)
            if abs(result) <= avdifftol:
                break
            count = count + 1
            if count >= maxcount:
                print "************ max number of function evals reached in bisection **********"
                break
            # keep the sub-interval where the sign change persists
            if F_Asd * result > 0.0:
                Asd = Csd
                F_Asd = result
            else:
                Bsd = Csd
                F_Bsd = result
| matthiasmengel/pism_pik | examples/old/pddtune/linesearch.py | Python | gpl-2.0 | 11,882 | [
"NetCDF"
] | dde71a96c74413d1a784dba9a5f66b87f63b0bea554728b77eaae3120bd84020 |
from __future__ import print_function, absolute_import, unicode_literals
__all__ = ["corner", "hist2d"]
__version__ = "1.0.2"
__author__ = "Dan Foreman-Mackey (danfm@nyu.edu)"
__copyright__ = "Copyright 2013-2015 Daniel Foreman-Mackey"
__contributors__ = [
# Alphabetical by first name.
"Adrian Price-Whelan @adrn",
"Brendon Brewer @eggplantbren",
"Ekta Patel @ekta1224",
"Emily Rice @emilurice",
"Geoff Ryan @geoffryan",
"Guillaume @ceyzeriat",
"Gregory Ashton @ga7g08",
"Kelle Cruz @kelle",
"Kyle Barbary @kbarbary",
"Marco Tazzari @mtazzari",
"Matt Pitkin @mattpitkin",
"Phil Marshall @drphilmarshall",
"Pierre Gratier @pirg",
"Stephan Hoyer @shoyer",
"Víctor Zabalza @zblz",
"Will Vousden @willvousden",
"Wolfgang Kerzendorf @wkerzendorf",
]
import logging
import numpy as np
import matplotlib.pyplot as pl
from matplotlib.ticker import MaxNLocator
from matplotlib.colors import LinearSegmentedColormap, colorConverter
from matplotlib.ticker import ScalarFormatter
try:
from scipy.ndimage import gaussian_filter
except ImportError:
gaussian_filter = None
def corner(xs, bins=20, range=None, weights=None, color="k",
           smooth=None, smooth1d=None,
           labels=None, label_kwargs=None,
           show_titles=False, title_fmt=".2f", title_kwargs=None,
           truths=None, truth_color="#4682b4",
           scale_hist=False, quantiles=None, verbose=False, fig=None,
           max_n_ticks=5, top_ticks=False, use_math_text=False,
           hist_kwargs=None, **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like (nsamples, ndim)
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.
    bins : int or array_like (ndim,) (optional)
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.
    weights : array_like (nsamples,)
        The weight of each sample. If `None` (default), samples are given
        equal weight.
    color : str (optional)
        A ``matplotlib`` style color for all histograms.
    smooth, smooth1d : float (optional)
        The standard deviation for Gaussian kernel passed to
        `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
        respectively. If `None` (default), no smoothing is applied.
    labels : iterable (ndim,) (optional)
        A list of names for the dimensions. If a ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.
    label_kwargs : dict (optional)
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.
    show_titles : bool (optional)
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.
    title_fmt : string (optional)
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)
    title_kwargs : dict (optional)
        Any extra keyword arguments to send to the `set_title` command.
    range : iterable (ndim,) (optional)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.
    truths : iterable (ndim,) (optional)
        A list of reference values to indicate on the plots. Individual
        values can be omitted by using ``None``.
    truth_color : str (optional)
        A ``matplotlib`` style color for the ``truths`` makers.
    scale_hist : bool (optional)
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?
    quantiles : iterable (optional)
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.
    verbose : bool (optional)
        If true, print the values of the computed quantiles.
    use_math_text : bool (optional)
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.
    max_n_ticks: int (optional)
        Maximum number of ticks to try to use
    top_ticks : bool (optional)
        If true, label the top ticks of each axis
    fig : matplotlib.Figure (optional)
        Overplot onto the provided figure object.
    hist_kwargs : dict (optional)
        Any extra keyword arguments to send to the 1-D histogram plots.
    **hist2d_kwargs : (optional)
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.

    Returns
    -------
    fig : matplotlib.Figure
        The figure containing the K x K grid of panels.
    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists: internally xs is (ndim, nsamples).
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            # FIX: logging.warn is deprecated; use logging.warning.
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(("It looks like the parameter(s) in "
                                  "column(s) {0} have no dynamic range. "
                                  "Please provide a `range` argument.")
                                 .format(", ".join(map(
                                     "{0}".format, np.arange(len(m))[m]))))
    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                # a scalar fraction: take an equal-tailed interval
                q = [0.5 - 0.5*range[i], 0.5 + 0.5*range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    # FIX: bin counts must be integers; float bin counts are rejected by
    # modern numpy/matplotlib histogram routines.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0           # size of one side of one panel
    lbdim = 0.5 * factor   # size of left/bottom margin
    trdim = 0.2 * factor   # size of top/right margin
    whspace = 0.05         # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        # FIX: catch the specific reshape failure instead of a bare except.
        except ValueError:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
                        wspace=whspace, hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)
    if smooth1d is None:
        hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")

    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        # with K == 1, pl.subplots returns a bare Axes, not an array
        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            ax = axes[i, i]

        # Plot the histograms.
        if smooth1d is None:
            n, _, _ = ax.hist(x, bins=bins[i], weights=weights,
                              range=range[i], **hist_kwargs)
        else:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n, b = np.histogram(x, bins=bins[i], weights=weights,
                                range=range[i])
            n = gaussian_filter(n, smooth1d)
            # draw the smoothed histogram as a step curve
            x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
            y0 = np.array(list(zip(n, n))).flatten()
            ax.plot(x0, y0, **hist_kwargs)

        if truths is not None and truths[i] is not None:
            ax.axvline(truths[i], color=truth_color)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            for q in qvalues:
                ax.axvline(q, ls="dashed", color=color)
            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50-q_16, q_84-q_50

                # Format the quantile display.
                fmt = "{{0:{0}}}".format(title_fmt).format
                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))

                # Add in the column name if it's given.
                #if labels is not None:
                #    title = "{0} = {1}".format(labels[i], title)
            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                ax.set_title(title, **title_kwargs)

        # Set up the axes.
        ax.set_xlim(range[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            if labels is not None:
                ax.set_xlabel(labels[i], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                ax = axes[i, j]
            # upper triangle is hidden; the diagonal already holds the 1-D hist
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            hist2d(y, x, ax=ax, range=[range[j], range[i]], weights=weights,
                   color=color, smooth=smooth, bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            if truths is not None:
                if truths[i] is not None and truths[j] is not None:
                    ax.plot(truths[j], truths[i], "s", color=truth_color)
                if truths[j] is not None:
                    ax.axvline(truths[j], color=truth_color)
                if truths[i] is not None:
                    ax.axhline(truths[i], color=truth_color)

            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                if labels is not None:
                    ax.set_ylabel(labels[i], **label_kwargs)
                    ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

    return fig
def quantile(x, q, weights=None):
"""
Like numpy.percentile, but:
* Values of q are quantiles [0., 1.] rather than percentiles [0., 100.]
* scalar q not supported (q must be iterable)
* optional weights on x
"""
if weights is None:
return np.percentile(x, [100. * qi for qi in q])
else:
idx = np.argsort(x)
xsorted = x[idx]
cdf = np.add.accumulate(weights[idx])
cdf /= cdf[-1]
return np.interp(q, cdf, xsorted).tolist()
def hist2d(x, y, bins=20, range=None, weights=None, levels=None, smooth=None,
           ax=None, color=None, plot_datapoints=True, plot_density=True,
           plot_contours=True, no_fill_contours=False, fill_contours=False,
           contour_kwargs=None, contourf_kwargs=None, data_kwargs=None,
           **kwargs):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x, y : array_like (nsamples,)
        The samples.
    levels : array_like
        The contour levels to draw.
    ax : matplotlib.Axes (optional)
        A axes instance on which to add the 2-D histogram.
    plot_datapoints : bool (optional)
        Draw the individual data points.
    plot_density : bool (optional)
        Draw the density colormap.
    plot_contours : bool (optional)
        Draw the contours.
    no_fill_contours : bool (optional)
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).
    fill_contours : bool (optional)
        Fill the contours.
    contour_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `contour` method.
    contourf_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `contourf` method.
    data_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    """
    if ax is None:
        ax = pl.gca()

    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            # FIX: logging.warn is deprecated; use logging.warning.
            logging.warning("Deprecated keyword argument 'extent'. "
                            "Use 'range' instead.")
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]

    # Set up the default plotting arguments.
    if color is None:
        color = "k"

    # Choose the default "sigma" contour levels (0.5-sigma to 2-sigma).
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)

    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list(
        "density_cmap", [color, (1, 1, 1, 0)])

    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list(
        "white_cmap", [(1, 1, 1), (1, 1, 1)], N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        # progressively less transparent towards the inner (denser) contours
        contour_cmap[i][-1] *= float(i) / (len(levels)+1)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
                                 range=range, weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "'range' argument.")

    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)

    # Compute the density levels: find the histogram value below which the
    # cumulative (sorted-descending) mass exceeds each requested fraction.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        # FIX: only the empty-selection IndexError is expected here; a bare
        # except also hid genuine errors.
        except IndexError:
            V[i] = Hflat[0]
    V.sort()
    m = np.diff(V) == 0
    if np.any(m):
        logging.warning("Too few points to create valid contours")
    # nudge duplicate levels apart so contour() accepts them
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = np.diff(V) == 0
    V.sort()

    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([
        X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
        X1,
        X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
    ])
    Y2 = np.concatenate([
        Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
        Y1,
        Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
    ])

    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

        # Plot the base fill to hide the densest data points.
        if (plot_contours or plot_density) and not no_fill_contours:
            ax.contourf(X2, Y2, H2.T, [V.min(), H.max()],
                        cmap=white_cmap, antialiased=False)

    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
                                                             False)
        ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
                    **contourf_kwargs)

    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(range[0])
    ax.set_ylim(range[1])
| earlbellinger/asteroseismology | regression/corner.py | Python | gpl-2.0 | 21,180 | [
"Gaussian"
] | 7304e8c6d32b8a7b1d41849659b56955cb21902b2780c9f884320be3ba68eb80 |
import numpy as np
import csv
import matplotlib.pyplot as plt
plt.ion()
# Interactive mode selection: '1' = hold-out split of train.csv,
# '0' = train on all of train.csv and predict on Kaggle's test.csv.
print '\nEnter 1 to train and test on a subsample of the training data.'
print 'Enter 0 to train on full training sample, and test on the Kaggle test data.\n'
test_on_train = raw_input('ENTER 1 or 0: ')
#read in training data
csv_file_object = csv.reader(open('train.csv', 'r'))
header1 = csv_file_object.next()
data1 = []
for row in csv_file_object:
    data1.append(row)
data1 = np.array(data1)     # all columns kept as strings until feature_selection()
#optionally make the training set small even for applying to REAL
# test data for submission (random forests keep crashing...)
#temp = data1
#train_size = int(temp.shape[0]/5.)
#train_sample_indices = np.random.random_integers(0,(temp.shape[0]-1),train_size)
#data1 = temp[train_sample_indices,:]
if test_on_train == '1':
    print '\nPreparing to train and test on subsets of Training Data...\n'
    # NOTE(review): random_integers samples WITH replacement (and is
    # deprecated in numpy), so the train subset can contain duplicate rows;
    # the test set below excludes all sampled indices, so no train/test leak
    temp = data1
    train_size = int(temp.shape[0]/20.)
    train_sample_indices = np.random.random_integers(0,(temp.shape[0]-1),train_size)
    data1 = temp[train_sample_indices,:]
    test_sample_indices = [i for i in range(temp.shape[0]) if i not in train_sample_indices]
    data2 = temp[test_sample_indices,:]
    header2 = header1
    # ground truth for the held-out rows, used later for scoring
    true_count = temp[test_sample_indices, header1.index('count')].astype(int)
else:
    print '\nPreparing to apply model to Kaggle Test Data...\n'
    #read in testing data
    test_file_object = csv.reader(open('test.csv', 'r'))
    header2 = test_file_object.next()
    data2 = []
    for row in test_file_object:
        data2.append(row)
    data2 = np.array(data2)
#========================================
### MAIN FUNCTION BLOCK ###
def bikerides():
    """Top-level driver: choose features, train/predict, then optionally plot."""
    #select features to train on & organize data
    train_data, train_count, test_data, test_datetime, important = feature_selection()
    #select & run machine learning algorithm
    learning_selection(train_data, train_count, test_data, test_datetime)
    #plot features
    print '------------------------------------\n'
    wanttoplot = raw_input('Would you like to plot features? [enter y/n]: ')
    while wanttoplot == 'y':
        # plotting_selection is presumably defined later in this file — TODO confirm
        plotting_selection(important, train_data, train_count)
        print '\n------------------------------------'
        wanttoplot = raw_input('\nWould you like to make another plot? [enter y/n]: ')
        #plt.close()
        plt.figure() #start new figure, leave others up
    print '\nFinished\n'
    return
### end bikerides MAIN block
#========================================
# select which features to use and set up arrays...
def feature_selection():
    """Prompt for feature names and build float feature matrices.

    Returns (train_data, train_count, test_data, test_datetime, important):
    train/test feature arrays, the training target column, the raw datetime
    strings of the test rows, and the chosen feature-name list.
    Reads the module-level data1/data2/header1/header2 arrays.
    """
    print '------------------------------------\n'
    choose_features = raw_input('CHOOSE IMPORTANT FEATURES TO TRAIN ON.\nYou can select any of the following features:\n\n time \n season \n holiday \n workingday \n weather \n temp \n atemp \n humidity \n windspeed \n\nList names separated by a space, for example: weather atemp \n (you can also enter "ALL") \n\nENTER FEATURES: ')
    if choose_features == 'ALL':
        important = ['time', 'season', 'holiday', 'workingday', 'weather', 'temp', 'atemp', 'humidity', 'windspeed']
    else:
        important = choose_features.split()
    print '\nOrganizing data structures.\n'
    train_data = np.zeros([data1.shape[0],len(important)])
    test_data = np.zeros([data2.shape[0],len(important)])
    for j, item in enumerate(important):
        print '...processing: ', j, item
        if item == 'time':
            # 'time' is derived from the datetime column rather than looked up
            time1 = [datetime[11:13] for datetime in data1[:,0]] #pick out hours
            train_data[:,j] = np.array(time1).astype(np.float) #convert to float
            time2 = [datetime[11:13] for datetime in data2[:,0]]
            test_data[:,j] = np.array(time2).astype(np.float)
        else:
            #get corresponding indices for each data set
            itrain = header1.index(item)
            itest = header2.index(item)
            #convert all entries to float
            train_data[:,j] = data1[:,itrain].astype(np.float)
            test_data[:,j] = data2[:,itest].astype(np.float)
    # last column of train.csv is the 'count' target
    train_count = data1[:,-1].astype(np.float)
    test_datetime = data2[:,0]
    return train_data, train_count, test_data, test_datetime, important
### end feature_selection
#========================================
# choose a machine learning method
def learning_selection(datatrain, alivetrain, datatest, datetime):
print '\n------------------------------------'
print '\nChoose a Machine Learning technique from the following list: \n'
print ' 1. ORDINARY LEAST SQUARES \n\t fits a linear model by minimizing the residual sum of squares'
print ' 2. STOCASTIC GRADIENT DESCENT (SGD) \n\t linear classifier applied to a normalized/standardized \n\t version of the data, using the hinge-loss option for penalties; \n\t searches can be more efficient using gradient information'
print ' 3. BAYESIAN RIDGE REGRESSION \n\t fits a linear model by maximizing the marginal log(Likelihood);\n\t can be more robust to poorly-defined problems'
print ' 4. RANDOM FOREST \n\t each tree gets a bootstrapped sample of training data, and \n\t branches are chosen that yield the best outcome for a random \n\t subsample of features; final result is the model average of \n\t all 100 trees'
print ' 5. EXTREMELY RANDOMIZED FOREST \n\t similar to above, but sets a random threshold for whether a \n\t branch outcome is considered better or not; can further \n\t reduce variance, but may be more biased'
print ' 6. SUPPORT VECTOR MACHINE (SVM) \n\t multi-class classification using subsets of training data \n\t (support vectors); can be effective in many dimensions, \n\t usable with more features than training samples'
print ' 7. NAIVE BAYES \n\t assumes features are independent and gaussian; fast to run \n\t and can be trained on very small samples'
print ' 8. BERNOULLI NAIVE BAYES \n\t assumes binary distributions of data, or may manipulate data \n\t into this form'
print ' 9. ADABOOST \n\t ensemble method (like forests) that uses weights on the \n\t training samples to boost importance of incorrect predictions, \n\t so that improvements can be made before outputting the average \n\t of all 100 weak learners'
print ' 10. GRADIENT BOOSTED REGRESSION TREES (GBRT) \n\t another ensemble method with 100 weak learners; robust to \n\t outliers and handling of mixed data types'
choose_method = int(raw_input('\nENTER THE # OF THE TECHNIQUE YOU WANT TO APPLY: '))
#-------------
# ORDINARY LEAST SQUARES
if choose_method == 1:
print '\nRunning OLS...\n'
from sklearn import linear_model
ols = linear_model.LinearRegression()
ols.fit(datatrain,alivetrain)
Output = ols.predict(datatest)
#-------------
# STOCASTIC GRADIENT DESCENT (SGD)
elif choose_method == 2:
print '\nRunning SGD Classifier...\n'
from sklearn.linear_model import SGDClassifier
#normalize feature scaling (SGD is sensitive to this)
# note: this helps significantly (~10% improvement in score)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(datatrain)
datatrain = scaler.transform(datatrain)
# apply same transformation to test data
datatest = scaler.transform(datatest)
sgdc = SGDClassifier(loss="hinge", penalty="l2")
sgdc.fit(datatrain,alivetrain)
Output = sgdc.predict(datatest)
#-------------
# BAYESIAN RIDGE REGRESSION
elif choose_method == 3:
print '\nRunning Bayesian Ridge Regression...\n'
from sklearn import linear_model
brr = linear_model.BayesianRidge()
brr.fit(datatrain,alivetrain)
Output = brr.predict(datatest)
#-------------
# RANDOM FOREST
elif choose_method == 4:
print '\nRunning Random Forest Classifier...\n'
from sklearn.ensemble import RandomForestClassifier
Forest = RandomForestClassifier(n_estimators = 100) #1000 trees
Forest = Forest.fit(datatrain,alivetrain)
Output = Forest.predict(datatest)
#-------------
# EXTREMELY RANDOMIZED FOREST
elif choose_method == 5:
print '\nRunning Extremely Randomized Forest...\n'
from sklearn.ensemble import ExtraTreesClassifier
extratrees = ExtraTreesClassifier(n_estimators = 100) #1000 trees
extratrees = extratrees.fit(datatrain,alivetrain)
Output = extratrees.predict(datatest)
#-------------
# SUPPORT VECTOR MACHINES
elif choose_method == 6:
print '\nRunning SVM Classifier...\n'
from sklearn import svm
clf = svm.SVC()
clf.fit(datatrain,alivetrain)
Output = clf.predict(datatest)
#-------------
# NAIVE BAYES
elif choose_method == 7:
print '\nRunning Gaussian Naive Bayes...\n'
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(datatrain,alivetrain)
Output = gnb.predict(datatest)
#-------------
# BERNOULLI NAIVE BAYES
elif choose_method == 8:
print '\nRunning Bernoulli Naive Bayes...\n'
from sklearn.naive_bayes import BernoulliNB
bern = BernoulliNB()
bern.fit(datatrain,alivetrain)
Output = bern.predict(datatest)
#-------------
# ADABOOST
elif choose_method == 9:
print '\nRunning AdaBoost Classifier...\n'
from sklearn.ensemble import AdaBoostClassifier
ada = AdaBoostClassifier(n_estimators=100)
ada.fit(datatrain,alivetrain)
Output = ada.predict(datatest)
#-------------
# GRADIENT TREE BOOSTING
elif choose_method == 10:
print '\nRunning GBRT Classifier...\n'
from sklearn.ensemble import GradientBoostingClassifier
grad = GradientBoostingClassifier(n_estimators=100)
grad.fit(datatrain,alivetrain)
Output = grad.predict(datatest)
#----------------------------------------
# Either analyze response to the subset of Training Data OR
# output result on Kaggle Test Data to a file for submission...
count = Output.astype(np.int)
if test_on_train == '1':
print '------------------------------------\n'
print 'Comparing to known counts.\n'
#OUTPUT SOME STATISTICAL COMPARISONS HERE!
#print true_count.shape,count.shape, len(true_count), len(count)
#print 'LESS THAN ZERO?? ', len(np.where(count < 0.))
count[np.where(count < 0.)] = 0.
n = len(true_count)
summation_arg = (np.log(count+1.) - np.log(true_count+1.))**2.
rmsle = np.sqrt(np.sum(summation_arg)/n)
print 'RMSLE', rmsle, '\n'
else:
print 'Saving Predictions in file: output.csv\n'
f = open('output.csv', 'w')
open_file_object = csv.writer(f)
open_file_object.writerow(['datetime','count'])
for i in range(len(datetime)):
open_file_object.writerow([datetime[i],count[i]])
f.close()
return
### end of learning_selection()
#========================================
# Plotting option function
def plotting_selection(important, train_data, train_count):
    """Interactively plot ride count against one user-chosen feature.

    important : list of feature names used in the analysis (menu labels)
    train_data : 2D array whose columns correspond to `important`
    train_count : 1D array of ride counts (the y-axis)

    Prompts for a feature number, scatter-plots count vs. that feature
    (with random jitter so discrete feature values do not overlap), and
    overlays the per-bin mean counts as large black dots.
    Python 2 only (print statement, raw_input); relies on module-level
    ``np`` and ``plt``.
    """
    print '\nCHOOSE FEATURES TO PLOT.\nYou can select any one of the features that you used for analysis. \nCOUNT will be plotted as a function of your variable.\n'
    # Show a 1-based menu of the available features.
    for j,item in enumerate(important):
        print str(j+1)+'. '+item
    plotfeatures = raw_input('\nENTER THE # of A FEATURE: ')
    index = int(plotfeatures) - 1  # back to a 0-based column index
    x_var = train_data[:,index] #x-axis variable
    y_var = train_count #y-axis variable
    # Gaussian jitter spreads overlapping points; categorical features get less.
    jitter = np.random.randn(len(y_var))/2.
    if important[index] in ['season','holiday','workingday','weather']:
        jitter = jitter/5. #smaller jitter
    plt.scatter(x_var+jitter,y_var,color='darkcyan', marker='+')
    # Bin edges: fixed edges for the categorical features, otherwise
    # 3-unit-wide bins spanning the observed data range.
    if important[index] in ['holiday','workingday']:
        ix = np.digitize(x_var,bins=[-0.5,0.5,1.5])
    elif important[index] in ['season','weather']:
        ix = np.digitize(x_var,bins=[0.5,1.5,2.5,3.5,4.5])
    else:
        b = [i for i in range(int(np.min(x_var)-1),int(np.max(x_var)+1),3)]
        ix = np.digitize(x_var,bins=b)
    # Overlay the mean count within each occupied bin as a big black dot.
    for i in list(set(ix)):
        here = np.where(ix == i)
        x_mean = np.mean(x_var[here])
        y_mean = np.mean(y_var[here])
        plt.scatter(x_mean,y_mean,color='black', marker='o', s=100)
    plt.xlabel(important[index])
    plt.ylabel('count')
    #plt.title('blue = lived, orange = died')
    return
### end of plotting_selection
####################################
bikerides()
| jesford/bike_sharing | bikerides.py | Python | mit | 12,669 | [
"Gaussian"
] | d1491a042bfb8f659b3d0a445046db90348bed86b1437460067519d04890aea4 |
import numpy as np
import mygis
from bunch import Bunch
def write_file(date, info, cesm):
    """writes cesm input data to a netcdf file

    Parameters
    ----------
    date : object whose str() is used in the file name; spaces are
        replaced by "_" so the name is shell-friendly
    info : object providing output_file (filename prefix) and the 2D
        coordinate arrays lat_data / lon_data
    cesm : dict-like (Bunch) of arrays; must provide the keys used below
        ("cloud", "ice", "u", "v", "t", "p", "dz", "z", "hgt", "sw",
        "lw", "ts", "land") plus "qv" (also readable as cesm.qv)

    "qv" is written as the primary variable via mygis.write(); every
    other field rides along in extravars.
    """
    filename = info.output_file + str(date).replace(" ", "_")
    # dimension tuples shared by the variable definitions below
    dims = ("time", "level", "lat", "lon")  # full 4D atmosphere fields
    dims_3d = ("time", "lat", "lon")        # time-varying surface fields
    dims_2d = ("lat", "lon")                # static 2D fields
    extra_vars = []
    # 3D variables (+time)
    # cloud,ice,qv,u,v,t,p
    # 3D variables (constant in time)
    # z
    # 2D variables (+time)
    # latent_heat,PBL_height,sensible_heat
    # 2D variables (constant in time)
    # hgt, latitude, longitude
    # used as primary variable in io.write
    # atts=Bunch(long_name="Specific Humidity",units="kg kg**-1")
    # extra_vars.append(Bunch(name="qv",data=cesm["qv"],dims=dims,dtype="f",attributes=atts))
    atts = Bunch(long_name="Cloud liquid water content", units="kg kg**-1")
    extra_vars.append(Bunch(name="cloud", data=cesm["cloud"], dims=dims, dtype="f", attributes=atts))
    atts = Bunch(long_name="Cloud ice water content", units="kg kg**-1")
    extra_vars.append(Bunch(name="ice", data=cesm["ice"], dims=dims, dtype="f", attributes=atts))
    atts = Bunch(long_name="U (E/W) wind speed", units="m s**-1")
    extra_vars.append(Bunch(name="u", data=cesm["u"], dims=dims, dtype="f", attributes=atts))
    atts = Bunch(long_name="V (N/S) wind speed", units="m s**-1")
    extra_vars.append(Bunch(name="v", data=cesm["v"], dims=dims, dtype="f", attributes=atts))
    # BUG FIX: potential temperature is in Kelvin; the original carried
    # units="kg kg**-1", copied from the moisture fields above.
    atts = Bunch(long_name="Potential Temperature", units="K")
    extra_vars.append(Bunch(name="theta", data=cesm["t"], dims=dims, dtype="f", attributes=atts))
    atts = Bunch(long_name="Pressure", units="Pa")
    extra_vars.append(Bunch(name="p", data=cesm["p"], dims=dims, dtype="f", attributes=atts))
    # dz is a single column of layer thicknesses -> only the "level" dimension
    atts = Bunch(long_name="Layer thicknesses", units="m")
    extra_vars.append(Bunch(name="dz", data=cesm["dz"].astype("f"), dims=(dims[1],), dtype="f", attributes=atts))
    atts = Bunch(long_name="Layer height", units="m")
    extra_vars.append(Bunch(name="z", data=cesm["z"].astype("f"), dims=dims, dtype="f", attributes=atts))
    atts = Bunch(long_name="Topographic Height", units="m")
    print(cesm["hgt"].shape, dims_2d, cesm.qv.shape)  # debug output
    extra_vars.append(Bunch(name="hgt", data=cesm["hgt"], dims=dims_2d, dtype="f", attributes=atts))
    atts = Bunch(long_name="Surface Shortwave Radiation (positive down)", units="W m**-2")
    extra_vars.append(Bunch(name="swdown", data=cesm["sw"], dims=dims_3d, dtype="f", attributes=atts))
    atts = Bunch(long_name="Surface Longwave Radiation (positive down)", units="W m**-2")
    extra_vars.append(Bunch(name="lwdown", data=cesm["lw"], dims=dims_3d, dtype="f", attributes=atts))
    atts = Bunch(long_name="Skin Temperature", units="K")
    extra_vars.append(Bunch(name="tskin", data=cesm["ts"], dims=dims_3d, dtype="f", attributes=atts))
    atts = Bunch(long_name="latitude", units="degrees")
    extra_vars.append(Bunch(name="lat", data=info.lat_data, dims=dims_2d, dtype="f", attributes=atts))
    atts = Bunch(long_name="longitude", units="degrees")
    extra_vars.append(Bunch(name="lon", data=info.lon_data, dims=dims_2d, dtype="f", attributes=atts))
    atts = Bunch(long_name="xland", units="")
    extra_vars.append(Bunch(name="xland", data=cesm["land"], dims=dims_2d, dtype="f", attributes=atts))
    # debug: report the shape of every field being written
    for k in cesm.keys():
        print(k, cesm[k].shape)
    qvatts = Bunch(long_name="Specific Humidity", units="kg kg**-1")
    print(" ")
    print(" ")
    print("Writing:" + filename)
    print(" ")
    print(" ")
    # write to output file; qv is the primary variable, everything else
    # is attached through extravars
    mygis.write(filename=filename, varname="qv", data=cesm.qv, dims=dims, attributes=qvatts, dtype="f",
                extravars=extra_vars)  # ,history=" Produced by cesm2icar v."+info.version)
| gutmann/icar | helpers/cesm/output.py | Python | mit | 3,666 | [
"NetCDF"
] | 56a5ccfe64dc75f689929fb2401f0902349060307de0a5570bdaeed02bd05aca |
##########################################################################
# this script was generated by openmm-builder. to customize it further,
# you can save the file to disk and edit it with your favorite editor.
##########################################################################
from __future__ import print_function
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import argparse
def parse_args(args):
    """Parse command-line options.

    Parameters
    ----------
    args : list of str
        Argument tokens to parse (e.g. ``sys.argv[1:]``).

    Returns
    -------
    argparse.Namespace
        Has a single ``checkpoint`` attribute: path to an OpenMM
        checkpoint, or None when the option is absent.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--checkpoint",
                        help="path to an OpenMM checkpoint",
                        type=str
                        )
    # Bug fix: the original called parser.parse_args() with no arguments,
    # silently ignoring ``args`` and always reading sys.argv instead.
    return parser.parse_args(args)
# sampling interval (steps between reports) and total production length
freq = int(1e5)
total = int(1e9)
pdb = PDBFile('input.pdb')
# AMBER03 force field with the matching OBC implicit-solvent model
forcefield = ForceField('amber03.xml', 'amber03_obc.xml')
# no cutoff (implicit solvent) and no constraints -> 1 fs time step
system = forcefield.createSystem(pdb.topology, nonbondedMethod=NoCutoff, constraints=None)
integrator = LangevinIntegrator(300*kelvin, 50/picoseconds, 1.0*femtoseconds)
platform = Platform.getPlatformByName('OpenCL')
properties = {'OpenCLDeviceIndex' : '0'}
simulation = Simulation(pdb.topology, system, integrator, platform, properties)
simulation.context.setPositions(pdb.positions)
# NOTE(review): ``app`` is not imported explicitly; it presumably works only
# because the earlier star imports bind the ``simtk.openmm.app`` submodule
# as a side effect — confirm, or use the reporter names directly (they are
# already star-imported).
simulation.reporters.append(app.DCDReporter('trajectory.dcd', freq))
simulation.reporters.append(app.StateDataReporter("sim.csv", freq, step=True,
    potentialEnergy=True, totalEnergy=True, temperature=True, separator='\t'))
simulation.reporters.append(app.CheckpointReporter('ckpt.chk', freq))
# NOTE(review): parse_args() is defined above but never called here, so the
# --checkpoint option is currently ignored.
print('Running Production...')
# step in report-sized chunks until the requested total is reached
total_steps = 0
while total_steps < total:
    simulation.step(freq)
    total_steps += freq
print('Done!')
| LCLS/Protein-Folding-Sims | hp24stab/amber03/OPENCL/amber03_implicit_noconstraints.py | Python | mit | 1,661 | [
"OpenMM"
] | a68db23e739c59435a34f5420ac464a9d4998835b77e36cb364e77eb2d35e67f |
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .script_interface import ScriptObjectRegistry, ScriptInterfaceHelper, script_interface_register
import numpy as np
@script_interface_register
class MeanVarianceCalculator(ScriptInterfaceHelper):
    """
    Accumulates results from observables.

    Parameters
    ----------
    obs : :class:`espressomd.observables.Observable`
    delta_N : :obj:`int`
        Number of timesteps between subsequent samples for the auto update mechanism.

    Methods
    -------
    update()
        Update the accumulator (get the current values from the observable).

    """
    _so_name = "Accumulators::MeanVarianceCalculator"
    _so_bind_methods = (
        "update",
        "shape",
    )
    _so_creation_policy = "LOCAL"

    def _fetch_reshaped(self, core_method):
        # Pull a flat result from the core and give it the observable's shape.
        return np.reshape(np.asarray(self.call_method(core_method)), self.shape())

    def mean(self):
        """
        Sample mean of the observable the accumulator was initialized with.
        """
        return self._fetch_reshaped("mean")

    def variance(self):
        """
        Sample variance of the observable.
        """
        return self._fetch_reshaped("variance")

    def std_error(self):
        """
        Standard error computed from the sample variance, assuming
        uncorrelated samples.
        """
        return self._fetch_reshaped("std_error")
@script_interface_register
class TimeSeries(ScriptInterfaceHelper):
    """
    Records results from observables.

    Parameters
    ----------
    obs : :class:`espressomd.observables.Observable`
    delta_N : :obj:`int`
        Number of timesteps between subsequent samples for the auto update mechanism.

    Methods
    -------
    update()
        Update the accumulator (get the current values from the observable).
    clear()
        Clear the data

    """
    _so_name = "Accumulators::TimeSeries"
    _so_bind_methods = (
        "update",
        "shape",
        "clear"
    )
    _so_creation_policy = "LOCAL"

    def time_series(self):
        """
        Every recorded value of the observable, reshaped to match the
        observable's own shape.
        """
        recorded = self.call_method("time_series")
        return np.reshape(np.asarray(recorded), self.shape())
@script_interface_register
class Correlator(ScriptInterfaceHelper):
    """
    Calculates the correlation of two observables :math:`A` and :math:`B`,
    or of one observable against itself (i.e. :math:`B = A`).
    The correlation can be compressed using the :ref:`multiple tau correlation
    algorithm <Details of the multiple tau correlation algorithm>`.

    The operation that is performed on :math:`A(t)` and :math:`B(t+\\tau)`
    to obtain :math:`C(\\tau)` depends on the ``corr_operation`` argument:

    * ``"scalar_product"``: Scalar product of :math:`A` and
      :math:`B`, i.e., :math:`C=\\sum\\limits_{i} A_i B_i`

    * ``"componentwise_product"``: Componentwise product of
      :math:`A` and :math:`B`, i.e., :math:`C_i = A_i B_i`

    * ``"square_distance_componentwise"``: Each component of
      the correlation vector is the square of the difference
      between the corresponding components of the observables, i.e.,
      :math:`C_i = (A_i-B_i)^2`. Example: when :math:`A` is
      :class:`espressomd.observables.ParticlePositions`, it produces the
      mean square displacement (for each component separately).

    * ``"tensor_product"``: Tensor product of :math:`A` and
      :math:`B`, i.e., :math:`C_{i \\cdot l_B + j} = A_i B_j`
      with :math:`l_B` the length of :math:`B`.

    * ``"fcs_acf"``: Fluorescence Correlation Spectroscopy (FCS)
      autocorrelation function, i.e.,

      .. math::

          G_i(\\tau) =
          \\frac{1}{N} \\left< \\exp \\left(
          - \\frac{\\Delta x_i^2(\\tau)}{w_x^2}
          - \\frac{\\Delta y_i^2(\\tau)}{w_y^2}
          - \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
          \\right) \\right>

      where :math:`N` is the average number of fluorophores in the
      illumination area,

      .. math::

          \\Delta x_i^2(\\tau) = \\left( x_i(0) - x_i(\\tau) \\right)^2

      is the square displacement of particle
      :math:`i` in the :math:`x` direction, and :math:`w_x`
      is the beam waist of the intensity profile of the
      exciting laser beam,

      .. math::

          W(x,y,z) = I_0 \\exp
          \\left( - \\frac{2x^2}{w_x^2} - \\frac{2y^2}{w_y^2} -
          \\frac{2z^2}{w_z^2} \\right).

      The values of :math:`w_x`, :math:`w_y`, and :math:`w_z`
      are passed to the correlator as ``args``. The correlator calculates

      .. math::

          C_i(\\tau) =
          \\exp \\left(
          - \\frac{\\Delta x_i^2(\\tau)}{w_x^2}
          - \\frac{\\Delta y_i^2(\\tau)}{w_y^2}
          - \\frac{\\Delta z_i^2(\\tau)}{w_z^2}
          \\right)

      Per each 3 dimensions of the observable, one dimension of the correlation
      output is produced. If ``"fcs_acf"`` is used with other observables than
      :class:`espressomd.observables.ParticlePositions`, the physical meaning
      of the result is unclear.

      The above equations are a generalization of the formula presented by
      Höfling et al. :cite:`hofling11a`. For more information, see references
      therein.

    Parameters
    ----------
    obs1 : :class:`espressomd.observables.Observable`
        The observable :math:`A` to be correlated with :math:`B` (``obs2``).
        If ``obs2`` is omitted, autocorrelation of ``obs1`` is calculated by
        default.

    obs2 : :class:`espressomd.observables.Observable`, optional
        The observable :math:`B` to be correlated with :math:`A` (``obs1``).

    corr_operation : :obj:`str`
        The operation that is performed on :math:`A(t)` and :math:`B(t+\\tau)`.

    delta_N : :obj:`int`
        Number of timesteps between subsequent samples for the auto update mechanism.

    tau_max : :obj:`float`
        This is the maximum value of :math:`\\tau` for which the
        correlation should be computed.  Warning: Unless you are using
        the multiple tau correlator, choosing ``tau_max`` of more than
        ``100 * dt`` will result in a huge computational overhead.  In a
        multiple tau correlator with reasonable parameters, ``tau_max``
        can span the entire simulation without too much additional cpu time.

    tau_lin : :obj:`int`
        The number of data-points for which the results are linearly spaced
        in ``tau``. This is a parameter of the multiple tau correlator. If you
        want to use it, make sure that you know how it works. ``tau_lin`` must
        be divisible by 2. By setting ``tau_lin`` such that
        ``tau_max >= dt * delta_N * tau_lin``, the
        multiple tau correlator is used, otherwise the trivial linear
        correlator is used. By setting ``tau_lin = 1``, the value will be
        overridden by ``tau_lin = ceil(tau_max / (dt * delta_N))``, which
        will result in either the multiple or linear tau correlator.
        In many cases, ``tau_lin=16`` is a
        good choice but this may strongly depend on the observables you are
        correlating. For more information, we recommend to read
        ref. :cite:`ramirez10a` or to perform your own tests.

    compress1 : :obj:`str`
        These functions are used to compress the data when
        going to the next level of the multiple tau
        correlator. This is done by producing one value out of two.
        The following compression functions are available:

        * ``"discard2"``: (default value) discard the second value from the time series, use the first value as the result

        * ``"discard1"``: discard the first value from the time series, use the second value as the result

        * ``"linear"``: make a linear combination (average) of the two values

        If only ``compress1`` is specified, then
        the same compression function is used for both
        observables. If both ``compress1`` and ``compress2`` are specified,
        then ``compress1`` is used for ``obs1`` and ``compress2`` for ``obs2``.
        Both ``discard1`` and ``discard2`` are safe for all
        observables but produce poor statistics in the
        tail. For some observables, ``"linear"`` compression
        can be used which makes an average of two
        neighboring values but produces systematic
        errors.  Depending on the observable, the
        systematic error using the ``"linear"`` compression
        can be anything between harmless and disastrous.
        For more information, we recommend to read ref.
        :cite:`ramirez10a` or to perform your own tests.

    compress2 : :obj:`str`, optional
        See ``compress1``.

    args: :obj:`float` of length 3
        Three floats which are passed as arguments to the correlation
        function. Currently it is only used by ``"fcs_acf"``, which
        will square these values in the core; if you later decide to
        update these weights with ``obs.args = [...]``, you'll have to
        provide already squared values! Other correlation operations
        will ignore these values.
    """
    # Name of the core (C++) class this Python wrapper binds to, and the
    # core methods exposed directly on the Python object.
    _so_name = "Accumulators::Correlator"
    _so_bind_methods = (
        "update",
        "shape",
        "finalize")
    _so_creation_policy = "LOCAL"

    def result(self):
        """
        Get correlation.

        Returns
        -------
        :obj:`ndarray` of :obj:`float`
            The result of the correlation function. The shape of the array
            is determined by the shape of the input observable(s) and the
            correlation operation.
        """
        # The core returns a flat list; reshape it to match the observable.
        return np.array(self.call_method(
            "get_correlation")).reshape(self.shape())

    def lag_times(self):
        """
        Returns
        -------
        :obj:`ndarray` of :obj:`float`
            Lag times of the correlation.
        """
        return np.array(self.call_method("get_lag_times"))

    def sample_sizes(self):
        """
        Returns
        -------
        :obj:`ndarray` of :obj:`int`
            Samples sizes for each lag time.
        """
        return np.array(self.call_method("get_samples_sizes"), dtype=int)
@script_interface_register
class AutoUpdateAccumulators(ScriptObjectRegistry):
    """
    Class for handling the auto-update of accumulators used by
    :class:`espressomd.system.System`.
    """
    _so_name = "Accumulators::AutoUpdateAccumulators"
    _so_creation_policy = "LOCAL"

    def _forward(self, method, **kwargs):
        # Every operation is delegated to the core registry.
        self.call_method(method, **kwargs)

    def add(self, accumulator):
        """
        Register an accumulator instance for automatic updating.
        """
        self._forward("add", object=accumulator)

    def remove(self, accumulator):
        """
        Unregister an accumulator from automatic updating.
        """
        self._forward("remove", object=accumulator)

    def clear(self):
        """
        Unregister every accumulator from automatic updating.
        """
        self._forward("clear")
| pkreissl/espresso | src/python/espressomd/accumulators.py | Python | gpl-3.0 | 11,637 | [
"ESPResSo",
"exciting"
] | 74ce95b1fe3aace694ce3f1ea046329a0390f3e3c2ad42ea63f6051a0fb0429e |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Proxy class for the Gramps databases. Filter out all data marked private.
"""
#-------------------------------------------------------------------------
#
# Python modules
#
#-------------------------------------------------------------------------
import sys
if sys.version_info[0] < 3:
from itertools import ifilter as filter
else:
pass #python 3 has filter
import types
#-------------------------------------------------------------------------
#
# GRAMPS libraries
#
#-------------------------------------------------------------------------
from ..db.base import DbReadBase, DbWriteBase
class ProxyCursor(object):
    """
    A cursor for moving through proxied data.

    Wraps a pair of callables: one listing the handles visible through
    the proxy and one fetching the raw record for a given handle.
    """

    def __init__(self, get_raw, get_handles):
        self.get_raw = get_raw
        self.get_handles = get_handles

    def __enter__(self):
        """
        Context manager enter method
        """
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release; the cursor holds no resources.
        pass

    def __iter__(self):
        """Yield (handle, raw data) pairs for every visible handle."""
        return ((handle, self.get_raw(handle))
                for handle in self.get_handles())
class ProxyMap(object):
    """
    A dictionary-like object for accessing "raw" proxied data. Of
    course, proxied data may have been changed by the proxy.
    """

    def __init__(self, db, get_raw, get_keys):
        self.db = db
        self.get_raw = get_raw
        self.get_keys = get_keys

    def __getitem__(self, handle):
        """Return the raw record stored under *handle*."""
        return self.get_raw(handle)

    def keys(self):
        """Return the handles visible through the proxy."""
        return self.get_keys()
class ProxyDbBase(DbReadBase):
"""
ProxyDbBase is a base class for building a proxy to a Gramps database.
This class attempts to implement functions that are likely to be common
among proxy classes. Functions that are not likely to be common raise a
NotImplementedError to remind the developer to implement those functions.
Real database proxy classes can inherit from this class to make sure the
database interface is properly implemented.
"""
def __init__(self, db):
"""
Create a new ProxyDb instance.
"""
self.db = self.basedb = db
while isinstance(self.basedb, ProxyDbBase):
self.basedb = self.basedb.db
self.name_formats = db.name_formats
self.bookmarks = db.bookmarks
self.family_bookmarks = db.family_bookmarks
self.event_bookmarks = db.event_bookmarks
self.place_bookmarks = db.place_bookmarks
self.source_bookmarks = db.source_bookmarks
self.citation_bookmarks = db.citation_bookmarks
self.repo_bookmarks = db.repo_bookmarks
self.media_bookmarks = db.media_bookmarks
self.note_bookmarks = db.note_bookmarks
self.person_map = ProxyMap(self, self.get_raw_person_data,
self.get_person_handles)
self.family_map = ProxyMap(self, self.get_raw_family_data,
self.get_family_handles)
self.event_map = ProxyMap(self, self.get_raw_event_data,
self.get_event_handles)
self.place_map = ProxyMap(self, self.get_raw_place_data,
self.get_place_handles)
self.source_map = ProxyMap(self, self.get_raw_source_data,
self.get_source_handles)
self.repository_map = ProxyMap(self, self.get_raw_repository_data,
self.get_repository_handles)
self.media_map = ProxyMap(self, self.get_raw_object_data,
self.get_media_object_handles)
self.note_map = ProxyMap(self, self.get_raw_note_data,
self.get_note_handles)
def is_open(self):
"""
Return 1 if the database has been opened.
"""
return self.db.is_open
def get_researcher(self):
"""returns the Researcher instance, providing information about
the owner of the database"""
return self.db.get_researcher()
def include_something(self, handle, obj=None):
"""
Model predicate. Returns True if object referred to by handle is to be
included, otherwise returns False.
"""
if obj is None:
obj = self.get_unfiltered_something(handle)
# Call function to determine if object should be included or not
return obj.include()
# Define default predicates for each object type
include_person = \
include_family = \
include_event = \
include_source = \
include_citation = \
include_place = \
include_media_object = \
include_repository = \
include_note = \
include_tag = \
None
def get_person_cursor(self):
return ProxyCursor(self.get_raw_person_data,
self.get_person_handles)
def get_family_cursor(self):
return ProxyCursor(self.get_raw_family_data,
self.get_family_handles)
def get_event_cursor(self):
return ProxyCursor(self.get_raw_event_data,
self.get_event_handles)
def get_source_cursor(self):
return ProxyCursor(self.get_raw_source_data,
self.get_source_handles)
def get_citation_cursor(self):
return ProxyCursor(self.get_raw_citation_data,
self.get_citation_handles)
def get_place_cursor(self):
return ProxyCursor(self.get_raw_place_data,
self.get_place_handles)
def get_media_cursor(self):
return ProxyCursor(self.get_raw_object_data,
self.get_media_object_handles)
def get_repository_cursor(self):
return ProxyCursor(self.get_raw_repository_data,
self.get_repository_handles)
def get_note_cursor(self):
return ProxyCursor(self.get_raw_note_data,
self.get_note_handles)
def get_tag_cursor(self):
return ProxyCursor(self.get_raw_tag_data,
self.get_tag_handles)
def get_person_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each Person in
the database.
"""
if self.db.is_open:
return list(self.iter_person_handles())
else:
return []
def get_family_handles(self, sort_handles=True):
"""
Return a list of database handles, one handle for each Family in
the database.
"""
if self.db.is_open:
return list(self.iter_family_handles())
else:
return []
def get_event_handles(self, sort_handles=True):
"""
Return a list of database handles, one handle for each Event in
the database.
"""
if self.db.is_open:
return list(self.iter_event_handles())
else:
return []
def get_source_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each Source in
the database.
"""
if self.db.is_open:
return list(self.iter_source_handles())
else:
return []
def get_citation_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each Citation in
the database.
"""
if self.db.is_open:
return list(self.iter_citation_handles())
else:
return []
def get_place_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each Place in
the database.
"""
if self.db.is_open:
return list(self.iter_place_handles())
else:
return []
def get_media_object_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each MediaObject in
the database.
"""
if self.db.is_open:
return list(self.iter_media_object_handles())
else:
return []
def get_repository_handles(self, sort_handles=True):
"""
Return a list of database handles, one handle for each Repository in
the database.
"""
if self.db.is_open:
return list(self.iter_repository_handles())
else:
return []
def get_note_handles(self, sort_handles=True):
"""
Return a list of database handles, one handle for each Note in
the database.
"""
if self.db.is_open:
return list(self.iter_note_handles())
else:
return []
def get_tag_handles(self, sort_handles=False):
"""
Return a list of database handles, one handle for each Tag in
the database.
"""
if self.db.is_open:
return list(self.iter_tag_handles())
else:
return []
def get_default_person(self):
"""returns the default Person of the database"""
return self.db.get_default_person()
def get_default_handle(self):
"""returns the default Person of the database"""
return self.db.get_default_handle()
def iter_person_handles(self):
"""
Return an iterator over database handles, one handle for each Person in
the database.
"""
return filter(self.include_person, self.db.iter_person_handles())
def iter_family_handles(self):
"""
Return an iterator over database handles, one handle for each Family in
the database.
"""
return filter(self.include_family, self.db.iter_family_handles())
def iter_event_handles(self):
"""
Return an iterator over database handles, one handle for each Event in
the database.
"""
return filter(self.include_event, self.db.iter_event_handles())
def iter_source_handles(self):
"""
Return an iterator over database handles, one handle for each Source in
the database.
"""
return filter(self.include_source, self.db.iter_source_handles())
def iter_citation_handles(self):
"""
Return an iterator over database handles, one handle for each Citation
in the database.
"""
return filter(self.include_citation, self.db.iter_citation_handles())
def iter_place_handles(self):
"""
Return an iterator over database handles, one handle for each Place in
the database.
"""
return filter(self.include_place, self.db.iter_place_handles())
def iter_media_object_handles(self):
"""
Return an iterator over database handles, one handle for each Media
Object in the database.
"""
return filter(self.include_media_object, self.db.iter_media_object_handles())
def iter_repository_handles(self):
"""
Return an iterator over database handles, one handle for each
Repository in the database.
"""
return filter(self.include_repository, self.db.iter_repository_handles())
def iter_note_handles(self):
"""
Return an iterator over database handles, one handle for each Note in
the database.
"""
return filter(self.include_note, self.db.iter_note_handles())
def iter_tag_handles(self):
"""
Return an iterator over database handles, one handle for each Tag in
the database.
"""
return filter(self.include_tag, self.db.iter_tag_handles())
@staticmethod
def __iter_object(selector, method):
""" Helper function to return an iterator over an object class """
return filter(lambda obj: ((selector is None) or selector(obj.handle)),
method())
def iter_people(self):
"""
Return an iterator over Person objects in the database
"""
return self.__iter_object(self.include_person, self.db.iter_people)
def iter_families(self):
"""
Return an iterator over Family objects in the database
"""
return self.__iter_object(self.include_family, self.db.iter_families)
def iter_events(self):
"""
Return an iterator over Event objects in the database
"""
return self.__iter_object(self.include_event, self.db.iter_events)
def iter_places(self):
"""
Return an iterator over Place objects in the database
"""
return self.__iter_object(self.include_place, self.db.iter_places)
def iter_sources(self):
"""
Return an iterator over Source objects in the database
"""
return self.__iter_object(self.include_source, self.db.iter_sources)
def iter_citations(self):
"""
Return an iterator over Citation objects in the database
"""
return self.__iter_object(self.include_citation, self.db.iter_citations)
def iter_media_objects(self):
"""
Return an iterator over Media objects in the database
"""
return self.__iter_object(self.include_media_object,
self.db.iter_media_objects)
def iter_repositories(self):
"""
Return an iterator over Repositories objects in the database
"""
return self.__iter_object(self.include_repository,
self.db.iter_repositories)
def iter_notes(self):
"""
Return an iterator over Note objects in the database
"""
return self.__iter_object(self.include_note, self.db.iter_notes)
def iter_tags(self):
"""
Return an iterator over Tag objects in the database
"""
return self.__iter_object(self.include_tag, self.db.iter_tags)
@staticmethod
def gfilter(predicate, obj):
    """
    Returns obj if predicate is True or not callable, else returns None
    """
    # A predicate of None means "no filtering"; a missing object is
    # passed straight through (stays None).
    if predicate is not None and obj is not None:
        # NOTE(review): the docstring says "not callable" but the code only
        # special-cases None; any other non-callable predicate would raise
        # TypeError when called here.
        return obj if predicate(obj.handle) else None
    return obj
def __getattr__(self, name):
    """ Handle unknown attribute lookups """
    # The proxy is never writable, so it always reports itself read-only.
    if name == "readonly":
        return True
    sname = name.split('_')
    if sname[:2] == ['get', 'unfiltered']:
        """
        Handle get_unfiltered calls.  Return the name of the access
        method for the base database object. Call setattr before
        returning so that the lookup happens at most once for a given
        method call and a given object.
        """
        attr = getattr(self.basedb, 'get_' + sname[2] + '_from_handle')
        setattr(self, name, attr)
        return attr
    # if a write-method:
    # Refuse to delegate DbWriteBase methods: raising AttributeError here
    # keeps the proxy effectively read-only even when the wrapped database
    # supports writes.
    if (name in DbWriteBase.__dict__ and
        not name.startswith("__") and
        type(DbWriteBase.__dict__[name]) is types.FunctionType):
        raise AttributeError
    # Default behaviour: lookup attribute in parent object
    return getattr(self.db, name)
# Record accessors: each fetches the object from the wrapped database and
# passes it through gfilter(), so records excluded by this proxy's
# include_* predicate come back as None.
def get_person_from_handle(self, handle):
    """
    Finds a Person in the database from the passed gramps handle.
    If no such Person exists, None is returned.
    """
    return self.gfilter(self.include_person,
                        self.db.get_person_from_handle(handle))
def get_family_from_handle(self, handle):
    """
    Finds a Family in the database from the passed gramps handle.
    If no such Family exists, None is returned.
    """
    return self.gfilter(self.include_family,
                        self.db.get_family_from_handle(handle))
def get_event_from_handle(self, handle):
    """
    Finds a Event in the database from the passed gramps handle.
    If no such Event exists, None is returned.
    """
    return self.gfilter(self.include_event,
                        self.db.get_event_from_handle(handle))
def get_source_from_handle(self, handle):
    """
    Finds a Source in the database from the passed gramps handle.
    If no such Source exists, None is returned.
    """
    return self.gfilter(self.include_source,
                        self.db.get_source_from_handle(handle))
def get_citation_from_handle(self, handle):
    """
    Finds a Citation in the database from the passed gramps handle.
    If no such Citation exists, None is returned.
    """
    return self.gfilter(self.include_citation,
                        self.db.get_citation_from_handle(handle))
def get_place_from_handle(self, handle):
    """
    Finds a Place in the database from the passed gramps handle.
    If no such Place exists, None is returned.
    """
    return self.gfilter(self.include_place,
                        self.db.get_place_from_handle(handle))
def get_object_from_handle(self, handle):
    """
    Finds an Object in the database from the passed gramps handle.
    If no such Object exists, None is returned.
    """
    return self.gfilter(self.include_media_object,
                        self.db.get_object_from_handle(handle))
def get_repository_from_handle(self, handle):
    """
    Finds a Repository in the database from the passed gramps handle.
    If no such Repository exists, None is returned.
    """
    return self.gfilter(self.include_repository,
                        self.db.get_repository_from_handle(handle))
def get_note_from_handle(self, handle):
    """
    Finds a Note in the database from the passed gramps handle.
    If no such Note exists, None is returned.
    """
    return self.gfilter(self.include_note,
                        self.db.get_note_from_handle(handle))
def get_tag_from_handle(self, handle):
    """
    Finds a Tag in the database from the passed gramps handle.
    If no such Tag exists, None is returned.
    """
    return self.gfilter(self.include_tag,
                        self.db.get_tag_from_handle(handle))
# Same accessors keyed by GRAMPS ID instead of handle.
def get_person_from_gramps_id(self, val):
    """
    Finds a Person in the database from the passed GRAMPS ID.
    If no such Person exists, None is returned.
    """
    return self.gfilter(self.include_person,
                        self.db.get_person_from_gramps_id(val))
def get_family_from_gramps_id(self, val):
    """
    Finds a Family in the database from the passed GRAMPS ID.
    If no such Family exists, None is returned.
    """
    return self.gfilter(self.include_family,
                        self.db.get_family_from_gramps_id(val))
def get_event_from_gramps_id(self, val):
    """
    Finds an Event in the database from the passed GRAMPS ID.
    If no such Event exists, None is returned.
    """
    return self.gfilter(self.include_event,
                        self.db.get_event_from_gramps_id(val))
def get_place_from_gramps_id(self, val):
    """
    Finds a Place in the database from the passed gramps' ID.
    If no such Place exists, None is returned.
    """
    return self.gfilter(self.include_place,
                        self.db.get_place_from_gramps_id(val))
def get_source_from_gramps_id(self, val):
    """
    Finds a Source in the database from the passed gramps' ID.
    If no such Source exists, None is returned.
    """
    return self.gfilter(self.include_source,
                        self.db.get_source_from_gramps_id(val))
def get_citation_from_gramps_id(self, val):
    """
    Finds a Citation in the database from the passed gramps' ID.
    If no such Citation exists, None is returned.
    """
    return self.gfilter(self.include_citation,
                        self.db.get_citation_from_gramps_id(val))
def get_object_from_gramps_id(self, val):
    """
    Finds a MediaObject in the database from the passed gramps' ID.
    If no such MediaObject exists, None is returned.
    """
    return self.gfilter(self.include_media_object,
                        self.db.get_object_from_gramps_id(val))
def get_repository_from_gramps_id(self, val):
    """
    Finds a Repository in the database from the passed gramps' ID.
    If no such Repository exists, None is returned.
    """
    return self.gfilter(self.include_repository,
                        self.db.get_repository_from_gramps_id(val))
def get_note_from_gramps_id(self, val):
    """
    Finds a Note in the database from the passed gramps' ID.
    If no such Note exists, None is returned.
    """
    return self.gfilter(self.include_note,
                        self.db.get_note_from_gramps_id(val))
def get_tag_from_name(self, val):
    """
    Finds a Tag in the database from the passed tag name.
    If no such Tag exists, None is returned.
    """
    return self.gfilter(self.include_tag,
                        self.db.get_tag_from_name(val))
# Name-group queries are passed straight through (no proxy filtering).
def get_name_group_mapping(self, surname):
    """
    Return the default grouping name for a surname
    """
    return self.db.get_name_group_mapping(surname)
def has_name_group_key(self, name):
    """
    Return if a key exists in the name_group table
    """
    return self.db.has_name_group_key(name)
def get_name_group_keys(self):
    """
    Return the defined names that have been assigned to a default grouping
    """
    return self.db.get_name_group_keys()
# Object counts are computed from the *filtered* handle lists, so they
# reflect what the proxy exposes, not the full underlying database.
def get_number_of_people(self):
    """
    Return the number of people currently in the database.
    """
    return len(self.get_person_handles())
def get_number_of_families(self):
    """
    Return the number of families currently in the database.
    """
    return len(self.get_family_handles())
def get_number_of_events(self):
    """
    Return the number of events currently in the database.
    """
    return len(self.get_event_handles())
def get_number_of_places(self):
    """
    Return the number of places currently in the database.
    """
    return len(self.get_place_handles())
def get_number_of_sources(self):
    """
    Return the number of sources currently in the database.
    """
    return len(self.get_source_handles())
def get_number_of_citations(self):
    """
    Return the number of Citations currently in the database.
    """
    return len(self.get_citation_handles())
def get_number_of_media_objects(self):
    """
    Return the number of media objects currently in the database.
    """
    return len(self.get_media_object_handles())
def get_number_of_repositories(self):
    """
    Return the number of source repositories currently in the database.
    """
    return len(self.get_repository_handles())
def get_number_of_notes(self):
    """
    Return the number of notes currently in the database.
    """
    return len(self.get_note_handles())
def get_number_of_tags(self):
    """
    Return the number of tags currently in the database.
    """
    return len(self.get_tag_handles())
# Metadata / custom-type queries: unfiltered pass-throughs to the wrapped db.
def get_save_path(self):
    """returns the save path of the file, or "" if one does not exist"""
    return self.db.get_save_path()
def get_event_attribute_types(self):
    """returns a list of all Attribute types associated with Event
    instances in the database"""
    return self.db.get_event_attribute_types()
def get_event_types(self):
    """returns a list of all event types in the database"""
    return self.db.get_event_types()
def get_person_event_types(self):
    """Deprecated: Use get_event_types"""
    return self.db.get_event_types()
def get_person_attribute_types(self):
    """returns a list of all Attribute types associated with Person
    instances in the database"""
    return self.db.get_person_attribute_types()
def get_family_attribute_types(self):
    """returns a list of all Attribute types associated with Family
    instances in the database"""
    return self.db.get_family_attribute_types()
def get_family_event_types(self):
    """Deprecated: Use get_event_types"""
    return self.db.get_event_types()
def get_media_attribute_types(self):
    """returns a list of all Attribute types associated with Media
    and MediaRef instances in the database"""
    return self.db.get_media_attribute_types()
def get_family_relation_types(self):
    """returns a list of all relationship types associated with Family
    instances in the database"""
    return self.db.get_family_relation_types()
def get_child_reference_types(self):
    """returns a list of all child reference types associated with Family
    instances in the database"""
    return self.db.get_child_reference_types()
def get_event_roles(self):
    """returns a list of all custom event role names associated with Event
    instances in the database"""
    return self.db.get_event_roles()
def get_name_types(self):
    """returns a list of all custom names types associated with Person
    instances in the database"""
    return self.db.get_name_types()
def get_origin_types(self):
    """returns a list of all custom origin types associated with Person/Surname
    instances in the database"""
    return self.db.get_origin_types()
def get_repository_types(self):
    """returns a list of all custom repository types associated with
    Repository instances in the database"""
    return self.db.get_repository_types()
def get_note_types(self):
    """returns a list of all custom note types associated with
    Note instances in the database"""
    return self.db.get_note_types()
def get_source_attribute_types(self):
    """returns a list of all Attribute types associated with Source/Citation
    instances in the database"""
    return self.db.get_source_attribute_types()
def get_source_media_types(self):
    """returns a list of all custom source media types associated with
    Source instances in the database"""
    return self.db.get_source_media_types()
def get_url_types(self):
    """returns a list of all custom names types associated with Url
    instances in the database"""
    return self.db.get_url_types()
# Raw accessors serialize the *filtered* object; they will raise
# AttributeError (None.serialize) for records the proxy excludes.
def get_raw_person_data(self, handle):
    return self.get_person_from_handle(handle).serialize()
def get_raw_family_data(self, handle):
    return self.get_family_from_handle(handle).serialize()
def get_raw_object_data(self, handle):
    return self.get_object_from_handle(handle).serialize()
def get_raw_place_data(self, handle):
    return self.get_place_from_handle(handle).serialize()
def get_raw_event_data(self, handle):
    return self.get_event_from_handle(handle).serialize()
def get_raw_source_data(self, handle):
    return self.get_source_from_handle(handle).serialize()
def get_raw_citation_data(self, handle):
    return self.get_citation_from_handle(handle).serialize()
def get_raw_repository_data(self, handle):
    return self.get_repository_from_handle(handle).serialize()
def get_raw_note_data(self, handle):
    return self.get_note_from_handle(handle).serialize()
def get_raw_tag_data(self, handle):
    return self.get_tag_from_handle(handle).serialize()
# Existence tests: a handle "exists" only if the underlying object exists
# AND passes this proxy's include filter.
def has_person_handle(self, handle):
    """
    Returns True if the handle exists in the current Person database.
    """
    return self.gfilter(self.include_person,
                        self.db.get_person_from_handle(handle)) is not None
def has_family_handle(self, handle):
    """
    Returns True if the handle exists in the current Family database.
    """
    return self.gfilter(self.include_family,
                        self.db.get_family_from_handle(handle)) is not None
def has_event_handle(self, handle):
    """
    returns True if the handle exists in the current Event database.
    """
    return self.gfilter(self.include_event,
                        self.db.get_event_from_handle(handle)) is not None
def has_source_handle(self, handle):
    """
    returns True if the handle exists in the current Source database.
    """
    return self.gfilter(self.include_source,
                        self.db.get_source_from_handle(handle)) is not None
def has_citation_handle(self, handle):
    """
    returns True if the handle exists in the current Citation database.
    """
    return self.gfilter(self.include_citation,
                        self.db.get_citation_from_handle(handle)) is not None
def has_place_handle(self, handle):
    """
    returns True if the handle exists in the current Place database.
    """
    return self.gfilter(self.include_place,
                        self.db.get_place_from_handle(handle)) is not None
def has_object_handle(self, handle):
    """
    returns True if the handle exists in the current MediaObjectdatabase.
    """
    return self.gfilter(self.include_media_object,
                        self.db.get_object_from_handle(handle)) is not None
def has_repository_handle(self, handle):
    """
    returns True if the handle exists in the current Repository database.
    """
    return self.gfilter(self.include_repository,
                        self.db.get_repository_from_handle(handle)) is not None
def has_note_handle(self, handle):
    """
    returns True if the handle exists in the current Note database.
    """
    return self.gfilter(self.include_note,
                        self.db.get_note_from_handle(handle)) is not None
def has_tag_handle(self, handle):
    """
    returns True if the handle exists in the current Tag database.
    """
    return self.gfilter(self.include_tag,
                        self.db.get_tag_from_handle(handle)) is not None
def get_mediapath(self):
    """returns the default media path of the database"""
    return self.db.get_mediapath()
def get_gramps_ids(self, obj_key):
    # Unfiltered pass-through; IDs of excluded records may be included.
    return self.db.get_gramps_ids(obj_key)
def has_gramps_id(self, obj_key, gramps_id):
    # Unfiltered pass-through to the wrapped database.
    return self.db.has_gramps_id(obj_key, gramps_id)
# Bookmark accessors return the proxy's own (instance-attribute) bookmark
# lists, not the wrapped database's.
def get_bookmarks(self):
    """returns the list of Person handles in the bookmarks"""
    return self.bookmarks
def get_family_bookmarks(self):
    """returns the list of Family handles in the bookmarks"""
    return self.family_bookmarks
def get_event_bookmarks(self):
    """returns the list of Event handles in the bookmarks"""
    return self.event_bookmarks
def get_place_bookmarks(self):
    """returns the list of Place handles in the bookmarks"""
    return self.place_bookmarks
def get_source_bookmarks(self):
    """returns the list of Source handles in the bookmarks"""
    return self.source_bookmarks
def get_citation_bookmarks(self):
    """returns the list of Citation handles in the bookmarks"""
    return self.citation_bookmarks
def get_media_bookmarks(self):
    """returns the list of Media handles in the bookmarks"""
    return self.media_bookmarks
def get_repo_bookmarks(self):
    """returns the list of Repository handles in the bookmarks"""
    return self.repo_bookmarks
def get_note_bookmarks(self):
    """returns the list of Note handles in the bookmarks"""
    return self.note_bookmarks
def close(self):
    """
    Close on a proxy closes real database.
    """
    # Closes basedb (the real backing store), not the possibly-nested
    # proxy chain in self.db.
    self.basedb.close()
def find_initial_person(self):
    """
    Find an initial person, given that they might not be
    available.

    Return the base database's initial person only if this proxy
    still exposes them; otherwise return None.
    """
    candidate = self.basedb.find_initial_person()
    if candidate and self.has_person_handle(candidate.handle):
        return candidate
    return None
def get_dbid(self):
    """
    Return the database ID.
    """
    # Taken from the real backing database, bypassing any proxy chain.
    return self.basedb.get_dbid()
| pmghalvorsen/gramps_branch | gramps/gen/proxy/proxybase.py | Python | gpl-2.0 | 33,787 | [
"Brian"
] | 4d15991218bcac6894e4cfc65b4818c4c8cced1093e3d11510ade2304a8c9653 |
from django.utils.translation import ugettext_lazy as _
from django.forms import ValidationError # noqa
from django.core.urlresolvers import reverse
from horizon import exceptions
from horizon import forms
from horizon import messages
from crystal_dashboard.dashboards.crystal import exceptions as sdsexception
from crystal_dashboard.api import swift as api
class CreateStoragePolicy(forms.SelfHandlingForm):
    """Horizon form that creates a new Swift replication storage policy.

    On success the Crystal controller API is called and the form data is
    returned; on failure the user is redirected to the rings index.
    """
    name = forms.CharField(max_length=255,
                           label=_("Name"),
                           help_text=_("The name of the new policy."),
                           widget=forms.TextInput(
                               attrs={"ng-model": "name", "not-blank": ""}
                           ))
    replicas = forms.CharField(max_length=255,
                               label=_("Num. Replicas"),
                               help_text=_("Number of replicas"),
                               initial=3)
    partition_power = forms.CharField(max_length=255,
                                      label=_("Partiton Power"),
                                      help_text=_("If the value is x the num of partitions will be 2^x"),
                                      initial=10)
    time = forms.CharField(max_length=255,
                           label=_("Time"),
                           help_text=_("Time between moving a partition more than once. In hours"),
                           initial=1)
    # Hidden bookkeeping fields the backend expects for a new policy.
    policy_type = forms.CharField(widget=forms.HiddenInput(), initial='replication')
    deprecated = forms.CharField(widget=forms.HiddenInput(), initial='False')
    deployed = forms.CharField(widget=forms.HiddenInput(), initial='False')
    default = forms.CharField(widget=forms.HiddenInput(), initial='False')
    devices = forms.CharField(widget=forms.HiddenInput(), initial='[]')

    def __init__(self, request, *args, **kwargs):
        super(CreateStoragePolicy, self).__init__(request, *args, **kwargs)

    def handle(self, request, data):
        """Submit the new policy to the Crystal API and report the result."""
        try:
            response = api.swift_new_storage_policy(request, data)
            if 200 <= response.status_code < 300:
                messages.success(request, _("Storage policy successfully created."))
                return data
            else:
                # Non-2xx: surface the API's error body via the except path.
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            # str(ex) instead of the deprecated, Python-2-only ex.message.
            error_message = "Unable to create storage policy.\t %s" % str(ex)
            exceptions.handle(request, _(error_message), redirect=redirect)
class UpdateStoragePolicy(forms.SelfHandlingForm):
    """Horizon form that edits an existing storage policy.

    The policy to edit is identified by ``self.initial['storage_policy_id']``.
    """
    name = forms.CharField(max_length=255,
                           label=_("Name"),
                           help_text=_("The name of the new policy."))
    default = forms.BooleanField(required=False, label="Default")
    deprecated = forms.BooleanField(required=False, label="Deprecated")

    def __init__(self, request, *args, **kwargs):
        super(UpdateStoragePolicy, self).__init__(request, *args, **kwargs)

    def handle(self, request, data):
        """Send the edited fields to the Crystal API and report the result."""
        try:
            response = api.swift_edit_storage_policy(request, self.initial['storage_policy_id'], data)
            if 200 <= response.status_code < 300:
                messages.success(request, _("Storage policy successfully updated."))
                return data
            else:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            # str(ex) instead of the deprecated, Python-2-only ex.message.
            error_message = "Unable to update storage policy.\t %s" % str(ex)
            exceptions.handle(request, _(error_message), redirect=redirect)
class CreateECStoragePolicy(forms.SelfHandlingForm):
    """Horizon form that creates a new Swift erasure-coded storage policy."""
    name = forms.CharField(max_length=255,
                           label=_("Name"),
                           help_text=_("The name of the new policy."),
                           widget=forms.TextInput(
                               attrs={"ng-model": "name", "not-blank": ""}
                           ))
    partition_power = forms.CharField(max_length=255,
                                      label=_("Partiton Power"),
                                      help_text=_("If the value is x the num of partitions will be 2^x"),
                                      initial=10)
    time = forms.CharField(max_length=255,
                           label=_("Time"),
                           help_text=_("Time between moving a partition more than once. In hours"),
                           initial=1)
    ec_type = forms.CharField(max_length=255,
                              label=_("EC Type"),
                              required=True,
                              help_text=_("Is chosen from the list of EC backends supported by PyECLib"),
                              initial='liberasurecode_rs_vand')
    ec_num_data_fragments = forms.CharField(max_length=255,
                                            label=_("Num. Data Fragments"),
                                            required=True,
                                            help_text=_("The total number of fragments that will be comprised of data."),
                                            initial=10)
    ec_num_parity_fragments = forms.CharField(max_length=255,
                                              label=_("Num. Parity Fragments"),
                                              required=True,
                                              help_text=_("The total number of fragments that will be comprised of parity."),
                                              initial=4)
    ec_object_segment_size = forms.CharField(max_length=255,
                                             label=_("Object Segment Size"),
                                             required=True,
                                             help_text=_("The amount of data that will be buffered up before feeding a segment into the encoder/decoder."),
                                             initial=1048576)
    ec_duplication_factor = forms.CharField(max_length=255,
                                            label=_("Duplication Factor"),
                                            required=True,
                                            help_text=_("EC Duplication enables Swift to make duplicated copies of fragments of erasure coded objects."),
                                            initial=1)
    # Hidden bookkeeping fields the backend expects for a new EC policy.
    policy_type = forms.CharField(widget=forms.HiddenInput(), initial='EC')
    deprecated = forms.CharField(widget=forms.HiddenInput(), initial='False')
    deployed = forms.CharField(widget=forms.HiddenInput(), initial='False')
    devices = forms.CharField(widget=forms.HiddenInput(), initial='[]')
    default = forms.CharField(widget=forms.HiddenInput(), initial='False')

    def __init__(self, request, *args, **kwargs):
        super(CreateECStoragePolicy, self).__init__(request, *args, **kwargs)

    def handle(self, request, data):
        """Submit the new EC policy to the Crystal API and report the result."""
        try:
            response = api.swift_new_storage_policy(request, data)
            if 200 <= response.status_code < 300:
                messages.success(request, _("Storage policy successfully created."))
                return data
            else:
                raise sdsexception.SdsException(response.text)
        except Exception as ex:
            redirect = reverse("horizon:crystal:rings:index")
            # str(ex) instead of the deprecated, Python-2-only ex.message.
            error_message = "Unable to create storage policy.\t %s" % str(ex)
            exceptions.handle(request, _(error_message), redirect=redirect)
| Crystal-SDS/dashboard | crystal_dashboard/dashboards/crystal/rings/storage_policies/forms.py | Python | gpl-3.0 | 7,588 | [
"CRYSTAL"
] | 62b3eee6d327c5c153b1f402ae46b7d16740f3b8e82db0eb6547487760a205c2 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RNlme(RPackage):
    """Fit and compare Gaussian linear and nonlinear mixed-effects models."""
    # CRAN package page; list_url reuses it so Spack can scrape versions.
    homepage = "https://cran.r-project.org/package=nlme"
    url = "https://cran.r-project.org/src/contrib/nlme_3.1-130.tar.gz"
    list_url = homepage
    # version('<version>', '<md5 of the source tarball>')
    version('3.1-131', '0f1215ec4d4e3bca939282d122f4d1fa')
    version('3.1-130', '1935d6e308a8018ed8e45d25c8731288')
    version('3.1-128', '3d75ae7380bf123761b95a073eb55008')
    # r-lattice is required both at build time and at run time
    depends_on('r-lattice', type=('build', 'run'))
| lgarren/spack | var/spack/repos/builtin/packages/r-nlme/package.py | Python | lgpl-2.1 | 1,745 | [
"Gaussian"
] | 3391935589c122f89b0b598df35f2a974a97a60e96a4ba7f403f225f8868f63b |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# update_backscatter.py, Angeline G. Burrell (AGB), UoL
#
# Comments: Update the beam's groundscatter flag, calculate the virtual height
# and propagation path, determine the origin field-of-view, update
# the elevation.
#-----------------------------------------------------------------------------
"""update_backscatter
Routines to update the groundscatter and elevation angle, as well as determine
the virtual height, hop, and origin field-of-view for each backscatter point.
Functions
------------------------------------------------------------------------------
assign_region ionosphere region based on virtual height
test_propagation test propgation against reality
select_alt_groups determine altitude limits for range gate
get_beam load beams from list or pointer
calc_distance calculate slant range
select_beam_groundscatter filter to select groundscatter data
calc_frac_points calculate precentage of groundscatter
update_bs_w_scan update propagation parameters, 1 > beam
update_beam_fit update beam data
update_backscatter update propagation parameters, one beam
beam_ut_struct_test test for continuity in UT across beams
------------------------------------------------------------------------------
Author: Angeline G. Burrell (AGB)
Date: January 15, 2015
Inst: University of Leicester (UoL)
"""
# Import python packages
import numpy as np
from scipy import constants as scicon
from scipy import stats as stats
from scipy import optimize as optimize
from scipy import signal as scisig
import datetime as dt
import logging
# Import DaViTpy packages is done within routines to prevent this causing
# an error when initially loading davitpy
#---------------------------------------------------------------------------
def assign_region(vheight, region_hmax={"D":115.0,"E":150.0,"F":900.0},
                  region_hmin={"D":75.0,"E":115.0,"F":150.0}, case="upper"):
    """Assign an ionospheric region based on virtual height.

    "D" (75 - 115 km)
    "E" (115 - 150 km)
    "F" (150 - 900 km, with 1 km of padding at the top)
    "" if values fall outside of the specified altitudes

    Parameters
    -------------
    vheight : (float)
        Virtual height of peak of propagation path
    region_hmax : (dict)
        Maximum virtual heights allowed in each ionospheric layer.
        (default={"D":115.0,"E":150.0,"F":900.0})
    region_hmin : (dict)
        Minimum virtual heights allowed in each ionospheric layer.
        (default={"D":75.0,"E":115.0,"F":150.0})
    case : (str)
        Provide the output in uppercase or lowercase (default=upper)

    Returns
    -----------
    region : (str)
        one (or zero) character string denoting region
    """
    region = ""
    # Padding added above each region's ceiling; unknown (caller-supplied)
    # region keys get no padding instead of raising KeyError.
    rpad = {"D":0.0, "E":0.0, "F":1.0}
    for rr in region_hmax.keys():
        # Use equality (==), not identity (is), to compare strings: 'is'
        # against a literal only works by accident of CPython interning.
        if region_hmin[rr] <= vheight < region_hmax[rr] + rpad.get(rr, 0.0):
            region = rr.lower() if case == "lower" else rr
    return region
#---------------------------------------------------------------------------
def test_propagation(hop, vheight, dist,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0}):
"""Test the propagation path for realism. Use the basic properties of HF
radars.
D-region (<= 115 km) is detected at distances less than 500 km
E-region (115 - 150(or 200?) km) is detected at distances lass than X km
F-region (>= 150 km) is detected at all distances
Parameters
-------------
hop : (float)
Number of hops traveled by this radar signal
vheight : (float)
Virtual height of the peak of the propagation path
dist : (float)
Distance from the radar to the first peak of the propagation path in km
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":400.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
Returns
-----------
good : (boolian)
True if the path is realistic, False if it is not
"""
good = True
if region_hmax.has_key("D") and vheight <= region_hmax["D"]:
if hop > 0.5 or dist > 500.0:
# D-region backscatter is restricted to 0.5 hop ionospheric
# backscatter near the radar (flat earth-approximation holds,
# great circle distance is rounded down, to simplify things)
good = False
elif region_hmax.has_key("E") and vheight <= region_hmax["E"]:
if hop < 1.5 and dist > 900.0:
# 0.5E and 1.0E backscatter is restrictued to slant path distances
# of 1000 km or less. 1.5E backscatter is typically seen at far
# range gates and can be entirely E-region or F/E-region
good = False
return good
#---------------------------------------------------------------------------
def select_alt_groups(gate, vheight, rmin, rmax, vh_box, min_pnts=3):
    """Determine appropriate altitude limits for the data in this range gate
    box.  This is done by fitting a Gaussian curve to each of the occurance
    peaks and setting the range to +/-3 sigma from the mean.  Areas with points
    not encompassed by the fitted limits are constructed using the vh_box as
    a guide for total width.

    Parameters
    -------------
    gate : (np.ndarray)
        Array of range gates for the selected points
    vheight : (np.ndarray)
        Array of virtual heights for the selected points (km)
    rmin : (float)
        Minimum virtual height for this region in km
    rmax : (float)
        Maximum virtual height for this region in km
    vh_box : (float)
        Suggested virtual height width in km
    min_pnts : (int)
        Minimum number of points needed to actually use a height bracket.
        (default=3)

    Returns
    -----------
    vh_mins : (list)
        list of virtual height minima
    vh_maxs : (list)
        List of virtual height maxima
    """
    # Define local functions
    def gaussian(x, *p):
        # Gaussian model for curve_fit: amplitude, mean, std deviation
        A, mu, sigma = p
        return A * np.exp(-(x - mu)**2 / (2. * sigma**2))
    # Initialize output
    vh_mins = list()
    vh_maxs = list()
    vh_peaks = list()
    # Create a histogram of the number of observations at each virtual height
    bnum = int((rmax-rmin) / (vh_box * 0.25))
    hnum, hbin = np.histogram(vheight, bnum if bnum > 10 else 10, (rmin,rmax))
    # Find the maxima in the histogram
    hmax = scisig.argrelmax(hnum, order=2)[0]
    # Since the signal routine won't be able to identify a maxima if two bins
    # next to each other have the same value, use the global maximum if no
    # local maxima were identified
    if len(hmax) == 0 and max(hnum) > min_pnts:
        hmax = np.array([list(hnum).index(max(hnum))])
    # Consider each maxima seperately or, if none could be found, set limits
    # using the suggested width.
    tmin = np.nanmin(vheight)
    tmax = np.nanmax(vheight)
    if len(hmax) == 0:
        if np.isnan(tmin) or np.isnan(tmax):
            return vh_mins, vh_maxs
        # No peaks: tile the observed height span with vh_box-wide windows
        vnum = np.ceil((tmax - tmin) / vh_box)
        vmin = (tmax - tmin) / vnum + tmin - vh_box
        vh_mins = [vmin + n * vh_box for n in np.arange(vnum)]
        vh_maxs = [n + vh_box for n in vh_mins]
    else:
        # For each maxima, fit a Gaussian
        param = [0.0, 0.0, vh_box * 0.5]
        cbin = (hbin[:-1] + hbin[1:]) / 2.0
        hpeak = {hnum[ih]:ih for ih in hmax}
        for hh in sorted(hpeak.keys(), reverse=True):
            ih = hpeak[hh]
            param[0] = hh
            param[1] = cbin[ih]
            try:
                coeff, var = optimize.curve_fit(gaussian, cbin, hnum, p0=param)
                # Evaluate for np.nan in coefficients
                try:
                    np.isnan(coeff).tolist().index(True)
                except:
                    # No NaN found (index raised ValueError): fit is usable
                    # Get the 3 sigma limits
                    vmin = coeff[1] - 3.0 * coeff[2]
                    if vmin < rmin:
                        vmin = rmin
                    vmax = coeff[1] + 3.0 * coeff[2]
                    if vmax > rmax:
                        vmax = rmax
                    # Get the 2 sigma limits
                    vlow = coeff[1] - 2.0 * coeff[2]
                    if vlow < rmin:
                        vlow = rmin
                    vhigh = coeff[1] + 2.0 * coeff[2]
                    if vhigh > rmax:
                        vhigh = rmax
                    # If the fitted curve does not include the detected peak
                    # within a 2 sigma limit, throw out this fit.
                    if cbin[ih] < vlow or cbin[ih] > vhigh:
                        coeff = list()
                    else:
                        # To allow secondary peaks to be fitted, remove this
                        # peak from consideration
                        hnum = [hnum[ic] if cc < vmin or cc >= vmax else 0
                                for ic,cc in enumerate(cbin)]
                        # Save the initial peak boundaries
                        vh_mins.append(vmin)
                        vh_maxs.append(vmax)
                        vh_peaks.append(coeff[1])
            except:
                # NOTE(review): bare except silently discards curve_fit
                # failures; consider catching RuntimeError explicitly.
                pass
        # Evaluate the current limits to see if they overlap other limits
        # or to see if there are gaps.  Re-order the limits to start at the
        # lowest and end at the highest.  If no limits were found, set them.
        if len(vh_maxs) == 0:
            vnum = np.ceil((tmax - tmin) / vh_box)
            vmin = (tmax - tmin) / vnum + tmin - vh_box
            vh_mins = [vmin + n * vh_box for n in np.arange(vnum)]
            vh_maxs = [n + vh_box if n + vh_box < rmax else rmax
                       for n in vh_mins]
            for n,vmin in enumerate(vh_mins):
                if vmin < rmin:
                    vh_mins[n] = rmin
                else:
                    break
        else:
            new_min = list()
            new_max = list()
            new_peak = list()
            priority = list() # Low number means high priority to keep limits
            # If there are points that fall below the lower limit, add more
            # regions to include these points.
            if min(vh_mins) > tmin:
                vmax = min(vh_mins)
                vnum = round((vmax - tmin) / vh_box)
                if vnum == 0.0:
                    # The outlying points are close enough that the lower limit
                    # should be expanded
                    imin = vh_mins.index(min(vh_mins))
                    vh_mins[imin] = np.floor(tmin)
                    if vh_mins[imin] < rmin:
                        vh_mins[imin] = rmin
                else:
                    vspan = (vmax - tmin) / vnum
                    for n in np.arange(vnum):
                        nmin = tmin + n * vspan
                        if nmin < rmin:
                            nmin = rmin
                        new_min.append(nmin)
                        new_max.append(tmin + (n + 1.0) * vspan)
                        new_peak.append(tmin + (n + 0.5) * vspan)
                        priority.append(len(vh_mins) + len(new_min))
            # Sort the Gaussian limits by minimum virtual height and cycle
            # through them.
            for vmin in sorted(vh_mins):
                iv = vh_mins.index(vmin)
                if len(new_min) > 0:
                    # Test for overlaps or gaps with the previous height window
                    if new_max[-1] >= vh_peaks[iv] or vmin <= new_peak[-1]:
                        # There is a significant overlap between the two regions
                        if priority[-1] < iv:
                            # Adjust the current boundaries
                            vmin = new_max[-1]
                        else:
                            # Adjust the previous boundaries
                            new_max[-1] = vmin
                            # If this adjustment places the previous maximum
                            # at or below the previous minimum, remove that
                            # division
                            if new_max[-1] <= new_min[-1]:
                                new_max.pop()
                                new_min.pop()
                                new_peak.pop()
                                priority.pop()
                    elif new_max[-1] < vmin:
                        # There is a gap between the two windows.  Construct
                        # bridging window(s) before adding the current max and
                        # min to the list.
                        bmin = new_max[-1]
                        bmax = vmin
                        vnum = round((bmax - bmin) / vh_box)
                        if vnum == 0.0:
                            # The outlying points are close enough that the
                            # last upper limit should be expanded
                            new_max[-1] = vmin
                        else:
                            vspan = (bmax - bmin) / vnum
                            for n in np.arange(vnum):
                                new_min.append(bmin + n * vspan)
                                new_max.append(bmin + (n + 1.0) * vspan)
                                new_peak.append(bmin + (n + 0.5) * vspan)
                                priority.append(len(vh_mins) + len(new_min))
                # Now append the current window, if it is wide enough to
                # be sensible
                if vmin < vh_maxs[iv]:
                    new_min.append(vmin)
                    new_max.append(vh_maxs[iv])
                    new_peak.append(vh_peaks[iv])
                    priority.append(iv)
            # If there are points that fall above the upper limit, add more
            # regions to include these points.
            if len(new_max) == 0 or max(new_max) < tmax:
                # NOTE(review): if new_max is empty, max(new_max) on the next
                # line raises ValueError -- confirm whether that branch is
                # reachable and guard it if so.
                vmin = max(new_max)
                vnum = round((tmax - vmin) / vh_box)
                if vnum == 0.0:
                    # The outlying points are close enough that the upper limit
                    # should be expanded
                    imax = new_max.index(max(new_max))
                    new_max[imax] = np.ceil(tmax)
                    if new_max[imax] > rmax:
                        new_max[imax] = rmax
                else:
                    vspan = (tmax - vmin) / vnum
                    for n in np.arange(vnum):
                        nmax = vmin + (n + 1.0) * vspan
                        if nmax > rmax:
                            nmax = rmax
                        new_min.append(vmin + n * vspan)
                        # NOTE(review): appends rmax although nmax was just
                        # computed and clamped -- looks like it should be
                        # new_max.append(nmax); confirm intent.
                        new_max.append(rmax)
                        new_peak.append(vmin + (n + 0.5) * vspan)
                        priority.append(len(vh_mins) + len(new_min))
            # Rename the output
            vh_mins = new_min
            vh_maxs = new_max
    # Return the limits
    return vh_mins, vh_maxs
#---------------------------------------------------------------------------
def get_beam(radar_beams, nbeams):
    """Retrieve the next radar beam from a sequence or a data pointer.

    Parameters
    ------------
    radar_beams : (list, numpy array, or class `sdio.radDataTypes.radDataPtr`)
        Object containing the radar beam data
    nbeams : (int)
        Number of beams returned before this beam

    Returns
    --------
    beam : (class `sdio.radDataTypes.radDataBeam` or NoneType)
        Beam containing radar data or None, if no data is available
    nbeams : (int)
        Number of beams retrieved from radar_beams, including this beam
    """
    import davitpy.pydarn.sdio as sdio

    # Sequence input: hand back the element at the current read position,
    # if one remains
    if isinstance(radar_beams, (list, np.ndarray)):
        if nbeams < len(radar_beams):
            return radar_beams[nbeams], nbeams + 1
        return None, nbeams

    # Pointer input: read the next record (readRec itself may return None
    # at end of data; the counter is advanced either way, as before)
    if isinstance(radar_beams, sdio.radDataTypes.radDataPtr):
        return radar_beams.readRec(), nbeams + 1

    # Unrecognised input type: nothing to return, counter unchanged
    return None, nbeams
#----------------------------------------------------------------------------
def calc_distance(beam, rg_attr="slist", dist_units="km", hop=.5):
    """Calculate the distance in either meters or kilometers along the
    slant path from the radar to the first ionospheric reflection/refraction
    point using the range gate and a propagation path specified by the hop
    number.  Currently only simple propagation paths (same ionospheric region)
    are allowed.

    Hop examples
    -------------
    0.5 hop : For ionospheric backscatter located in the observed range gate.
              The distance is the distance to the observed range gate.
    1.0 hop : For ground backscatter; the distance is half the distance to the
              observed range gate
    1.5 hop : For ionospheric backscatter; the distance is one third of the
              distance to the observed range gate

    Parameters
    -----------
    beam : (class `pydarn.sdio.radDataTypes.beamData`)
        Data for a single radar and beam along all range gates at a given time
    rg_attr : (str)
        Beam attribute for range gate (default="slist")
    dist_units : (str)
        Units of the distance to backscatter location data.  May supply "km"
        or "m". (default="km")
    hop : (float)
        Specifies the hop location of the range gate.  Must be a positive
        multiple of 0.5.  Nominally, range gates are located at 0.5 hop
        (assumes ionospheric scatter). (default=0.5)

    Returns
    --------
    dist : (np.array or NoneType)
        A list of floats of the same size as the myBeam.fit.slist list,
        containing the distance along the slant path from the radar to the
        first ionospheric reflection/refraction point given the specified
        propagation path for each range gate.  Returns None upon input error.
    """
    import davitpy.pydarn.sdio as sdio
    #---------------------------------
    # Check the input
    estr = None
    if not isinstance(beam, sdio.radDataTypes.beamData):
        estr = 'the beam must be a beamData class'
    elif not isinstance(rg_attr, str) or not hasattr(beam.fit, rg_attr):
        estr = 'no range gate attribute [{:}]'.format(rg_attr)
    elif dist_units != "km" and dist_units != "m":
        # Compare strings by equality, not identity: `is` on string
        # literals depends on interning and is not reliable
        estr = 'unknown units for distance [{:}]'.format(dist_units)
    elif not (isinstance(hop, float) and hop > 0.0 and hop % 0.5 == 0.0):
        # Reject anything that is not a positive float multiple of 0.5.
        # The previous condition was mis-grouped and let invalid values
        # such as -0.5 or 0.3 through while flagging valid integer-like
        # inputs inconsistently
        estr = 'unknown hop number [{:}]'.format(hop)
    else:
        # Load the range gate data
        try:
            rg = getattr(beam.fit, rg_attr)
            if not isinstance(rg, list) or len(rg) == 0:
                estr = 'unable to load range gate'
        except Exception:
            estr = 'unable to load range gate'
    #---------------------------------------------------------
    # Convert from range gates to distance or exit with error
    if estr is None:
        # Determine the number of reflection/refraction points along the
        # propagation path
        bounces = 2.0 * hop
        # Determine the unit conversion (km by default)
        units = 1000.0 if dist_units == "m" else 1.0
        # Calculate the distance: lag to first range plus the gate offset
        # (both in microseconds of travel time) times c/2 per bounce
        dist = 5.0e-10 * scicon.c * units * (np.array(rg) * beam.prm.smsep
                                             + beam.prm.lagfr) / bounces
    else:
        logging.error(estr)
        dist = None
    return dist
#---------------------------------------------------------------------------
def select_beam_groundscatter(beam, dist, min_rg=10, max_rg=76, rg_box=5,
                              max_p=5.0, max_v=30.0, max_w=90.0, gs_tol=.5,
                              nmin=5):
    """A routine to select groundscatter data.  Currently uses a range gate
    limit where all data beyond the maximum range gate is rejected, all
    data with 0.5 hop distances closer than 78 km are rejected, and all points
    closer than the minimum range gate that have a power greater than the
    specified power maximum are also rejected.  Once these requirements have
    been met, the data must have positive power, and have the groundscatter
    flag set.

    Parameters
    ------------
    beam : (class beamData)
        An object with radar data for a certain beam, channel, and radar
    dist : (list or np.array)
        List of slant path (radar to reflection point) distances in km; must
        be the same length as beam.fit.slist
    min_rg : (int)
        Minimum range gate to look for groundscatter with any power level
        (default=10)
    max_rg : (int)
        Maximum range gate to look for groundscatter with any power level
        (default=76)
    rg_box : (int)
        Number of range gates to search above and below the range gate
        specified by rg_index. (default=5)
    max_p : (float)
        Maximum power to allow at range gates closer than the minimum range
        gate (default=5 dB)
    max_v : (float)
        Maximum velocity to allow at range gates closer than the minimum range
        gate (default=30 m/s).  NOTE(review): validated but currently unused
        by this routine.
    max_w : (float)
        Maximum spectral width to allow at range gates closer than the minimum
        range gate (default=90 m/s).  NOTE(review): validated but currently
        unused by this routine.
    gs_tol : (float)
        Minimum fraction of points within a range gate box that should be
        groundscatter if this point is to actually be considered groundscatter.
        (default=0.5)
    nmin : (int)
        Minimum number of points that must be present within a range gate box
        to consider the backscatter anything other than noise. (default=5)

    Returns
    ------------
    gnd_index : (list)
        List of indices corresponding to selected groundscatter data in the
        input beam (eg slist, p_l, etc.)

    If there is an input error, exits with an exception
    """
    import davitpy.pydarn.sdio as sdio
    #---------------------
    # Check input.  NOTE(review): these asserts are stripped under `python -O`;
    # the logging.error message expression is only evaluated when the
    # assertion fails.
    assert isinstance(beam, sdio.radDataTypes.beamData), \
        logging.error("beam is not a beamData object")
    assert((isinstance(dist, list) or isinstance(dist, np.ndarray))
           and len(dist) == len(beam.fit.slist)), \
        logging.error("distance list does not match this beam")
    # Floats are silently truncated to ints for the range gate limits
    if isinstance(min_rg, float):
        min_rg = int(min_rg)
    assert isinstance(min_rg, int), \
        logging.error("min_rg is not an integer")
    if isinstance(max_rg, float):
        max_rg = int(max_rg)
    assert isinstance(max_rg, int), \
        logging.error("max_rg is not an integer")
    if isinstance(rg_box, float):
        rg_box = int(rg_box)
    assert(isinstance(rg_box, int) and rg_box > 0), \
        logging.error("rg_box is not a positive integer")
    # Ints are promoted to floats for the physical thresholds
    if isinstance(max_p, int):
        max_p = float(max_p)
    assert isinstance(max_p, float), \
        logging.error("maximum power is not a float")
    if isinstance(max_v, int):
        max_v = float(max_v)
    assert isinstance(max_v, float), \
        logging.error("maximum velocity is not a float")
    if isinstance(max_w, int):
        max_w = float(max_w)
    assert isinstance(max_w, float), \
        logging.error("maximum spectral width is not a float")
    assert(isinstance(gs_tol, float) and gs_tol >= 0.0 and gs_tol <= 1.0), \
        logging.error("gs_tol is not a positive fraction")
    if isinstance(nmin, float):
        nmin = int(nmin)
    # NOTE(review): the message says "rg_box" but this validates nmin
    assert(isinstance(nmin, int) and nmin > 0), \
        logging.error("rg_box is not a positive integer")
    #--------------------------------------------------------------------
    # Identify all instances that are flagged as ground scatter and have
    # appropriate power fits based on their location
    def isgroundscatter(rg, dist, p_l, p_s, sd_gflg):
        """Apply the logic that states whether or not a point is
        groundscatter, rejecting groundscatter points that are ambiguous.
        Closes over min_rg and max_p from the enclosing scope.

        Parameters
        -----------
        rg : (int)
            Range gate
        dist : (float)
            Slant path distance from radar to reflection point (km)
        p_l : (float)
            Power determined using exponential fit (dB)
        p_s : (float)
            Power determined using Gaussian fit (dB)
        sd_gflg : (int)
            SuperDARN groundscatter flag

        Returns
        ---------
        gflg : (boolean)
            New groundscatter flag
        """
        gflg = False
        # To be groundscatter, the point must have been identified by the
        # SuperDARN routine (which uses velocity and spectral width to flag
        # all points that are most likely not ionospheric scatter) and have
        # successful exponential and Gaussian power fits.  The distance
        # must also be greater than 78 km from the radar, since this is the
        # smallest imaginable distance that groundscatter could possibly occur
        # at (yields a virtual height of 110 km for an elevation angle of 45
        # deg)
        if sd_gflg == 1 and p_l >= 0.0 and p_s >= 0.0 and dist > 78.0:
            # Test the nearby range gates to ensure the power is not too high.
            # This will remove slow moving ionospheric scatter
            if rg < min_rg:
                if p_l <= max_p and p_s <= max_p:
                    gflg = True
            else:
                gflg = True
        return gflg
    # END isgroundscatter

    # Indices of candidate groundscatter points within the range gate limit
    gi = [i for i,s in enumerate(beam.fit.slist)
          if(s <= max_rg and isgroundscatter(s, dist[i], beam.fit.p_l[i],
                                             beam.fit.p_s[i],
                                             beam.fit.gflg[i]))]
    #--------------------------------------------------------------------------
    # Ensure that the flagged groundscatter is not mislabeled by testing to see
    # if it is an isolated point surrounded by ionospheric scatter or not.
    gnd_index = list()
    for i in gi:
        gs_frac, npnts = calc_frac_points(beam, "slist", gi, i, box=rg_box,
                                          dat_min=0, dat_max=beam.prm.nrang)
        if gs_frac >= gs_tol and npnts >= nmin:
            gnd_index.append(i)
    return(gnd_index)
#----------------------------------------------------------------------
def calc_frac_points(beam, dat_attr, dat_index, central_index, box,
                     dat_min=None, dat_max=None):
    """Calculate the fraction of points within a certain distance about a
    specified data value that are acceptable (e.g. groundscatter).

    Parameters
    ------------
    beam : (class beamData)
        An object with radar data for a certain beam, channel, and radar
    dat_attr : (str)
        Attribute of data type (e.g. "slist")
    dat_index : (list of int)
        A list containing the indexes of acceptable data points within the
        specified beam.fit attribute list
    central_index : (int)
        The index of the desired data point to search about.
    box : (float or int)
        Size of the data box to search above and below the central data value
        specified by the central_index.  This must be in units of the
        specified data.
    dat_min : (float or int)
        Lowest possible value of the data (eg 0 for range gates). (default=None)
    dat_max : (float or int)
        Highest possible value of the data (eg 75 for range gates at han).
        (default=None)

    Returns
    ----------
    frac : (float)
        A number between 0.0 and 1.0, indicating the fraction of points in the
        specified area that are acceptable according to the dat_index list.
    npnts : (int)
        Total number of observations in the specified box.

    If there is an input error, exits with an exception
    """
    import davitpy.pydarn.sdio as sdio
    #----------------
    # Check input (asserts are stripped under `python -O`; messages are only
    # evaluated on failure)
    assert isinstance(beam, sdio.radDataTypes.beamData), \
        logging.error("beam is not a beamData object")
    assert isinstance(dat_attr, str) and hasattr(beam.fit, dat_attr), \
        logging.error("beam does not contain attribute {:}".format(dat_attr))
    assert isinstance(dat_index, list) and isinstance(dat_index[0], int), \
        logging.error("dat_index is not a list of integers")
    assert box > 0, logging.error("box is not positive")
    assert isinstance(dat_min, type(box)) or dat_min is None, \
        logging.error("dat_min is of a different type is suspect")
    assert isinstance(dat_max, type(box)) or dat_max is None, \
        logging.error("dat_max is of a different type is suspect")
    # Get the data list and ensure there is a value to search about
    data = getattr(beam.fit, dat_attr)
    assert isinstance(central_index, int) and central_index < len(data), \
        logging.error("no value for central_index in {:s}".format(dat_attr))
    #-------------------------------------------------------------------------
    # Set evaluation variables, restraining the search box to realistic values
    dmin = data[central_index] - box
    dmax = data[central_index] + box
    if dat_min is not None and dmin < dat_min:
        dmin = dat_min
    if dat_max is not None and dmax > dat_max:
        # dat_max is inclusive, so pad the exclusive upper bound by one unit
        dinc = 1 if isinstance(dat_max, int) else 1.0
        dmax = dat_max + dinc
    #---------------------
    # Initialize output
    frac = 0.0
    npnts = 0
    # Build the membership set once: O(1) lookups instead of a per-point
    # list.index call wrapped in try/except
    good = set(dat_index)
    #-----------------------------------------------------------------------
    # Cycle through the data, updating the total number of points and the
    # number of acceptable points in the box [dmin, dmax)
    for i,d in enumerate(data):
        if d >= dmin and d < dmax:
            npnts += 1
            if i in good:
                frac += 1.0
    if npnts > 0 and frac > 0.0:
        frac /= float(npnts)
    return(frac, npnts)
#---------------------------------------------------------------------------
def update_bs_w_scan(scan, hard, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], rg_max=[5,25,40,76],
vh_box=[50.0,50.0,50.0,150.0], max_hop=3.0, tdiff=None,
tdiff_args=list(), tdiff_e=None, tdiff_e_args=list(),
ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
ix_e=0.0, iy_e=0.0, iz_e=0.0, step=6):
"""Updates the propagation path, elevation, backscatter type, structure
flag, and origin field-of-view (FoV) for all backscatter observations in
each beam for a scan of data. A full scan is not necessary, but if the
number of beams is less than the specified minimum, a less rigerous
evaluation method is used.
Parameters
-------------
scan : (list or np.array)
A list of beamData class objects, representing a scan across the
radar's field-of-view (as performed in most common operational modes).
hard : (class `pydarn.radar.radStruct.site`)
Radar hardware data for this scan
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list or np.array of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list or np.array of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_hop : (list or np.array of float)
The maximum hop to consider for the range gate and height criteria
specified by each list element in rg_box, srg_box, vh_box, and svh_box.
(default=[3.0])
tdiff : (function or NoneType)
A function to retrieve tdiff values (in microsec) using the radar ID
number, current datetime, and transmisson frequency as input.
Additional inputs may be specified using tdiff_args. Example:
def get_tdiff(stid, time, tfreq, filename) { do things } return tdiff
tdiff=get_tdiff, tdiff_args=["tdiff_file"]
(default=None)
tdiff_args : (list)
A list specifying any arguements other than radar, time, and
transmission frequency to run the specified tdiff function.
(default=list())
tdiff_e : (function or NoneType)
A function to retrieve tdiff error values (in microsec) using the radar
ID number, current datetime, and transmisson frequency as input.
Additionalinputs may be specified using tdiff_e_args. Example:
def get_tdiffe(stid, time, tfreq, filename) { do things } return tdiffe
tdiff_e=get_tdiffe, tdiff_e_args=["tdiff_file"]
(default=None)
tdiff_e_args : (list)
A list specifying any arguements other than radar, time, and
transmission frequency to run the specified tdiff_e function.
(default=list())
ptest : (boolian)
Perform test to see if propagation modes are realistic? (default=True)
strict_gs : (boolian)
Remove indeterminately flagged backscatter (default=False)
bmaz_e : (float)
Error in beam azimuth in degrees (default=0.0)
boresite_e : (float)
Error in the boresite location in degrees (default=0.0)
ix_e : (float)
Error in the interferometer x coordinate in meters (default=0.0)
iy_e : (float)
Error in the interferometer y coordinate in meters (default=0.0)
iz_e : (float)
Error in the interferometer z coordinate in meters (default=0.0)
step : (int)
Integer denoting the number of processing steps to perform. This should
always be set to 6 (or greater) unless one wishes to reproduce the
demonstration plots in Burrell et al (2015). (default=6) The step
numbers coincide with those indicated in the paper:
1 or 2: Examine the elevation structure across each scan
3: Add assignments for points with realistic heights in only one FoV
4: Add assignments using single-beam elevation angle variations
5 or more: Test assignements for consistency along the scan.
Returns
---------
beams : (np.array)
An array of updated beamData class objects. These updated objects have
the following additional/updated attributes
beam.fit.fovelv : added : Accounts for adjusted tdiff and origin FoV
beam.fit.fovelv_e : added : elevation error
beam.fit.felv : added : Elevation angle assuming front FoV
beam.fit.felv_e : added : Elevation angle error assuming front FoV
beam.fit.belv : added : Elevation angle assuming rear FoV
beam.fit.belv_e : added : Elevation angle error assuming front FoV
beam.fit.vheight : added : virtual height of ionosphere in km
beam.fit.vheight_e : added : error in virtual height (km)
beam.fit.fvheight : added : virtual height assuming front FoV
beam.fit.fvheight_e : added : error in virtual height assuming front FoV
beam.fit.bvheight : added : virtual height assuming rear FoV
beam.fit.bvheight_e : added : error in virtual height assuming rear FoV
beam.fit.hop : added : Hop assuming the assigned origin FoV
beam.fit.fhop : added : Hop assuming the front FoV
beam.fit.bhop : added : Hop assuming the rear FoV
beam.fit.region : added : Region assuming the assigned origin FoV
beam.fit.fregion : added : Region assuming the front FoV
beam.fit.bregion : added : Region assuming the rear FoV
beam.fit.fovflg : added : Flag indicating origin FoV (1=front, -1=back,
0=indeterminate)
beam.fit.fovpast : added : Flag indicating past FoV assignments
beam.fit.gflg : updated : Flag indicating backscatter type
(1=ground, 0=ionospheric, -1=indeterminate)
beam.prm.tdiff : added : tdiff used in elevation (microsec)
beam.prm.tdiff_e : added : tdiff error (microsec)
"""
import davitpy.pydarn.sdio as sdio
import davitpy.pydarn.radar as pyrad
max_std = 3.0 # This is the maximum standard deviation in degrees.
max_score = 3.0 # This is the maximum z-score. z = (x - mean(X)) / std(X)
fov_frac = 2.0 / 3.0
fov = {1:"front", -1:"back"}
near_rg = -1
#----------------------------------
# Test input
if(not ((isinstance(scan, list) or isinstance(scan, np.ndarray)) and
len(scan) > 0 and len(scan) <= hard.maxbeam and
isinstance(scan[0], sdio.radDataTypes.beamData))
and not isinstance(scan, sdio.radDataTypes.radDataPtr)):
estr = 'need a list of beams or a radar data pointer with [1-'
estr = '{:s}{:d}] beams: length={:d}'.format(estr, hard.maxbeam,
len(scan))
logging.error(estr)
return None
if isinstance(min_pnts, float):
min_pnts = int(min_pnts)
if not isinstance(min_pnts, int) or min_pnts < 0:
logging.error('unknown point minimum [{:}]'.format(min_pnts))
return None
if not isinstance(region_hmin, dict) or min(region_hmin.values()) < 0.0:
estr = 'unknown minimum virtual heights [{:}]'.format(region_hmin)
logging.error(estr)
return None
if not isinstance(region_hmax, dict):
estr = 'unknown maximum virtual heights [{:}]'.format(region_hmax)
logging.error(estr)
return None
if((not isinstance(rg_box, list) and not isinstance(rg_box, np.ndarray))
or min(rg_box) < 1.0):
logging.error('bad FoV range gate box[{:}]'.format(rg_box))
return None
if((not isinstance(vh_box, list) and not isinstance(vh_box, np.ndarray))
or min(vh_box) < 0.0):
logging.error('bad FoV virtual height box [{:}]'.format(vh_box))
return None
#-------------------------------------------------------------------------
# Loading the beams into the output list, updating the distance,
# groundscatter flag, virtual height, and propogation path
beams = np.empty(shape=(hard.maxbeam,), dtype='O')
elvs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
elv_errs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
vheights = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
vherrs = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
hops = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
regions = {"front":[list() for bi in range(hard.maxbeam)],
"back":[list() for bi in range(hard.maxbeam)]}
bnum = 0
snum = 0
while scan is not None:
# Load beams from scan, accounting for different input types
if isinstance(scan, list) or isinstance(scan, np.ndarray):
if snum < len(scan):
beams[bnum] = scan[snum]
snum += 1
else:
scan = None
else:
try:
beams[bnum] = scan.readRec()
except:
estr = "{:s} INFO: empty data pointer".format(rn)
logging.info(estr)
scan = None
bnum += 1
# If a new beam was loaded, update the beam
if bnum > len(beams):
bnum = len(beams)
elif beams[bnum-1] is None:
bnum -= 1
else:
# Update the beam parameters
if tdiff is None:
beams[bnum-1].prm.tdiff = None
else:
args = [beams[bnum-1].stid, beams[bnum-1].time,
beams[bnum-1].prm.tfreq]
args.extend(tdiff_args)
beams[bnum-1].prm.tdiff = tdiff(*args)
if tdiff_e is None:
beams[bnum-1].prm.tdiff_e = None
else:
args = [beams[bnum-1].stid, beams[bnum-1].time,
beams[bnum-1].prm.tfreq]
args.extend(tdiff_e_args)
beams[bnum-1].prm.tdiff_e = tdiff_e(*args)
# Update the beam fit values
(beams[bnum-1], e, eerr, vh, verr, hh, rr,
nhard) = update_beam_fit(beams[bnum-1], hard=hard,
region_hmax=region_hmax,
region_hmin=region_hmin, max_hop=max_hop,
ptest=ptest, strict_gs=strict_gs,
bmaz_e=bmaz_e, boresite_e=boresite_e,
ix_e=ix_e, iy_e=iy_e, iz_e=iz_e)
if e is None or nhard is None:
beams[bnum-1] = None
bnum -= 1
else:
if near_rg < 0:
near_rg = ((500.0 / (5.0e-10 * scicon.c) -
beams[bnum-1].prm.lagfr)
/ beams[bnum-1].prm.smsep)
for ff in e.keys():
elvs[ff][bnum-1] = e[ff]
elv_errs[ff][bnum-1] = eerr[ff]
vheights[ff][bnum-1] = vh[ff]
vherrs[ff][bnum-1] = verr[ff]
hops[ff][bnum-1] = hh[ff]
regions[ff][bnum-1] = rr[ff]
if bnum == 0:
logging.error("unable to update any beams in this scan")
return None
if bnum < len(beams):
beams.resize(bnum)
#-------------------------------------------------------------------------
# To determine the FoV, evaluate the elevation variations across all beams
# for a range gate and virtual height band, considering each propagation
# path (region and hop) seperately.
min_inc = 0.5 * min(rg_box)
min_rg = int(min_inc)
max_rg = hard.maxgate if hard.maxgate < max(rg_max) else max(rg_max)
max_rg = int(np.ceil(max_rg - min_inc))
fovbelong = [[{"out":0, "in":0, "mix":0} for r in beams[bi].fit.slist]
for bi in range(bnum)]
fovpast = [[0 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovflg = [[0 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovstd = [[100.0 + max_std for r in beams[bi].fit.slist]
for bi in range(bnum)]
fovslope = [[0.01 for r in beams[bi].fit.slist] for bi in range(bnum)]
fovscore = [[max_score + 100.0 for r in beams[bi].fit.slist]
for bi in range(bnum)]
for r in np.arange(min_rg, max_rg + 1):
rgnum = 0
rgelv = {"front":list(), "back":list()}
rgvh = {"front":list(), "back":list()}
rghop = {"front":list(), "back":list()}
rgreg = {"front":list(), "back":list()}
rgbi = list()
rgsi = list()
rgrg = list()
ilim = 0
while ilim < len(rg_max) and r >= rg_max[ilim]:
ilim += 1
if ilim >= len(rg_max):
estr = "range gate [{:d}] is above the allowed maximum [".format(r)
logging.info("{:s}{:d}]".format(estr, rg_max[-1]))
continue
width = np.floor(0.5 * rg_box[ilim])
rmin = r - int(width)
rmin = rmin if rmin >= 0 else 0
rmax = int(r + int(width) + (rg_box[ilim] % 2.0))
rmax = (rmax if rmax <= hard.maxgate else
(hard.maxgate if hard.maxgate < max(rg_max) else max(rg_max)))
# For each beam, load the data for this range gate window
for bi in range(bnum):
b = beams[bi]
for ir in np.arange(rmin, rmax):
try:
si = b.fit.slist.index(ir)
except:
si = -1
# Only load data if an elevation has been calculated for
# at least one field-of-view
if si >= 0 and (not np.isnan(elvs["front"][bi][si]) or
not np.isnan(elvs["back"][bi][si])):
# Save the data for determining FoV if this value falls
# within the desired range
if ir >= rmin and ir < rmax:
rgbi.append(bi)
rgsi.append(si)
rgrg.append(ir)
goodpath = False
for ff in fov.values():
rgelv[ff].append(elvs[ff][bi][si])
rgvh[ff].append(vheights[ff][bi][si])
rghop[ff].append(hops[ff][bi][si])
rgreg[ff].append(regions[ff][bi][si])
if(not np.isnan(hops[ff][bi][si]) and
len(regions[ff][bi][si]) == 1):
goodpath = True
if goodpath:
rgnum += 1
if rgnum < min_pnts:
continue
rgbi = np.array(rgbi)
rgsi = np.array(rgsi)
rgrg = np.array(rgrg)
rgpath = set(["{:.1f}{:s}".format(rghop[ff][ii], reg)
for ii,reg in enumerate(rgreg[ff])
if len(reg) == 1 and not np.isnan(rghop[ff][ii])
for ff in fov.values()])
for ff in fov.values():
rgelv[ff] = np.array(rgelv[ff])
rgvh[ff] = np.array(rgvh[ff])
# Determine the standard deviation of the elevation for the observations
# at each virtual height at this range gate window and hop.
for pp in rgpath:
hop = float(pp[0:3])
reg = pp[3:4]
# Seperate this propagation path into virtual height groups and
# test the linear regression of the elevation angles
for ff in fov.keys():
itest = [it for it,fhop in enumerate(rghop[fov[ff]])
if fhop == hop and rgreg[fov[ff]][it] == reg]
if len(itest) < min_pnts:
estr = "insufficient points to determine virtual height "
estr = "{:s}limits in the {:s} field-".format(estr, fov[ff])
estr = "{:s}of-view for propagation path [".format(estr)
estr = "{:s}{:s}] at range gate [{:d}]".format(estr, pp, r)
logging.info(estr)
else:
# Establish the virtual height windows
vmins, vmaxs = select_alt_groups(rgrg[itest],
rgvh[fov[ff]][itest],
region_hmin[reg],
region_hmax[reg],
vh_box[ilim], min_pnts)
for iv,vmin in enumerate(vmins):
# Select the data for this height range
velv = list()
vbm = list()
vrg = list()
vih = list()
for ih,vh in enumerate(rgvh[fov[ff]][itest]):
if(not np.isnan(vh) and vh >= vmin and
vh < vmaxs[iv]):
velv.append(rgelv[fov[ff]][itest][ih])
vbm.append(rgbi[itest][ih])
vrg.append(rgrg[itest][ih])
vih.append(ih)
# See if there are enough beams at this height
if len(list(set(vbm))) < min_pnts:
estr = "insufficient beams to evaluate "
estr = "{:s}{:s} field-of-".format(estr, fov[ff])
estr = "{:s}view between [{:.0f}".format(estr, vmin)
estr = "{:s}-{:.0f} km] at ".format(estr, vmaxs[iv])
estr = "{:s}range gate {:d}".format(estr, r)
logging.info(estr)
else:
# Initialize evaluation statistics to bad values
line_std = max_std + 100.0
line_dev = [max_std + 100.0 for ee in velv]
# Get the linear regression of the elevation
# angles as a function of range gate. The slope
# of this line must be flat or negative.
# Aliasing will cause positive jumps, but these
# should not be present in all boxes, allowing
# data to be assigned at times when the aliasing
# jump is not present. A more robust method
# (such as RANSAC or Theil-Sen) was not used
# since the number of points available are small
try:
ecoeff = stats.linregress(vrg, velv)
except:
# If there are not enough points to
# perform a linear regression, assume a flat
# slope with an intercept given by the mean
ecoeff = [0.0, np.nanmean(velv)]
if not np.isnan(ecoeff[0]) and ecoeff[0] <= 0.0:
lval = np.array([ecoeff[1] + ecoeff[0]
* rr for rr in vrg])
ldev = lval - np.array(velv)
lstd = np.nanstd(ldev)
lscore = [abs(ss) for ss in stats.zscore(ldev)]
# Use the current and past z-scores to
# determine whether or not each point is
# well characterized by the linear
# regression
if lstd <= max_std:
for ih,bi in enumerate(vbm):
si = rgsi[itest][vih[ih]]
if(lscore[ih] <= max_score and
lstd <= max_std and
lscore[ih] < fovscore[bi][si]
and lstd <= fovstd[bi][si]):
# If the FoV is changing, record
# that this point also met the
# criteria for the other Fov
if fovflg[bi][si] != ff:
fovpast[bi][si] = fovflg[bi][si]
# Replace if the FoV criteria are
# better, regardless of the FoV
fovflg[bi][si] = ff
fovstd[bi][si] = lstd
fovslope[bi][si] = ecoeff[0]
fovscore[bi][si] = lscore[ih]
#--------------------------------------------------------------------------
# Assign FoV to points that have realistic elevation angles in only one
# FoV. Also evaluate points that don't have FoV flags due to insufficient
# data across the range gates. Evaluate elevation spread using a (possibly)
# expanded range gate window
inc_rg_box = 3.0
for bi in range(bnum):
if step < 3:
estr = "not testing backscatter unassigned after performing scan"
logging.info("{:s}evaluation".format(estr))
break
lelv = {"front":np.array(elvs["front"][bi]),
"back":np.array(elvs["back"][bi])}
lvh = {"front":np.array(vheights["front"][bi]),
"back":np.array(vheights["back"][bi])}
for si,ifov in enumerate(fovflg[bi]):
if np.isnan(lelv['front'][si]) and np.isnan(lelv['back'][si]):
continue
if ifov == 0:
rg = beams[bi].fit.slist[si]
# If this point is unassigned, there is only one realistic
# elevation, and aliasing is unlikely, assign the FoV with the
# realistic elevation
if(np.isnan(lelv['front'][si])
and not np.isnan(lelv['back'][si]) and rg < near_rg):
fovflg[bi][si] = -1
fovstd[bi][si] = 0.0
fovslope[bi][si] = 0.0
fovscore[bi][si] = 0.0
elif(not np.isnan(lelv['front'][si])
and np.isnan(lelv['back'][si]) and rg < near_rg):
fovflg[bi][si] = 1
fovstd[bi][si] = 0.0
fovslope[bi][si] = 0.0
fovscore[bi][si] = 0.0
else:
if step < 4:
estr = "not assigning backscatter by testing the single"
logging.info("{:s} beam variations".format(estr))
continue
# Examine the surrounding observations along the beam using
# an extended range gate window
#
# Differentiate by hop
ilim = 0
while(ilim < len(rg_max) and rg >= rg_max[ilim]):
ilim += 1
if ilim >= len(rg_max):
estr = "no guidelines provided for range gate "
logging.info("{:s}[{:d}]".format(estr, rg))
continue
rg_half = (0.5 * (rg_box[ilim] + inc_rg_box))
irg_half = int(np.floor(rg_half))
min_si = si - irg_half if si >= irg_half else 0
max_si = (si + irg_half if si + irg_half < hard.maxgate
else (hard.maxgate - 1
if hard.maxgate < max(rg_max)
else max(rg_max) - 1))
# Load the front and back elevations for this range gate
# and within the extended range gate window
for ff in fov.keys():
ihop = hops[fov[ff]][bi][si]
ireg = regions[fov[ff]][bi][si]
test_rg = beams[bi].fit.slist[min_si:max_si]
test_si = list()
ecoeff = list()
lstd = max_std + 100.0
lscore = max_score + 100.0
if not np.isnan(ihop) and len(ireg) == 1:
for ri,r in enumerate(test_rg):
rsi = min_si + ri
if(hops[fov[ff]][bi][rsi] == ihop and
regions[fov[ff]][bi][rsi] == ireg and
abs(rg - beams[bi].fit.slist[rsi])
<= rg_half):
test_si.append(rsi)
if len(test_si) < min_pnts:
# If there are not enough points to perform a
# comparison continue without assigning a FoV flag
if not np.isnan(ihop) and len(ireg) == 1:
estr = "not enough points to do single-beam "
estr = "{:s}test for the ".format(estr)
estr = "{:s}{:s} field-of".format(estr, fov[ff])
estr = "{:s}-view for hop [".format(estr)
estr = "{:s}{:.1f}{:s}".format(estr, ihop, ireg)
estr = "{:s}] beam [{:d}] ".format(estr, bi)
estr = "{:s}range gate [{:d}]".format(estr, rg)
logging.info(estr)
else:
test_rg = np.array(beams[bi].fit.slist)[test_si]
ri = test_si.index(si)
try:
ecoeff = stats.linregress(test_rg, \
lelv[fov[ff]][test_si])
except:
ecoeff = [0.0,
np.nanmean(lelv[fov[ff]][test_si])]
if ecoeff[0] <= 0.0:
lval = np.array([ecoeff[1] + ecoeff[0] * rr
for rr in test_rg])
ldev = lval - np.array(lelv[fov[ff]][test_si])
lstd = np.nanstd(ldev)
lscore = [abs(ss) for ss in stats.zscore(ldev)]
# Evaluate the standard deviations and the FoV
# of the surrounding points to determine the
# FoV for this point
if lstd <= max_std:
for ih,ti in enumerate(test_si):
if(lscore[ih] <= max_score and
lstd <= max_std and
lscore[ih] < fovscore[bi][si]
and lstd <= fovstd[bi][si]):
# If the FoV is changing, record
# that this point also met the
# criteria for the other Fov
if fovflg[bi][si] != ff:
fovpast[bi][si] = fovflg[bi][si]
# Replace if this new FoV
# criteria are better, regardless
# of whether or not the FoV changes
fovflg[bi][si] = ff
fovstd[bi][si] = lstd
fovslope[bi][si] = ecoeff[0]
fovscore[bi][si] = lscore[ih]
#--------------------------------------------------------------------------
# Evaluate the FoV flags, removing points that are surrounded by data
# assigned to the opposite FoV.
for r in np.arange(min_rg, max_rg + 1):
if step < 5:
estr = "not testing backscatter assignments with azimuthal "
logging.info("{:s}continuity".format(estr))
break
# Initialize the hop-dependent data
sihop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
bihop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
fovhop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
reghop = {ih:list() for ih in np.arange(0.5, max_hop + 0.5, 0.5)}
min_range = hard.maxgate
max_range = 0
ilim = 0
# Calculate the range gate limits
while ilim < len(rg_max) and r >= rg_max[ilim]:
ilim += 1
width = np.floor(0.5 * (rg_box[ilim] + inc_rg_box))
rm = r - int(width)
rmin = rm if rm >= 0 else 0
if rmin < min_range:
min_range = rmin
rm = r + int(width + rg_box[ilim] % 2.0)
rmax = rm if rm <= hard.maxgate else hard.maxgate + 1
if rmax > max_range:
max_range = rmax
# For each beam in the maximum possible range gate window, gather the
# range gate, FoV flag, beam index, and range gate index for each hop
for bi in range(bnum):
b = beams[bi]
for ir in np.arange(min_range, max_range):
try:
si = b.fit.slist.index(ir)
except:
si = -1
# Save the data if a FoV flag has been found and the range
# gate limits are appropriate for the hop
if si >= 0 and fovflg[bi][si] != 0:
ihop = hops[fov[fovflg[bi][si]]][bi][si]
ireg = regions[fov[fovflg[bi][si]]][bi][si]
if(len(ireg) == 1 and not np.isnan(ihop) and ihop <= max_hop
and ir >= rmin and ir < rmax):
bihop[ihop].append(bi)
sihop[ihop].append(si)
fovhop[ihop].append(fovflg[bi][si])
reghop[ihop].append(ireg)
# Determine the fraction of each points in the front and back Fov for
# azimuthally constraints (beam limits) added to the previous limits.
# If there are an overwhelming number of points in one FoV, remove
# all FoV flags from the points in the other Fov.
for ihop in fovhop.keys():
for ireg in set(reghop[ihop]):
rind = [ii for ii,rr in enumerate(reghop[ihop]) if rr == ireg]
# If there are sufficient points, evaluate the data at this hop
if len(rind) > min_pnts:
# Evaluate the data in an azimuthal box
for bi in set(np.array(bihop[ihop])[rind]):
# Determine the azimuthal limits
bmnum = beams[bi].bmnum
bwidth = int(min_pnts * 0.75)
bmin = bmnum - bwidth if bmnum >= min_pnts else 0
if bmnum <= hard.maxbeam - bwidth:
bmax = bmnum + bwidth
else:
bmax = hard.maxbeam
ibeam = [ii for ii in rind
if(beams[bihop[ihop][ii]].bmnum >= bmin and
beams[bihop[ihop][ii]].bmnum < bmax)]
bad_fov = 0
good_fov = False
if len(ibeam) > min_pnts:
# Sum the points in this box
fn = sum([1 for ff in np.array(fovhop[ihop])[ibeam]
if ff == 1])
bn = sum([1 for ff in np.array(fovhop[ihop])[ibeam]
if ff == -1])
else:
fn = 0
bn = 0
if fn + bn > 0:
ffrac = float(fn) / float(fn + bn)
if ffrac >= fov_frac and bn > 0:
bad_fov = -1
good_fov = True
elif (1.0 - ffrac) >= fov_frac and fn > 0:
bad_fov = 1
good_fov = True
# Tag all points whose FoV are or are not consistent
# with the observed structure at this hop
for ff,ifov in enumerate(np.array(fovhop[ihop])[ibeam]):
ii = ibeam[ff]
si = sihop[ihop][ii]
ci = bihop[ihop][ii]
if good_fov:
if ifov != bad_fov:
# This point is associated with a structure
# that is predominantly the same FoV
fovbelong[ci][si]["in"] += 1
else:
# If this point is not associated with a
# structure that is predominately the same
# FoV and this is not the only FoV capable
# of producing a realistic elevation angle,
# flag this point as an outlier
ir = beams[ci].fit.slist[si]
if(not (np.isnan(elvs[fov[-ifov]][ci][si])
and ir < near_rg)):
fovbelong[ci][si]["out"] += 1
else:
fovbelong[ci][si]["mix"] += 1
# If any points have been flagged as outliers, remove or change their FoV
for bi in range(bnum):
# Break this loop if no continuity tests are desired
if step < 5:
break
for si,bdict in enumerate(fovbelong[bi]):
if bdict["out"] > 0 and bdict["in"] < bdict["out"] + bdict["mix"]:
# This point is an outlier in a structure with the opposite FoV.
# If this point fit the criteria for the other FoV in the past,
# assign that FoV. Otherwise remove any FoV assignment.
if bdict['out'] > bdict['mix'] and bdict['out'] > bdict['in']:
fovflg[bi][si] = fovpast[bi][si]
else:
fovflg[bi][si] = 0
fovpast[bi][si] = 0
estr = "field-of-view is not consistent with the observed "
estr = "{:s}structure at hop [{:.1f}".format(estr, ihop)
estr = "{:s}{:s}] beam [".format(estr, ireg)
estr = "{:s}{:d}] range gate [".format(estr, beams[bi].bmnum)
estr = "{:s}{:d}]".format(estr, beams[bi].fit.slist[si])
logging.info(estr)
#--------------------------------------------------------------------------
# Assign the appropriate virtual heights and elevation angles to each
# point based on their FoV. Also assign initial regions based on virtual
# height
for bi in range(bnum):
snum = len(beams[bi].fit.slist)
beams[bi].fit.region = ["" for si in range(snum)]
beams[bi].fit.hop = [np.nan for si in range(snum)]
beams[bi].fit.vheight = [np.nan for si in range(snum)]
beams[bi].fit.vheight_e = [np.nan for si in range(snum)]
beams[bi].fit.fovelv = [np.nan for si in range(snum)]
beams[bi].fit.fovelv_e = [np.nan for si in range(snum)]
beams[bi].fit.fovflg = fovflg[bi]
for si,ifov in enumerate(beams[bi].fit.fovflg):
if ifov == 0 or np.isnan(ifov):
# Default to front FoV if none was found
beams[bi].fit.fovelv[si] = elvs["front"][bi][si]
beams[bi].fit.vheight[si] = vheights["front"][bi][si]
beams[bi].fit.hop[si] = hops["front"][bi][si]
beams[bi].fit.region[si] = regions["front"][bi][si]
else:
# Assign the appropriate FoV
beams[bi].fit.region[si] = regions[fov[ifov]][bi][si]
beams[bi].fit.hop[si] = hops[fov[ifov]][bi][si]
beams[bi].fit.vheight[si] = vheights[fov[ifov]][bi][si]
beams[bi].fit.vheight_e[si] = vherrs[fov[ifov]][bi][si]
beams[bi].fit.fovelv_e[si] = elv_errs[fov[ifov]][bi][si]
beams[bi].fit.fovelv[si] = elvs[fov[ifov]][bi][si]
# Additional values returned for use in analysis and UT continuity test
beams[bi].fit.felv = elvs["front"][bi]
beams[bi].fit.felv_e = elv_errs["front"][bi]
beams[bi].fit.belv = elvs["back"][bi]
beams[bi].fit.belv_e = elv_errs["back"][bi]
beams[bi].fit.fvheight = vheights["front"][bi]
beams[bi].fit.fvheight_e = vherrs["front"][bi]
beams[bi].fit.bvheight = vheights["back"][bi]
beams[bi].fit.bvheight_e = vherrs["back"][bi]
beams[bi].fit.fhop = hops["front"][bi]
beams[bi].fit.bhop = hops["back"][bi]
beams[bi].fit.fregion = regions["front"][bi]
beams[bi].fit.bregion = regions["back"][bi]
beams[bi].fit.pastfov = fovpast[bi]
return beams
#-------------------------------------------------------------------------
def update_beam_fit(beam, hard=None,
                    region_hmax={"D":115.0,"E":150.0,"F":900.0},
                    region_hmin={"D":75.0,"E":115.0,"F":150.0}, max_hop=3.0,
                    ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
                    ix_e=0.0, iy_e=0.0, iz_e=0.0):
    """Update the beam.fit and beam.prm class, updating and adding attributes
    needed for common data analysis.
    Currently the earth radius error and slant distance error have no update
    option through this routine and are identically zero.
    Parameters
    ------------
    beam : (class `sdio.radDataTypes.beamData`)
        Radar data for a specific beam
    hard : (class `pydarn.radar.radStruct.site` or NoneType)
        Hardware information for this radar. Will load if not supplied.
        (default=None)
    region_hmax : (dict)
        Maximum virtual heights allowed in each ionospheric layer.
        (default={"D":115.0,"E":150.0,"F":900.0})
    region_hmin : (dict)
        Minimum virtual heights allowed in each ionospheric layer.
        (default={"D":75.0,"E":115.0,"F":150.0})
    max_hop : (float)
        The maximum allowable hop to be considered physical. (default=3.0)
    ptest : (boolean)
        Perform test to see if propagation modes are realistic? (default=True)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=False)
    bmaz_e : (float)
        Error in beam azimuth in degrees (default=0.0)
    boresite_e : (float)
        Error in the boresite location in degrees (default=0.0)
    ix_e : (float)
        Error in the interferometer x coordinate in meters (default=0.0)
    iy_e : (float)
        Error in the interferometer y coordinate in meters (default=0.0)
    iz_e : (float)
        Error in the interferometer z coordinate in meters (default=0.0)
    Returns
    ---------
    return beam, elvs, elv_errs, vheights, vherrs, hops, regions, hard
    beam : (class beamData)
        Updated beamData class object. The beam has the following additional
        or adjusted attributes:
        beam.fit.gflg : updated : Flag indicating backscatter type
                        (1=ground, 0=ionospheric, -1=indeterminate)
        beam.prm.tdiff : possibly updated : tdiff used in elevation (microsec)
        beam.prm.tdiff_e : possibly updated : tdiff error (microsec)
    elvs : (dict)
        Elevation angles for the front "front" and rear "back" FoV
    elv_errs : (dict)
        Elevation angle errors for the front "front" and rear "back" FoV.
        There is currently no method for calculating these errors from the
        fit data, so np.nan will be returned in all cases.
    vheights : (dict)
        Virtual heights for the front "front" and rear "back" FoV
    vherrs : (dict)
        Virtual height errors for the front "front" and rear "back" FoV.
        There is currently no method for calculating these errors from the
        fit data, so np.nan will be returned in all cases.
    hops : (dict)
        Hops for the front "front" and rear "back" FoV
    regions : (dict)
        Ionospheric regions for the front "front" and rear "back" FoV
    hard : (class `pydarn.radar.radStruct.site`)
        Radar hardware data for this scan
    """
    import davitpy.pydarn.sdio as sdio
    import davitpy.pydarn.radar as pyrad
    import davitpy.utils.geoPack as geo
    import davitpy.pydarn.proc.fov.calc_elevation as ce
    import davitpy.pydarn.proc.fov.calc_height as ch
    #----------------------------------
    # Test input
    if not isinstance(region_hmin, dict) or min(region_hmin.values()) < 0.0:
        estr = 'unknown minimum virtual heights [{:}]'.format(region_hmin)
        logging.error(estr)
        return beam, None, None, None, None, None, None, None
    if not isinstance(region_hmax, dict):
        estr = 'unknown maximum virtual heights [{:}]'.format(region_hmax)
        logging.error(estr)
        return beam, None, None, None, None, None, None, None
    # Allow an integer hop limit by promoting it to a float
    if isinstance(max_hop, int):
        max_hop = float(max_hop)
    if not isinstance(max_hop, float) or max_hop < 0.5:
        logging.error('maximum hop must be a float greater than 0.5')
        return beam, None, None, None, None, None, None, None
    if beam is None or beam.fit.slist is None or len(beam.fit.slist) <= 0:
        logging.warning("no fit data in beam at {:}".format(beam.time))
        return beam, None, None, None, None, None, None, None
    #-----------------------------------
    # Initialize FoV dependent values
    slist = getattr(beam.fit, "slist")
    # The "aliased" arrays hold elevations and virtual heights recalculated
    # with an extra 2 pi phase alias (alias=1.0 in the calc_elv_w_err call
    # below); they serve as fallbacks when the unaliased solution is
    # unphysical
    elvs_aliased = {"front":[np.nan for s in slist],
                    "back":[np.nan for s in slist]}
    elva_errs = {"front":[np.nan for s in slist],
                 "back":[np.nan for s in slist]}
    elvs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
    elv_errs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
    vheights = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
    vheights_aliased = {"front":[np.nan for s in slist],
                        "back":[np.nan for s in slist]}
    vheighta_errs = {"front":[np.nan for s in slist],
                     "back":[np.nan for s in slist]}
    vherrs = {"front":[np.nan for s in slist], "back":[np.nan for s in slist]}
    hops = {"front":[0.5 for s in slist], "back":[0.5 for s in slist]}
    regions = {"front":["" for s in slist], "back":["" for s in slist]}
    # Initialize local constants
    vmin = min(region_hmin.values())
    vmax = max(region_hmax.values())
    #------------------------------------------------------------------------
    # Load the radar hardware data and calculate hardware specific variables,
    # if it hasn't been done already
    if hard is None:
        try:
            hard = pyrad.site(radId=beam.stid, dt=beam.time)
        except:
            estr = "unable to load hardware data for radar "
            estr = "{:s}{:d} at {:}".format(estr, beam.stid, beam.time)
            logging.warning(estr)
            return beam, elvs, elv_errs, vheights, vherrs, None, None, None
    # Use the geodetic/geocentric conversion to get the terrestrial radius at
    # the radar location (same in both coordinate systems)
    (lat, lon, radius) = geo.geodToGeoc(hard.geolat, hard.geolon, False)
    # Calculate the 0.5 hop distance and initialize the hop list
    dlist = calc_distance(beam)
    dist = {'front':np.array(dlist), "back":np.array(dlist)}
    # Update the groundscatter flag (both distances are the same)
    gflg = select_beam_groundscatter(beam, dist['front'], max_rg=hard.maxgate)
    for i,g in enumerate(beam.fit.gflg):
        if g == 1:
            try:
                # list.index raises ValueError when i is absent, so this
                # try/except acts as a membership test against the
                # groundscatter index list built above
                gflg.index(i)
                # If this is groundscatter, update the distance and the hop
                hops['front'][i] = 1.0
                hops['back'][i] = 1.0
                dist['front'][i] *= 0.5
                dist['back'][i] *= 0.5
            except:
                # This point was found not to be groundscatter. It is probably
                # slow moving ionospheric backscatter, so treat it like
                # ionospheric backscatter but change the flag to let the user
                # know that it was not flagged by the initial ionospheric
                # backscatter test
                beam.fit.gflg[i] = -1
                if strict_gs:
                    hops['front'][i] = np.nan
                    hops['back'][i] = np.nan
                    dist['front'][i] = np.nan
                    dist['back'][i] = np.nan
        # Remove backscatter with negative power estimates
        if beam.fit.p_l[i] < 0.0 or beam.fit.p_s[i] < 0.0:
            hops['front'][i] = np.nan
            hops['back'][i] = np.nan
            dist['front'][i] = np.nan
            dist['back'][i] = np.nan
    # Calculate the elevation angles for the front and rear FoV, after
    # initializing the beam parameters with the supplied tdiff
    if not hasattr(beam.prm, "tdiff") or beam.prm.tdiff is None:
        beam.prm.tdiff = hard.tdiff
    if not hasattr(beam.prm, "tdiff_e") or beam.prm.tdiff_e is None:
        beam.prm.tdiff_e = np.nan
    for ff in ["front", "back"]:
        # Calculate the elevation
        try:
            (elvs[ff], elv_errs[ff], pamb,
             hard) = ce.calc_elv_w_err(beam, hard=hard, bmaz_e=bmaz_e,
                                       boresite_e=boresite_e, ix_e=ix_e,
                                       iy_e=iy_e, iz_e=iz_e,
                                       tdiff=beam.prm.tdiff,
                                       tdiff_e=beam.prm.tdiff_e, fov=ff)
            # NOTE(review): this aliased call does not pass tdiff_e, so
            # elva_errs uses the routine's default error -- confirm intended
            (elvs_aliased[ff], elva_errs[ff], pamb,
             hard) = ce.calc_elv_w_err(beam, hard=hard, bmaz_e=bmaz_e,
                                       boresite_e=boresite_e, ix_e=ix_e,
                                       iy_e=iy_e, iz_e=iz_e,
                                       tdiff=beam.prm.tdiff, alias=1.0, fov=ff)
        except:
            estr = "can't get elevation for beam {:d} at {:}".format(beam.bmnum,
                                                                     beam.time)
            logging.info(estr)
            elvs[ff] = None
        if elvs[ff] is not None:
            # Get the virtual height and virtual height error
            vheights[ff], vherrs[ff] = \
                ch.calc_virtual_height_w_err(beam, radius, elv=elvs[ff],
                                             elv_e=elv_errs[ff], dist=dist[ff],
                                             dist_e=[0.0 for dd in dist[ff]],
                                             dist_units="km")
            vheights_aliased[ff], vheighta_errs[ff] = \
                ch.calc_virtual_height_w_err(beam, radius, elv=elvs_aliased[ff],
                                             elv_e=elva_errs[ff], dist=dist[ff],
                                             dist_e=[0.0 for dd in dist[ff]],
                                             dist_units="km")
            # Test the virtual height
            for i,vh in enumerate(vheights[ff]):
                if not np.isnan(vh) and vh < vmin:
                    # This height is too low. Replace it with a value corrected
                    # with a 2 pi alias or remove it from consideration for
                    # this FoV
                    # NOTE(review): the aliased height is adopted when it is
                    # also below vmin -- confirm this selection is intended
                    if vheights_aliased[ff][i] < vmin:
                        elvs[ff][i] = elvs_aliased[ff][i]
                        elv_errs[ff][i] = elva_errs[ff][i]
                        vheights[ff][i] = vheights_aliased[ff][i]
                        vherrs[ff][i] = vheighta_errs[ff][i]
                    else:
                        elvs[ff][i] = np.nan
                        vheights[ff][i] = np.nan
                vh = vheights[ff][i]
                vhe = vherrs[ff][i]
                if not np.isnan(vh):
                    hop = hops[ff][i]
                    dd = dlist[i] * 0.5 / hop
                    ghop = True
                    while vh > vmax and hop <= max_hop:
                        # This height is too high. Increase the hop
                        # number to acheive a realistic value
                        hop += 1.0
                        dd = dlist[i] * 0.5 / hop
                        vout = ch.calc_virtual_height_w_err(beam, radius, \
                            elv=[elvs[ff][i]], elv_e=[elv_errs[ff][i]],\
                            dist=[dd], dist_e=[0.0], dist_units="km")
                        vh = vout[0][0]
                        vhe = vout[1][0]
                        # Test the distance and hop to ensure that this
                        # mode is realistic
                        # NOTE(review): this propagation test and the list
                        # update below sit inside the height-adjustment loop,
                        # so points whose initial height is already acceptable
                        # never reach them -- confirm this indentation is
                        # intended
                        if ptest:
                            ghop = test_propagation(hop, vh, dd,
                                                    region_hmax=region_hmax,
                                                    region_hmin=region_hmin)
                        if not ghop:
                            # If this is not a valid propagation path, attempt to
                            # use the elevation angle with a 2pi alias added
                            ea = elvs_aliased[ff][i]
                            ee = elva_errs[ff][i]
                            vh = vheights_aliased[ff][i]
                            vhe = vheighta_errs[ff][i]
                            hop = 1.0 if beam.fit.gflg[i] == 1 else 0.5
                            dd = dlist[i] * 0.5 / hop
                            while vh > vmax and hop <= max_hop:
                                # This height is too high. Increase the hop
                                # number to acheive a realistic value
                                hop += 1.0
                                dd = dlist[i] * 0.5 / hop
                                vout = ch.calc_virtual_height_w_err(beam, radius, \
                                    elv=[ea],
                                    elv_e=[ee], \
                                    dist=[dd], dist_e=[0.0], dist_units="km")
                                vh = vout[0][0]
                                vhe = vout[1][0]
                            if vh >= vmin:
                                ghop = test_propagation(hop, vh, dd,
                                                        region_hmax=region_hmax,
                                                        region_hmin=region_hmin)
                        else:
                            # The unaliased solution is valid; keep it
                            ea = elvs[ff][i]
                            ee = elv_errs[ff][i]
                        if hop <= max_hop and ghop:
                            # Update the lists
                            hops[ff][i] = hop
                            dist[ff][i] = dd
                            vheights[ff][i] = vh
                            vherrs[ff][i] = vhe
                            elvs[ff][i] = ea
                            elv_errs[ff][i] = ee
                            regions[ff][i] = assign_region(vh,
                                                           region_hmax=region_hmax,
                                                           region_hmin=region_hmin)
                        else:
                            # Unable to calculate a realistic virtual
                            # height within a sane number of hops, even accounting
                            # for possible aliasing
                            hops[ff][i] = np.nan
                            elvs[ff][i] = np.nan
                            vheights[ff][i] = np.nan
        else:
            # Elevation calculation failed; flag every point for this FoV
            hops[ff] = [np.nan for r in slist]
            elvs[ff] = [np.nan for r in slist]
            vheights[ff] = [np.nan for r in slist]
    return beam, elvs, elv_errs, vheights, vherrs, hops, regions, hard
#---------------------------------------------------------------------------
def update_backscatter(rad_bms, min_pnts=3,
                       region_hmax={"D":115.0,"E":150.0,"F":900.0},
                       region_hmin={"D":75.0,"E":115.0,"F":150.0},
                       rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
                       max_rg=[5,25,40,76], max_hop=3.0,
                       ut_box=dt.timedelta(minutes=20.0), tdiff=None,
                       tdiff_args=list(), tdiff_e=None, tdiff_e_args=list(),
                       ptest=True, strict_gs=False, bmaz_e=0.0, boresite_e=0.0,
                       ix_e=0.0, iy_e=0.0, iz_e=0.0, step=6):
    """Updates the propagation path, elevation, backscatter type, and origin
    field-of-view (FoV) for all backscatter observations in each beam. Scans
    of data are used to determine the origin field-of-view (FoV), but a full
    scan is not necessary, but if the number of beams is less than the specified
    minimum, a less rigorous evaluation method is used.
    Parameters
    -------------
    rad_bms : (list or class `pydarn.sdio.radDataTypes.radDataPtr`)
        A list of or pointer to beamData class objects
    min_pnts : (int)
        The minimum number of points necessary to perform certain range gate
        or beam specific evaluations. (default=3)
    region_hmax : (dict)
        Maximum virtual heights allowed in each ionospheric layer.
        (default={"D":115.0,"E":150.0,"F":900.0})
    region_hmin : (dict)
        Minimum virtual heights allowed in each ionospheric layer.
        (default={"D":75.0,"E":115.0,"F":150.0})
    rg_box : (list of int)
        The total number of range gates to include when examining the elevation
        angle across all beams. (default=[2,5,10,20])
    vh_box : (list of float)
        The total width of the altitude box to consider when examining the
        elevation angle across all beams at a given range gate.
        (default=[50.0,50.0,50.0,150.0])
    max_rg : (list of int)
        Upper range gate limits used to select the matching rg_box and vh_box
        values. (default=[5,25,40,76])
    max_hop : (float)
        The maximum allowable hop to be considered physical. (default=3.0)
    ut_box : (class `dt.timedelta`)
        Total width of universal time box to examine for backscatter FoV
        continuity. (default=20.0 minutes)
    tdiff : (function or NoneType)
        A function to retrieve tdiff values (in microsec) using the radar ID
        number current datetime, and transmission frequency as input.
        Additional inputs may be specified using tdiff_args. Example:
        def get_tdiff(stid, time, tfreq, filename) { do things } return tdiff
        tdiff=get_tdiff, tdiff_args=["tdiff_file"]
        (default=None)
    tdiff_args : (list)
        A list specifying any arguments other than radar, time, and
        transmission frequency to run the specified tdiff function.
        (default=list())
    tdiff_e : (function or NoneType)
        A function to retrieve tdiff error values (in microsec) using the radar
        ID number, current datetime, and transmission frequency as input.
        Additional inputs may be specified using tdiff_e_args. Example:
        def get_tdiffe(stid, time, tfreq, filename) { do things } return tdiffe
        tdiff_e=get_tdiffe, tdiff_e_args=["tdiff_file"]
        (default=None)
    tdiff_e_args : (list)
        A list specifying any arguments other than radar, time, and
        transmission frequency to run the specified tdiff_e function.
        (default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=False)
    bmaz_e : (float)
        Error in beam azimuth in degrees (default=0.0)
    boresite_e : (float)
        Error in the boresite location in degrees (default=0.0)
    ix_e : (float)
        Error in the interferometer x coordinate in meters (default=0.0)
    iy_e : (float)
        Error in the interferometer y coordinate in meters (default=0.0)
    iz_e : (float)
        Error in the interferometer z coordinate in meters (default=0.0)
    step : (int)
        Integer denoting the number of processing steps to perform. This should
        always be set to 6 (or greater) unless one wishes to reproduce the
        demonstration plots in Burrell et al (2015). (default=6) The step
        numbers coincide with those indicated in the paper:
        1 or 2: Examine the elevation structure across each scan
        3: Add assignments for points with realistic heights in only one FoV
        4: Add assignments using single-beam elevation angle variations
        5 or more: Test assignments for consistency along the scan.
    Returns
    ---------
    beams : (dict)
        A dictionary of updated beamData class objects. The dictionary keys
        correspond to the beam numbers, and contain lists of beams sorted
        by UT with the following additional/updated attributes
        beam.fit.fovelv : added : Accounts for adjusted tdiff and origin FoV
        beam.fit.fovelv_e : added : elevation error
        beam.fit.felv : added : Elevation angle assuming front FoV
        beam.fit.felv_e : added : Elevation angle error assuming front FoV
        beam.fit.belv : added : Elevation angle assuming rear FoV
        beam.fit.belv_e : added : Elevation angle error assuming front FoV
        beam.fit.vheight : added : virtual height of ionosphere in km
        beam.fit.vheight_e : added : error in virtual height (km)
        beam.fit.fvheight : added : virtual height assuming front FoV
        beam.fit.fvheight_e : added : error in virtual height assuming front FoV
        beam.fit.bvheight : added : virtual height assuming rear FoV
        beam.fit.bvheight_e : added : error in virtual height assuming rear FoV
        beam.fit.hop : added : Hop assuming the assigned origin FoV
        beam.fit.fhop : added : Hop assuming the front FoV
        beam.fit.bhop : added : Hop assuming the rear FoV
        beam.fit.region : added : Region assuming the assigned origin FoV
        beam.fit.fregion : added : Region assuming the front FoV
        beam.fit.bregion : added : Region assuming the rear FoV
        beam.fit.fovflg : added : Flag indicating origin FoV (1=front, -1=back,
                                  0=indeterminate)
        beam.fit.pastfov : added : Flag indicating past FoV assignments
        beam.fit.gflg : updated : Flag indicating backscatter type
                        (1=ground, 0=ionospheric, -1=indeterminate)
        beam.prm.tdiff : added : tdiff used in elevation (microsec)
        beam.prm.tdiff_e : possibly added : tdiff error (microsec)
    If the input is incorrect, exits with an exception
    """
    import davitpy.pydarn.sdio as sdio
    import davitpy.pydarn.radar as pyrad
    #----------------------------------
    # Test input
    # NOTE: the second argument of each assert (the logging.error call) is
    # only evaluated when the assertion fails; it logs the problem and
    # returns None, so the AssertionError itself carries no message
    assert(((isinstance(rad_bms, list) or isinstance(rad_bms, np.ndarray)) and
            isinstance(rad_bms[0], sdio.radDataTypes.beamData)) or
           isinstance(rad_bms, sdio.radDataTypes.radDataPtr)), \
        logging.error('need a list/array of beams or a radar data pointer')
    if isinstance(min_pnts, float):
        min_pnts = int(min_pnts)
    assert isinstance(min_pnts, int) and min_pnts >= 0, \
        logging.error('unknown point minimum [{:}]'.format(min_pnts))
    assert isinstance(region_hmin, dict) and min(region_hmin.values()) >= 0.0, \
        logging.error("unknown minimum h' [{:}]".format(region_hmin))
    assert isinstance(region_hmax, dict), \
        logging.error("unknown maximum h' [{:}]".format(region_hmax))
    assert((isinstance(rg_box, list) or isinstance(rg_box, np.ndarray))
           and min(rg_box) >= 1.0), \
        logging.error('range gate box is too small [{:}]'.format(rg_box))
    assert((isinstance(vh_box, list) or isinstance(vh_box, np.ndarray))
           and min(vh_box) >= 0.0), \
        logging.error('virtual height box is too small [{:}]'.format(vh_box))
    assert((isinstance(max_rg, list) or isinstance(max_rg, np.ndarray))
           and min(max_rg) >= 0), \
        logging.error('max range gate box is too small [{:}]'.format(max_rg))
    if isinstance(max_hop, int):
        max_hop = float(max_hop)
    assert isinstance(max_hop, float) and max_hop >= 0.5, \
        logging.error('hop limits are unrealistic [{:}]'.format(max_hop))
    assert isinstance(ut_box, dt.timedelta) and ut_box.total_seconds() > 0.0, \
        logging.error('UT box must be a positive datetime.timdelta object')
    if isinstance(step, float):
        step = int(step)
    assert isinstance(step, int), logging.error('step flag must be an int')
    #-----------------------------------------------------------------------
    # Cycle through all the beams
    snum = 0
    num = 0
    bm, num = get_beam(rad_bms, num)
    max_del_beam = 3
    have_scan = False
    # Load the hardware data for the first time
    try:
        hard = pyrad.site(radId=bm.stid, dt=bm.time)
    except:
        logging.error("no data available in input rad structure")
        return None
    #----------------------------------------------------------------
    # Cycle through the data, updating the beams one scan at a time
    # Buffer holding the beams of the scan currently being assembled.
    # NOTE(review): sized to hard.maxbeam entries -- a scan reporting more
    # beams than the hardware beam count would overflow this buffer; confirm
    # upstream data cannot do this
    scan = np.empty(shape=(hard.maxbeam,), dtype=type(bm))
    beams = list()
    while bm is not None:
        # Load the beam into the current scan if the scan is empty or if
        # the current beam is within a specified period of time considering
        # the difference in beams
        if snum == 0:
            bm.scan_time = bm.time
            scan[snum] = bm
            snum += 1
            bm_sign = 0
        else:
            # Time and beam-number separation from the last beam in the scan
            del_time = (bm.time - scan[snum-1].time).total_seconds()
            del_beam = bm.bmnum - scan[snum-1].bmnum
            time_inc = bm.prm.inttsc + bm.prm.inttus * 1.0e-6
            # Accept the beam into the scan only if the beam number moved, the
            # control program matches, and the elapsed time is consistent with
            # the number of beams stepped over
            if(del_beam != 0 and bm.cp == scan[0].cp and
               del_time <= 3.0 * abs(del_beam) * time_inc and
               abs(del_beam) <= max_del_beam):
                # bm_sign records the direction the beam number is advancing;
                # a reversal in direction marks the start of a new scan
                if bm_sign == 0 or bm_sign == np.sign(del_beam):
                    bm_sign = np.sign(del_beam)
                    bm.scan_time = scan[0].time
                    scan[snum] = bm
                    snum += 1
                else:
                    have_scan = True
            else:
                have_scan = True
        #-----------------------------------------------------------------
        # If a scan has been loaded, update the backscatter data in the
        # beams and load the current beam as the first element of a new scan
        if have_scan:
            if snum >= min_pnts:
                st = scan[0].time
                b = update_bs_w_scan(scan[0:snum], hard, min_pnts=min_pnts,
                                     region_hmax=region_hmax,
                                     region_hmin=region_hmin,
                                     rg_box=rg_box, vh_box=vh_box,
                                     rg_max=max_rg, max_hop=max_hop,
                                     tdiff=tdiff, tdiff_args=tdiff_args,
                                     tdiff_e=tdiff_e,
                                     tdiff_e_args=tdiff_e_args, ptest=ptest,
                                     strict_gs=strict_gs, bmaz_e=bmaz_e,
                                     boresite_e=boresite_e, ix_e=ix_e,
                                     iy_e=iy_e, iz_e=iz_e, step=step)
                if b is not None:
                    beams.extend(list(b))
                else:
                    logging.info("unable to update scan at {:}".format(st))
            bm.scan_time = bm.time
            scan[0] = bm
            snum = 1
            bm_sign = 0
            have_scan = False
        # Cycle to next beam
        bm, num = get_beam(rad_bms, num)
    # NOTE(review): any beams still accumulated in `scan` when the data runs
    # out are discarded here without being processed -- the final scan of the
    # record never reaches update_bs_w_scan; confirm this is intended
    #---------------------------------------------------------------------
    # Once the scans have been loaded, beam-UT tests of the FoV flags can
    # be performed
    inc_rg_box = 3
    beam_dict = beam_ut_struct_test(beams, frg_box=np.array(rg_box)+inc_rg_box,
                                    max_rg=max_rg, ut_box=ut_box,
                                    reg_attr="region", hop_attr="hop",
                                    fov_attr="fovflg", step=step)
    return(beam_dict)
def beam_ut_struct_test(rad_bms, min_frac=.10, frg_box=[5,8,13,23],
                        max_rg=[5,25,40,76], ut_box=dt.timedelta(minutes=20.0),
                        reg_attr="region", hop_attr="hop", fov_attr="fovflg",
                        restrict_attr=[], restrict_lim=[], step=6):
    """Routine to test for field-of-view (FoV) and structure continuity in UT
    across each beam. Hop (or groundscatter flag) will be used to separate
    structure types.
    Parameters
    -----------
    rad_bms : (list or class `sdio.radDataTypes.radDataPtr`)
        List of or pointer to beam data
    min_frac : (float)
        Minimum fraction of possible backscatter points needed in the RG/UT
        box to perform the FoV calculation (default=.1)
    frg_box : (list, np.array)
        Total width of range gate box to examine for backscatter FoV
        continuity. (default=[5,8,13,23])
    max_rg : (list, np.array)
        Upper range gate limits used to select the matching frg_box value.
        (default=[5,25,40,76])
    ut_box : (class `dt.timedelta`)
        Total width of universal time box to examine for backscatter FoV
        continuity. (default=20.0 minutes)
    reg_attr : (string)
        beam.fit attribute name to separate different ionospheric regions.
        Can discard by entering nothing. (default="region")
    hop_attr : (string)
        beam.fit attribute name to separate different structure types. Designed
        to use either the groundscatter flag or the hop data. (default="hop")
    fov_attr : (string)
        beam.fit attribute name of the FoV flag (default="fovflg")
    restrict_attr : (list)
        List containing strings with attribute names. Used to restrict the
        consideration further, such as by virtual height or slant path distance
        from the radar to the first ionospheric reflection point. An empty list
        means no additional restrictions are desired. (default=[])
    restrict_lim : (list)
        List containing two-element lists with the minimum and maximum values
        of the restriction limits for the attributes contained in restrict_attr.
        (default=[])
    step : (int)
        Integer denoting the number of processing steps to perform. This should
        always be set to 6 (or greater) unless one wishes to reproduce the
        demonstration plots in Burrell et al (2015). (default=6) The step
        numbers coincide with those indicated in the paper:
        1-5: Examine the elevation structure and consistency along the scan
        6: Test for temporal consistency
    Returns
    ----------
    beams : (dict)
        Dictionary containing lists of beams with updated FoV flags separated
        by beam number. The beam numbers are the dictionary keys
    """
    import davitpy.pydarn.sdio as sdio
    import davitpy.pydarn.radar as pyrad
    # Fraction of points that must share a FoV for that FoV to dominate a
    # structure
    fov_frac = 2.0 / 3.0
    near_rg = -1
    #----------------------------
    # Initialize the output
    beams = dict()
    #----------------------------------
    # Test input
    if(not isinstance(rad_bms, list) and
       not isinstance(rad_bms, sdio.radDataTypes.radDataPtr)):
        logging.error('need a list of beams or a radar data pointer')
        return beams
    if(isinstance(rad_bms, list) and
       (len(rad_bms) <= 0 or not isinstance(rad_bms[0],
                                            sdio.radDataTypes.beamData))):
        logging.error('list must contain at least one beam')
        return beams
    if isinstance(min_frac, int):
        min_frac = float(min_frac)
    if not isinstance(min_frac, float) or min_frac <= 0.0 or min_frac > 1.0:
        estr = 'unrealistic minimum FoV fraction [{:}]'.format(min_frac)
        logging.error(estr)
        return beams
    if((not isinstance(frg_box, list) and not isinstance(frg_box, np.ndarray))
       or len(frg_box) <= 0):
        estr = 'unrealistic FoV range gate box [{:}]'.format(frg_box)
        logging.error(estr)
        return beams
    if((not isinstance(max_rg, list) and not isinstance(max_rg, np.ndarray))
       or len(max_rg) <= 0):
        estr = 'unrealistic maximum range gate box [{:}]'.format(max_rg)
        logging.error(estr)
        return beams
    if not isinstance(ut_box, dt.timedelta) or ut_box.total_seconds() <= 0.0:
        logging.error('unrealistic UT box [{:}]'.format(ut_box))
        return beams
    if not isinstance(restrict_attr, list):
        logging.error('provide more restricting attributes in a list')
        return beams
    if not isinstance(restrict_lim, list):
        logging.error('provide more restricting limits in a list')
        return beams
    if isinstance(step, float):
        step = int(step)
    if not isinstance(step, int):
        logging.error('unrealistic step flag [{:}]'.format(step))
        return beams
    if not isinstance(reg_attr, str) or len(reg_attr) <= 0:
        logging.error('badly formated region attribute [{:}]'.format(reg_attr))
        return beams
    # Test the attribute's own length (previously these two tests repeated
    # len(reg_attr), letting empty hop/FoV attribute names through)
    if not isinstance(hop_attr, str) or len(hop_attr) <= 0:
        logging.error('badly formated hop attribute [{:}]'.format(hop_attr))
        return beams
    if not isinstance(fov_attr, str) or len(fov_attr) <= 0:
        estr = 'badly formated FoV flag attribute [{:}]'.format(fov_attr)
        logging.error(estr)
        return beams
    #-----------------------------------------------------------------------
    # Load the first beam and initialize limits
    num = 0
    bm, num = get_beam(rad_bms, num)
    # Half-widths of the range gate boxes
    rhalf = [int(r * 0.5) for r in frg_box]
    try:
        hard = pyrad.site(radId=bm.stid, dt=bm.time)
    except:
        logging.error("no data available in input rad structure")
        return(beams)
    while bm is not None:
        bnum = bm.bmnum
        if near_rg < 0:
            # Gate number corresponding to a 500 km slant range (round-trip
            # travel at c), given the lag to the first range and the sample
            # separation; used as the near-range cutoff below
            near_rg = ((500.0 / (5.0e-10 * scicon.c) - bm.prm.lagfr)
                       / bm.prm.smsep)
        # Load the beams into the output dictionary
        if beams.has_key(bnum):
            beams[bnum].append(bm)
        else:
            beams[bnum] = [bm]
        # Cycle to the next beam
        bm, num = get_beam(rad_bms, num)
    #-----------------------------------------------------------------------
    # Test the step flag and see if the temporal continuity test should be
    # performed
    if step < 6:
        estr = "not testing backscatter assignments with temporal continuity"
        logging.info(estr)
        return(beams)
    #-----------------------------------------------------------------------
    # Cycle through all the beams, updating the FoV flag and structure flag
    # once enough data has been loaded
    for bnum in beams.keys():
        bis = 0
        # Per-point tallies of how often each point agreed ("in"),
        # disagreed ("out"), or sat in a mixed structure ("mix")
        fovbelong = [[{"out":0, "in":0, "mix":0}
                      for j in beams[bnum][i].fit.slist]
                     for i in np.arange(0, len(beams[bnum]))]
        fovpast = [[j for j in beams[bnum][i].fit.pastfov]
                   for i in np.arange(0, len(beams[bnum]))]
        for i in np.arange(0, len(beams[bnum])):
            # See if there is enough data at this beam to begin the evaluation
            while beams[bnum][i].time - beams[bnum][bis].time >= ut_box:
                # Check the common program of each of the beams. For a UT
                # comparision, the cp must be the same for all beams
                bicp = [bis + j for j,b in enumerate(beams[bnum][bis:i])
                        if(b.cp == beams[bnum][bis].cp and
                           b.time - beams[bnum][bis].time < ut_box)]
                # Test to see if there is enough data to fill the time window
                # NOTE(review): this check can never trigger -- the enclosing
                # while condition guarantees the time difference is >= ut_box
                if beams[bnum][i].time - beams[bnum][bis].time < ut_box:
                    break
                # Get the range gate, FoV flag, hop, beam index, and range
                # gate index for all backscatter points at these beams
                rgates = list()
                fovflg = list()
                onefov = list()
                hops = list()
                regions = list()
                bi = list()
                ri = list()
                rdata = dict()
                for rattr in restrict_attr:
                    rdata[rattr] = list()
                for bb in bicp:
                    b = beams[bnum][bb]
                    # Load data from the beam, if it exists
                    if(b.fit is not None and hasattr(b.fit, "slist") and
                       hasattr(b.fit, fov_attr) and hasattr(b.fit, hop_attr)):
                        slist = getattr(b.fit, "slist")
                        rgates.extend(slist)
                        bi.extend([bb for j in slist])
                        ri.extend([j for j,r in enumerate(slist)])
                        fflg = getattr(b.fit, fov_attr)
                        fovflg.extend(fflg)
                        # Elevation of the FoV opposite the assigned one;
                        # a NaN here (at near ranges) means only one FoV can
                        # produce a realistic elevation for this point
                        otherelv = [b.fit.felv[oe] if ff == -1 else
                                    b.fit.belv[oe] for oe,ff in enumerate(fflg)]
                        onefov.extend([np.isnan(oe) if slist[j] < near_rg
                                       else False
                                       for j,oe in enumerate(otherelv)])
                        hops.extend(getattr(b.fit, hop_attr))
                        if len(reg_attr) > 0 and hasattr(b.fit, reg_attr):
                            regions.extend(getattr(b.fit, reg_attr))
                        for j,rattr in enumerate(restrict_attr):
                            if hasattr(b.fit, rattr):
                                rdata[rattr].extend(getattr(b.fit, rattr))
                            else:
                                rdata[rattr].extend([restrict_lim[j][0]
                                                     for r in slist])
                if len(rgates) > 0:
                    # Cycle through range gate boxes
                    range_min = np.nanmin(rgates)
                    range_max = np.nanmax(rgates)
                    if range_max > max(max_rg):
                        range_max = max(max_rg)
                    rgates = np.array(rgates)
                    fovflg = np.array(fovflg)
                    onefov = np.array(onefov)
                    # Combine hop and region data (if available), to allow
                    # a comprehensive division by propagation path
                    if len(regions) == len(hops):
                        chops = ["{:.1f}{:s}".format(hops[ihop], reg)
                                 if not np.isnan(hops[ihop]) and len(reg) > 0
                                 else np.nan for ihop,reg in enumerate(regions)]
                    else:
                        chops = hops
                    for rattr in restrict_attr:
                        rdata[rattr] = np.array(rdata[rattr])
                    for r in np.arange(range_min, range_max + 1):
                        # Select the indexes for this range gate box
                        ilim = 0
                        while ilim < len(max_rg) and r >= max_rg[ilim]:
                            ilim += 1
                        # Guard against r reaching the final max_rg limit,
                        # which would push ilim past the end of rhalf/frg_box
                        if ilim >= len(rhalf):
                            ilim = len(rhalf) - 1
                        rmin = r - rhalf[ilim]
                        rmax = r + rhalf[ilim]
                        # If the box size is even, then the testing
                        # conditions will put too many points in the box
                        # unless the size is reduced. Effectively sets:
                        # jr = np.where(rgates[ir] < rmax)[0]
                        if frg_box[ilim] % 2 == 0:
                            rmax -= 1
                        # Now that we know how big our window is, we can
                        # determine the maximum number of points
                        max_pnts = float(len(bicp) * frg_box[ilim])
                        ir = np.where(rgates >= rmin)[0]
                        jr = np.where(rgates[ir] <= rmax)[0]
                        # Find the hop numbers to consider
                        shop = set([chops[ihop] for ihop in ir[jr]
                                    if isinstance(chops[ihop], str) or
                                    not np.isnan(chops[ihop])])
                        for ihop in shop:
                            hr = [ih for ih in ir[jr] if chops[ih] == ihop]
                            # Test any additional restrictions
                            if float(len(hr)) / max_pnts >= min_frac:
                                for j,rattr in enumerate(restrict_attr):
                                    if len(restrict_lim[j]) == 2:
                                        hk = [hr[k] for k,rd in
                                              enumerate(rdata[rattr][hr])
                                              if(rd >= restrict_lim[j][0]
                                                 and rd < restrict_lim[j][1])]
                                        hr = hk
                                        # Quit testing if there aren't enough
                                        # points to perform the UT structure
                                        # evaluation
                                        if float(len(hr)) / max_pnts < min_frac:
                                            break
                                # Evaluate the temporal FoV structures
                                if float(len(hr)) / max_pnts < min_frac:
                                    # There are not enough points in this range
                                    # gate and UT box to evaluate the
                                    # backscatter structures at this hop
                                    estr = "unable to evaluate beam ["
                                    estr = "{:s}{:d}] at [".format(estr, bnum)
                                    estr = "{:s}{:}".format(estr,
                                                            beams[bnum][bis].time)
                                    estr = "{:s}] gate [{:d}], ".format(estr, r)
                                    estr = "{:s}insufficient ".format(estr)
                                    estr = "{:s}backscatter [".format(estr)
                                    estr = "{:s}{:d} < ".format(estr, len(hr))
                                    estr = "{:s}{:.0f}".format(estr, max_pnts
                                                               * min_frac)
                                    # ihop is a float when no region data was
                                    # available (chops = hops), so use the
                                    # default presentation rather than {:s}
                                    estr = "{:s}] at hop [{:}]".format(estr, ihop)
                                    logging.info(estr)
                                elif float(len(hr)) / max_pnts > 1.0:
                                    estr = "maximum number of points exceeded for "
                                    estr = "{:s}beam [{:d}] ".format(estr, bnum)
                                    estr = "{:s}between range gates ".format(estr)
                                    estr = "{:s}[{:d}-{:d}".format(estr, rmin, rmax)
                                    estr = "{:s}] at [{:}".format(estr, \
                                        beams[bnum][bis].time)
                                    estr = "{:s} to {:}]".format(estr, \
                                        beams[bnum][max(bicp)].time)
                                    estr = "{:s}: {:d} > ".format(estr, len(hr))
                                    estr = "{:s}{:f}".format(estr, max_pnts)
                                    logging.error(estr)
                                else:
                                    # Get the number of backscatter observations
                                    # in each field-of-view
                                    rr = dict()
                                    rr[1] = np.where(fovflg[hr] == 1)[0]
                                    rr[-1] = np.where(fovflg[hr] == -1)[0]
                                    fn = float(len(rr[1]))
                                    bn = float(len(rr[-1]))
                                    tn = fn + bn
                                    ffrac = fn / tn if tn > 0.0 else -1.0
                                    bad_fov = 0
                                    good_fov = False
                                    if(ffrac > 0.0 and ffrac >= fov_frac and
                                       bn > 0.0):
                                        good_fov = True
                                        bad_fov = -1
                                    elif(ffrac >= 0.0 and 1.0-ffrac >= fov_frac
                                         and fn > 0.0):
                                        good_fov = True
                                        bad_fov = 1
                                    # Tag the FoV for being consistent or not and
                                    # mixed or not, unless this backscatter point
                                    # only has one valid FoV
                                    if good_fov:
                                        for irr in rr[bad_fov]:
                                            if not onefov[hr[irr]]:
                                                zz = bi[hr[irr]]
                                                yy = ri[hr[irr]]
                                                fovbelong[zz][yy]['out'] += 1
                                        for irr in rr[-bad_fov]:
                                            zz = bi[hr[irr]]
                                            yy = ri[hr[irr]]
                                            fovbelong[zz][yy]['in'] += 1
                                    else:
                                        for ih in hr:
                                            if abs(fovflg[ih]) == 1:
                                                zz = bi[ih]
                                                yy = ri[ih]
                                                fovbelong[zz][yy]['mix'] += 1
                del rgates, fovflg, hops, bi, ri
                bis += 1
        # Update the fovflags
        for i in np.arange(0, len(beams[bnum])):
            for j,bdict in enumerate(fovbelong[i]):
                if(bdict["out"] > 0 and
                   bdict["in"] < bdict["out"] + bdict["mix"]):
                    # Update the FoV flag and the structure flag, since a
                    # structure cannot be set without a FoV
                    if(bdict['out'] > bdict['mix'] and
                       bdict['out'] > bdict['in']):
                        beams[bnum][i].fit.fovflg[j] = fovpast[i][j]
                    else:
                        beams[bnum][i].fit.fovflg[j] = 0
                    if fovpast[i][j] != 0:
                        # Carry over the elevation, height, hop, and region
                        # that match the restored past FoV
                        if fovpast[i][j] == -1:
                            nelv = beams[bnum][i].fit.belv[j]
                            nelve = beams[bnum][i].fit.belv_e[j]
                            nheight = beams[bnum][i].fit.bvheight[j]
                            nheighte = beams[bnum][i].fit.bvheight_e[j]
                            nhop = beams[bnum][i].fit.bhop[j]
                            nreg = beams[bnum][i].fit.bregion[j]
                        else:
                            nelv = beams[bnum][i].fit.felv[j]
                            nelve = beams[bnum][i].fit.felv_e[j]
                            nheight = beams[bnum][i].fit.fvheight[j]
                            nheighte = beams[bnum][i].fit.fvheight_e[j]
                            nhop = beams[bnum][i].fit.fhop[j]
                            nreg = beams[bnum][i].fit.fregion[j]
                        beams[bnum][i].fit.fovelv[j] = nelv
                        beams[bnum][i].fit.fovelv_e[j] = nelve
                        beams[bnum][i].fit.vheight[j] = nheight
                        beams[bnum][i].fit.vheight_e[j] = nheighte
                        beams[bnum][i].fit.hop[j] = nhop
                        beams[bnum][i].fit.region[j] = nreg
                        fovpast[i][j] = 0
    return(beams)
| aburrell/davitpy | davitpy/pydarn/proc/fov/update_backscatter.py | Python | gpl-3.0 | 115,585 | [
"Gaussian"
] | 5dfff6a6505b9e42242bde9edffed3c1d82225b55db571cd5cb60cf1cceb3016 |
from pymbar import timeseries
from pymbar import testsystems
import numpy as np
from scipy import stats
from pymbar.utils_for_testing import eq, skipif
from six.moves import xrange
# statsmodels is an optional dependency: the FFT-based statistical
# inefficiency estimators require it, and the corresponding tests below
# are skipped (via @skipif) when it is not installed.
try:
    import statsmodels.api as sm
    HAVE_STATSMODELS = True
except ImportError as err:
    HAVE_STATSMODELS = False
def generate_data(N=10000, K=10):
    """Generate synthetic harmonic-oscillator test data.

    X and Y are independent normal draws (X is scaled down by a factor of
    10), and ``energy`` is the harmonic energy 10*X**2/2 + Y**2/2 evaluated
    at every sample.  Uses the global numpy RNG, so output is not seeded.

    Parameters
    ----------
    N : int, optional
        Number of samples per replica.
    K : int, optional
        Number of replicas.

    Returns
    -------
    tuple of numpy.ndarray
        (X, Y, energy), each of shape (K, N).
    """
    # The original built this unit-variance vector by concatenating K blocks
    # of ones in a loop (quadratic copying); a single allocation of the same
    # K*N ones is equivalent and O(K*N).
    var = np.ones(K * N)
    X = np.random.normal(np.zeros(K * N), var).reshape((K, N)) / 10.0
    Y = np.random.normal(np.zeros(K * N), var).reshape((K, N))
    energy = 10 * (X ** 2) / 2.0 + (Y ** 2) / 2.0
    return X, Y, energy
def test_statistical_inefficiency_single():
    """Smoke-test statisticalInefficiency on single 1-D timeseries.

    Only verifies the calls run without raising, for plain, squared and
    cross-correlated inputs; no accuracy checks yet (see TODO below).
    """
    X, Y, energy = generate_data()
    timeseries.statisticalInefficiency(X[0])
    timeseries.statisticalInefficiency(X[0], X[0])
    timeseries.statisticalInefficiency(X[0] ** 2)
    timeseries.statisticalInefficiency(X[0] ** 2, X[0] ** 2)
    timeseries.statisticalInefficiency(energy[0])
    timeseries.statisticalInefficiency(energy[0], energy[0])
    timeseries.statisticalInefficiency(X[0], X[0] ** 2)
# TODO: Add some checks to test statistical inefficinecies are within normal range
def test_statistical_inefficiency_multiple():
    """Smoke-test statisticalInefficiencyMultiple on multi-replica input.

    Exercises whole-array, squared, single-row and row-slice inputs; only
    checks the calls complete without raising.
    """
    X, Y, energy = generate_data()
    timeseries.statisticalInefficiencyMultiple(X)
    timeseries.statisticalInefficiencyMultiple(X ** 2)
    timeseries.statisticalInefficiencyMultiple(X[0, :] ** 2)
    timeseries.statisticalInefficiencyMultiple(X[0:2, :] ** 2)
    timeseries.statisticalInefficiencyMultiple(energy)
# TODO: Add some checks to test statistical inefficinecies are within normal range
@skipif(not HAVE_STATSMODELS, "Skipping FFT based tests because statsmodels not installed.")
def test_statistical_inefficiency_fft():
    """Check the FFT-based estimator agrees with the direct estimators.

    Runs statisticalInefficiency_fft for smoke coverage, then compares it
    against the direct implementation (with and without the explicit
    second series, and via the fft=True flag) on the same data.
    """
    X, Y, energy = generate_data()
    timeseries.statisticalInefficiency_fft(X[0])
    timeseries.statisticalInefficiency_fft(X[0] ** 2)
    timeseries.statisticalInefficiency_fft(energy[0])
    g0 = timeseries.statisticalInefficiency_fft(X[0])
    g1 = timeseries.statisticalInefficiency(X[0])
    g2 = timeseries.statisticalInefficiency(X[0], X[0])
    g3 = timeseries.statisticalInefficiency(X[0], fft=True)
    eq(g0, g1)
    eq(g0, g2)
    eq(g0, g3)
@skipif(not HAVE_STATSMODELS, "Skipping FFT based tests because statsmodels not installed.")
def test_statistical_inefficiency_fft_gaussian():
    """Validate FFT and direct estimators on uncorrelated and correlated gaussians.

    For i.i.d. gaussian data the statistical inefficiency g should be ~1;
    for data repeated 3x (perfectly correlated triples) it should be ~3.
    The log-space comparison with decimal=1 tolerates sampling noise.
    """
    # Run multiple times to get things with and without negative "spikes" at C(1)
    for i in range(5):
        x = np.random.normal(size=100000)
        g0 = timeseries.statisticalInefficiency(x, fast=False)
        g1 = timeseries.statisticalInefficiency(x, x, fast=False)
        g2 = timeseries.statisticalInefficiency_fft(x)
        g3 = timeseries.statisticalInefficiency(x, fft=True)
        eq(g0, g1, decimal=5)
        eq(g0, g2, decimal=5)
        eq(g0, g3, decimal=5)
        eq(np.log(g0), np.log(1.0), decimal=1)
    for i in range(5):
        x = np.random.normal(size=100000)
        x = np.repeat(x, 3)  # e.g. Construct correlated gaussian e.g. [a, b, c] -> [a, a, a, b, b, b, c, c, c]
        g0 = timeseries.statisticalInefficiency(x, fast=False)
        g1 = timeseries.statisticalInefficiency(x, x, fast=False)
        g2 = timeseries.statisticalInefficiency_fft(x)
        g3 = timeseries.statisticalInefficiency(x, fft=True)
        eq(g0, g1, decimal=5)
        eq(g0, g2, decimal=5)
        eq(g0, g3, decimal=5)
        eq(np.log(g0), np.log(3.0), decimal=1)
def test_detectEquil():
    """Smoke-test detectEquilibration on uncorrelated gaussian noise."""
    x = np.random.normal(size=10000)
    (t, g, Neff_max) = timeseries.detectEquilibration(x)
@skipif(not HAVE_STATSMODELS, "Skipping FFT based tests because statsmodels not installed.")
def test_detectEquil_binary():
    """Smoke-test the binary-search variant of detectEquilibration."""
    x = np.random.normal(size=10000)
    (t, g, Neff_max) = timeseries.detectEquilibration_binary_search(x)
@skipif(not HAVE_STATSMODELS, "Skipping FFT based tests because statsmodels not installed.")
def test_compare_detectEquil(show_hist=False):
    """
    compare detectEquilibration implementations (with and without binary search + fft)

    Repeatedly builds a composite correlated series with a flat trailing
    region (common in MC data) and checks that the mode of the difference
    in detected equilibration times between the two implementations is ~0.
    Pass show_hist=True to plot the histogram of differences interactively.
    """
    t_res = []
    N=100
    for _ in xrange(100):
        A_t = testsystems.correlated_timeseries_example(N=N, tau=5.0) + 2.0
        B_t = testsystems.correlated_timeseries_example(N=N, tau=5.0) + 1.0
        C_t = testsystems.correlated_timeseries_example(N=N*2, tau=5.0)
        D_t = np.concatenate([A_t, B_t, C_t, np.zeros(20)]) #concatenate and add flat region to one end (common in MC data)
        bs_de = timeseries.detectEquilibration_binary_search(D_t, bs_nodes=10)
        std_de = timeseries.detectEquilibration(D_t, fast=False, nskip=1)
        t_res.append(bs_de[0]-std_de[0])
    t_res_mode = float(stats.mode(t_res)[0][0])
    eq(t_res_mode,0.,decimal=1)
    if show_hist:
        import matplotlib.pyplot as plt
        plt.hist(t_res)
        plt.show()
def test_detectEquil_constant_trailing():
    """Regression test for issue #122: constant trailing data must not crash."""
    # This explicitly tests issue #122, see https://github.com/choderalab/pymbar/issues/122
    x = np.random.normal(size=100) * 0.01
    x[50:] = 3.0
    # The input data is some MCMC chain where the trailing end of the chain is a constant sequence.
    (t, g, Neff_max) = timeseries.detectEquilibration(x)
    """
    We only check that the code doesn't give an exception. The exact value of Neff can either be
    ~50 if we try to include part of the equilibration samples, or it can be Neff=1 if we find that the
    whole first half is discarded.
    """
| kyleabeauchamp/pymbar | pymbar/tests/test_timeseries.py | Python | lgpl-2.1 | 5,513 | [
"Gaussian"
] | 58b25ce355b2148cc85876628935f9c10766d49cd7c5732e8fe53f4d7075e90a |
from flask import session, url_for
from flask_nav import Nav
from flask_nav.elements import Navbar, View, Subgroup, Link, Text, Separator, RawTag
from flask_bootstrap.nav import BootstrapRenderer
from hashlib import sha1
from dominate import tags
# Shared flask-nav registry; the navbars and the renderer below register on it.
nav = Nav()
class SidedViewImage(View):
    """A flask-nav View whose label is an image, with a side flag.

    ``left`` is consumed by NavRenderer to decide which navbar the item
    appears in; the remaining attributes mirror flask-nav's View.
    """

    def __init__(self, image_url, text, left, endpoint, *args, **kwargs):
        # Render the view's label as an <img> tag instead of plain text.
        self.text = tags.img(src=image_url, alt=text)
        self.left, self.endpoint = left, endpoint
        self.url_for_args, self.url_for_kwargs = args, kwargs
class SidedView(View):
    """A flask-nav View carrying an extra ``left`` side flag for rendering."""

    def __init__(self, text, left, endpoint, *args, **kwargs):
        self.text, self.left = text, left
        self.endpoint = endpoint
        self.url_for_args, self.url_for_kwargs = args, kwargs
class SidedSubgroup(Subgroup):
    """A flask-nav Subgroup carrying an extra ``left`` side flag."""

    def __init__(self, title, left, *items):
        self.title, self.left, self.items = title, left, items
class SidedLink(Link):
    """A flask-nav Link carrying an extra ``left`` side flag."""

    def __init__(self, text, dest, left):
        self.text, self.dest, self.left = text, dest, left
@nav.navigation('guest')
def guest():
    """Navbar shown to unauthenticated visitors.

    Registered with flask-nav under the name 'guest'.  Items carrying a
    ``left`` flag are placed by NavRenderer; plain View items default to
    the left side (see NavRenderer.visit_Navbar).
    """
    return Navbar(
        SidedViewImage('https://aquaponics.systemsbiology.net/static/images/pflogo2.png', 'Project Feed 1010', True, 'frontend.index'),
        SidedLink('Login with Google+', '/social/Home', False),
        View('Home', 'frontend.index'),
        View('About', 'frontend.about'),
        View('Explore', 'dav.explore'),
        View('Resources', 'frontend.resources'),
        View('Questions?', 'frontend.contact'),
    )
@nav.navigation('member')
def member():
    """Navbar shown to logged-in members.

    Reads session['displayName'] for the user dropdown, so it must only be
    rendered inside a request with an authenticated session.
    """
    return Navbar(
        SidedViewImage('https://pf1010.systemsbiology.net/static/images/pflogo_isblogo.png', 'Project Feed 1010', True, 'social.index'),
        View('Home', 'social.index'),
        View('Explore', 'dav.explore'),
        View('Search Systems', 'social.search_systems'),
        Subgroup('Socialize',
                 View('Friends', 'social.friends'),
                 View('Groups', 'social.groups')
                 ),
        View('Resources', 'frontend.resources'),
        View('Questions?', 'frontend.contact'),
        SidedSubgroup(session['displayName'], False,
                 View('View Profile', 'social.profile', google_id = 'me'),
                 View('Edit Profile', 'social.editprofile'),
                 Separator(),
                 View('Logout', 'social.logout'),
                 )
    )
@nav.renderer('nav_renderer')
class NavRenderer(BootstrapRenderer):
    """Bootstrap renderer producing a two-tier fixed navbar.

    The top bar holds the brand and the non-left items (hidden on xs
    screens, where a collapsed menu lists every item); the bottom inverse
    bar repeats the left-side items for larger screens.
    """
    def visit_Navbar(self, node):
        # Outer wrapper pins both bars to the top of the viewport.
        root = tags.div()
        root['class'] = 'navbar-fixed-top'
        # Stable-ish id for the collapse target, derived from the node's identity.
        node_id = self.id or sha1(str(id(node)).encode()).hexdigest()
        top = root.add(tags.nav())
        top['class'] = 'navbar navbar-default'
        top['id'] = 'navbar-top'
        container = top.add(tags.div(_class='container'))
        header = container.add(tags.div(_class='navbar-header'))
        # Hamburger toggle shown on small screens; targets the collapse div below.
        button = header.add(tags.button())
        button['type'] = 'button'
        button['class'] = 'navbar-toggle collapsed'
        button['data-toggle'] = 'collapse'
        button['data-target'] = '#' + node_id
        button['aria-expanded'] = 'false'
        button['aria-controls'] = 'navbar'
        button.add(tags.span('Toggle navigation', _class='sr-only'))
        button.add(tags.span(_class='icon-bar'))
        button.add(tags.span(_class='icon-bar'))
        button.add(tags.span(_class='icon-bar'))
        if node.title is not None:
            if hasattr(node.title, 'get_url'):
                header.add(tags.a(node.title.text, _class='navbar-brand', href=node.title.get_url()))
            else:
                header.add(tags.span(node.title, _class='navbar-brand'))
        bar = container.add(tags.div(_class='navbar-collapse collapse', id=node_id))
        # Collapsed (xs) menu receives every item; the right-hand list only
        # receives items explicitly flagged left=False.
        bar_left = bar.add(tags.ul(_class='nav navbar-nav navbar-left visible-xs'))
        bar_right = bar.add(tags.ul(_class='nav navbar-nav navbar-right hidden-xs'))
        for item in node.items:
            bar_left.add(self.visit(item))
            if not getattr(item, 'left', True):
                bar_right.add(self.visit(item))
        # Spacer keeps page content from sliding under the fixed bars.
        spacer = root.add(tags.div())
        spacer['id'] = 'navbar-spacer'
        # Second (inverse) bar shows the left-side items on non-xs screens.
        bottom = root.add(tags.nav())
        bottom['class'] = 'navbar navbar-inverse hidden-xs'
        bottom['id'] = 'navbar-bottom'
        container = bottom.add(tags.div(_class='container'))
        bar = container.add(tags.div(_class='navbar-collapse collapse'))
        bar_left = bar.add(tags.ul(_class='nav navbar-nav navbar-left'))
        for item in node.items:
            if getattr(item, 'left', True):
                bar_left.add(self.visit(item))
        return root
| baliga-lab/pf1010-web | aqxWeb/nav.py | Python | lgpl-3.0 | 4,675 | [
"VisIt"
] | cfb5d2c8c3156794808e8ec0948474f997261fdddd35d89b90b1e9b4e9a2a15a |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext as _
from koalixcrm.crm.contact.phone_address import PhoneAddress
from koalixcrm.crm.contact.email_address import EmailAddress
from koalixcrm.crm.contact.postal_address import PostalAddress
from koalixcrm.crm.contact.call import Call
from koalixcrm.crm.contact.person import *
from koalixcrm.crm.const.purpose import *
from koalixcrm.global_support_functions import xstr
from koalixcrm.crm.inlinemixin import LimitedAdminInlineMixin
class Contact(models.Model):
    """Base CRM contact record with automatic audit timestamps."""
    name = models.CharField(max_length=300,
                            verbose_name=_("Name"))
    # Timestamps are maintained automatically by Django (auto_now_add / auto_now).
    date_of_creation = models.DateTimeField(verbose_name=_("Created at"),
                                            auto_now_add=True)
    last_modification = models.DateTimeField(verbose_name=_("Last modified"),
                                             auto_now=True)
    last_modified_by = models.ForeignKey('auth.User', limit_choices_to={'is_staff': True},
                                         blank=True,
                                         verbose_name=_("Last modified by"),
                                         editable=True)
    class Meta:
        app_label = "crm"
        verbose_name = _('Contact')
        verbose_name_plural = _('Contact')
    def __str__(self):
        return self.name
class PhoneAddressForContact(PhoneAddress):
    """Phone number attached to a Contact, tagged with a purpose code."""
    purpose = models.CharField(verbose_name=_("Purpose"),
                               max_length=1,
                               choices=PURPOSESADDRESSINCUSTOMER)
    # NOTE(review): field is named 'person' but points at Contact — kept as-is
    # since renaming would require a schema migration.
    person = models.ForeignKey(Contact)
    class Meta:
        app_label = "crm"
        verbose_name = _('Phone Address For Contact')
        verbose_name_plural = _('Phone Address For Contact')
    def __str__(self):
        return str(self.phone)
class EmailAddressForContact(EmailAddress):
    """Email address attached to a Contact, tagged with a purpose code."""
    purpose = models.CharField(verbose_name=_("Purpose"),
                               max_length=1,
                               choices=PURPOSESADDRESSINCUSTOMER)
    person = models.ForeignKey(Contact)
    class Meta:
        app_label = "crm"
        verbose_name = _('Email Address For Contact')
        verbose_name_plural = _('Email Address For Contact')
    def __str__(self):
        return str(self.email)
class PostalAddressForContact(PostalAddress):
    """Postal address attached to a Contact, tagged with a purpose code."""
    purpose = models.CharField(verbose_name=_("Purpose"),
                               max_length=1,
                               choices=PURPOSESADDRESSINCUSTOMER)
    person = models.ForeignKey(Contact)
    class Meta:
        app_label = "crm"
        verbose_name = _('Postal Address For Contact')
        verbose_name_plural = _('Postal Address For Contact')
    def __str__(self):
        # xstr guards against None parts when composing the display string.
        return xstr(self.pre_name) + ' ' + xstr(self.name) + ' ' + xstr(self.address_line_1)
class ContactPostalAddress(admin.StackedInline):
    """Collapsible admin inline for editing a Contact's postal addresses."""
    model = PostalAddressForContact
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': ('prefix',
                       'pre_name',
                       'name',
                       'address_line_1',
                       'address_line_2',
                       'address_line_3',
                       'address_line_4',
                       'zip_code',
                       'town',
                       'state',
                       'country',
                       'purpose')
        }),
    )
    allow_add = True
class ContactPhoneAddress(admin.TabularInline):
    """Collapsible admin inline for editing a Contact's phone numbers."""
    model = PhoneAddressForContact
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': ('phone', 'purpose',)
        }),
    )
    allow_add = True
class ContactEmailAddress(admin.TabularInline):
    """Collapsible admin inline for editing a Contact's email addresses."""
    model = EmailAddressForContact
    extra = 1
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': ('email', 'purpose',)
        }),
    )
    allow_add = True
class ContactPersonAssociation(models.Model):
    """Join table linking Contacts and Persons (many-to-many through model)."""
    contact = models.ForeignKey(Contact, related_name='person_association', blank=True, null=True)
    person = models.ForeignKey(Person, related_name='contact_association', blank=True, null=True)
    class Meta:
        app_label = "crm"
        verbose_name = _('Contacts')
        verbose_name_plural = _('Contacts')
    def __str__(self):
        # Intentionally blank: the association row itself has no display label.
        return ''
class PeopleInlineAdmin(admin.TabularInline):
    """Inline listing the people associated with a contact."""
    model = ContactPersonAssociation
    extra = 0
    show_change_link = True
class CompaniesInlineAdmin(admin.TabularInline):
    """Inline listing the contacts (companies) associated with a person."""
    model = ContactPersonAssociation
    extra = 0
    show_change_link = True
class OptionPerson(admin.ModelAdmin):
    """Admin for Person, listing core fields plus associated companies."""
    list_display = ('id',
                    'name',
                    'pre_name',
                    'email',
                    'role',
                    'get_companies',)
    fieldsets = (('', {'fields': ('prefix',
                                  'name',
                                  'pre_name',
                                  'role',
                                  'email',
                                  'phone',)}),)
    allow_add = True
    inlines = [CompaniesInlineAdmin]
    def get_companies(self, obj):
        """Return a comma-separated list of the person's company names."""
        items = []
        for c in obj.companies.all():
            items.append(c.name)
        return ','.join(items)
    get_companies.short_description = _("Works at")
class CallForContact(Call):
    """A phone call logged against a Contact, optionally tied to a Person."""
    company = models.ForeignKey(Contact)
    cperson = models.ForeignKey(Person, verbose_name=_("Person"),
                                blank=True,
                                null=True)
    purpose = models.CharField(verbose_name=_("Purpose"),
                               max_length=1,
                               choices=PURPOSECALLINCUSTOMER)
    class Meta:
        app_label = "crm"
        verbose_name = _('Call')
        verbose_name_plural = _('Calls')
    def __str__(self):
        return xstr(self.description) + ' ' + xstr(self.date_due)
class VisitForContact(Call):
    """An in-person visit logged against a Contact, optionally referencing
    the call that prompted it."""
    company = models.ForeignKey(Contact)
    cperson = models.ForeignKey(Person,
                                verbose_name=_("Person"),
                                blank=True,
                                null=True)
    purpose = models.CharField(verbose_name=_("Purpose"),
                               max_length=1,
                               choices=PURPOSEVISITINCUSTOMER)
    ref_call = models.ForeignKey(CallForContact,
                                 verbose_name=_("Reference Call"),
                                 blank=True,
                                 null=True)
    class Meta:
        app_label = "crm"
        verbose_name = _('Visit')
        verbose_name_plural = _('Visits')
    def __str__(self):
        return xstr(self.description) + ' ' + xstr(self.date_due)
class ContactCall(LimitedAdminInlineMixin, admin.StackedInline):
    """Inline for calls on a Contact; limits the person dropdown to
    people working at that contact."""
    model = CallForContact
    extra = 0
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': (
                'description',
                'date_due',
                'purpose',
                'status',
                'cperson',)
        }),
    )
    allow_add = True
    def get_filters(self, request, obj):
        # On add (obj is None) apply no filter; on change restrict cperson
        # to people associated with this contact.
        return getattr(self, 'filters', ()) if obj is None else (('cperson', dict(companies=obj.id)),)
class ContactVisit(LimitedAdminInlineMixin, admin.StackedInline):
    """Inline for visits on a Contact; limits person and reference-call
    dropdowns to this contact's own records."""
    model = VisitForContact
    extra = 0
    classes = ['collapse']
    fieldsets = (
        ('Basics', {
            'fields': (
                'description',
                'date_due',
                'purpose',
                'status',
                'cperson',
                'ref_call',)
        }),
    )
    allow_add = True
    def get_filters(self, request, obj):
        # ref_call is additionally restricted to calls with status 'S'
        # (presumably "successful" — confirm against the status choices).
        return getattr(self, 'filters', ()) if obj is None else (('cperson', dict(companies=obj.id)),('ref_call', dict(company=obj.id, status='S')))
class StateFilter(admin.SimpleListFilter):
    """Admin list filter: contacts by the state of any of their postal addresses."""
    title = _('State')
    parameter_name = 'state'
    def lookups(self, request, model_admin):
        """Return the distinct non-empty states as (value, label) pairs."""
        items = []
        for a in PostalAddressForContact.objects.values('state').distinct():
            if a['state']:
                items.append((a['state'], _(a['state'])))
        return (
            items
        )
    def queryset(self, request, queryset):
        """Restrict to contacts having a postal address in the selected state."""
        if self.value():
            matching_addresses = PostalAddressForContact.objects.filter(state=self.value())
            ids = [a.person.id for a in matching_addresses]
            return queryset.filter(pk__in=ids)
        return queryset
class CityFilter(admin.SimpleListFilter):
    """Admin list filter: contacts by town, narrowed by a selected state."""
    title = _('City')
    parameter_name = 'city'
    def lookups(self, request, model_admin):
        """Return distinct non-empty towns, restricted to the chosen state if any."""
        items = []
        state = request.GET.get('state', None)
        unique_list = PostalAddressForContact.objects.all().order_by('town')
        adjusted_queryset = unique_list if state is None else unique_list.filter(state=state)
        for a in adjusted_queryset.values('town').distinct():
            if a['town']:
                items.append((a['town'], _(a['town'])))
        return (
            items
        )
    def queryset(self, request, queryset):
        """Restrict to contacts having a postal address in the selected town."""
        if self.value():
            matching_addresses = PostalAddressForContact.objects.filter(town=self.value())
            ids = [(a.person.id) for a in matching_addresses]
            return queryset.filter(pk__in=ids)
        return queryset
| scaphilo/koalixcrm | koalixcrm/crm/contact/contact.py | Python | bsd-3-clause | 9,499 | [
"VisIt"
] | 5770b153a8bf8a2610cb0b1bfc7512fefc4aa6d017da55351af0ffa896d27e3b |
"""
Utilities for downloading or generating datasets, splitting data, and computing accuracy metrics.
"""
import os
import numpy as np
import scipy.stats as st
import requests
from io import BytesIO
from equadratures.scalers import scaler_minmax
def gen_linear(n_observations=100, n_dim=5, n_relevent=5,bias=0.0, noise=0.0, random_seed=None):
    """ Generate a synthetic linear dataset for regression.

    Data is generated using a random linear regression model with ``n_relevent`` input dimensions.
    The remaining dimensions are "irrelevent" noise i.e. they do not affect the output.
    Gaussian noise with standard deviation ``noise`` is added.

    Parameters
    ----------
    n_observations : int, optional
        The number of observations (samples).
    n_dim : int, optional
        The total number of dimensions.
    n_relevent : int, optional
        The number of relevent input dimensions, i.e., the number of features used to build the linear model used to generate the output.
    bias : float, optional
        The bias term in the underlying linear model.
    noise : float, optional
        The standard deviation of the gaussian noise applied to the output.
    random_seed : int, optional
        Random number generator seed.

    Returns
    -------
    tuple
        Tuple (X,y) containing two numpy.ndarray's; One with shape (n_observations,n_dim) containing the inputs,
        and one with shape (n_observations,1) containing the outputs/targets.
    """
    # Generate input data
    n_relevent = min(n_dim, n_relevent)
    # NOTE(review): lexicographic version compare — e.g. '1.9' >= '1.17' is
    # True even though numpy 1.9 predates default_rng; fine for modern numpy
    # but a numeric comparison would be safer.
    if np.__version__ >= '1.17':
        generator = np.random.default_rng(random_seed)
    else:
        generator = np.random.RandomState(random_seed)
    X = generator.standard_normal((n_observations,n_dim))
    # scaler_minmax presumably rescales columns to a fixed range — see
    # equadratures.scalers for the exact bounds.
    X = scaler_minmax().transform(X)
    # Generate the truth model with n_relevent input dimensions; trailing
    # coefficients stay zero so those inputs are irrelevant.
    truth_model = np.zeros((n_dim, 1))
    # truth_model[:n_relevent, :] = generator.standard_normal((n_relevent,1))
    truth_model[:n_relevent, :] = generator.uniform(-1,1,n_relevent).reshape(-1,1)
    y = scaler_minmax().transform(np.dot(X, truth_model)) + bias
    # Add noise
    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)
    return X, y
def gen_friedman(n_observations=100, n_dim=5, noise=0.0, random_seed=None,normalise=False):
    """ Generates the friedman regression problem described by Friedman [1] and Breiman [2].

    Inspired by :obj:`sklearn.datasets.make_friedman1`. The function has ``n_dim=5``, and choosing ``n_dim>5`` adds irrelevent input dimensions.
    Gaussian noise with standard deviation ``noise`` is added.

    Parameters
    ----------
    n_observations : int, optional
        The number of observations (samples).
    n_dim : int, optional
        The total number of dimensions. n_dim>=5, with n_dim>5 adding irrelevent input dimensions.
    noise : float, optional
        The standard deviation of the gaussian noise applied to the output.
    random_seed : int, optional
        Random number generator seed.
    normalise : bool, optional
        Normalise y to lie between -1 to 1.

    Returns
    -------
    tuple
        Tuple (X,y) containing two numpy.ndarray's; One with shape (n_observations,n_dim) containing the inputs,
        and one with shape (n_observations,1) containing the outputs/targets.

    References
    ----------
    1. J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991.
    2. L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996.
    """
    if n_dim < 5:
        raise ValueError("n_dim must be at least five.")
    # NOTE(review): lexicographic version compare (see gen_linear) — safe for
    # modern numpy releases only.
    if np.__version__ >= '1.17':
        generator = np.random.default_rng(random_seed)
    else:
        generator = np.random.RandomState(random_seed)
    X = generator.standard_normal((n_observations,n_dim))
    X = scaler_minmax().transform(X)
    # Friedman #1 function: only the first five columns influence y.
    y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
        + 10 * X[:, 3] + 5 * X[:, 4]
    # Noise is scaled relative to the signal's standard deviation.
    y+= noise*np.std(y)*generator.standard_normal(n_observations)
    if normalise:
        y = scaler_minmax().transform(y.reshape(-1,1))
    return X,y
def load_eq_dataset(dataset,data_dir=None,verbose=True):
    """
    Loads the requested dataset from the `equadratures dataset repository <https://github.com/Effective-Quadratures/data-sets>`__.

    Visit the aforementioned repo for a description of the available datasets.
    The requested dataset can either be downloaded directly upon request, or to minimise downloads the repo can be cloned
    once by the user, and the local repo directory can be given via ``data_dir`` (see examples).

    Parameters
    ----------
    dataset : str
        The dataset to download. Options are ```naca0012```, ```blade_envelopes```, ```probes```, ```3Dfan_blades```.
    data_dir : str, optional
        Directory name where a local clone of the data-sets repo is located. If given, the dataset will be loaded from here
        instead of downloading from the remote repo.
    verbose: bool, optional
        Option to print verbose messages to screen.

    Returns
    -------
    NpzFile
        NpzFile instance (see `numpy.lib.format <https://numpy.org/devdocs/reference/generated/numpy.lib.format.html#module-numpy.lib.format>`__)
        containing the dataset. Contents can be accessed in the usual way e.g. ``X = NpzFile['X']``.

    Examples
    --------
    Loading from remote repository
        >>> # Load the naca0012 aerofoil dataset
        >>> data = eq.datasets.load_eq_dataset('naca0012')
        >>> print(data.files)
        ['X', 'Cp', 'Cl', 'Cd']
        >>> X = data['X']
        >>> y = data['Cp']

    Loading from a locally cloned repository
        >>> git clone https://github.com/Effective-Quadratures/data-sets.git
        >>> data = eq.datasets.load_eq_dataset('naca0012', data_dir='/Users/user/Documents/data-sets')
    """
    # Check if valid dataset
    datasets = ['naca0012','blade_envelopes','probes', '3Dfan_blades']
    if dataset not in datasets:
        raise ValueError('dataset specified in load_eq_dataset not recognised, avaiable datasets: ', datasets)

    # Download from github
    if data_dir is None:
        print('Downloading the ' + dataset + ' dataset from github...')
        # URLs are joined with '/' explicitly: os.path.join (used previously)
        # would produce backslash separators on Windows.
        # .npz file
        git_url = '/'.join(['https://github.com/Effective-Quadratures/data-sets/raw/main', dataset, dataset + '.npz'])
        try:
            r = requests.get(git_url,stream=True)
            r.raise_for_status()
            data = np.load(BytesIO(r.raw.read()))
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)
        # .md file
        git_url = '/'.join(['https://raw.githubusercontent.com/Effective-Quadratures/data-sets/main', dataset, 'README.md'])
        try:
            r = requests.get(git_url)
            r.raise_for_status()
            if verbose: print('\n',r.text)
        except requests.exceptions.RequestException as e:
            raise SystemExit(e)

    # If the user has cloned the data-sets repo and provided its location in data_dir
    else:
        print('Loading the dataset from ', data_dir)
        data = np.load(os.path.join(data_dir,dataset,dataset+'.npz'))
        # Use a context manager so the README handle is always closed
        # (the original opened it and never closed it).
        with open(os.path.join(data_dir, dataset, 'README.md')) as f:
            if verbose: print(f.read())
    return data
def train_test_split(X,y,train=0.7,random_seed=None,shuffle=True):
    """ Split arrays or matrices into random train and test subsets. Inspired by :obj:`sklearn.model_selection.train_test_split`.

    Parameters
    ----------
    X : numpy.ndarray
        Array with shape (n_observations,n_dim) containing the inputs.
    y : numpy.ndarray
        Array with shape (n_observations,1) containing the outputs/targets.
    train : float, optional
        Fraction between 0.0 and 1.0, representing the proportion of the dataset to include in the train split.
    random_seed : int, optional
        Seed for random number generator.
    shuffle : bool, optional
        Whether to shuffle the rows of data when spliting.

    Returns
    -------
    tuple
        Tuple (X_train, X_test, y_train, y_test) containing the split data, output as numpy.ndarray's.

    Raises
    ------
    ValueError
        If X and y row counts differ, or ``train`` is not strictly between 0 and 1.

    Example
    -------
    >>> X_train, X_test, y_train, y_test = eq.datasets.train_test_split(X, y,
    >>>                                    train=0.8, random_seed = 42)
    """
    if X.shape[0] == y.shape[0]:
        n_observations = X.shape[0]
    else:
        raise ValueError("X and y have different numbers of rows")
    # Bug fix: the original tested ``train > 0 or train < 1``, which is true
    # for every value, so invalid fractions were silently accepted and the
    # ValueError branch was unreachable.
    if 0.0 < train < 1.0:
        n_train = int(train*n_observations)
    else:
        raise ValueError("train should be between 0 and 1")
    if shuffle:
        # NOTE(review): lexicographic version compare; fine for numpy >= 1.17.
        if np.__version__ >= '1.17':
            generator = np.random.default_rng(random_seed)
        else:
            generator = np.random.RandomState(random_seed)
        idx = generator.permutation(n_observations)
    else:
        idx = np.arange(n_observations)
    # First n_train (possibly shuffled) rows go to train, the rest to test.
    idx_train, idx_test = idx[:n_train], idx[n_train:]
    return X[idx_train], X[idx_test], y[idx_train], y[idx_test]
def score(y_true,y_pred,metric='r2',X=None):
    """ Evaluate an accuracy/error score between predictions and the truth.

    Parameters
    ----------
    y_true : numpy.ndarray
        Array with shape (number_of_observations, 1) containing the true data.
    y_pred : numpy.ndarray
        Array with shape (number_of_observations, 1) containing predictions.
    metric : str, optional
        One of ```r2```, ```adjusted_r2```, ```mae```, ```normalised_mae``` or ```rmse```.
    X : numpy.ndarray
        Input data associated with **y_pred**; required for ```adjusted_r2```.

    Returns
    -------
    float
        The accuracy or error score.
    """
    truth = y_true.flatten()
    pred = y_pred.flatten()
    if metric == 'r2':
        # Squared Pearson correlation via a linear regression fit.
        return st.linregress(truth, pred)[2] ** 2
    if metric == 'adjusted_r2':
        if X is None:
            raise ValueError('Must specify X in _score if adjusted_r2 metric used')
        N, d = X.shape
        r2 = st.linregress(truth, pred)[2] ** 2
        # Penalise r2 for the number of input dimensions d.
        return 1.0 - (((1-r2)*(N-1))/(N-d-1))
    if metric == 'mae':
        return np.mean(np.abs(truth - pred))
    if metric == 'normalised_mae':
        return np.mean(np.abs(truth - pred)) / np.std(truth)
    if metric == 'rmse':
        return np.sqrt(np.mean((truth - pred) ** 2))
    raise ValueError('Only r2, adjusted_r2, mae, normalised_mae, rmse scoring metrics currently supported')
| Effective-Quadratures/Effective-Quadratures | equadratures/datasets.py | Python | lgpl-2.1 | 10,747 | [
"Gaussian",
"VisIt"
] | 425e35ba940240977fb1d6e865c5e1d74e8e3c316465b1e2d824f62a2b81088a |
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates how to set up a Turing pattern in 1-D using
# reaction diffusion calculations. The runtime is kept short so that the
# pattern doesn't make it all the way to the end of the system.
import math
import pylab
import numpy
import moose
import proto18
def loadElec():
    """Build the channel/synapse prototype library and load the cell model.

    Creates /library, populates it with the proto18 prototype channels and
    synapses, then loads the 'ca1_asym.p' cell morphology under /model/elec.
    Returns the Id of the loaded cell model.
    """
    library = moose.Neutral( '/library' )
    moose.setCwe( '/library' )
    proto18.make_Ca()
    proto18.make_Ca_conc()
    proto18.make_K_AHP()
    proto18.make_K_C()
    proto18.make_Na()
    proto18.make_K_DR()
    proto18.make_K_A()
    proto18.make_glu()
    proto18.make_NMDA()
    proto18.make_Ca_NMDA()
    proto18.make_NMDA_Ca_conc()
    proto18.make_axon()
    model = moose.element( '/model' )
    cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "Neutral" )
    return cellId
def makeChemModel( cellId ):
    """Set up the reaction-diffusion chemistry on the loaded cell.

    Builds a NeuroMesh over cellId, creates pools a, b, s with the
    enzyme/reaction network of an activator-inhibitor (Turing-type)
    system, assigns diffusion constants, attaches Ksolve/Dsolve/Stoich
    solvers, and seeds initial concentration bumps of 'a' along the cell.
    Python 2 code (uses print statements).
    """
    # create container for model
    r0 = 1e-6 # m
    r1 = 1e-6 # m
    num = 2800
    diffLength = 1e-6 # m
    diffConst = 5e-12 # m^2/sec
    motorRate = 1e-6 # m/sec
    concA = 1 # millimolar
    model = moose.element( '/model' )
    compartment = moose.NeuroMesh( '/model/compartment' )
    compartment.cell = cellId
    compartment.diffLength = diffLength
    print "cell NeuroMesh parameters: numSeg and numDiffCompt: ", compartment.numSegments, compartment.numDiffCompts
    print "compartment.numDiffCompts == num: ", compartment.numDiffCompts, num
    assert( compartment.numDiffCompts == num )
    # create molecules and reactions
    a = moose.Pool( '/model/compartment/a' )
    b = moose.Pool( '/model/compartment/b' )
    s = moose.Pool( '/model/compartment/s' )
    e1 = moose.MMenz( '/model/compartment/e1' )
    e2 = moose.MMenz( '/model/compartment/e2' )
    e3 = moose.MMenz( '/model/compartment/e3' )
    r1 = moose.Reac( '/model/compartment/r1' )
    # e1: a catalyses s -> a (autocatalytic activator production).
    moose.connect( e1, 'sub', s, 'reac' )
    moose.connect( e1, 'prd', a, 'reac' )
    moose.connect( a, 'nOut', e1, 'enzDest' )
    e1.Km = 1
    e1.kcat = 1
    # e2: a catalyses s -> b (activator drives inhibitor production).
    moose.connect( e2, 'sub', s, 'reac' )
    moose.connect( e2, 'prd', b, 'reac' )
    moose.connect( a, 'nOut', e2, 'enzDest' )
    e2.Km = 1
    e2.kcat = 0.5
    # e3: b catalyses a -> s (inhibitor removes activator).
    moose.connect( e3, 'sub', a, 'reac' )
    moose.connect( e3, 'prd', s, 'reac' )
    moose.connect( b, 'nOut', e3, 'enzDest' )
    e3.Km = 0.1
    e3.kcat = 1
    # r1: b decays back to substrate s.
    moose.connect( r1, 'sub', b, 'reac' )
    moose.connect( r1, 'prd', s, 'reac' )
    r1.Kf = 0.3 # 1/sec
    r1.Kb = 0 # 1/sec
    # Assign parameters: inhibitor b diffuses 10x faster than activator a.
    a.diffConst = diffConst/10
    b.diffConst = diffConst
    s.diffConst = 0
    #b.motorConst = motorRate
    # Make solvers
    ksolve = moose.Ksolve( '/model/compartment/ksolve' )
    dsolve = moose.Dsolve( '/model/compartment/dsolve' )
    stoich = moose.Stoich( '/model/compartment/stoich' )
    stoich.compartment = compartment
    #ksolve.numAllVoxels = compartment.numDiffCompts
    stoich.ksolve = ksolve
    stoich.dsolve = dsolve
    stoich.path = "/model/compartment/##"
    assert( dsolve.numPools == 3 )
    # Seed periodic bumps of activator along the cell to nucleate patterns.
    a.vec.concInit = [0.1]*num
    a.vec[0].concInit += 0.5
    a.vec[400].concInit += 0.5
    a.vec[800].concInit += 0.5
    a.vec[1200].concInit += 0.5
    a.vec[1600].concInit += 0.5
    a.vec[2000].concInit += 0.5
    a.vec[2400].concInit += 0.5
    #a.vec[num/2].concInit -= 0.1
    b.vec.concInit = [0.1]*num
    s.vec.concInit = [1]*num
def displayPlots( num ):
    """Dump concentrations of pools a and b to stdout in xplot format.

    Each series is preceded by '/newplot' and a '/plotname' tagged with
    *num* (the dump index).  Python 2 code (print statements).  The
    trailing triple-quoted block is disabled pylab plotting code kept as
    inert text.
    """
    a = moose.element( '/model/compartment/a' )
    b = moose.element( '/model/compartment/b' )
    print '/newplot\n/plotname a' + str(num)
    for x in a.vec.conc:
        print x
    print '/newplot\n/plotname b' + str(num)
    for y in b.vec.conc:
        print y
    """
    print '/newplot\n/plotname bvol'
    for z in a.vec.volume:
        print z * 1e18
    print '/newplot\n/plotname aInit'
    for x in a.vec.concInit:
        print x
    pos = numpy.arange( 0, a.vec.conc.size, 1.0 )
    aconc = numpy.array( a.vec.conc )
    bconc = numpy.array( b.vec.conc )
    print "pos a, b = ", pos, b.vec.conc.size
    pylab.plot( pos[:100], aconc[:100], label='a' )
    pylab.plot( pos[:100], bconc[:100], label='b' )
    #pylab.plot( pos, a.vec.conc, label='a' )
    #pylab.plot( pos, b.vec.conc, label='b' )
    print "plotting done"
    pylab.legend()
    print "legend done"
    pylab.show()
    print "show done"
    """
# Entry point: build the model, schedule the solvers, run, and dump plots.
def main():
# Total simulated time (seconds) and solver timesteps.
runtime = 400
dt4 = 0.02 # for the diffusion
dt5 = 0.2 # for the reaction
# Set up clocks. The dsolve must be scheduled before stoich is assigned.
moose.setClock( 4, dt4 )
moose.setClock( 5, dt5 )
model = moose.Neutral( '/model' )
# Helpers defined earlier in this file build the cell and its chemistry.
cellId = loadElec()
makeChemModel( cellId )
moose.useClock( 4, '/model/compartment/dsolve', 'process' )
# Ksolve must be scheduled after dsolve.
moose.useClock( 5, '/model/compartment/ksolve', 'process' )
print "finished loading"
moose.reinit()
# Run in ten slices, dumping the concentration profiles after each.
for i in range( 10 ):
moose.start( runtime / 10 ) # Advance one slice (runtime/10 seconds).
# print 'done', i
displayPlots( i )
print "finished running"
# The two triple-quoted blocks below are disabled diagnostics kept for
# reference: per-pool totals, and raw dsolve n-vectors.
"""
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
s = moose.element( '/model/compartment/s' )
atot = sum( a.vec.conc )
btot = sum( b.vec.conc )
stot = sum( s.vec.conc )
print "a = ", a.vec.conc
print "b = ", b.vec.conc
print "s = ", s.vec.conc
print 'tot = ', atot, btot, atot + btot + stot
displayPlots()
"""
"""
dsolve = moose.element( '/model/compartment/dsolve' )
print '**************** dsolve.nvecs'
x = dsolve.nVec[0]
print dsolve.numPools, x, sum(x)
print dsolve.nVec[1], sum( dsolve.nVec[1] )
print dsolve.nVec[2], sum( dsolve.nVec[2] )
print dsolve.nVec[3], sum( dsolve.nVec[3] )
"""
quit()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| dilawar/moose-full | moose-examples/snippets/MULTI/TuringInNeuron.py | Python | gpl-2.0 | 7,210 | [
"MOOSE"
] | 80ed190b4a5932c6a6abf301f4a44a688f7c86cef2c786e9cbae01e06fe00e5d |
"""
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import bobo
import bobo.db
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the Alexa 'response' payload: speech, card, reprompt, end flag."""
    card_prefix = "SessionSpeechlet - "
    speech = {'type': 'PlainText', 'text': output}
    card = {
        'type': 'Simple',
        'title': card_prefix + title,
        'content': card_prefix + output,
    }
    reprompt = {'outputSpeech': {'type': 'PlainText', 'text': reprompt_text}}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': reprompt,
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet response in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the launch-time greeting that asks for a favorite color."""
    prompt = "Please tell me your favorite color by saying, " \
             "my favorite color is red"
    speech_output = "Welcome to the Alexa Skills Kit sample. " + prompt
    # Re-prompt with the same instruction if the user is silent or not
    # understood; the session stays open so they can answer.
    reprompt_text = prompt + "."
    return build_response({}, build_speechlet_response(
        "Welcome", speech_output, reprompt_text, False))
def handle_session_end_request():
    """Say goodbye; should_end_session=True terminates the skill session."""
    farewell = "Thank you for trying the Alexa Skills Kit sample. " \
               "Have a nice day! "
    # No reprompt needed: the session ends immediately.
    return build_response({}, build_speechlet_response(
        "Session Ended", farewell, None, True))
def create_favorite_color_attributes(favorite_color):
    """Return the session-attribute dict that stores the user's color."""
    return dict(favoriteColor=favorite_color)
def set_color_in_session(intent, session):
    """Store the Color slot value in the session attributes and confirm it.

    Falls back to a retry prompt when the slot was not filled.
    """
    session_attributes = {}
    slots = intent['slots']
    ask = "You can ask me your favorite color by saying, " \
          "what's my favorite color?"
    if 'Color' not in slots:
        speech_output = "I'm not sure what your favorite color is. " \
                        "Please try again."
        reprompt_text = "I'm not sure what your favorite color is. " \
                        "You can tell me your favorite color by saying, " \
                        "my favorite color is red."
    else:
        favorite_color = slots['Color']['value']
        session_attributes = create_favorite_color_attributes(favorite_color)
        speech_output = ("I now know your favorite color is " +
                        favorite_color + ". " + ask)
        reprompt_text = ask
    # Keep the session open so the user can immediately query the color back.
    return build_response(session_attributes, build_speechlet_response(
        intent['name'], speech_output, reprompt_text, False))
def get_color_from_session(intent, session):
    """Read the stored favorite color back to the user.

    Ends the session when the color is known. reprompt_text stays None:
    if the user does not respond, the session simply ends.
    """
    attributes = session.get('attributes', {})
    if attributes and "favoriteColor" in attributes:
        speech_output = ("Your favorite color is " +
                         attributes['favoriteColor'] + ". Goodbye.")
        should_end_session = True
    else:
        speech_output = ("I'm not sure what your favorite color is. "
                         "You can say, my favorite color is red.")
        should_end_session = False
    return build_response({}, build_speechlet_response(
        intent['name'], speech_output, None, should_end_session))
def intent_get_list(intent, session):
    """Placeholder list reader: report an empty list and end the session."""
    message = 'List is currently empty'
    reprompt = 'List is currently empty. You can say, add coffee to the list.'
    return build_response({}, build_speechlet_response(
        intent['name'], message, reprompt, True))
def intent_set_item(intent, session):
    """Acknowledge the Item slot value as added, then end the session."""
    item = intent['slots']['Item']['value']
    added = '{} added'.format(item)
    reprompt = '{} added. If you want to add another, say add coffee to the list.'.format(item)
    return build_response({}, build_speechlet_response(
        intent['name'], added, reprompt, True))
# --------------- Events ------------------
def on_session_started(session_started_request, session):
    """Log the start of a new session; produces no response."""
    print("on_session_started requestId=" +
          session_started_request['requestId'] +
          ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
    """Handle a LaunchRequest (skill opened without an intent): greet the user."""
    print("on_launch requestId=" + launch_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    # The welcome response is the skill's only launch behavior.
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to its handler by intent name.

    Raises ValueError for unknown intents.
    """
    print("on_intent requestId=" + intent_request['requestId'] +
          ", sessionId=" + session['sessionId'])
    intent = intent_request['intent']
    name = intent['name']
    if name == "GetList":
        return intent_get_list(intent, session)
    if name == "SetItem":
        return intent_set_item(intent, session)
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log session termination; cleanup would go here.

    Not invoked when the skill itself ends the session via
    should_end_session=True.
    """
    print("on_session_ended requestId=" +
          session_ended_request['requestId'] +
          ", sessionId=" + session['sessionId'])
# --------------- Main handler ------------------
def lambda_handler(event, context):
    """AWS Lambda entry point: route by request type.

    Handles LaunchRequest, IntentRequest and SessionEndedRequest; the JSON
    body of the Alexa request arrives in `event`.
    """
    request = event['request']
    session = event['session']
    print("event.session.application.applicationId=" +
          session['application']['applicationId'])
    # Uncomment and fill in your skill's application ID to prevent other
    # skills from invoking this function:
    # if (session['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
| greginvm/groceries-bot | alexa/alexa.py | Python | mit | 8,284 | [
"VisIt"
] | f51cd51042aedafa913af8927f67a5824ee13fa01b4ede87c8d61b9c158875a0 |
import os
import glob
import mdtraj as md
class Everything(object):
    """Degenerate container: membership tests succeed for any value.

    Used as a drop-in replacement for the run whitelist to accept all runs.
    """

    def __contains__(self, other):
        # Every value is a member, so `x in Everything()` is always True.
        return True
# Folding@home run indices worth concatenating; swap in Everything() to
# accept every run.
run_whitelist = [0, 102, 104, 107, 122, 134, 139, 143, 144, 151, 155, 184, 1, 23, 2, 33, 35, 37, 38, 39, 43, 58, 60, 62, 65, 70, 71, 73, 74, 77, 79, 83, 87, 99]
#run_whitelist = Everything()
source_dir = "./nowater/"
out_dir = "./concat_apr30_2/"
# Keep every 2nd frame when loading trajectories.
stride = 2
# Topology used to interpret the .xtc coordinate files.
top = md.load("./system.subset.pdb")
# One frame-000 file marks the start of each run/clone trajectory series.
start_files = glob.glob(source_dir + "/run*-clone*-frame-000.xtc")
for filename in start_files:
print(filename)
# Strip the directory components; assumes source_dir is two levels deep.
filename = filename.split("/")[2]
print(filename)
# Names look like runR-cloneC-frame-GGG.xtc; parse R and C.
run = int(filename.split("-")[0][3:])
if run not in run_whitelist:
continue
clone = int(filename.split("-")[1][5:])
num_gens = len(glob.glob(source_dir + "/run%d-clone%d-frame-*.xtc" % (run, clone)))
print(run, clone, num_gens)
# Load every generation of this run/clone in order and save as one HDF5
# trajectory, skipping outputs that already exist.
filenames = [source_dir + "/run%d-clone%d-frame-%.3d.xtc" % (run, clone, gen) for gen in range(num_gens)]
out_filename = out_dir + "/run%d-clone%d.h5" % (run, clone)
if not os.path.exists(out_filename):
trj = md.load(filenames, top=top, stride=stride)
trj.save(out_filename)
| hainm/MSMs | attic/src/code/fahprocessing/concat.py | Python | gpl-2.0 | 1,157 | [
"MDTraj"
] | c4b3445ad0053ec45018f50990aa3eb20e5096fcafbc62f91e2294ed13dc8bf3 |
#!/usr/bin/env python
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os, sys, argparse
def writeDOX(args, writefile):
    """Append a Doxygen '@example' directive for one input file to args.file_object."""
    entry = "\n/** @example {0}\n */".format(writefile)
    args.file_object.write(entry)
def createDOX(args):
"""Walk the application tree, emit a Doxygen @example entry for every
MOOSE input file (*.i) found, then close and report the output .dox file."""
file_dict = os.walk(args.application)
for dir_name, dir_list, file_list in file_dict:
for single_file in file_list:
# Only MOOSE input files (.i extension) are documented.
if os.path.splitext(dir_name + '/' + single_file)[1] == '.i':
writeDOX(args, single_file)
args.file_object.close()
print 'Wrote to file:', args.application + '/' + args.application.split('/').pop() + '.dox'
def _verifyARGs(args):
"""Validate the --application path. On success, open <app>/<app>.dox for
writing, attach it to the namespace and return it; otherwise exit(1)."""
if os.path.exists(args.application):
# The open file is stashed on the argparse namespace so downstream
# helpers (writeDOX/createDOX) can write to and close it.
args.file_object = open(args.application + '/' + args.application.split('/').pop() + '.dox', 'w')
return args
else:
print 'Path not found:', args.application
sys.exit(1)
def _parseARGs(args=None):
    """Parse command-line options and validate the application path.

    :param args: Optional argv list for testing; None reads sys.argv.
    """
    parser = argparse.ArgumentParser(description='Build dox file for every input file. (Deletes DOX file if present)')
    parser.add_argument('--application', required=True, help='Path to application')
    parsed = parser.parse_args(args)
    return _verifyARGs(parsed)
if __name__ == '__main__':
createDOX(_parseARGs())
| nuclear-wizard/moose | framework/doc/doxygen/create_dox.py | Python | lgpl-2.1 | 1,459 | [
"MOOSE"
] | 6d7493fee84650c1d1f98f876b8d5dc5cca56ad0c48c99afa34416a8514da1f4 |
# FreeCAD init script of the Fem module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
import FreeCAD
# Register the FEM workbench's import/export formats with FreeCAD.
FreeCAD.addExportType("TetGen file (*.poly)", "convert2TetGen")
FreeCAD.addImportType("FEM formats (*.unv *.med *.dat *.bdf)", "Fem")
# VTK result formats are only available when FreeCAD was built with VTK.
if("BUILD_FEM_VTK" in FreeCAD.__cmake__):
FreeCAD.addImportType("FEM results (*.vtk *.vtp *.vts *.vtr *.vtu *.vti)", "Fem")
FreeCAD.addExportType("FEM formats (*.unv *.med *.dat *.inp)", "Fem")
# Solver-specific readers: CalculiX, Abaqus, Z88.
FreeCAD.addImportType("CalculiX result (*.frd)", "ccxFrdReader")
FreeCAD.addImportType("Abaqus file (*.inp)", "FemGui")
FreeCAD.addImportType("Z88 mesh file (*.txt)", "importZ88Mesh")
FreeCAD.addExportType("Z88 mesh file (*.txt)", "importZ88Mesh")
FreeCAD.addImportType("Z88 displacement result file (*.txt)", "z88DispReader")
| timthelion/FreeCAD | src/Mod/Fem/Init.py | Python | lgpl-2.1 | 2,560 | [
"VTK"
] | dbcad54e820ef5ccde90d744fd6aec0137734f9cc315f40433024b618411682d |
from numpy import *;
import kNN
import numpy as np
import matplotlib.pyplot as plt
from mayavi import mlab
'''
@author: Michael Wan
@since: 2014-11-01
'''
def etsIntro():
    """Render a Gaussian-modulated surface z = x*exp(-x^2-y^2) with Mayavi."""
    grid_x, grid_y = np.ogrid[-2:2:20j, -2:2:20j]
    grid_z = grid_x * np.exp(-grid_x ** 2 - grid_y ** 2)
    surface = mlab.surf(grid_x, grid_y, grid_z, warp_scale="auto")
    mlab.axes(xlabel='x', ylabel='y', zlabel='z')
    mlab.outline(surface)
# Show the Mayavi demo surface, then build and inspect the toy kNN data set.
etsIntro()
group, labels = kNN.createDatSet()
print(group)
print(labels)
print(group.shape)
#michael test classify
#training data :
# group = array([[1.0,1.1],[1.0,1.0],[0,0],[0,0.1]])
# labels = ['A','A','B','B']
def classifyTest():
    """Print kNN.classify0 (k=3) labels for a sweep of probe points.

    Uses the module-level `group`/`labels` training data.
    """
    probes = [
        [0, 0], [1, 0], [2, 0], [3, 0],
        [0, 0.1], [1, 0.1], [2, 0.1], [3, 0.1],
        [0, 1], [1, 1], [2, 1], [3, 1],
        [0, 2], [1, 3], [2, 4], [3, 5],
        [0, 0.01], [1, 0.01], [2, 0.01], [3, 0.01],
    ]
    for probe in probes:
        print(kNN.classify0(probe, group, labels, 3))
classifyTest();
# Load the dating example data set and inspect it.
datingDataMat, datingLabels = kNN.file2matrix('datingTestSet2.txt')
print('-------datingDataMat-------------')
print(datingDataMat)
print('-------datingLabels-------------')
print(datingLabels)
# NOTE(review): the next two headers advertise slices [0:20] and [0:-1],
# but the full label list is printed each time.
print('-------datingLabels[0:20]-------------')
print(datingLabels)
print('-------datingLabels[0:-1]-------------')
print(datingLabels)
print(random.rand(4, 4));
# #matplotlib start
# Scatter-plot columns 1 and 2, colored/sized by class label.
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2])
ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2],
15.0 * array(datingLabels), 15.0 * array(datingLabels))
plt.show()
# Run the canned kNN demos: dating classifier, interactive prediction,
# and the handwriting digit classifier (run twice below).
kNN.datingClassTest()
kNN.classifyPerson()
kNN.handwritingClassTest2()
print('-----------kNN.handwritingClassTest2()----------')
print('-----------to see the same training data and test data will still have some incorrect rate.----------')
#to see the same training data and test data will still have some incorrect rate.
kNN.handwritingClassTest2()
| onehao/opensource | pyml/inaction/ch02/knn/mydebug.py | Python | apache-2.0 | 2,717 | [
"Mayavi"
] | 8101b6d383e19294dc42a796fca8413082fbaf503eb2cac941ca5209758d7007 |
"""
Signal
======
The signal module contains all kinds of signal processing related functions.
.. inheritance-diagram:: acoustics.signal
Filtering
*********
.. autoclass:: Filterbank
.. autofunction:: bandpass_filter
.. autofunction:: octave_filter
.. autofunction:: bandpass
.. autofunction:: lowpass
.. autofunction:: highpass
.. autofunction:: octavepass
.. autofunction:: convolve
Windowing
*********
.. autofunction:: window_scaling_factor
.. autofunction:: apply_window
Spectra
*******
Different types of spectra exist.
.. autofunction:: amplitude_spectrum
.. autofunction:: auto_spectrum
.. autofunction:: power_spectrum
.. autofunction:: density_spectrum
.. autofunction:: angle_spectrum
.. autofunction:: phase_spectrum
Frequency bands
***************
.. autoclass:: Band
.. autoclass:: Frequencies
.. autoclass:: EqualBand
.. autoclass:: OctaveBand
.. autofunction:: integrate_bands
.. autofunction:: octaves
.. autofunction:: third_octaves
Hilbert transform
*****************
.. autofunction:: amplitude_envelope
.. autofunction:: instantaneous_phase
.. autofunction:: instantaneous_frequency
Conversion
**********
.. autofunction:: decibel_to_neper
.. autofunction:: neper_to_decibel
Other
*****
.. autofunction:: isolate
.. autofunction:: zero_crossings
.. autofunction:: rms
.. autofunction:: ms
.. autofunction:: normalize
.. autofunction:: ir2fr
.. autofunction:: wvd
"""
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import spdiags
from scipy.signal import butter, lfilter, freqz, filtfilt, sosfilt
import acoustics.octave
#from acoustics.octave import REFERENCE
import acoustics.bands
from scipy.signal import hilbert
from acoustics.standards.iso_tr_25417_2007 import REFERENCE_PRESSURE
from acoustics.standards.iec_61672_1_2013 import (NOMINAL_OCTAVE_CENTER_FREQUENCIES,
NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES)
try:
from pyfftw.interfaces.numpy_fft import rfft
except ImportError:
from numpy.fft import rfft
def bandpass_filter(lowcut, highcut, fs, order=8, output='sos'):
    """Band-pass filter.

    :param lowcut: Lower cut-off frequency
    :param highcut: Upper cut-off frequency
    :param fs: Sample frequency
    :param order: Filter order
    :param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.
    :returns: Returned value depends on `output`.

    A Butterworth filter is used.

    .. seealso:: :func:`scipy.signal.butter`.
    """
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = highcut / nyq
    # Halving is intentional: a band-pass design doubles the prototype order.
    # Use floor division: with the module-level `from __future__ import
    # division`, `order / 2` is a float, which scipy's butter rejects since
    # it requires an integer order.
    output = butter(order // 2, [low, high], btype='band', output=output)
    return output
def bandpass(signal, lowcut, highcut, fs, order=8, zero_phase=False):
    """Filter signal with band-pass filter.

    :param signal: Signal
    :param lowcut: Lower cut-off frequency
    :param highcut: Upper cut-off frequency
    :param fs: Sample frequency
    :param order: Filter order
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)

    A Butterworth filter is used. Filtering is done with second-order sections.

    .. seealso:: :func:`bandpass_filter` for the filter that is used.
    """
    sos = bandpass_filter(lowcut, highcut, fs, order, output='sos')
    # Forward-backward filtering cancels the phase shift at the cost of
    # doubling the effective order.
    apply_filter = _sosfiltfilt if zero_phase else sosfilt
    return apply_filter(sos, signal)
def lowpass(signal, cutoff, fs, order=4, zero_phase=False):
    """Filter signal with low-pass filter.

    :param signal: Signal
    :param cutoff: Cut-off frequency
    :param fs: Sample frequency
    :param order: Filter order
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)

    A Butterworth filter is used. Filtering is done with second-order sections.

    .. seealso:: :func:`scipy.signal.butter`.
    """
    normalized_cutoff = cutoff / (fs / 2.0)
    sos = butter(order, normalized_cutoff, btype='low', output='sos')
    if zero_phase:
        return _sosfiltfilt(sos, signal)
    return sosfilt(sos, signal)
def highpass(signal, cutoff, fs, order=4, zero_phase=False):
    """Filter signal with high-pass filter.

    :param signal: Signal
    :param cutoff: Cut-off frequency
    :param fs: Sample frequency
    :param order: Filter order
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)

    A Butterworth filter is used. Filtering is done with second-order sections.

    .. seealso:: :func:`scipy.signal.butter`.
    """
    normalized_cutoff = cutoff / (fs / 2.0)
    sos = butter(order, normalized_cutoff, btype='high', output='sos')
    if zero_phase:
        return _sosfiltfilt(sos, signal)
    return sosfilt(sos, signal)
def octave_filter(center, fs, fraction, order=8, output='sos'):
    """Fractional-octave band-pass filter.

    :param center: Centerfrequency of fractional-octave band.
    :param fs: Sample frequency
    :param fraction: Fraction of fractional-octave band.
    :param order: Filter order
    :param output: Output type. {'ba', 'zpk', 'sos'}. Default is 'sos'. See also :func:`scipy.signal.butter`.

    A Butterworth filter is used.

    .. seealso:: :func:`bandpass_filter`
    """
    # The band edges come from the fractional-octave band definition.
    band = OctaveBand(center=center, fraction=fraction)
    return bandpass_filter(band.lower[0], band.upper[0], fs, order, output=output)
def octavepass(signal, center, fs, fraction, order=8, zero_phase=True):
    """Filter signal with fractional-octave bandpass filter.

    :param signal: Signal
    :param center: Centerfrequency of fractional-octave band.
    :param fs: Sample frequency
    :param fraction: Fraction of fractional-octave band.
    :param order: Filter order
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)

    A Butterworth filter is used. Filtering is done with second-order sections.

    .. seealso:: :func:`octave_filter`
    """
    sos = octave_filter(center, fs, fraction, order)
    # Note: unlike the other *pass helpers, zero-phase filtering is the
    # default here.
    apply_filter = _sosfiltfilt if zero_phase else sosfilt
    return apply_filter(sos, signal)
def convolve(signal, ltv, mode='full'):
    """
    Perform convolution of signal with linear time-variant system ``ltv``.

    :param signal: Vector representing input signal :math:`u`.
    :param ltv: 2D array where each column represents an impulse response
    :param mode: 'full', 'valid', or 'same'. See :func:`np.convolve` for an explanation of the options.

    The convolution of two sequences is given by

    .. math:: \mathbf{y} = \mathbf{t} \\star \mathbf{u}

    This can be written as a matrix-vector multiplication

    .. math:: \mathbf{y} = \mathbf{T} \\cdot \mathbf{u}

    where :math:`T` is a Toeplitz matrix in which each column represents an impulse response.
    In the case of a linear time-invariant (LTI) system, each column represents a time-shifted copy of the first column.
    In the time-variant case (LTV), every column can contain a unique impulse response, both in values as in size.

    This function assumes all impulse responses are of the same size.
    The input matrix ``ltv`` thus represents the non-shifted version of the Toeplitz matrix.

    .. seealso:: :func:`np.convolve`, :func:`scipy.signal.convolve` and :func:`scipy.signal.fftconvolve` for convolution with LTI system.
    """
    assert(len(signal) == ltv.shape[1])
    n = ltv.shape[0] + len(signal) - 1  # Length of output vector
    un = np.concatenate((signal, np.zeros(ltv.shape[0] - 1)))  # Zero-pad input
    offsets = np.arange(0, -ltv.shape[0], -1)  # Offsets for impulse responses
    Cs = spdiags(ltv, offsets, n, n)  # Sparse representation of IR's.
    out = Cs.dot(un)  # Calculate dot product.
    if mode == 'full':
        return out
    elif mode == 'same':
        # Bug fix: use floor division. With the module-level __future__
        # division import, `/` produced float slice bounds, which raise
        # TypeError on Python 3.
        start = ltv.shape[0] // 2 - 1 + ltv.shape[0] % 2
        stop = len(signal) + ltv.shape[0] // 2 - 1 + ltv.shape[0] % 2
        return out[start:stop]
    elif mode == 'valid':
        start = ltv.shape[0] - 1
        stop = len(signal)
        return out[start:stop]
def ir2fr(ir, fs, N=None):
    """
    Convert impulse response into frequency response. Returns single-sided RMS spectrum.

    :param ir: Impulse response
    :param fs: Sample frequency
    :param N: Blocks

    Calculates the positive frequencies using :func:`np.fft.rfft`.
    Corrections are then applied to obtain the single-sided spectrum.

    .. note:: Single-sided spectrum. Therefore, the amount of bins returned is either N/2 or N/2+1.
    """
    blocks = N if N else ir.shape[-1]
    fr = rfft(ir, n=blocks) / blocks
    f = np.fft.rfftfreq(blocks, 1.0 / fs)
    # Double everything to fold the negative-frequency half in...
    fr *= 2.0
    fr[..., 0] /= 2.0  # ...but the DC bin has no mirror image,
    if not blocks % 2:
        fr[..., -1] /= 2.0  # and neither does fs/2 for even lengths.
    return f, fr
def decibel_to_neper(decibel):
    """
    Convert decibel to neper.

    :param decibel: Value in decibel (dB).
    :returns: Value in neper (Np).

    The conversion factor is :math:`\\ln(10) / 20`.
    """
    factor = np.log(10.0) / 20.0
    return factor * decibel
def neper_to_decibel(neper):
    """
    Convert neper to decibel.

    :param neper: Value in neper (Np).
    :returns: Value in decibel (dB).

    The conversion factor is :math:`20 / \\ln(10)`.
    """
    factor = 20.0 / np.log(10.0)
    return factor * neper
class Frequencies(object):
    """
    Object describing frequency bands.
    """

    def __init__(self, center, lower, upper, bandwidth=None):
        # Center frequencies of the bands.
        self.center = np.asarray(center)
        # Lower band-edge frequencies.
        self.lower = np.asarray(lower)
        # Upper band-edge frequencies.
        self.upper = np.asarray(upper)
        # Bandwidth per band; derived from the edges when not supplied.
        if bandwidth is None:
            self.bandwidth = np.asarray(self.upper) - np.asarray(self.lower)
        else:
            self.bandwidth = np.asarray(bandwidth)

    def __iter__(self):
        # NOTE(review): relies on __getitem__, which only subclasses define.
        for index in range(len(self.center)):
            yield self[index]

    def __len__(self):
        return len(self.center)

    def __str__(self):
        return str(self.center)

    def __repr__(self):
        return "Frequencies({})".format(str(self.center))

    def angular(self):
        """Angular center frequency in radians per second."""
        return 2.0 * np.pi * self.center
class EqualBand(Frequencies):
"""
Equal bandwidth spectrum. Generally used for narrowband data.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, bandwidth=None):
"""
:param center: Vector of center frequencies.
:param fstart: First center frequency.
:param fstop: Last center frequency.
:param nbands: Amount of frequency bands.
:param bandwidth: Bandwidth of bands.
"""
# The parameters are mutually redundant; each branch below derives the
# missing quantities from one sufficient subset.
if center is not None:
# Explicit centers: infer the single spacing and use it as bandwidth.
try:
nbands = len(center)
except TypeError:
# A scalar center means a single band.
center = [center]
nbands = 1
# Round to 3 decimals so float jitter does not break the
# equal-spacing check.
u = np.unique(np.diff(center).round(decimals=3))
n = len(u)
if n == 1:
bandwidth = u
elif n > 1:
raise ValueError("Given center frequencies are not equally spaced.")
else:
# Single band: nothing to infer; caller-supplied bandwidth is kept.
pass
fstart = center[0] #- bandwidth/2.0
fstop = center[-1] #+ bandwidth/2.0
elif fstart is not None and fstop is not None and nbands:
bandwidth = (fstop - fstart) / (nbands-1)
elif fstart is not None and fstop is not None and bandwidth:
nbands = round((fstop - fstart) / bandwidth) + 1
elif fstart is not None and bandwidth and nbands:
fstop = fstart + nbands * bandwidth
elif fstop is not None and bandwidth and nbands:
fstart = fstop - (nbands-1) * bandwidth
else:
raise ValueError("Insufficient parameters. Cannot determine fstart, fstop, bandwidth.")
# fstart/fstop are *center* frequencies; the band edges sit half a
# bandwidth to either side of each center.
center = fstart + np.arange(0, nbands) * bandwidth # + bandwidth/2.0
upper = fstart + np.arange(0, nbands) * bandwidth + bandwidth/2.0
lower = fstart + np.arange(0, nbands) * bandwidth - bandwidth/2.0
super(EqualBand, self).__init__(center, lower, upper, bandwidth)
def __getitem__(self, key):
# Indexing/slicing keeps the bandwidth and rebuilds from the kept centers.
return type(self)(center=self.center[key], bandwidth=self.bandwidth)
def __repr__(self):
return "EqualBand({})".format(str(self.center))
class OctaveBand(Frequencies):
"""Fractional-octave band spectrum.
"""
def __init__(self, center=None, fstart=None, fstop=None, nbands=None, fraction=1, reference=acoustics.octave.REFERENCE):
# Resolve the requested bands to integer band indices relative to the
# reference frequency, from whichever parameter combination was given.
if center is not None:
try:
nbands = len(center)
except TypeError:
# A scalar center means a single band.
center = [center]
center = np.asarray(center)
indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
elif fstart is not None and fstop is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstop+1)
elif fstart is not None and nbands is not None:
nstart = acoustics.octave.index_of_frequency(fstart, fraction=fraction, ref=reference)
indices = np.arange(nstart, nstart+nbands)
elif fstop is not None and nbands is not None:
nstop = acoustics.octave.index_of_frequency(fstop, fraction=fraction, ref=reference)
# NOTE(review): this produces nbands bands *below* fstop, excluding
# the band containing fstop itself — confirm that is intended.
indices = np.arange(nstop-nbands, nstop)
else:
raise ValueError("Insufficient parameters. Cannot determine fstart and/or fstop.")
# Exact center frequencies and band edges follow from the indices.
center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
lower = acoustics.octave.lower_frequency(center, fraction=fraction)
upper = acoustics.octave.upper_frequency(center, fraction=fraction)
bandwidth = upper - lower
nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
super(OctaveBand, self).__init__(center, lower, upper, bandwidth)
self.fraction = fraction
"""Fraction of fractional-octave filter.
"""
self.reference = reference
"""Reference center frequency.
"""
self.nominal = nominal
"""Nominal center frequencies.
"""
def __getitem__(self, key):
# Indexing/slicing rebuilds from the kept centers with the same fraction.
return type(self)(center=self.center[key], fraction=self.fraction, reference=self.reference)
def __repr__(self):
return "OctaveBand({})".format(str(self.center))
def ms(x):
    """Mean value of signal `x` squared.

    :param x: Dynamic quantity.
    :returns: Mean squared of `x`.
    """
    # abs() makes this correct for complex signals too.
    squared_magnitude = np.abs(x) ** 2.0
    return squared_magnitude.mean()
def rms(x):
    """Root mean squared of signal `x`.

    :param x: Dynamic quantity.

    .. math:: x_{rms} = lim_{T \\to \\infty} \\sqrt{\\frac{1}{T} \int_0^T |f(x)|^2 \\mathrm{d} t }

    :seealso: :func:`ms`.
    """
    return ms(x) ** 0.5
def normalize(y, x=None):
    """Normalize power in y to a (standard normal) white noise signal.

    Optionally normalize to the power in signal `x` instead; the mean power
    of a Gaussian with mu=0 and sigma=1 is 1.
    """
    target_power = ms(x) if x is not None else 1.0
    return y * np.sqrt(target_power / ms(y))
## Broken? Caused correlation in auralizations....weird!
def window_scaling_factor(window, axis=-1):
    """
    Calculate window scaling factor.

    :param window: Window.

    When analysing broadband (filtered noise) signals it is common to normalize
    the windowed signal so that it has the same power as the un-windowed one.

    .. math:: S = \\sqrt{\\frac{\\sum_{i=0}^N w_i^2}{N}}
    """
    return np.sqrt(np.mean(np.square(window), axis=axis))
def apply_window(x, window):
    """
    Apply window to signal.

    :param x: Instantaneous signal :math:`x(t)`.
    :param window: Vector representing window.
    :returns: Signal with window applied to it (tail samples that do not
        fill a whole window are dropped).

    .. math:: x_s(t) = x(t) / S

    where :math:`S` is the window scaling factor.

    .. seealso:: :func:`window_scaling_factor`.
    """
    s = window_scaling_factor(window)  # Determine window scaling factor.
    n = len(window)
    # Bug fix: the window count must come from the signal *length*; the
    # original `x // n` floor-divided the sample values elementwise.
    windows = len(x) // n  # Amount of whole windows that fit in x.
    x = x[0:windows * n]  # Truncate final part of signal that does not fit.
    y = np.tile(window, windows)  # Repeat the window across the signal.
    return x * y / s
def amplitude_spectrum(x, fs, N=None):
    """
    Amplitude spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    The amplitude spectrum gives the amplitudes of the sinusoidal the signal is built
    up from, and the RMS (root-mean-square) amplitudes can easily be found by dividing
    these amplitudes with :math:`\\sqrt{2}`.

    The amplitude spectrum is double-sided.
    """
    bins = N if N else x.shape[-1]
    spectrum = np.fft.fft(x, n=bins) / bins
    freqs = np.fft.fftfreq(bins, 1.0 / fs)
    # fftshift puts zero frequency in the middle of both outputs.
    return np.fft.fftshift(freqs), np.fft.fftshift(spectrum, axes=[-1])
def auto_spectrum(x, fs, N=None):
    """
    Auto-spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    The auto-spectrum contains the squared amplitudes of the signal. Squared amplitudes
    are used when presenting data as it is a measure of the power/energy in the signal.

    .. math:: S_{xx} (f_n) = \\overline{X (f_n)} \\cdot X (f_n)

    The auto-spectrum is double-sided.
    """
    freqs, amplitudes = amplitude_spectrum(x, fs, N=N)
    # Conjugate product gives |X|^2; the imaginary part is identically zero.
    return freqs, (amplitudes * amplitudes.conj()).real
def power_spectrum(x, fs, N=None):
    """
    Power spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    The power spectrum, or single-sided autospectrum, contains the squared RMS amplitudes of the signal.

    A power spectrum is a spectrum with squared RMS values. The power spectrum is
    calculated from the autospectrum of the signal.

    .. warning:: Does not include scaling to reference value!

    .. seealso:: :func:`auto_spectrum`
    """
    N = N or x.shape[-1]
    f, a = auto_spectrum(x, fs, N=N)
    half = N // 2
    # Keep the non-negative frequencies only.
    a = a[..., half:]
    f = f[..., half:]
    # Fold the energy of the discarded negative frequencies onto the kept half...
    a *= 2.0
    a[..., 0] /= 2.0  # ...except for the DC component, which has no mirror...
    if N % 2 == 0:
        a[..., -1] /= 2.0  # ...and neither has fs/2 for even N.
    return f, a
def angle_spectrum(x, fs, N=None):
    """
    Phase angle spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    This function returns a single-sided wrapped phase angle spectrum.

    .. seealso:: :func:`phase_spectrum` for unwrapped phase spectrum.
    """
    N = N or x.shape[-1]
    f, a = amplitude_spectrum(x, fs, N)
    half = N // 2
    # Wrapped phase of the non-negative frequencies.
    wrapped = np.angle(a)[..., half:]
    return f[..., half:], wrapped
def phase_spectrum(x, fs, N=None):
    """
    Phase spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    This function returns a single-sided unwrapped phase spectrum.

    .. seealso:: :func:`angle_spectrum` for wrapped phase angle.
    """
    # Fix: previously ``N=None`` was hard-coded here, silently ignoring the
    # caller's requested amount of FFT bins.
    f, a = angle_spectrum(x, fs, N=N)
    return f, np.unwrap(a)
#def power_and_phase_spectrum(x, fs, N=None):
#"""
#Power spectrum and phase of instantaneous signal :math:`x(t)`.
#:param x: Instantaneous signal :math:`x(t)`.
#:param fs: Sample frequency :math:`f_s`.
#:param N: Amount of FFT bins.
#Often one is interested in both the power spectrum and the phase. This function returns the power and a single-sided phase spectrum.
#For an explanation of the power spectrum, see :func:`power_spectrum`.
#"""
#returns f, power, phase
def density_spectrum(x, fs, N=None):
    """
    Density spectrum of instantaneous signal :math:`x(t)`.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency :math:`f_s`.
    :param N: Amount of FFT bins.

    A density spectrum considers the amplitudes per unit frequency.
    Density spectra are used to compare spectra with different frequency resolution as the
    magnitudes are not influenced by the resolution because it is per Hertz. The amplitude
    spectra on the other hand depend on the chosen frequency resolution.
    """
    N = N or x.shape[-1]
    # Scaling by 1/fs gives amplitude per unit frequency.
    density = np.fft.fft(x, n=N) / fs
    freqs = np.fft.fftfreq(N, 1.0 / fs)
    return np.fft.fftshift(freqs), np.fft.fftshift(density)
#def auto_density_spectrum(x, fs, N=None):
#"""
#Auto density spectrum of instantaneous signal :math:`x(t)`.
#"""
#f, d = density_spectrum(x, fs, N=N)
#return f, (d*d.conj()).real
#def power_density_spectrum(x, fs, N=None):
#"""
#Power density spectrum.
#"""
#N = N if N else x.shape[-1]
#f, a = auto_density_spectrum(x, fs, N=N)
#a = a[N//2:]
#f = f[N//2:]
#a *= 2.0
#a[..., 0] /= 2.0 # DC component should not be doubled.
#if not N%2: # if not uneven
#a[..., -1] /= 2.0 # And neither should fs/2 be.
#return f, a
def integrate_bands(data, a, b):
    """
    Reduce frequency resolution of power spectrum. Merges frequency bands by integration.

    :param data: Vector with narrowband powers.
    :param a: Instance of :class:`Frequencies` (source, narrow bands).
    :param b: Instance of :class:`Frequencies` (target, wide bands).

    .. note:: Needs rewriting so that the summation goes over axis=1.
    """
    # Bands that do not carry a ``fraction`` attribute (e.g. EqualBand) are
    # accepted as-is; only fractional-octave ratios are validated.
    try:
        remainder = b.fraction % a.fraction
    except AttributeError:
        pass
    else:
        if remainder:
            raise NotImplementedError("Non-integer ratio of fractional-octaves are not supported.")
    lo, _ = np.meshgrid(b.lower, a.center)
    hi, _ = np.meshgrid(b.upper, a.center)
    _, mid = np.meshgrid(b.center, a.center)
    # Sum every narrowband power into the target band whose edges contain it.
    membership = (lo < mid) & (mid <= hi)
    return (membership * data[..., None]).sum(axis=-2)
def bandpass_frequencies(x, fs, frequencies, order=8, purge=False, zero_phase=False):
    """Apply bandpass filters for frequencies

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param frequencies: Frequencies. Instance of :class:`Frequencies`.
    :param order: Filter order.
    :param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.
    """
    if purge:
        # Drop bands whose upper edge lies above the Nyquist frequency fs/2.
        frequencies = frequencies[frequencies.upper < fs/2.0]
    # One bandpass-filtered copy of the signal per remaining band.
    return frequencies, np.array([bandpass(x, band.lower, band.upper, fs, order, zero_phase=zero_phase) for band in frequencies])
def bandpass_octaves(x, fs, frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
    """Apply 1/1-octave bandpass filters.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param frequencies: Frequencies.
    :param order: Filter order.
    :param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. seealso:: :func:`octavepass`
    """
    # Delegate to the generic fractional-octave implementation with fraction=1.
    return bandpass_fractional_octaves(x, fs, frequencies, fraction=1, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_third_octaves(x, fs, frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES, order=8, purge=False, zero_phase=False):
    """Apply 1/3-octave bandpass filters.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param frequencies: Frequencies.
    :param order: Filter order.
    :param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. seealso:: :func:`octavepass`
    """
    # Delegate to the generic fractional-octave implementation with fraction=3.
    return bandpass_fractional_octaves(x, fs, frequencies, fraction=3, order=order, purge=purge, zero_phase=zero_phase)
def bandpass_fractional_octaves(x, fs, frequencies, fraction=None, order=8, purge=False, zero_phase=False):
    """Apply 1/N-octave bandpass filters.

    :param x: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param frequencies: Frequencies. Either instance of :class:`OctaveBand`, or array along with fs.
    :param fraction: Fraction N of the octave; used only when ``frequencies`` is a raw array.
    :param order: Filter order.
    :param purge: Discard bands of which the upper corner frequency is above the Nyquist frequency.
    :param zero_phase: Prevent phase error by filtering in both directions (filtfilt)
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. seealso:: :func:`octavepass`
    """
    if not isinstance(frequencies, Frequencies):
        # Wrap a raw array of center frequencies in an OctaveBand with the
        # requested fraction.
        frequencies = OctaveBand(center=frequencies, fraction=fraction)
    return bandpass_frequencies(x, fs, frequencies, order=order, purge=purge, zero_phase=zero_phase)
def third_octaves(p, fs, density=False,
                  frequencies=NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES,
                  ref=REFERENCE_PRESSURE):
    """Calculate level per 1/3-octave in frequency domain using the FFT.

    :param p: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param density: Power density instead of power.
    :param frequencies: Nominal center frequencies of the target bands.
    :param ref: Reference value for the level calculation.
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. note:: Based on power spectrum (FFT)

    .. seealso:: :attr:`acoustics.bands.THIRD_OCTAVE_CENTER_FREQUENCIES`

    .. note:: Exact center frequencies are always calculated.
    """
    fob = OctaveBand(center=frequencies, fraction=3)  # Target 1/3-octave bands.
    f, p = power_spectrum(p, fs)
    fnb = EqualBand(f)  # Narrowband frequency axis of the FFT result.
    power = integrate_bands(p, fnb, fob)
    if density:
        # Convert band powers to power per unit frequency.
        power /= (fob.bandwidth/fnb.bandwidth)
    # Level re. the squared reference value (power is amplitude squared).
    level = 10.0*np.log10(power / ref**2.0)
    return fob, level
def octaves(p, fs, density=False,
            frequencies=NOMINAL_OCTAVE_CENTER_FREQUENCIES,
            ref=REFERENCE_PRESSURE):
    """Calculate level per 1/1-octave in frequency domain using the FFT.

    :param p: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param density: Power density instead of power.
    :param frequencies: Frequencies.
    :param ref: Reference value.
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. note:: Based on power spectrum (FFT)

    .. seealso:: :attr:`acoustics.bands.OCTAVE_CENTER_FREQUENCIES`

    .. note:: Exact center frequencies are always calculated.
    """
    fob = OctaveBand(center=frequencies, fraction=1)  # Target 1/1-octave bands.
    f, p = power_spectrum(p, fs)
    fnb = EqualBand(f)  # Narrowband frequency axis of the FFT result.
    power = integrate_bands(p, fnb, fob)
    if density:
        # Convert band powers to power per unit frequency.
        power /= (fob.bandwidth/fnb.bandwidth)
    # Level re. the squared reference value (power is amplitude squared).
    level = 10.0*np.log10(power / ref**2.0)
    return fob, level
def fractional_octaves(p, fs, start=5.0, stop=16000.0, fraction=3, density=False):
    """Calculate level per 1/N-octave in frequency domain using the FFT. N is `fraction`.

    :param p: Instantaneous signal :math:`x(t)`.
    :param fs: Sample frequency.
    :param start: Lowest frequency of interest.
    :param stop: Highest frequency of interest.
    :param fraction: Fraction N of the octave.
    :param density: Power density instead of power.
    :returns: Tuple. First element is an instance of :class:`OctaveBand`. The second element an array.

    .. note:: Based on power spectrum (FFT)

    .. note:: This function does *not* use nominal center frequencies.

    .. note:: Exact center frequencies are always calculated.
    """
    fob = OctaveBand(fstart=start, fstop=stop, fraction=fraction)  # Target bands.
    f, p = power_spectrum(p, fs)
    fnb = EqualBand(f)  # Narrowband frequency axis of the FFT result.
    power = integrate_bands(p, fnb, fob)
    if density:
        # Convert band powers to power per unit frequency.
        power /= (fob.bandwidth/fnb.bandwidth)
    # Note: no scaling to a reference value is applied here.
    level = 10.0*np.log10(power)
    return fob, level
class Filterbank(object):
    """
    Fractional-Octave filter bank.

    .. warning:: For high frequencies the filter coefficients are wrong for low frequencies. Therefore, to improve the response for lower frequencies the signal should be downsampled. Currently, there is no easy way to do so within the Filterbank.
    """

    def __init__(self, frequencies, sample_frequency=44100, order=8):
        # Frequencies object (see :class:`Frequencies` and subclasses).
        # It has to provide the attributes ``center``, ``lower`` and ``upper``.
        self.frequencies = frequencies
        # Filter order of the Butterworth filters.
        self.order = order
        # Sample frequency of the signals to be filtered.
        self.sample_frequency = sample_frequency

    @property
    def sample_frequency(self):
        """
        Sample frequency.
        """
        return self._sample_frequency

    @sample_frequency.setter
    def sample_frequency(self, x):
        # No validation is performed; see the disabled check below.
        #if x <= self.center_frequencies.max():
            #raise ValueError("Sample frequency cannot be lower than the highest center frequency.")
        self._sample_frequency = x

    @property
    def filters(self):
        """
        Filters this filterbank consists of.

        Lazily yields one ``(b, a)`` bandpass filter per frequency band.
        """
        fs = self.sample_frequency
        band_edges = zip(self.frequencies.lower, self.frequencies.upper)
        return (bandpass_filter(lo, up, fs, order=self.order, output='ba')
                for lo, up in band_edges)

    def lfilter(self, signal):
        """
        Filter signal with filterbank.

        .. note:: This function uses :func:`scipy.signal.lfilter`.
        """
        return (lfilter(b, a, signal) for b, a in self.filters)

    def filtfilt(self, signal):
        """
        Filter signal with filterbank.
        Returns a list consisting of a filtered signal per filter.

        .. note:: This function uses :func:`scipy.signal.filtfilt` and therefore has a zero-phase response.
        """
        return (filtfilt(b, a, signal) for b, a in self.filters)

    def power(self, signal):
        """
        Power per band in signal.
        """
        bands = self.filtfilt(signal)
        bandwidths = self.frequencies.bandwidth
        # Mean squared value per band, normalized by the band's bandwidth.
        return np.array([(band**2.0).sum() / len(band) / bw
                         for band, bw in zip(bands, bandwidths)])

    def plot_response(self):
        """
        Plot frequency response.

        .. note:: The follow phase response is obtained in case :meth:`lfilter` is used. The method :meth:`filtfilt` results in a zero-phase response.
        """
        fs = self.sample_frequency
        fig = plt.figure()
        ax1 = fig.add_subplot(211)
        ax2 = fig.add_subplot(212)
        for (b, a), fc in zip(self.filters, self.frequencies.center):
            w, h = freqz(b, a, int(fs/2))
            freqs_hz = w / (2.0*np.pi) * fs  # Convert rad/sample to Hz.
            label = str(int(fc))
            ax1.semilogx(freqs_hz, 20.0 * np.log10(np.abs(h)), label=label)
            ax2.semilogx(freqs_hz, np.angle(h), label=label)
        ax1.set_xlabel(r'$f$ in Hz')
        ax1.set_ylabel(r'$|H|$ in dB re. 1')
        ax2.set_xlabel(r'$f$ in Hz')
        ax2.set_ylabel(r'$\angle H$ in rad')
        ax1.legend(loc=5)
        ax2.legend(loc=5)
        ax1.set_ylim(-60.0, +10.0)
        return fig

    def plot_power(self, signal):
        """
        Plot power in signal.
        """
        centers = self.frequencies.center
        levels = 20.0*np.log10(self.power(signal))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.bar(centers, levels)
        ax.set_xlabel('$f$ in Hz')
        ax.set_ylabel('$L$ in dB re. 1')
        ax.set_xscale('log')
        return fig
#class FilterbankFFT(object):
#"""
#Filterbank to filter signal using FFT.
#"""
#def __init__(self, frequencies, sample_frequency=44100):
#self.frequencies = frequencies
#"""
#Frequencies.
#See also :class:`Frequencies` and subclasses.
#"""
#self.sample_frequency = sample_frequency
#def power(self, signal):
#pass
#def plot_power(self, signal):
#pass
def isolate(signals):
    """Isolate signals.

    :param signals: Array of shape N x M where N is the amount of samples and M the amount of signals. Thus, each column is a signal.
    :returns: Array of isolated signals. Each column is a signal.

    Isolate signals using Singular Value Decomposition.
    """
    x = np.asarray(signals)
    # Weight each signal by the total power along the first axis.
    power = (x * x).sum(axis=0)
    weighted = np.tile(power, (len(x), 1)) * x
    # Right singular vectors of the weighted correlation matrix.
    W, s, v = np.linalg.svd(weighted.dot(x.T))
    return v.T
def zero_crossings(data):
    """
    Determine the positions of zero crossings in `data`.

    :param data: Vector
    :returns: Vector with indices of samples *before* the zero crossing.

    A sample of exactly zero counts as non-positive.
    """
    positive = data > 0
    negative = ~positive
    # A crossing is a sign change between two consecutive samples.
    sign_change = (positive[:-1] & negative[1:]) | (negative[:-1] & positive[1:])
    return np.flatnonzero(sign_change)
def amplitude_envelope(signal, fs):
    """Instantaneous amplitude of tone.

    The instantaneous amplitude is the magnitude of the analytic signal.

    .. seealso:: :func:`scipy.signal.hilbert`
    """
    analytic = hilbert(signal)  # Analytic signal via Hilbert transform.
    return np.abs(analytic)
def instantaneous_phase(signal, fs):
    """Instantaneous phase of tone.

    The instantaneous phase is the angle of the analytic signal.
    This function returns a wrapped angle.

    .. seealso:: :func:`scipy.signal.hilbert`
    """
    analytic = hilbert(signal)  # Analytic signal via Hilbert transform.
    return np.angle(analytic)
def instantaneous_frequency(signal, fs):
    """Determine instantaneous frequency of tone.

    The instantaneous frequency can be obtained by differentiating the unwrapped instantaneous phase.

    .. seealso:: :func:`instantaneous_phase`
    """
    unwrapped = np.unwrap(instantaneous_phase(signal, fs))
    # Phase increment per sample, converted from rad/sample to Hz.
    return np.diff(unwrapped) / (2.0 * np.pi) * fs
def wvd(signal, fs, analytic=True):
    """Wigner-Ville Distribution

    :param signal: Signal
    :param fs: Sample frequency
    :param analytic: Use the analytic signal, calculated using Hilbert transform.

    .. math:: W_z(n, \\omega) = 2 \\sum_k z^*[n-k]z[n+k] e^{-j\\omega 2kT}

    Includes positive and negative frequencies.
    """
    signal = np.asarray(signal)
    N = int(len(signal) + len(signal) % 2)  # Even amount of FFT bins.
    length_FFT = N
    if N != len(signal):
        # Fix: np.concatenate takes a *sequence* of arrays; the previous call
        # ``np.concatenate(signal, [0])`` raised for odd-length input.
        signal = np.concatenate((signal, [0]))
    length_time = len(signal)
    if analytic:
        signal = hilbert(signal)
    # Zero-pad on both sides so lags near the edges stay in bounds.
    s = np.concatenate((np.zeros(length_time), signal, np.zeros(length_time)))
    tau = np.arange(0, N // 2)
    # Fix: the lag products of an analytic signal are complex; the previous
    # float64 array silently discarded the imaginary part. Also, ``tau1``
    # below was an undefined name (NameError).
    R = np.zeros((N, length_time), dtype='complex128')
    i = length_time
    for t in range(length_time):
        R[t, tau] = s[i + tau] * s[i - tau].conj()  # In one direction
        R[t, N - (tau + 1)] = R[t, tau + 1].conj()  # And the other direction
        i += 1
    W = np.fft.fft(R, length_FFT) / (2 * length_FFT)
    f = np.fft.fftfreq(N, 1. / fs)
    return f, W.T
def _sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None, method='pad', irlen=None):
"""Filtfilt version using Second Order sections. Code is taken from scipy.signal.filtfilt and adapted to make it work with SOS.
Note that broadcasting does not work.
"""
from scipy.signal import sosfilt_zi
from scipy.signal._arraytools import odd_ext, axis_slice, axis_reverse
x = np.asarray(x)
if padlen is None:
edge = 0
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = sosfilt_zi(sos)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
#zi_shape = [1] * x.ndim
#zi_shape[axis] = zi.size
#zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
# Public API of this module: names exported by ``from ... import *``.
__all__ = ['bandpass',
           'bandpass_frequencies',
           'bandpass_fractional_octaves',
           'bandpass_octaves',
           'bandpass_third_octaves',
           'lowpass',
           'highpass',
           'octavepass',
           'octave_filter',
           'bandpass_filter',
           'convolve',
           'ir2fr',
           'decibel_to_neper',
           'neper_to_decibel',
           'EqualBand',
           'OctaveBand',
           'ms',
           'rms',
           'normalize',
           'window_scaling_factor',
           'apply_window',
           'amplitude_spectrum',
           'auto_spectrum',
           'power_spectrum',
           'angle_spectrum',
           'phase_spectrum',
           'density_spectrum',
           'integrate_bands',
           'octaves',
           'third_octaves',
           'fractional_octaves',
           'Filterbank',
           'isolate',
           'zero_crossings',
           'amplitude_envelope',
           'instantaneous_phase',
           'instantaneous_frequency',
           'wvd',
           ]
| giumas/python-acoustics | acoustics/signal.py | Python | bsd-3-clause | 40,055 | [
"Gaussian"
] | dcf9dcb82dcbeb747a4e022a02782c3888b38ab6122db68f87b72563ef259b85 |
# Raw HTML statement of AOJ problem 0022 ("Maximum Sum Sequence"); test fixture,
# reproduced verbatim (including the original's typos).
p00022 = r"""
<H1>Maximum Sum Sequence</H1>
<p>
Given a sequence of numbers <var>a<sub>1</sub></var>, <var>a<sub>2</sub></var>, <var>a<sub>3</sub></var>, ..., <var>a<sub>n</sub></var>, find the maximum sum of a contiguous subsequence of those numbers. Note that, a subsequence of one element is also a <i>contiquous</i> subsequence.
</p>
<H2>Input</H2>
<p>
The input consists of multiple datasets. Each data set consists of:
<pre>
<var>n</var>
<var>a<sub>1</sub></var>
<var>a<sub>2</sub></var>
.
.
<var>a<sub>n</sub></var>
</pre>
<p>
You can assume that 1 ≤ <var>n</var> ≤ 5000 and -100000 ≤ <var>a<sub>i</sub></var> ≤ 100000.
</p>
<p>
The input end with a line consisting of a single 0.
</p>
<H2>Output</H2>
<p>
For each dataset, print the maximum sum in a line.
</p>
<H2>Sample Input</H2>
<pre>
7
-5
-1
6
4
9
-6
-7
13
1
2
3
2
-2
-1
1
2
3
2
1
-2
1
3
1000
-200
201
0
</pre>
<H2>Output for the Sample Input</H2>
<pre>
19
14
1001
</pre>
"""
# Raw HTML statement of AOJ problem 0023 ("Circles Intersection"); test fixture.
p00023 = r"""
<script type="text/x-mathjax-config">
MathJax.Hub.Config({ tex2jax: { inlineMath: [["$","$"], ["\\(","\\)"]], processEscapes: true }});
</script>
<script type="text/javascript" src="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-MML-AM_CHTML">
</script>
<H1>Circles Intersection</H1>
<p>
You are given circle $A$ with radius $r_a$ and with central coordinate $(x_a, y_a)$ and circle $B$ with radius $r_b$ and with central coordinate $(x_b, y_b)$.
</p>
<p>
Write a program which prints:
</p>
<ul>
<li>"2" if $B$ is in $A$,</li>
<li>"-2" if $A$ is in $B$, </li>
<li>"1" if circumference of $A$ and $B$ intersect, and</li>
<li>"0" if $A$ and $B$ do not overlap.</li>
</ul>
<p>
You may assume that $A$ and $B$ are not identical.
</p>
<H2>Input</H2>
<p>
The input consists of multiple datasets. The first line consists of an integer $N$ ($N \leq 50$), the number of datasets. There will be $N$ lines where each line represents each dataset. Each data set consists of real numbers:<br/>
<br/>
$x_a$ $y_a$ $r_a$ $x_b$ $y_b$ $r_b$<br/>
</p>
<H2>Output</H2>
<p>
For each dataset, print 2, -2, 1, or 0 in a line.
</p>
<H2>Sample Input</H2>
<pre>
2
0.0 0.0 5.0 0.0 0.0 4.0
0.0 0.0 2.0 4.1 0.0 2.0
</pre>
<H2>Output for the Sample Input</H2>
<pre>
2
0
</pre>
"""
# Raw HTML statement of AOJ problem 0130 (Japanese, "列車"/Train); test fixture.
p00130 = r"""
<h1>列車</h1>
<p>
26 両以下の編成の列車があります。それぞれの車両には、英小文字の a から z までの識別記号が付いています。同じ記号が付いている車両はありません。ただし、車両を連結する順番は自由とします。列車の中を車掌が巡回します。車掌は、列車の中を行ったり来たりして巡回するので、同じ車両を何度も通ることがあります。ただし、すべての車両を最低一回は巡回するものとします。また
、巡回をはじめる車両や巡回を終える車両が列車の一番端の車両とは限りません。
</p>
<p>
ある車掌が乗ったすべての列車の巡回記録があります。そこから分かる各列車の編成を先頭車両から出力するプログラムを作成してください。巡回記録は 1 行が 1 つの列車に対応します。各行は、英小文字を 1 文字ずつ <span><-</span> または <span>-></span> で区切った文字列でできています。<span><-</span> は前方の車両への移動、<span>-></span> は後方の車両への移動を表します
。
</p>
<p>
例えば、<span>a->b<-a<-c</span> は車両 a から後方の車両である b に移り、b から前方の a に移り、a から前方の c へ移ったことを表します。この場合の列車の編成は先頭車両から <span>cab</span> となります。
</p>
<H2>Input</H2>
<p>
1行目に巡回記録の個数 <var>n</var> (<var>n</var> ≤ 50)、続く <var>n</var> 行に巡回記録 <var>i</var> を表す文字列 <var>s<sub>i</sub></var> (1024文字までの半角文字列) が与えられます。
</p>
<H2>Output</H2>
<p>
巡回記録 <var>i</var> について、先頭車両からの列車の編成を現す文字列を <var>i</var> 行目に出力してください。
</p>
<H2>Sample Input</H2>
<pre>
4
a->e->c->b->d
b<-c<-a<-d<-e
b->a->c<-a->c->d<-c<-a<-b->a->c->d->e<-d
a->e<-a<-d->a->e<-a<-d<-c->d->a<-d<-c<-b->c->d<-c
</pre>
<H2>Output for the Sample Input</H2>
<pre>
aecbd
edacb
bacde
bcdae
</pre>
"""
# Raw HTML statement of AOJ problem 0352 ("Handsel"); test fixture. Note the
# deliberately malformed ``<bar>b</var>`` tag is part of the fixture.
p00352 = r"""
<H1>Handsel</H1>
<!-- New Year’s gift money -->
<p>
Alice and Brown are brothers in a family and each receives pocket money in celebration of the coming year. They are very close and share the total amount of the money fifty-fifty. The pocket money each receives is a multiple of 1,000 yen.
</p>
<p>
Write a program to calculate each one’s share given the amount of money Alice and Brown received.
</p>
<h2>Input</h2>
<p>
The input is given in the following format.
</p>
<pre>
<var>a</var> <var>b</var>
</pre>
<p>
A line of data is given that contains two values of money: <var>a</var> (1000 ≤ <var>a</var> ≤ 50000) for Alice and <bar>b</var> (1000 ≤ <var>b</var> ≤ 50000) for Brown.
</p>
<h2>Output</h2>
<p>
Output the amount of money each of Alice and Brown receive in a line.
</p>
<h2>Sample Input 1</h2>
<pre>
1000 3000
</pre>
<h2>Sample Output 1</h2>
<pre>
2000
</pre>
"""
# Raw HTML statement of AOJ problem 1950 ("Endless BFS"); test fixture.
p01950 = r"""
<script type="text/x-mathjax-config">
MathJax.Hub.Config({ tex2jax: { inlineMath: [["$","$"], ["\\(","\\)"]], skipTags: ["script","noscript","style","textarea","code"], processEscapes: true }});
</script>
<script type="text/javascript" async src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/MathJax.js?config=TeX-AMS_HTML"></script>
<H1>
Endless BFS
</H1>
<p>
Mr. Endo wanted to write the code that performs breadth-first search (BFS), which is a search algorithm to explore all vertices on an undirected graph. An example of pseudo code of BFS is as follows:
</p>
<pre>
1: $current \leftarrow \{start\_vertex\}$
2: $visited \leftarrow current$
3: while $visited \ne $ the set of all the vertices
4: $found \leftarrow \{\}$
5: for $v$ in $current$
6: for each $u$ adjacent to $v$
7: $found \leftarrow found \cup\{u\}$
8: $current \leftarrow found \setminus visited$
9: $visited \leftarrow visited \cup found$
</pre>
<p>
However, Mr. Endo apparently forgot to manage visited vertices in his code. More precisely, he wrote the following
code:
</p>
<pre>
1: $current \leftarrow \{start\_vertex\}$
2: while $current \ne $ the set of all the vertices
3: $found \leftarrow \{\}$
4: for $v$ in $current$
5: for each $u$ adjacent to $v$
6: $found \leftarrow found \cup \{u\}$
7: $current \leftarrow found$
</pre>
<p>
You may notice that for some graphs, Mr. Endo's program will not stop because it keeps running infinitely. Notice that it does not necessarily mean the program cannot explore all the vertices within finite steps. See example 2 below for more details.Your task here is to make a program that determines whether Mr. Endo's program will stop within finite ste
ps for a given graph in order to point out the bug to him. Also, calculate the minimum number of loop iterations required for the program to stop if it is finite.
</p>
<H2>Input</H2>
<p>
The input consists of a single test case formatted as follows.
</p>
<pre>
$N$ $M$
$U_1$ $V_1$
...
$U_M$ $V_M$
</pre>
<p>
The first line consists of two integers $N$ ($2 \leq N \leq 100,000$) and $M$ ($1 \leq M \leq 100,000$), where $N$ is the number of vertices and $M$ is the number of edges in a given undirected graph, respectively. The $i$-th line of the following $M$ lines consists of two integers $U_i$ and $V_i$ ($1 \leq U_i, V_i \leq N$), which means the vertices $U_i$ and $V_i$ are adjacent in the given graph. The vertex 1 is the start vertex, i.e. $start\_vertex$ in the pseudo codes. You can assume that the given graph also meets the following conditions.
</p>
<ul>
<li>The graph has no self-loop, i.e., $U_i \ne V_i$ for all $1 \leq i \leq M$.</li>
<li>The graph has no multi-edge, i.e., $\{Ui,Vi\} \ne \{U_j,V_j\}$ for all $1 \leq i < j \leq M$.</li>
<li>The graph is connected, i.e., there is at least one path from $U$ to $V$ (and vice versa) for all vertices $1 \leq U, V \leq N$</li>
</ul>
<H2>Output</H2>
<p>
If Mr. Endo's wrong BFS code cannot stop within finite steps for the given input graph, print -1 in a line. Otherwise, print the minimum number of loop iterations required to stop.
</p>
<H2>Sample Input 1</H2>
<pre>
3 3
1 2
1 3
2 3
</pre>
<H2>Output for Sample Input 1</H2>
<pre>
2
</pre>
<H2>Sample Input 2</H2>
<pre>
4 3
1 2
2 3
3 4
</pre>
<H2>Output for Sample Input 2</H2>
<pre>
-1
</pre>
<p>
Transition of $current$ is $\{1\} \rightarrow \{2\} \rightarrow \{1,3\} \rightarrow \{2,4\} \rightarrow \{1,3\} \rightarrow \{2,4\} \rightarrow ... $. Although Mr. Endo's program will achieve to visit all the vertices (in 3 steps), will never become the same set as all the vertices.
</p>
<H2>Sample Input 3</H2>
<pre>
4 4
1 2
2 3
3 4
4 1
</pre>
<H2>Output for Sample Input 3</H2>
<pre>
-1
</pre>
<H2>Sample Input 4</H2>
<pre>
8 9
2 1
3 5
1 6
2 5
3 1
8 4
2 7
7 1
7 4
</pre>
<H2>Output for Sample Input 4</H2>
<pre>
3
</pre>
"""
# Abbreviated/deliberately malformed HTML for AOJ problem 0729; used to test
# extraction of only the relevant sections (note the unclosed <nl>/<li> tags).
p00729_abbr = r"""<h1><font color="#000">Problem B:</font> Analyzing Login/Logout Records</h1>
This shouldn't be included.
<h2>Input</h2>
Example
<p>
<nl>
<li>Unclosed item
</nl>
</p>
<h2>Output</h2>
This shouldn't be included.
"""
# Raw HTML statement of AtCoder problem (abc112/arc-style id p03050); test fixture.
p03050 = r"""
<span class="lang-en">
<p>Score : <var>500</var> points</p>
<div class="part">
<section>
<h3>Problem Statement</h3><p>Snuke received a positive integer <var>N</var> from Takahashi.
A positive integer <var>m</var> is called a <em>favorite number</em> when the following condition is satisfied:</p>
<ul>
<li>The quotient and remainder of <var>N</var> divided by <var>m</var> are equal, that is, <var>\lfloor \frac{N}{m} \rfloor = N \bmod m</var> holds.</li>
</ul>
<p>Find all favorite numbers and print the sum of those.</p>
</section>
</div>
<div class="part">
<section>
<h3>Constraints</h3><ul>
<li>All values in input are integers.</li>
<li><var>1 \leq N \leq 10^{12}</var></li>
</ul>
</section>
</div>
<hr/>
<div class="io-style">
<div class="part">
<section>
<h3>Input</h3><p>Input is given from Standard Input in the following format:</p>
<pre><var>N</var>
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Output</h3><p>Print the answer.</p>
</section>
</div>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 1</h3><pre>8
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 1</h3><pre>10
</pre>
<p>There are two favorite numbers: <var>3</var> and <var>7</var>. Print the sum of these, <var>10</var>.</p>
</section>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 2</h3><pre>1000000000000
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 2</h3><pre>2499686339916
</pre>
<p>Watch out for overflow.</p></section>
</div>
</span>
"""
# Raw HTML statement of AtCoder problem p04019 ("Snuke's trip"); test fixture.
# Note: plain (non-raw) string — it contains no backslash escapes.
p04019 = """
<span class="lang-en">
<p>Score : <var>200</var> points</p>
<div class="part">
<section>
<h3>Problem Statement</h3><p>Snuke lives on an infinite two-dimensional plane. He is going on an <var>N</var>-day trip.
At the beginning of Day <var>1</var>, he is at home. His plan is described in a string <var>S</var> of length <var>N</var>.
On Day <var>i(1 ≦ i ≦ N)</var>, he will travel a positive distance in the following direction:</p>
<ul>
<li>North if the <var>i</var>-th letter of <var>S</var> is <code>N</code></li>
<li>West if the <var>i</var>-th letter of <var>S</var> is <code>W</code></li>
<li>South if the <var>i</var>-th letter of <var>S</var> is <code>S</code></li>
<li>East if the <var>i</var>-th letter of <var>S</var> is <code>E</code></li>
</ul>
<p>He has not decided each day's travel distance. Determine whether it is possible to set each day's travel distance so that he will be back at home at the end of Day <var>N</var>.</p>
</section>
</div>
<div class="part">
<section>
<h3>Constraints</h3><ul>
<li><var>1 ≦ | S | ≦ 1000</var></li>
<li><var>S</var> consists of the letters <code>N</code>, <code>W</code>, <code>S</code>, <code>E</code>.</li>
</ul>
</section>
</div>
<hr/>
<div class="io-style">
<div class="part">
<section>
<h3>Input</h3><p>The input is given from Standard Input in the following format:</p>
<pre><var>S</var>
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Output</h3><p>Print <code>Yes</code> if it is possible to set each day's travel distance so that he will be back at home at the end of Day <var>N</var>. Otherwise, print <code>No</code>.</p>
</section>
</div>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 1</h3><pre>SENW
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 1</h3><pre>Yes
</pre>
<p>If Snuke travels a distance of <var>1</var> on each day, he will be back at home at the end of day <var>4</var>.</p>
</section>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 2</h3><pre>NSNNSNSN
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 2</h3><pre>Yes
</pre>
</section>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 3</h3><pre>NNEW
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 3</h3><pre>No
</pre>
</section>
</div>
<hr/>
<div class="part">
<section>
<h3>Sample Input 4</h3><pre>W
</pre>
</section>
</div>
<div class="part">
<section>
<h3>Sample Output 4</h3><pre>No
</pre></section>
</div>
</span>
"""
# Raw HTML statement of JOI/AOJ problem 0569 (Japanese, "L番目のK番目の数");
# test fixture, including the commented-out subtask section.
p00569 = r"""
<h1>L番目のK番目の数 (LthKthNumber)</h1>
<h2>問題文</h2>
<p>
横一列に並べられた <var>N</var> 枚のカードがある.左から <var>i</var> 枚目(<var>1 ≦ i ≦ N</var>)のカードには,整数 <var>a_i</var> が書かれている.</p>
<p>
JOI 君は,これらのカードを用いて次のようなゲームを行う.連続する <var>K</var> 枚以上のカードの列を選び,次の操作を行う.</p>
<ul>
<li>選んだカードを,書かれている整数が小さい順に左から並べる.</li>
<li>並べたカードのうち,左から <var>K</var> 番目のカードに書かれた整数を紙に書き出す.</li>
<li>選んだカードを,すべて元の位置に戻す.</li>
</ul>
<p>
この操作を,連続する <var>K</var> 枚以上のカードの列すべてに対して行う.すなわち,<var>1 ≦ l ≦ r ≦ N</var> かつ <var>K ≦ r - l + 1</var> を満たすすべての <var>(l,r)</var> について,<var>a_l, a_{l+1}, ..., a_r</var> のうち <var>K</var> 番目に小さな整数を書き出す.</p>
<p>
こうして書き出された整数を,左から小さい順に並べる.並べた整数のうち,左から <var>L</var> 番目のものがこのゲームにおける JOI 君の得点である.JOI 君の得点を求めよ.</p>
<h2>制約</h2>
<ul>
<li><var>1 \leq N \leq 200000</var></li>
<li><var>1 \leq K \leq N</var></li>
<li><var>1 \leq a_i \leq N</var></li>
<li><var>1 \leq L</var></li>
<li>JOI 君が書き出す整数は <var>L</var> 個以上である.</li>
</ul>
<h2>入力・出力</h2>
<p>
<b>入力</b><br>
入力は以下の形式で標準入力から与えられる.<br>
<var>N</var> <var>K</var> <var>L</var><br>
<var>a_1</var> <var>a_2</var> <var>...</var> <var>a_N</var>
</p>
<p>
<b>出力</b><br>
JOI 君の得点を <var>1</var> 行で出力せよ.<br>
<!--
<h2>小課題</h2>
<p>
<b>小課題 1 [6点]</b>
</p>
<ul>
<li><var>N ≦ 100</var></li>
</ul>
<p>
<b>小課題 2 [33点]</b>
</p>
<ul>
<li><var>N ≦ 4000</var></li>
</ul>
<p>
<b>小課題 3 [61点]</b>
</p>
<ul>
<li>追加の制限はない.</li>
</ul>
-->
<h2>入出力例</h2>
<b>入力例 1</b><br>
<pre>
4 3 2
4 3 1 2
</pre>
<b>出力例 1</b><br>
<pre>
3
</pre>
<p>
<var>1 \leq l \leq r \leq N (= 4)</var> かつ <var>K (= 3) \leq r - l + 1</var> を満たす <var>(l,r)</var> は,<var>(1,3), (1,4), (2,4)</var> の <var>3</var> 通りある.</p>
<p>
これらの <var>(l,r)</var> に対し,<var>a_l, a_{l+1}, ..., a_r</var> で <var>3</var> 番目に小さな整数は,それぞれ <var>4, 3, 3</var> である.</p>
<p>
このうち <var>L (= 2)</var> 番目に小さい整数は <var>3</var> なので,JOI 君の得点は <var>3</var> である.同じ整数が複数あるときも,重複して数えることに注意せよ.</p>
<hr>
<b>入力例 2</b><br>
<pre>
5 3 3
1 5 2 2 4
</pre>
<b>出力例 2</b><br>
<pre>
4
</pre>
<p>
JOI 君が書き出す整数は,</p>
<ul>
<li><var>(l,r) = (1,3)</var> に対し <var>5</var></li>
<li><var>(l,r) = (1,4)</var> に対し <var>2</var></li>
<li><var>(l,r) = (1,5)</var> に対し <var>2</var></li>
<li><var>(l,r) = (2,4)</var> に対し <var>5</var></li>
<li><var>(l,r) = (2,5)</var> に対し <var>4</var></li>
<li><var>(l,r) = (3,5)</var> に対し <var>4</var></li>
</ul>
<p>
である.このうち <var>L (= 3)</var> 番目に小さい整数は <var>4</var> である.
</p>
<hr>
<b>入力例 3</b><br>
<pre>
6 2 9
1 5 3 4 2 4
</pre>
<b>出力例 3</b><br>
<pre>
4
</pre>
<hr>
<b>入力例 4</b><br>
<pre>
6 2 8
1 5 3 4 2 4
</pre>
<b>出力例 4</b><br>
<pre>
3
</pre>
"""
| google-research/runtime-error-prediction | core/data/example_problem_descriptions.py | Python | apache-2.0 | 17,198 | [
"VisIt"
] | 0d0f384a56504399892821bc2f2ad579e252c99abe6fb7ca74154ff382124354 |
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import numpy as np
import espressomd
import espressomd.reaction_ensemble
class ReactionEnsembleTest(ut.TestCase):
    """Test the core implementation of the constant pH reaction ensemble."""

    # Number of titratable acid/base pairs and target concentration;
    # together they determine the box volume below.
    N0 = 40
    c0 = 0.00028
    # Particle types: neutral acid HA, deprotonated A-, proton H+.
    type_HA = 0
    type_A = 1
    type_H = 5
    temperature = 1.0
    # choose target alpha not too far from 0.5 to get good statistics in a
    # small number of steps
    pKa_minus_pH = -0.2
    pH = 2
    pKa = pKa_minus_pH + pH
    Ka = 10**(-pKa)
    # Cubic box sized so that N0 particles correspond to concentration c0.
    box_l = (N0 / c0)**(1.0 / 3.0)
    system = espressomd.System(box_l=[box_l, box_l, box_l])
    # NOTE: seeding happens at class-definition time, before any particle
    # positions are drawn below — the order of these statements matters.
    np.random.seed(69)  # make reaction code fully deterministic
    system.cell_system.skin = 0.4
    system.time_step = 0.01
    RE = espressomd.reaction_ensemble.ConstantpHEnsemble(
        temperature=1.0, exclusion_radius=1, seed=44)

    @classmethod
    def setUpClass(cls):
        # Place 2 * N0 particles at random positions, alternating A-/H+
        # types so the system starts fully dissociated.
        cls.system.part.add(
            pos=np.random.random((2 * cls.N0, 3)) * cls.system.box_l,
            type=cls.N0 * [cls.type_A, cls.type_H])
        # Register the dissociation reaction HA <-> A- + H+ with
        # equilibrium constant Ka.
        cls.RE.add_reaction(
            gamma=cls.Ka,
            reactant_types=[cls.type_HA],
            reactant_coefficients=[1],
            product_types=[cls.type_A, cls.type_H],
            product_coefficients=[1, 1],
            default_charges={cls.type_HA: 0, cls.type_A: -1, cls.type_H: +1})
        cls.RE.constant_pH = cls.pH

    @classmethod
    def ideal_alpha(cls, pH):
        # Ideal degree of dissociation from the Henderson-Hasselbalch
        # relation.
        return 1.0 / (1 + 10**(cls.pKa - pH))

    def test_ideal_titration_curve(self):
        """Sampled degree of dissociation must match the ideal curve."""
        N0 = ReactionEnsembleTest.N0
        type_A = ReactionEnsembleTest.type_A
        type_H = ReactionEnsembleTest.type_H
        type_HA = ReactionEnsembleTest.type_HA
        system = ReactionEnsembleTest.system
        RE = ReactionEnsembleTest.RE
        # chemical warmup - get close to chemical equilibrium before we start
        # sampling
        RE.reaction(40 * N0)
        average_NH = 0.0
        average_NHA = 0.0
        average_NA = 0.0
        num_samples = 1000
        for _ in range(num_samples):
            RE.reaction(10)
            average_NH += system.number_of_particles(type=type_H)
            average_NHA += system.number_of_particles(type=type_HA)
            average_NA += system.number_of_particles(type=type_A)
        average_NH /= float(num_samples)
        average_NA /= float(num_samples)
        average_NHA /= float(num_samples)
        average_alpha = average_NA / float(N0)
        # note you cannot calculate the pH via -log10(<NH>/volume) in the
        # constant pH ensemble, since the volume is totally arbitrary and does
        # not influence the average number of protons
        pH = ReactionEnsembleTest.pH
        pKa = ReactionEnsembleTest.pKa
        target_alpha = ReactionEnsembleTest.ideal_alpha(pH)
        rel_error_alpha = abs(average_alpha - target_alpha) / target_alpha
        # relative error
        self.assertLess(
            rel_error_alpha,
            0.015,
            msg="\nDeviation from ideal titration curve is too big for the given input parameters.\n"
            + f" pH: {pH:.2f}"
            + f" pKa: {pKa:.2f}"
            + f" average NH: {average_NH:.1f}"
            + f" average NA: {average_NA:.1f}"
            + f" average NHA: {average_NHA:.1f}"
            + f" average alpha: {average_alpha:.3f}"
            + f" target alpha: {target_alpha:.3f}"
            + f" rel_error: {rel_error_alpha * 100:.1f}%"
        )
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    ut.main()
| fweik/espresso | testsuite/python/constant_pH_stats.py | Python | gpl-3.0 | 4,194 | [
"ESPResSo"
] | d62228cb5b3bea2ea38d080f5ecd3929a9d670de99536ccdd4d5b92313ab41e1 |
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_array_equal, assert_array_almost_equal)
from numpy import random
from numpy.compat import asbytes
import sys
import warnings
class TestSeed(TestCase):
    """Accepted seed forms and rejection of invalid seeds."""

    def test_scalar(self):
        # Scalar seeds at both ends of the 32-bit range produce the
        # known deterministic first draw.
        for seed, expected in ((0, 684), (4294967295, 419)):
            rs = np.random.RandomState(seed)
            assert_equal(rs.randint(1000), expected)

    def test_array(self):
        # Array-like seeds: sequences, ndarrays and one-element lists.
        cases = ((range(10), 468),
                 (np.arange(10), 468),
                 ([0], 973),
                 ([4294967295], 265))
        for seed, expected in cases:
            rs = np.random.RandomState(seed)
            assert_equal(rs.randint(1000), expected)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, -0.5)
        assert_raises(ValueError, np.random.RandomState, -1)

    def test_invalid_array(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, np.random.RandomState, [-0.5])
        for bad_seed in ([-1], [4294967296],
                         [1, 2, 4294967296], [1, -2, 4294967296]):
            assert_raises(ValueError, np.random.RandomState, bad_seed)
class TestBinomial(TestCase):
    """Edge cases of the binomial sampler."""

    def test_n_zero(self):
        # n == 0 must yield 0 successes for every probability in [0, 1]
        # (gh-3480), for scalar n as well as an array of zeros.
        zero_counts = np.zeros(2, dtype='int')
        for prob in (0, .5, 1):
            assert_(random.binomial(0, prob) == 0)
            assert_array_equal(random.binomial(zero_counts, prob),
                               zero_counts)

    def test_p_is_nan(self):
        # A NaN probability is rejected with ValueError (gh-4571).
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
    """Basic behavior of the multinomial sampler and randint intervals."""

    def test_basic(self):
        # Smoke test: a valid probability vector is accepted.
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # Zero-probability categories are allowed.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # randint must respect a fully negative [low, high) interval.
        assert_(-5 <= random.randint(-5, -1) < -1)
        x = random.randint(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))

    def test_size(self):
        # gh-3173: `size` accepts ints, numpy ints, lists, tuples and
        # arrays, but rejects floats with TypeError.
        p = [0.5, 0.5]
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))
        # Use the builtin float: the np.float alias was removed in
        # NumPy 1.24, where it raised AttributeError before multinomial
        # could reject the float size with the intended TypeError.
        assert_raises(TypeError, np.random.multinomial, 1, p,
                      float(1))
class TestSetState(TestCase):
    """get_state/set_state round-trips must replay the stream exactly."""

    def setUp(self):
        self.seed = 1234567890
        self.prng = random.RandomState(self.seed)
        self.state = self.prng.get_state()

    def test_basic(self):
        # Restoring the saved state replays the same integer stream.
        before = self.prng.tomaxint(16)
        self.prng.set_state(self.state)
        after = self.prng.tomaxint(16)
        assert_(np.all(before == after))

    def test_gaussian_reset(self):
        # The cached every-other-Gaussian must be cleared on set_state.
        before = self.prng.standard_normal(size=3)
        self.prng.set_state(self.state)
        after = self.prng.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_gaussian_reset_in_media_res(self):
        # If the state is saved while a Gaussian is cached, restoring it
        # must bring the cached Gaussian back as well.
        self.prng.standard_normal()
        mid_state = self.prng.get_state()
        before = self.prng.standard_normal(size=3)
        self.prng.set_state(mid_state)
        after = self.prng.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_backwards_compatibility(self):
        # Old-style state tuples lacking the cached-Gaussian fields must
        # still be accepted by set_state.
        legacy_state = self.state[:-2]
        x1 = self.prng.standard_normal(size=16)
        self.prng.set_state(legacy_state)
        x2 = self.prng.standard_normal(size=16)
        self.prng.set_state(self.state)
        x3 = self.prng.standard_normal(size=16)
        assert_(np.all(x1 == x2))
        assert_(np.all(x1 == x3))

    def test_negative_binomial(self):
        # Floating-point arguments must be accepted without truncation.
        self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
    """Bounds handling, determinism and dtype behavior of np.random.randint."""

    rfunc = np.random.randint

    # valid integer/boolean types
    itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]

    def test_unsupported_type(self):
        # Floating point dtypes are rejected outright.  The builtin
        # ``float`` replaces the np.float alias removed in NumPy 1.24.
        assert_raises(TypeError, self.rfunc, 1, dtype=float)

    def test_bounds_checking(self):
        # Out-of-range and empty intervals raise ValueError for every
        # supported dtype.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)

    def test_rng_zero_and_extremes(self):
        # Degenerate one-value intervals at the extremes and the midpoint
        # always return that single value.
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
            tgt = (lbnd + ubnd)//2
            assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)

    def test_in_bounds_fuzz(self):
        # Don't use fixed seed
        np.random.seed()
        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)
        # np.bool_ replaces the np.bool alias removed in NumPy 1.24.
        vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_repeatability(self):
        import hashlib
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but np.bool_, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
               'int16': '1b7741b80964bb190c50d541dca1cac1',
               'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'int64': '17db902806f448331b5a758d7d2ee672',
               'int8': '27dd30c4e08a797063dffac2490b0be6',
               'uint16': '1b7741b80964bb190c50d541dca1cac1',
               'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
               'uint64': '17db902806f448331b5a758d7d2ee672',
               'uint8': '27dd30c4e08a797063dffac2490b0be6'}
        for dt in self.itype[1:]:
            np.random.seed(1234)
            # view as little endian for hash
            if sys.byteorder == 'little':
                val = self.rfunc(0, 6, size=1000, dtype=dt)
            else:
                val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianess
        np.random.seed(1234)
        val = self.rfunc(0, 2, size=1000, dtype=np.bool_).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(np.bool_).name] == res)

    def test_respect_dtype_singleton(self):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
            ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertEqual(sample.dtype, np.dtype(dt))
        # The builtins replace the np.bool/np.int/np.long aliases removed
        # from NumPy 1.24 (np.long was itself an alias of int on Python 3,
        # so only the duplicate iteration is dropped).
        for dt in (bool, int):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, dtype=dt)
            self.assertFalse(hasattr(sample, 'dtype'))
            self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
actual = np.random.random_integers(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_(np.random.choice(6, s, replace=True).shape, s)
assert_(np.random.choice(6, s, replace=False).shape, s)
assert_(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = asbytes('\x82Ui\x9e\xff\x97+Wf\xa5')
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
# Hmm... not even symmetric.
cov = [[1, 0], [1, 0]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.47027513018564449, 10.],
[-1.65915081534845532, 10.]],
[[-2.29186329304599745, 10.],
[-1.77505606019580053, 10.]],
[[-0.54970369430044119, 10.],
[0.29768848031692957, 10.]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([-0.79441224511977482, 10.])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance raises warning
mean = [0, 0]
cov = [[1, 1 + 1e-10], [1 + 1e-10, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
# (fmax / 1e17) - fmin is within range, so this should not throw
np.random.uniform(low=fmin, high=fmax / 1e17)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(TestCase):
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    """Broadcasting behaviour of the np.random distribution methods.

    Each test fixes the seed, broadcasts one parameter at a time against
    length-3 lists, checks the draws match the recorded reference values,
    and checks that invalid parameter values raise ValueError even when
    broadcast.
    """

    def setUp(self):
        # fixed seed shared by every test; applied via setSeed()
        self.seed = 123456789

    def setSeed(self):
        np.random.seed(self.seed)

    # TODO: Include test for randint once it can broadcast
    # Can steal the test written in PR #6938

    def test_uniform(self):
        low = [0]
        high = [1]
        uniform = np.random.uniform
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = uniform(low * 3, high)
        assert_array_almost_equal(actual, desired, decimal=14)

        self.setSeed()
        actual = uniform(low, high * 3)
        assert_array_almost_equal(actual, desired, decimal=14)

    def test_normal(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        normal = np.random.normal
        desired = np.array([2.2129019979039612,
                            2.1283977976520019,
                            1.8417114045748335])

        self.setSeed()
        actual = normal(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc * 3, bad_scale)

        self.setSeed()
        actual = normal(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, normal, loc, bad_scale * 3)

    def test_beta(self):
        a = [1]
        b = [2]
        bad_a = [-1]
        bad_b = [-2]
        beta = np.random.beta
        desired = np.array([0.19843558305989056,
                            0.075230336409423643,
                            0.24976865978980844])

        self.setSeed()
        actual = beta(a * 3, b)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a * 3, b)
        assert_raises(ValueError, beta, a * 3, bad_b)

        self.setSeed()
        actual = beta(a, b * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, beta, bad_a, b * 3)
        assert_raises(ValueError, beta, a, bad_b * 3)

    def test_exponential(self):
        scale = [1]
        bad_scale = [-1]
        exponential = np.random.exponential
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = exponential(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, exponential, bad_scale * 3)

    def test_standard_gamma(self):
        shape = [1]
        bad_shape = [-1]
        std_gamma = np.random.standard_gamma
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = std_gamma(shape * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, std_gamma, bad_shape * 3)

    def test_gamma(self):
        shape = [1]
        scale = [2]
        bad_shape = [-1]
        bad_scale = [-2]
        gamma = np.random.gamma
        desired = np.array([1.5221370731769048,
                            1.5277256455738331,
                            1.4248762625178359])

        self.setSeed()
        actual = gamma(shape * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape * 3, scale)
        assert_raises(ValueError, gamma, shape * 3, bad_scale)

        self.setSeed()
        actual = gamma(shape, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gamma, bad_shape, scale * 3)
        assert_raises(ValueError, gamma, shape, bad_scale * 3)

    def test_f(self):
        dfnum = [1]
        dfden = [2]
        bad_dfnum = [-1]
        bad_dfden = [-2]
        f = np.random.f
        desired = np.array([0.80038951638264799,
                            0.86768719635363512,
                            2.7251095168386801])

        self.setSeed()
        actual = f(dfnum * 3, dfden)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum * 3, dfden)
        assert_raises(ValueError, f, dfnum * 3, bad_dfden)

        self.setSeed()
        actual = f(dfnum, dfden * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, f, bad_dfnum, dfden * 3)
        assert_raises(ValueError, f, dfnum, bad_dfden * 3)

    def test_noncentral_f(self):
        dfnum = [2]
        dfden = [3]
        nonc = [4]
        bad_dfnum = [0]
        bad_dfden = [-1]
        bad_nonc = [-2]
        nonc_f = np.random.noncentral_f
        desired = np.array([9.1393943263705211,
                            13.025456344595602,
                            8.8018098359100545])

        self.setSeed()
        actual = nonc_f(dfnum * 3, dfden, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
        assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
        assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)

        self.setSeed()
        actual = nonc_f(dfnum, dfden, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
        assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)

    def test_chisquare(self):
        df = [1]
        bad_df = [-1]
        chisquare = np.random.chisquare
        desired = np.array([0.57022801133088286,
                            0.51947702108840776,
                            0.1320969254923558])

        self.setSeed()
        actual = chisquare(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, chisquare, bad_df * 3)

    def test_noncentral_chisquare(self):
        df = [1]
        nonc = [2]
        bad_df = [-1]
        bad_nonc = [-2]
        nonc_chi = np.random.noncentral_chisquare
        desired = np.array([9.0015599467913763,
                            4.5804135049718742,
                            6.0872302432834564])

        self.setSeed()
        actual = nonc_chi(df * 3, nonc)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
        assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)

        self.setSeed()
        actual = nonc_chi(df, nonc * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
        assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)

    def test_standard_t(self):
        df = [1]
        bad_df = [-1]
        t = np.random.standard_t
        desired = np.array([3.0702872575217643,
                            5.8560725167361607,
                            1.0274791436474273])

        self.setSeed()
        actual = t(df * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, t, bad_df * 3)

    def test_vonmises(self):
        mu = [2]
        kappa = [1]
        bad_kappa = [-1]
        vonmises = np.random.vonmises
        desired = np.array([2.9883443664201312,
                            -2.7064099483995943,
                            -1.8672476700665914])

        self.setSeed()
        actual = vonmises(mu * 3, kappa)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu * 3, bad_kappa)

        self.setSeed()
        actual = vonmises(mu, kappa * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, vonmises, mu, bad_kappa * 3)

    def test_pareto(self):
        a = [1]
        bad_a = [-1]
        pareto = np.random.pareto
        desired = np.array([1.1405622680198362,
                            1.1465519762044529,
                            1.0389564467453547])

        self.setSeed()
        actual = pareto(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, pareto, bad_a * 3)

    def test_weibull(self):
        a = [1]
        bad_a = [-1]
        weibull = np.random.weibull
        desired = np.array([0.76106853658845242,
                            0.76386282278691653,
                            0.71243813125891797])

        self.setSeed()
        actual = weibull(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, weibull, bad_a * 3)

    def test_power(self):
        a = [1]
        bad_a = [-1]
        power = np.random.power
        desired = np.array([0.53283302478975902,
                            0.53413660089041659,
                            0.50955303552646702])

        self.setSeed()
        actual = power(a * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, power, bad_a * 3)

    def test_laplace(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        laplace = np.random.laplace
        desired = np.array([0.067921356028507157,
                            0.070715642226971326,
                            0.019290950698972624])

        self.setSeed()
        actual = laplace(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc * 3, bad_scale)

        self.setSeed()
        actual = laplace(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, laplace, loc, bad_scale * 3)

    def test_gumbel(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        gumbel = np.random.gumbel
        desired = np.array([0.2730318639556768,
                            0.26936705726291116,
                            0.33906220393037939])

        self.setSeed()
        actual = gumbel(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc * 3, bad_scale)

        self.setSeed()
        actual = gumbel(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, gumbel, loc, bad_scale * 3)

    def test_logistic(self):
        loc = [0]
        scale = [1]
        bad_scale = [-1]
        logistic = np.random.logistic
        desired = np.array([0.13152135837586171,
                            0.13675915696285773,
                            0.038216792802833396])

        self.setSeed()
        actual = logistic(loc * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc * 3, bad_scale)

        self.setSeed()
        actual = logistic(loc, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, logistic, loc, bad_scale * 3)

    def test_lognormal(self):
        mean = [0]
        sigma = [1]
        bad_sigma = [-1]
        lognormal = np.random.lognormal
        desired = np.array([9.1422086044848427,
                            8.4013952870126261,
                            6.3073234116578671])

        self.setSeed()
        actual = lognormal(mean * 3, sigma)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean * 3, bad_sigma)

        self.setSeed()
        actual = lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, lognormal, mean, bad_sigma * 3)

    def test_rayleigh(self):
        scale = [1]
        bad_scale = [-1]
        rayleigh = np.random.rayleigh
        desired = np.array([1.2337491937897689,
                            1.2360119924878694,
                            1.1936818095781789])

        self.setSeed()
        actual = rayleigh(scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, rayleigh, bad_scale * 3)

    def test_wald(self):
        mean = [0.5]
        scale = [1]
        bad_mean = [0]
        bad_scale = [-2]
        wald = np.random.wald
        desired = np.array([0.11873681120271318,
                            0.12450084820795027,
                            0.9096122728408238])

        self.setSeed()
        actual = wald(mean * 3, scale)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean * 3, scale)
        assert_raises(ValueError, wald, mean * 3, bad_scale)

        self.setSeed()
        actual = wald(mean, scale * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, wald, bad_mean, scale * 3)
        assert_raises(ValueError, wald, mean, bad_scale * 3)

    def test_triangular(self):
        left = [1]
        right = [3]
        mode = [2]
        bad_left_one = [3]
        bad_mode_one = [4]
        # both entries of right (== [3, 3]) are invalid as left/mode
        bad_left_two, bad_mode_two = right * 2
        triangular = np.random.triangular
        desired = np.array([2.03339048710429,
                            2.0347400359389356,
                            2.0095991069536208])

        self.setSeed()
        actual = triangular(left * 3, mode, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
        assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
        assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)

        self.setSeed()
        actual = triangular(left, mode * 3, right)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
        assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)

        self.setSeed()
        actual = triangular(left, mode, right * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
        assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
        assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)

    def test_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        binom = np.random.binomial
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n * 3, p)
        assert_raises(ValueError, binom, n * 3, bad_p_one)
        assert_raises(ValueError, binom, n * 3, bad_p_two)

        self.setSeed()
        actual = binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, binom, bad_n, p * 3)
        assert_raises(ValueError, binom, n, bad_p_one * 3)
        assert_raises(ValueError, binom, n, bad_p_two * 3)

    def test_negative_binomial(self):
        n = [1]
        p = [0.5]
        bad_n = [-1]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        neg_binom = np.random.negative_binomial
        desired = np.array([1, 0, 1])

        self.setSeed()
        actual = neg_binom(n * 3, p)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n * 3, p)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
        assert_raises(ValueError, neg_binom, n * 3, bad_p_two)

        self.setSeed()
        actual = neg_binom(n, p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, neg_binom, bad_n, p * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
        assert_raises(ValueError, neg_binom, n, bad_p_two * 3)

    def test_poisson(self):
        # lam above poisson_lam_max must be rejected as well as negative lam
        max_lam = np.random.RandomState().poisson_lam_max

        lam = [1]
        bad_lam_one = [-1]
        bad_lam_two = [max_lam * 2]
        poisson = np.random.poisson
        desired = np.array([1, 1, 0])

        self.setSeed()
        actual = poisson(lam * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, poisson, bad_lam_one * 3)
        assert_raises(ValueError, poisson, bad_lam_two * 3)

    def test_zipf(self):
        a = [2]
        bad_a = [0]
        zipf = np.random.zipf
        desired = np.array([2, 2, 1])

        self.setSeed()
        actual = zipf(a * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, zipf, bad_a * 3)

    def test_geometric(self):
        p = [0.5]
        bad_p_one = [-1]
        bad_p_two = [1.5]
        geom = np.random.geometric
        desired = np.array([2, 2, 2])

        self.setSeed()
        actual = geom(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, geom, bad_p_one * 3)
        assert_raises(ValueError, geom, bad_p_two * 3)

    def test_hypergeometric(self):
        ngood = [1]
        nbad = [2]
        nsample = [2]
        bad_ngood = [-1]
        bad_nbad = [-2]
        bad_nsample_one = [0]
        bad_nsample_two = [4]
        hypergeom = np.random.hypergeometric
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = hypergeom(ngood * 3, nbad, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad * 3, nsample)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
        assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)

        self.setSeed()
        actual = hypergeom(ngood, nbad, nsample * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
        assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)

    def test_logseries(self):
        p = [0.5]
        bad_p_one = [2]
        bad_p_two = [-1]
        logseries = np.random.logseries
        desired = np.array([1, 1, 1])

        self.setSeed()
        actual = logseries(p * 3)
        assert_array_equal(actual, desired)
        assert_raises(ValueError, logseries, bad_p_one * 3)
        assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(TestCase):
    # make sure each state produces the same sequence even in threads
    """Each seeded RandomState must produce the same sequence whether it is
    driven from a worker thread or serially from the main thread."""

    def setUp(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        """Run `function(RandomState(seed), out)` once per seed, both in
        threads and serially, and compare the two result stacks."""
        from threading import Thread

        out1 = np.empty((len(self.seeds),) + sz)
        out2 = np.empty((len(self.seeds),) + sz)

        # threaded generation
        t = [Thread(target=function, args=(np.random.RandomState(s), o))
             for s, o in zip(self.seeds, out1)]
        [x.start() for x in t]
        [x.join() for x in t]

        # the same serial
        for s, o in zip(self.seeds, out2):
            function(np.random.RandomState(s), o)

        # these platforms change x87 fpu precision mode in threads
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(out1, out2)
        else:
            assert_array_equal(out1, out2)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1/6.]*6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
    """Single-element array arguments must yield shape-(1,) outputs.

    Regression coverage for Issue #4263: distributions taking one, two or
    three parameters are called with 1-element arrays (and mixes of scalar
    and array arguments) and the result shape is checked.
    """

    def setUp(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        funcs = (np.random.exponential, np.random.standard_gamma,
                 np.random.chisquare, np.random.standard_t,
                 np.random.pareto, np.random.weibull,
                 np.random.power, np.random.rayleigh,
                 np.random.poisson, np.random.zipf,
                 np.random.geometric, np.random.logseries)

        probfuncs = (np.random.geometric, np.random.logseries)

        for func in funcs:
            if func in probfuncs:  # p < 1.0
                out = func(np.array([0.5]))

            else:
                out = func(self.argOne)

            self.assertEqual(out.shape, self.tgtShape)

    def test_two_arg_funcs(self):
        funcs = (np.random.uniform, np.random.normal,
                 np.random.beta, np.random.gamma,
                 np.random.f, np.random.noncentral_chisquare,
                 np.random.vonmises, np.random.laplace,
                 np.random.gumbel, np.random.logistic,
                 np.random.lognormal, np.random.wald,
                 np.random.binomial, np.random.negative_binomial)

        probfuncs = (np.random.binomial, np.random.negative_binomial)

        for func in funcs:
            if func in probfuncs:  # p <= 1
                argTwo = np.array([0.5])

            else:
                argTwo = self.argTwo

            out = func(self.argOne, argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], argTwo)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, argTwo[0])
            self.assertEqual(out.shape, self.tgtShape)

    # TODO: Uncomment once randint can broadcast arguments
    # def test_randint(self):
    #     itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
    #              np.int32, np.uint32, np.int64, np.uint64]
    #     func = np.random.randint
    #     high = np.array([1])
    #     low = np.array([0])
    #
    #     for dt in itype:
    #         out = func(low, high, dtype=dt)
    #         self.assert_equal(out.shape, self.tgtShape)
    #
    #         out = func(low[0], high, dtype=dt)
    #         self.assert_equal(out.shape, self.tgtShape)
    #
    #         out = func(low, high[0], dtype=dt)
    #         self.assert_equal(out.shape, self.tgtShape)

    def test_three_arg_funcs(self):
        funcs = [np.random.noncentral_f, np.random.triangular,
                 np.random.hypergeometric]

        for func in funcs:
            out = func(self.argOne, self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne[0], self.argTwo, self.argThree)
            self.assertEqual(out.shape, self.tgtShape)

            out = func(self.argOne, self.argTwo[0], self.argThree)
            self.assertEqual(out.shape, self.tgtShape)
# Allow running this test module directly (outside the numpy test runner).
if __name__ == "__main__":
    run_module_suite()
| chiffa/numpy | numpy/random/tests/test_random.py | Python | bsd-3-clause | 59,785 | [
"Gaussian"
] | b2af98a4986b64ae1160f55d27a19ee045054143dad8019692aaee30b7297cb4 |
import unittest, random, sys, time, re, math
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm
import h2o_util, h2o_browse as h2b, h2o_gbm
# use randChars for the random chars to use
def random_enum(randChars, maxEnumSize):
choiceStr = randChars
r = ''.join(random.choice(choiceStr) for x in range(maxEnumSize))
return r
# every ONE_RATIO-th row gets output label 1, all others 0 (skews the binomial)
ONE_RATIO = 100
# number of distinct enum strings generated per column
ENUM_RANGE = 20
# maximum character length of a generated enum string
MAX_ENUM_SIZE = 4
# if True, pick enum indices from a truncated gaussian instead of uniformly
GAUSS_ENUMS = True
def create_enum_list(randChars="012345679", maxEnumSize=MAX_ENUM_SIZE, listSize=ENUM_RANGE):
    """Return listSize random enum strings of length 2..maxEnumSize.

    Duplicates are possible since each entry is drawn independently.
    """
    # okay to have duplicates?
    return [random_enum(randChars, random.randint(2, maxEnumSize))
            for _ in range(listSize)]
def write_syn_dataset(csvPathname, enumList, rowCount, colCount=1, SEED='12345678',
                      colSepChar=",", rowSepChar="\n"):
    """Write a synthetic CSV dataset of enum columns plus a 0/1 output column.

    Each of the colCount feature columns holds a value from enumList; the
    last column is 1 on every ONE_RATIO-th row and 0 otherwise.

    Fixes vs. the original:
    - the seeded generator r1 = random.Random(SEED) was created but never
      used (values came from the global random module), so SEED did not
      make the dataset reproducible; all draws now come from r1.
    - `while not value:` treated a drawn index of 0 as falsy and resampled,
      so enumList[0] could never be selected in the gaussian path; the loop
      now tests `value is None` so index 0 is a valid outcome.
    """
    enumRange = len(enumList)
    r1 = random.Random(SEED)
    dsf = open(csvPathname, "w+")
    for row in range(rowCount):
        rowData = []
        for col in range(colCount):
            if GAUSS_ENUMS:
                # truncated gaussian distribution over enumList indices:
                # resample until the draw lands inside [0, enumRange)
                value = None
                while value is None:
                    value = int(r1.gauss(enumRange/2, enumRange/4))
                    if value < 0 or value >= enumRange:
                        value = None
                rowData.append(enumList[value])
            else:
                rowData.append(r1.choice(enumList))

        # output column: skew the binomial 0/1 distribution by putting a 1
        # in every ONE_RATIO-th row only
        if (row % ONE_RATIO) == 0:
            ri = 1
        else:
            ri = 0
        rowData.append(ri)

        rowDataCsv = colSepChar.join(map(str, rowData)) + rowSepChar
        dsf.write(rowDataCsv)
    dsf.close()
class Basic(unittest.TestCase):
    """GLM2 test on synthetic, unbalanced integer/enum datasets.

    Builds random enum CSVs, trains a binomial GLM on them in an h2o cloud,
    then scores a second dataset generated from a subset of the enums.
    """

    def tearDown(self):
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # bring up a single local JVM, or a multi-host cloud, once per class
        global SEED, localhost
        SEED = h2o.setup_random_seed()
        localhost = h2o.decide_if_localhost()
        if (localhost):
            h2o.build_cloud(1,java_heap_GB=1)
        else:
            h2o_hosts.build_cloud_with_hosts()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_GLM2_ints_unbalanced(self):
        h2o.beta_features = True
        ### h2b.browseTheCloud()
        SYNDATASETS_DIR = h2o.make_syn_dir()

        # (rowCount, colCount, hex_key, timeoutSecs) per trial
        n = 2000
        tryList = [
            (n, 1, 'cD', 300),
            (n, 2, 'cE', 300),
            (n, 4, 'cF', 300),
            (n, 8, 'cG', 300),
            (n, 16, 'cH', 300),
            (n, 32, 'cI', 300),
            ]

        for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
            # using the comma is nice to ensure no craziness
            colSepHexString = '2c' # comma
            colSepChar = colSepHexString.decode('hex')
            colSepInt = int(colSepHexString, base=16)
            print "colSepChar:", colSepChar

            rowSepHexString = '0a' # newline
            rowSepChar = rowSepHexString.decode('hex')
            print "rowSepChar:", rowSepChar

            SEEDPERFILE = random.randint(0, sys.maxint)
            csvFilename = 'syn_enums_' + str(rowCount) + 'x' + str(colCount) + '.csv'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            csvScoreFilename = 'syn_enums_score_' + str(rowCount) + 'x' + str(colCount) + '.csv'
            csvScorePathname = SYNDATASETS_DIR + '/' + csvScoreFilename

            enumList = create_enum_list()
            # use half of the enums for creating the scoring dataset
            enumListForScore = random.sample(enumList,5)

            print "Creating random", csvPathname, "for glm model building"
            write_syn_dataset(csvPathname, enumList, rowCount, colCount, SEEDPERFILE,
                colSepChar=colSepChar, rowSepChar=rowSepChar)

            print "Creating random", csvScorePathname, "for glm scoring with prior model (using enum subset)"
            write_syn_dataset(csvScorePathname, enumListForScore, rowCount, colCount, SEEDPERFILE,
                colSepChar=colSepChar, rowSepChar=rowSepChar)

            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=hex_key,
                timeoutSecs=30, separator=colSepInt)
            print "Parse result['destination_key']:", parseResult['destination_key']

            print "\n" + csvFilename
            (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
                h2o_cmd.columnInfoFromInspect(parseResult['destination_key'], exceptionOnMissingValues=True)

            # the response is the last column (index == colCount)
            y = colCount
            modelKey = 'xyz'
            kwargs = {
                'n_folds': 0,
                'destination_key': modelKey,
                'response': y,
                'max_iter': 200,
                'family': 'binomial',
                'alpha': 0,
                'lambda': 0,
                }

            start = time.time()

            updateList= [
                {'alpha': 0.5, 'lambda': 1e-5},
                # {'alpha': 0.25, 'lambda': 1e-4},
                ]

            # Try each one
            for updateDict in updateList:
                print "\n#################################################################"
                print updateDict
                kwargs.update(updateDict)
                glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, pollTimeoutSecs=180, **kwargs)
                print "glm end on ", parseResult['destination_key'], 'took', time.time() - start, 'seconds'

                h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)

                # score the model trained above against the subset-enum dataset
                parseResult = h2i.import_parse(path=csvScorePathname, schema='put', hex_key="B.hex",
                    timeoutSecs=30, separator=colSepInt)

                h2o_cmd.runScore(dataKey="B.hex", modelKey=modelKey,
                    vactual='C' + str(y+1), vpredict=1, expectedAuc=0.6)
# standard h2o test entry point
if __name__ == '__main__':
    h2o.unit_main()
| woobe/h2o | py/testdir_single_jvm/test_GLM2_ints_unbalanced.py | Python | apache-2.0 | 6,252 | [
"Gaussian"
] | ffa735500c841b7284f66fd34374b6921f07c218aa9f885ddbb308a78de8b625 |
""" DIRAC Basic MySQL Class
It provides access to the basic MySQL methods in a multithread-safe mode
keeping used connections in a python Queue for further reuse.
These are the coded methods:
__init__( host, user, passwd, name, [maxConnsInQueue=10] )
Initializes the Queue and tries to connect to the DB server,
using the _connect method.
"maxConnsInQueue" defines the size of the Queue of open connections
that are kept for reuse. It also defined the maximum number of open
connections available from the object.
maxConnsInQueue = 0 means unlimited and it is not supported.
_except( methodName, exception, errorMessage )
Helper method for exceptions: the "methodName" and the "errorMessage"
are printed with ERROR level, then the "exception" is printed (with
full description if it is a MySQL Exception) and S_ERROR is returned
with the errorMessage and the exception.
_connect()
Attempts connection to DB and sets the _connected flag to True upon success.
Returns S_OK or S_ERROR.
_query( cmd, [conn] )
Executes SQL command "cmd".
    Gets a connection from the Queue (or opens a new one if none is available);
    the used connection is put back into the Queue afterwards.
    If a connection to the DB is passed as second argument, this connection
    is used instead and is not taken from the Queue.
Returns S_OK with fetchall() out in Value or S_ERROR upon failure.
_update( cmd, [conn] )
Executes SQL command "cmd" and issue a commit
    Gets a connection from the Queue (or opens a new one if none is available);
    the used connection is put back into the Queue afterwards.
    If a connection to the DB is passed as second argument, this connection
    is used instead and is not taken from the Queue.
Returns S_OK with number of updated registers in Value or S_ERROR upon failure.
_createTables( tableDict )
Create a new Table in the DB
_getConnection()
Gets a connection from the Queue (or open a new one if none is available)
Returns S_OK with connection in Value or S_ERROR
the calling method is responsible for closing this connection once it is no
longer needed.
Some high level methods have been added to avoid the need to write SQL
statement in most common cases. They should be used instead of low level
_insert, _update methods when ever possible.
buildCondition( self, condDict = None, older = None, newer = None,
timeStamp = None, orderAttribute = None, limit = False,
greater = None, smaller = None ):
Build SQL condition statement from provided condDict and other extra check on
a specified time stamp.
The conditions dictionary specifies for each attribute one or a List of possible
values
greater and smaller are dictionaries in which the keys are the names of the fields,
that are requested to be >= or < than the corresponding value.
For compatibility with current usage it uses Exceptions to exit in case of
invalid arguments
insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
Alternatively inDict can be used
String type values will be appropriately escaped.
updateFields( self, tableName, updateFields = None, updateValues = None,
condDict = None,
limit = False, conn = None,
updateDict = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Update "updateFields" from "tableName" with "updateValues".
updateDict alternative way to provide the updateFields and updateValues
N records can match the condition
return S_OK( number of updated rows )
if limit is not False, the given limit is set
String type values will be appropriately escaped.
deleteEntries( self, tableName,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Delete rows from "tableName" with
N records can match the condition
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
getFields( self, tableName, outFields = None,
condDict = None,
limit = False, conn = None,
older = None, newer = None,
timeStamp = None, orderAttribute = None ):
Select "outFields" from "tableName" with condDict
N records can match the condition
return S_OK( tuple(Field,Value) )
if limit is not False, the given limit is set
String type values will be appropriately escaped, they can be single values or lists of values.
for compatibility with other methods condDict keyed argument is added
getCounters( self, table, attrList, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Count the number of records on each distinct combination of AttrList, selected
with condition defined by condDict and time stamps
getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
newer = None, timeStamp = None, connection = False ):
Get distinct values of a table attribute under specified conditions
"""
import collections
import time
import threading
import MySQLdb
from DIRAC import gLogger
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.Time import fromString
from DIRAC.Core.Utilities import DErrno
# This is for proper initialization of embedded server, it should only be called once
try:
  MySQLdb.server_init( ['--defaults-file=/opt/dirac/etc/my.cnf', '--datadir=/opt/mysql/db'], ['mysqld'] )
except MySQLdb.ProgrammingError:
  # server_init raises ProgrammingError if it was already called in this process
  pass

# number of live MySQL instances (incremented/decremented in __init__/__del__)
gInstancesCount = 0
# debug log file handle, opened on demand when debug=True is passed to MySQL()
gDebugFile = None

__RCSID__ = "$Id$"

# hard upper bound on connection retry attempts in ConnectionPool.get()
MAXCONNECTRETRY = 10
def _checkFields( inFields, inValues ):
  """
  Helper to check match between inFields and inValues lengths

  :return: S_OK if both are None or have equal length, S_ERROR otherwise

  A TypeError still propagates if exactly one of the two is None, matching
  the previous behaviour (len(None) raised and was not caught).
  """
  if inFields is None and inValues is None:
    return S_OK()

  # Explicit comparison instead of `assert`: assertions are stripped when
  # Python runs with -O, which would have silently disabled this check.
  if len( inFields ) != len( inValues ):
    return S_ERROR( DErrno.EMYSQL, 'Mismatch between inFields and inValues.' )

  return S_OK()
def _quotedList( fieldList = None ):
"""
Quote a list of MySQL Field Names with "`"
Return a comma separated list of quoted Field Names
To be use for Table and Field Names
"""
if fieldList is None:
return None
quotedFields = []
try:
for field in fieldList:
quotedFields.append( '`%s`' % field.replace( '`', '' ) )
except Exception:
return None
if not quotedFields:
return None
return ', '.join( quotedFields )
class MySQL( object ):
"""
Basic multithreaded DIRAC MySQL Client Class
"""
__initialized = False
class ConnectionPool( object ):
"""
Management of connections per thread
"""
def __init__( self, host, user, passwd, port = 3306, graceTime = 600 ):
self.__host = host
self.__user = user
self.__passwd = passwd
self.__port = port
self.__graceTime = graceTime
self.__spares = collections.deque()
self.__maxSpares = 10
self.__lastClean = 0
self.__assigned = {}
@property
def __thid( self ):
return threading.current_thread()
def __newConn( self ):
conn = MySQLdb.connect( host = self.__host,
port = self.__port,
user = self.__user,
passwd = self.__passwd )
self.__execute( conn, "SET AUTOCOMMIT=1" )
return conn
def __execute( self, conn, cmd ):
cursor = conn.cursor()
res = cursor.execute( cmd )
conn.commit()
cursor.close()
return res
def get( self, dbName, retries = 10 ):
retries = max( 0, min( MAXCONNECTRETRY, retries ) )
self.clean()
return self.__getWithRetry( dbName, retries, retries )
def __getWithRetry( self, dbName, totalRetries, retriesLeft ):
sleepTime = 5 * ( totalRetries - retriesLeft )
if sleepTime > 0:
time.sleep( sleepTime )
try:
conn, lastName, thid = self.__innerGet()
except MySQLdb.MySQLError as excp:
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( DErrno.EMYSQL, "Could not connect: %s" % excp )
if not self.__ping( conn ):
try:
self.__assigned.pop( thid )
except KeyError:
pass
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft )
return S_ERROR( DErrno.EMYSQL, "Could not connect" )
if lastName != dbName:
try:
conn.select_db( dbName )
except MySQLdb.MySQLError as excp:
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( DErrno.EMYSQL, "Could not select db %s: %s" % ( dbName, excp ) )
try:
self.__assigned[ thid ][1] = dbName
except KeyError:
if retriesLeft >= 0:
return self.__getWithRetry( dbName, totalRetries, retriesLeft - 1 )
return S_ERROR( DErrno.EMYSQL, "Could not connect" )
return S_OK( conn )
def __ping( self, conn ):
try:
conn.ping( True )
return True
except BaseException:
return False
def __innerGet( self ):
thid = self.__thid
now = time.time()
if thid in self.__assigned:
data = self.__assigned[ thid ]
conn = data[0]
data[2] = now
return data[0], data[1], thid
# Not cached
try:
conn, dbName = self.__spares.pop()
except IndexError:
conn = self.__newConn()
dbName = ""
self.__assigned[ thid ] = [ conn, dbName, now ]
return conn, dbName, thid
def __pop( self, thid ):
try:
data = self.__assigned.pop( thid )
if len( self.__spares ) < self.__maxSpares:
self.__spares.append( ( data[0], data[1] ) )
else:
try:
data[0].close()
except MySQLdb.ProgrammingError as exc:
gLogger.warn("ProgrammingError exception while closing MySQL connection: %s" % exc)
except BaseException as exc:
gLogger.warn("Exception while closing MySQL connection: %s" % exc)
except KeyError:
pass
def clean( self, now = False ):
if not now:
now = time.time()
self.__lastClean = now
for thid in list( self.__assigned ):
if not thid.isAlive():
self.__pop( thid )
try:
data = self.__assigned[ thid ]
except KeyError:
continue
if now - data[2] > self.__graceTime:
self.__pop( thid )
def transactionStart( self, dbName ):
result = self.get( dbName )
if not result[ 'OK' ]:
return result
conn = result[ 'Value' ]
try:
return S_OK( self.__execute( conn, "START TRANSACTION WITH CONSISTENT SNAPSHOT" ) )
except MySQLdb.MySQLError as excp:
return S_ERROR( DErrno.EMYSQL, "Could not begin transaction: %s" % excp )
def transactionCommit( self, dbName ):
result = self.get( dbName )
if not result[ 'OK' ]:
return result
conn = result[ 'Value' ]
try:
result = self.__execute( conn, "COMMIT" )
return S_OK( result )
except MySQLdb.MySQLError as excp:
return S_ERROR( DErrno.EMYSQL, "Could not commit transaction: %s" % excp )
def transactionRollback( self, dbName ):
    """ ROLLBACK the current transaction on this thread's connection to
        *dbName*.  Returns S_OK / S_ERROR.
    """
    result = self.get( dbName )
    if not result[ 'OK' ]:
      return result
    try:
      return S_OK( self.__execute( result[ 'Value' ], "ROLLBACK" ) )
    except MySQLdb.MySQLError as excp:
      return S_ERROR( DErrno.EMYSQL, "Could not rollback transaction: %s" % excp )
# Shared connection pools keyed by ( host, user, passwd, port ): every
# instance created with the same credentials reuses the same pool.
__connectionPools = {}
def __init__( self, hostName = 'localhost', userName = 'dirac', passwd = 'dirac', dbName = '', port = 3306, debug = False ):
    """
    set MySQL connection parameters and try to connect

    Real DB connections are handed out lazily by a ConnectionPool shared by
    all instances with the same ( host, user, passwd, port ).  With
    debug = True a per-database debug log file is opened (best effort).
    """
    global gInstancesCount, gDebugFile
    # module-level count of live MySQL objects, decremented in __del__
    gInstancesCount += 1
    self._connected = False
    if 'log' not in dir( self ):
      self.log = gLogger.getSubLogger( 'MySQL' )
    self.logger = self.log
    # let the derived class decide what to do with if is not 1
    self._threadsafe = MySQLdb.thread_safe()
    self.log.debug( 'thread_safe = %s' % self._threadsafe )
    self.__hostName = str( hostName )
    self.__userName = str( userName )
    self.__passwd = str( passwd )
    self.__dbName = str( dbName )
    self.__port = port
    # one pool per distinct credential set, shared across instances
    cKey = ( self.__hostName, self.__userName, self.__passwd, self.__port )
    if cKey not in MySQL.__connectionPools:
      MySQL.__connectionPools[ cKey ] = MySQL.ConnectionPool( *cKey )
    self.__connectionPool = MySQL.__connectionPools[ cKey ]
    self.__initialized = True
    result = self._connect()
    if not result[ 'OK' ]:
      gLogger.error( "Cannot connect to to DB", " %s" % result[ 'Message' ] )
    if debug:
      try:
        gDebugFile = open( "%s.debug.log" % self.__dbName, "w" )
      except IOError:
        # debug logging is best effort only
        pass
def __del__( self ):
    """ Keep the module-level instance counter in sync.
        Destructors must never raise.
    """
    global gInstancesCount
    try:
      gInstancesCount = gInstancesCount - 1
    except Exception:
      # interpreter teardown may already have cleared module globals
      pass
def _except( self, methodName, x, err ):
    """
    print MySQL error or exception
    return S_ERROR with Exception

    *x* is re-raised so the except clauses can discriminate between MySQLdb
    driver errors (reported with their numeric code) and anything else.
    """
    try:
      raise x
    except MySQLdb.Error as e:
      self.log.debug( '%s: %s' % ( methodName, err ),
                      '%d: %s' % ( e.args[0], e.args[1] ) )
      return S_ERROR( DErrno.EMYSQL, '%s: ( %d: %s )' % ( err, e.args[0], e.args[1] ) )
    except BaseException as e:
      self.log.debug('%s: %s' % (methodName, err), repr(e))
      return S_ERROR(DErrno.EMYSQL, '%s: (%s)' % (err, repr(e)))
def __isDateTime( self, dateString ):
    """ Return True when *dateString* is the literal UTC_TIMESTAMP() or, with
        surrounding quotes stripped, parses as a date/time via `fromString`.
    """
    if dateString == 'UTC_TIMESTAMP()':
      return True
    try:
      stripped = dateString.replace( '"', '').replace( "'", "" )
      return fromString( stripped ) is not None
    except BaseException:
      return False
def __escapeString( self, myString ):
    """
    To be used for escaping any MySQL string before passing it to the DB
    this should prevent passing non-MySQL accepted characters to the DB
    It also includes quotation marks " around the given string

    UTC_TIMESTAMP() and well-formed TIMESTAMPDIFF/TIMESTAMPADD calls are
    whitelisted and passed through unquoted.
    """
    # a live driver connection is needed for charset-aware escaping
    retDict = self._getConnection()
    if not retDict['OK']:
      return retDict
    connection = retDict['Value']
    try:
      myString = str( myString )
    except ValueError:
      return S_ERROR( DErrno.EMYSQL, "Cannot escape value!" )
    timeUnits = [ 'MICROSECOND', 'SECOND', 'MINUTE', 'HOUR', 'DAY', 'WEEK', 'MONTH', 'QUARTER', 'YEAR' ]
    try:
      # Check datetime functions first
      if myString.strip() == 'UTC_TIMESTAMP()':
        return S_OK( myString )
      # pass TIMESTAMPDIFF/TIMESTAMPADD( unit, a, b ) through only when the
      # unit is known and both arguments are datetimes or alphanumeric
      for func in [ 'TIMESTAMPDIFF', 'TIMESTAMPADD' ]:
        if myString.strip().startswith( '%s(' % func ) and myString.strip().endswith( ')' ):
          args = myString.strip()[:-1].replace( '%s(' % func, '' ).strip().split(',')
          arg1, arg2, arg3 = [ x.strip() for x in args ]
          if arg1 in timeUnits:
            if self.__isDateTime( arg2 ) or arg2.isalnum():
              if self.__isDateTime( arg3 ) or arg3.isalnum():
                return S_OK( myString )
          # looked like a datetime function but failed validation: refuse
          self.log.debug( '__escape_string: Could not escape string', '"%s"' % myString )
          return S_ERROR( DErrno.EMYSQL, '__escape_string: Could not escape string' )
      escape_string = connection.escape_string( str( myString ) )
      self.log.debug( '__escape_string: returns', '"%s"' % escape_string )
      return S_OK( '"%s"' % escape_string )
    except BaseException as x:
      self.log.debug( '__escape_string: Could not escape string', '"%s"' % myString )
      return self._except( '__escape_string', x, 'Could not escape string' )
def __checkTable( self, tableName, force = False ):
    """ Ensure *tableName* does not already exist.

        With force = True an existing table is dropped first; otherwise an
        S_ERROR is returned when the table is present.
    """
    table = _quotedList( [tableName] )
    if not table:
      return S_ERROR( DErrno.EMYSQL, 'Invalid tableName argument' )
    retDict = self._query( 'SHOW TABLES', debug = True )
    if not retDict['OK']:
      return retDict
    if ( tableName, ) not in retDict['Value']:
      return S_OK()
    if not force:
      # the requested table exists and creation is not forced: refuse
      return S_ERROR( DErrno.EMYSQL, 'The requested table already exist' )
    dropRes = self._update( 'DROP TABLE %s' % table, debug = True )
    if not dropRes['OK']:
      return dropRes
    return S_OK()
def _escapeString( self, myString, conn = None ):
    """ Public wrapper around the internal __escapeString.

        :param conn: accepted for backward compatibility, ignored
    """
    self.log.debug( '_escapeString:', '"%s"' % str( myString ) )
    return self.__escapeString( myString )
def _escapeValues( self, inValues = None ):
    """
    Escape all values in *inValues* for safe inclusion in a MySQL statement.

    Strings are escaped and double-quoted; tuples/lists become a
    parenthesised, comma-separated group of escaped values; booleans are
    rendered as the (valid MySQL) literals True/False; anything else is
    str()-converted and escaped.

    :param inValues: list of values to escape (None/empty allowed)
    :return: S_OK( list of escaped string representations ) or S_ERROR
    """
    self.log.debug( '_escapeValues:', inValues )
    inEscapeValues = []
    if not inValues:
      return S_OK( inEscapeValues )
    for value in inValues:
      if isinstance( value, basestring ):
        retDict = self.__escapeString( value )
        if not retDict['OK']:
          return retDict
        inEscapeValues.append( retDict['Value'] )
      elif isinstance( value, ( tuple, list )):
        tupleValues = []
        for v in list( value ):
          retDict = self.__escapeString( v )
          if not retDict['OK']:
            return retDict
          tupleValues.append( retDict['Value'] )
        inEscapeValues.append( '(' + ', '.join( tupleValues ) + ')' )
      elif isinstance( value, bool ):
        # BUGFIX: previously this branch did `inEscapeValues = [str(value)]`,
        # replacing the whole accumulated list and silently dropping every
        # value escaped before the boolean; it must append instead.
        inEscapeValues.append( str( value ) )
      else:
        retDict = self.__escapeString( str( value ) )
        if not retDict['OK']:
          return retDict
        inEscapeValues.append( retDict['Value'] )
    return S_OK( inEscapeValues )
def _safeCmd( self, command ):
    """ Just replaces password, if visible, with *********
    """
    masked = command.replace( self.__passwd, '**********' )
    return masked
def _connect( self ):
    """
    Mark this object as connected; physical connections are opened lazily
    by the shared connection pool (see _getConnection).

    :return: S_OK() when the object is usable, S_ERROR otherwise
    """
    if not self.__initialized:
      error = 'DB not properly initialized'
      gLogger.error( error )
      return S_ERROR( DErrno.EMYSQL, error )
    self.log.debug( '_connect:', self._connected )
    if self._connected:
      return S_OK()
    self.log.debug( '_connect: Attempting to access DB',
                    '[%s@%s] by user %s' %
                    ( self.__dbName, self.__hostName, self.__userName ) )
    try:
      self.log.verbose( '_connect: Connected.' )
      self._connected = True
      return S_OK()
    except Exception as x:
      # FIX: removed stray debug `print x` leftover; _except already logs
      return self._except( '_connect', x, 'Could not connect to DB.' )
def _query( self, cmd, conn = None, debug = False ):
    """
    execute MySQL query command
    return S_OK structure with fetchall result as tuple
    it returns an empty tuple if no matching rows are found
    return S_ERROR upon error

    :param cmd: SQL statement to execute
    :param conn: accepted for backward compatibility; the connection is in
                 fact taken from the shared pool
    :param debug: log at debug level instead of verbose
    """
    # log the full command at debug level, a (possibly truncated) one otherwise
    if debug:
      self.logger.debug( '_query: %s' % self._safeCmd( cmd ) )
    else:
      if self.logger.getLevel() == 'DEBUG':
        self.logger.verbose( '_query: %s' % self._safeCmd( cmd ) )
      else:
        self.logger.verbose( '_query: %s' % self._safeCmd( cmd )[:min( len( cmd ) , 512 )] )
    if gDebugFile:
      start = time.time()
    retDict = self._getConnection()
    if not retDict['OK']:
      return retDict
    connection = retDict[ 'Value' ]
    try:
      cursor = connection.cursor()
      if cursor.execute( cmd ):
        res = cursor.fetchall()
      else:
        res = ()
      # Log the result limiting it to just 10 records
      if len( res ) <= 10:
        if debug:
          self.logger.debug( '_query: returns', res )
        else:
          self.logger.verbose( '_query: returns', res )
      else:
        if debug:
          self.logger.debug( '_query: Total %d records returned' % len( res ) )
          self.logger.debug( '_query: %s ...' % str( res[:10] ) )
        else:
          self.logger.verbose( '_query: Total %d records returned' % len( res ) )
          self.logger.verbose( '_query: %s ...' % str( res[:10] ) )
      retDict = S_OK( res )
    except BaseException as x:
      self.log.warn( '_query: %s' % self._safeCmd( cmd ) )
      retDict = self._except( '_query', x, 'Execution failed.' )
    # close the cursor in all cases; failures here are irrelevant
    try:
      cursor.close()
    except BaseException:
      pass
    if gDebugFile:
      # timing line for the optional per-DB debug file (Python 2 syntax)
      print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
      gDebugFile.flush()
    return retDict
def _update( self, cmd, conn = None, debug = False ):
    """ execute MySQL update command
    return S_OK with number of updated registers upon success
    return S_ERROR upon error

    On success the returned structure also carries 'lastRowId' when the
    cursor reports one (AUTO_INCREMENT inserts).
    """
    # log the full command at debug level, a (possibly truncated) one otherwise
    if debug:
      self.logger.debug( '_update: %s' % self._safeCmd( cmd ) )
    else:
      if self.logger.getLevel() == 'DEBUG':
        self.logger.verbose( '_update: %s' % self._safeCmd( cmd ) )
      else:
        self.logger.verbose( '_update: %s' % self._safeCmd( cmd )[:min( len( cmd ) , 512 )] )
    if gDebugFile:
      start = time.time()
    retDict = self._getConnection()
    if not retDict['OK']:
      return retDict
    connection = retDict['Value']
    try:
      cursor = connection.cursor()
      res = cursor.execute( cmd )
      # connection.commit()
      if debug:
        self.log.debug( '_update:', res )
      else:
        self.log.verbose( '_update:', res )
      retDict = S_OK( res )
      if cursor.lastrowid:
        retDict[ 'lastRowId' ] = cursor.lastrowid
    except Exception as x:
      self.log.warn( '_update: %s: %s' % ( self._safeCmd( cmd ), str( x ) ) )
      retDict = self._except( '_update', x, 'Execution failed.' )
    # close the cursor in all cases; failures here are irrelevant
    try:
      cursor.close()
    except Exception:
      pass
    if gDebugFile:
      # timing line for the optional per-DB debug file (Python 2 syntax)
      print >> gDebugFile, time.time() - start, cmd.replace( '\n', '' )
      gDebugFile.flush()
    return retDict
def _transaction( self, cmdList, conn = None ):
    """ dummy transaction support

    :param self: self reference
    :param list cmdList: list of queries to be executed within the transaction
    :param MySQLDB.Connection conn: connection
    :return: S_OK( [ ( cmd1, ret1 ), ... ] ) or S_ERROR
    """
    if not isinstance( cmdList, list ):
      return S_ERROR( DErrno.EMYSQL, "_transaction: wrong type (%s) for cmdList" % type( cmdList ) )
    # # get connection
    connection = conn
    if not connection:
      retDict = self._getConnection()
      if not retDict['OK']:
        return retDict
      connection = retDict[ 'Value' ]
    # # list with cmds and their results
    cmdRet = []
    try:
      cursor = connection.cursor()
      for cmd in cmdList:
        cmdRet.append( ( cmd, cursor.execute( cmd ) ) )
      connection.commit()
    except Exception as error:
      # FIX: was `self.logger.execption` (typo) — it raised AttributeError
      # instead of logging whenever a statement in the transaction failed
      self.logger.exception( error )
      # # rollback, put back connection to the pool
      connection.rollback()
      return S_ERROR( DErrno.EMYSQL, error )
    # # close cursor, put back connection to the pool
    cursor.close()
    return S_OK( cmdRet )
def _createViews( self, viewsDict, force = False ):
    """ create view based on query
    :param dict viewDict: { 'ViewName': "Fields" : { "`a`": `tblA.a`, "`sumB`" : "SUM(`tblB.b`)" }
                            "SelectFrom" : "tblA join tblB on tblA.id = tblB.id",
                            "Clauses" : [ "`tblA.a` > 10", "`tblB.Status` = 'foo'" ] ## WILL USE AND CLAUSE
                            "GroupBy": [ "`a`" ],
                            "OrderBy": [ "`b` DESC" ] }
    """
    if force:
      gLogger.debug( viewsDict )
    for viewName, viewDict in viewsDict.iteritems():
      # assemble the statement clause by clause, then join with spaces
      parts = [ "CREATE OR REPLACE VIEW `%s`.`%s` AS" % ( self.__dbName, viewName ) ]
      fieldDefs = viewDict.get( "Fields", {} )
      columns = ",".join( [ "%s AS %s" % ( colDef, colName )
                            for colName, colDef in fieldDefs.iteritems() ] )
      tables = viewDict.get( "SelectFrom", "" )
      if columns and tables:
        parts.append( "SELECT %s FROM %s" % ( columns, tables ) )
      where = " AND ".join( viewDict.get( "Clauses", [] ) )
      if where:
        parts.append( "WHERE %s" % where )
      groupBy = ",".join( viewDict.get( "GroupBy", [] ) )
      if groupBy:
        parts.append( "GROUP BY %s" % groupBy )
      orderBy = ",".join( viewDict.get( "OrderBy", [] ) )
      if orderBy:
        parts.append( "ORDER BY %s" % orderBy )
      parts.append( ";" )
      viewQuery = " ".join( parts )
      self.log.debug( "`%s` VIEW QUERY IS: %s" % ( viewName, viewQuery ) )
      createView = self._query( viewQuery )
      if not createView["OK"]:
        gLogger.error( 'Can not create view', createView["Message"] )
        return createView
    return S_OK()
def _createTables( self, tableDict, force = False ):
    """
    tableDict:
      tableName: { 'Fields' : { 'Field': 'Description' },
                   'ForeignKeys': {'Field': 'Table.key' },
                   'PrimaryKey': 'Id',
                   'Indexes': { 'Index': [] },
                   'UniqueIndexes': { 'Index': [] },
                   'Engine': 'InnoDB' }
    only 'Fields' is a mandatory key.

    Creates a new Table for each key in tableDict, "tableName" in the DB with
    the provided description.
    It allows to create:
      - flat tables if no "ForeignKeys" key defined.
      - tables with foreign keys to auxiliary tables holding the values
      of some of the fields
    Arguments:
      tableDict: dictionary of dictionary with description of tables to be created.
      Only "Fields" is a mandatory key in the table description.
        "Fields": Dictionary with Field names and description of the fields
        "ForeignKeys": Dictionary with Field names and name of auxiliary tables.
          The auxiliary tables must be defined in tableDict.
        "PrimaryKey": Name of PRIMARY KEY for the table (if exist).
        "Indexes": Dictionary with definition of indexes, the value for each
          index is the list of fields to be indexed.
        "UniqueIndexes": Dictionary with definition of indexes, the value for each
          index is the list of fields to be indexed. This indexes will declared
          unique.
        "Engine": use the given DB engine, InnoDB is the default if not present.
        "Charset": use the given character set. Default is latin1
      force:
        if True, requested tables are DROP if they exist.
        if False, returned with S_ERROR if table exist.
    """
    # First check consistency of request
    if not isinstance( tableDict, dict ):
      return S_ERROR( DErrno.EMYSQL, 'Argument is not a dictionary: %s( %s )'
                      % ( type( tableDict ), tableDict ) )
    # NOTE: Python 2 — keys() returns a list; tableList.remove() below
    # relies on this being a mutable list
    tableList = tableDict.keys()
    if len( tableList ) == 0:
      return S_OK( 0 )
    for table in tableList:
      thisTable = tableDict[table]
      # Check if Table is properly described with a dictionary
      if not isinstance( thisTable, dict ):
        return S_ERROR( DErrno.EMYSQL, 'Table description is not a dictionary: %s( %s )'
                        % ( type( thisTable ), thisTable ) )
      if not 'Fields' in thisTable:
        return S_ERROR( DErrno.EMYSQL, 'Missing `Fields` key in `%s` table dictionary' % table )
    # Topological sort in waves: a table is extracted only once every table
    # it references through ForeignKeys has been extracted in a prior wave
    tableCreationList = [[]]
    auxiliaryTableList = []
    i = 0
    extracted = True
    while tableList and extracted:
      # iterate extracting tables from list if they only depend on
      # already extracted tables.
      extracted = False
      auxiliaryTableList += tableCreationList[i]
      i += 1
      tableCreationList.append( [] )
      for table in list( tableList ):
        toBeExtracted = True
        thisTable = tableDict[table]
        if 'ForeignKeys' in thisTable:
          thisKeys = thisTable['ForeignKeys']
          for key, auxTable in thisKeys.iteritems():
            # auxTable is either "Table" (same key name) or "Table.key"
            forTable = auxTable.split( '.' )[0]
            forKey = key
            if forTable != auxTable:
              forKey = auxTable.split( '.' )[1]
            if forTable not in auxiliaryTableList:
              toBeExtracted = False
              break
            if not key in thisTable['Fields']:
              return S_ERROR( DErrno.EMYSQL, 'ForeignKey `%s` -> `%s` not defined in Primary table `%s`.'
                              % ( key, forKey, table ) )
            if not forKey in tableDict[forTable]['Fields']:
              return S_ERROR( DErrno.EMYSQL, 'ForeignKey `%s` -> `%s` not defined in Auxiliary table `%s`.'
                              % ( key, forKey, forTable ) )
        if toBeExtracted:
          self.log.info( 'Table %s ready to be created' % table )
          extracted = True
          tableList.remove( table )
          tableCreationList[i].append( table )
    if tableList:
      # tables left over can never be extracted: circular dependency
      return S_ERROR( DErrno.EMYSQL, 'Recursive Foreign Keys in %s' % ', '.join( tableList ) )
    for tableList in tableCreationList:
      for table in tableList:
        # Check if Table exist
        retDict = self.__checkTable( table, force = force )
        if not retDict['OK']:
          return retDict
        thisTable = tableDict[table]
        cmdList = []
        for field in thisTable['Fields'].keys():
          cmdList.append( '`%s` %s' % ( field, thisTable['Fields'][field] ) )
        if 'PrimaryKey' in thisTable:
          if isinstance( thisTable['PrimaryKey'], basestring ):
            cmdList.append( 'PRIMARY KEY ( `%s` )' % thisTable['PrimaryKey'] )
          else:
            cmdList.append( 'PRIMARY KEY ( %s )' % ", ".join( [ "`%s`" % str( f ) for f in thisTable['PrimaryKey'] ] ) )
        if 'Indexes' in thisTable:
          indexDict = thisTable['Indexes']
          for index in indexDict:
            indexedFields = '`, `'.join( indexDict[index] )
            cmdList.append( 'INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
        if 'UniqueIndexes' in thisTable:
          indexDict = thisTable['UniqueIndexes']
          for index in indexDict:
            indexedFields = '`, `'.join( indexDict[index] )
            cmdList.append( 'UNIQUE INDEX `%s` ( `%s` )' % ( index, indexedFields ) )
        if 'ForeignKeys' in thisTable:
          thisKeys = thisTable['ForeignKeys']
          for key, auxTable in thisKeys.iteritems():
            forTable = auxTable.split( '.' )[0]
            forKey = key
            if forTable != auxTable:
              forKey = auxTable.split( '.' )[1]
            # cmdList.append( '`%s` %s' % ( forTable, tableDict[forTable]['Fields'][forKey] )
            cmdList.append( 'FOREIGN KEY ( `%s` ) REFERENCES `%s` ( `%s` )'
                            ' ON DELETE RESTRICT' % ( key, forTable, forKey ) )
        engine = thisTable.get('Engine', 'InnoDB')
        charset = thisTable.get('Charset', 'latin1')
        cmd = 'CREATE TABLE `%s` (\n%s\n) ENGINE=%s DEFAULT CHARSET=%s' % ( table, ',\n'.join( cmdList ), engine, charset )
        retDict = self._update( cmd, debug = True )
        if not retDict['OK']:
          return retDict
        self.log.info( 'Table %s created' % table )
    return S_OK()
def _getFields( self, tableName, outFields = None,
                inFields = None, inValues = None,
                limit = False, conn = None,
                older = None, newer = None,
                timeStamp = None, orderAttribute = None ):
    """ Deprecated wrapper: translates ( inFields, inValues ) into a condDict
        and delegates to getFields().
    """
    self.log.warn( '_getFields:', 'deprecation warning, use getFields methods instead of _getFields.' )
    retDict = _checkFields( inFields, inValues )
    if not retDict['OK']:
      self.log.warn( '_getFields:', retDict['Message'] )
      return retDict
    condDict = {}
    if inFields != None:
      try:
        for k in range( len( inFields ) ):
          condDict[ inFields[k] ] = inValues[k]
      except Exception as x:
        return S_ERROR( DErrno.EMYSQL, x )
    return self.getFields( tableName, outFields, condDict, limit, conn, older, newer, timeStamp, orderAttribute )
def _insert( self, tableName, inFields = None, inValues = None, conn = None ):
    """ Deprecated wrapper kept for backward compatibility: delegates
        directly to insertFields().
    """
    self.log.warn( '_insert:', 'deprecation warning, use insertFields methods instead of _insert.' )
    return self.insertFields( tableName, inFields, inValues, conn )
def _to_value( self, param ):
"""
Convert to string
"""
return str( param[0] )
def _to_string( self, param ):
"""
"""
return param[0].tostring()
def _getConnection( self ):
    """ Hand out a connection to self.__dbName from the shared pool.

        :return: S_OK( connection ) or S_ERROR when the object was never
                 properly initialized or the pool cannot connect
    """
    self.log.debug( '_getConnection:' )
    if self.__initialized:
      return self.__connectionPool.get( self.__dbName )
    error = 'DB not properly initialized'
    gLogger.error( error )
    return S_ERROR( DErrno.EMYSQL, error )
########################################################################################
#
# Transaction functions
#
########################################################################################
def transactionStart( self ):
    """ Start a transaction on this thread's pooled connection. """
    pool = self.__connectionPool
    return pool.transactionStart( self.__dbName )
def transactionCommit( self ):
    """ Commit the transaction on this thread's pooled connection. """
    pool = self.__connectionPool
    return pool.transactionCommit( self.__dbName )
def transactionRollback( self ):
    """ Roll back the transaction on this thread's pooled connection. """
    pool = self.__connectionPool
    return pool.transactionRollback( self.__dbName )
########################################################################################
#
# Utility functions
#
########################################################################################
def countEntries( self, table, condDict, older = None, newer = None, timeStamp = None, connection = False,
                  greater = None, smaller = None ):
    """
    Count the rows of *table* matching condDict and the optional time /
    comparison filters.

    :return: S_OK( row count ) or S_ERROR
    """
    table = _quotedList( [table] )
    if not table:
      error = 'Invalid table argument'
      self.log.debug( 'countEntries:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    try:
      cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    res = self._query( 'SELECT COUNT(*) FROM %s %s' % ( table, cond ), connection, debug = True )
    if not res['OK']:
      return res
    # single row, single column: the count itself
    return S_OK( res['Value'][0][0] )
########################################################################################
def getCounters( self, table, attrList, condDict, older = None, newer = None, timeStamp = None, connection = False,
                 greater = None, smaller = None ):
    """
    Count the rows for every distinct combination of the attributes in
    *attrList*, restricted by condDict and the optional filters.

    :return: S_OK( [ ( {attr: value, ...}, count ), ... ] ) or S_ERROR
    """
    table = _quotedList( [table] )
    if not table:
      error = 'Invalid table argument'
      self.log.debug( 'getCounters:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    attrNames = _quotedList( attrList )
    if attrNames is None:
      error = 'Invalid updateFields argument'
      self.log.debug( 'getCounters:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    try:
      cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    cmd = 'SELECT %s, COUNT(*) FROM %s %s GROUP BY %s ORDER BY %s' % ( attrNames, table, cond, attrNames, attrNames )
    res = self._query( cmd , connection, debug = True )
    if not res['OK']:
      return res
    resultList = []
    for row in res['Value']:
      # the first len(attrList) columns map one-to-one onto attrList,
      # the final column is the COUNT(*)
      resultList.append( ( dict( zip( attrList, row ) ), row[ len( attrList ) ] ) )
    return S_OK( resultList )
#########################################################################################
def getDistinctAttributeValues( self, table, attribute, condDict = None, older = None,
                                newer = None, timeStamp = None, connection = False,
                                greater = None, smaller = None ):
    """
    Return the distinct values of *attribute* in *table* matching the given
    conditions, ordered by the attribute.

    :return: S_OK( list of values ) or S_ERROR
    """
    table = _quotedList( [table] )
    if not table:
      error = 'Invalid table argument'
      self.log.debug( 'getDistinctAttributeValues:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    attributeName = _quotedList( [attribute] )
    if not attributeName:
      error = 'Invalid attribute argument'
      self.log.debug( 'getDistinctAttributeValues:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    try:
      cond = self.buildCondition( condDict = condDict, older = older, newer = newer, timeStamp = timeStamp,
                                  greater = greater, smaller = smaller )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    cmd = 'SELECT DISTINCT( %s ) FROM %s %s ORDER BY %s' % ( attributeName, table, cond, attributeName )
    res = self._query( cmd, connection, debug = True )
    if not res['OK']:
      return res
    return S_OK( [ row[0] for row in res['Value'] ] )
#############################################################################
def buildCondition( self, condDict = None, older = None, newer = None,
                    timeStamp = None, orderAttribute = None, limit = False,
                    greater = None, smaller = None, offset = None ):
    """ Build SQL condition statement from provided condDict and other extra check on
        a specified time stamp.
        The conditions dictionary specifies for each attribute one or a List of possible
        values
        greater and smaller are dictionaries in which the keys are the names of the fields,
        that are requested to be >= or < than the corresponding value.
        For compatibility with current usage it uses Exceptions to exit in case of
        invalid arguments
    """
    condition = ''
    # `conjunction` starts as WHERE and becomes AND once a clause is emitted
    conjunction = "WHERE"
    if condDict != None:
      for aName, attrValue in condDict.iteritems():
        if isinstance( aName, basestring ):
          attrName = _quotedList( [aName] )
        elif isinstance( aName, tuple ):
          attrName = '('+_quotedList( list( aName ) )+')'
        # NOTE(review): if aName is neither basestring nor tuple, attrName
        # keeps its value from the previous iteration (or is unbound on the
        # first one) — confirm condDict keys are always str or tuple
        if not attrName:
          error = 'Invalid condDict argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        if isinstance( attrValue, list ):
          # list of values -> IN ( ... ) clause
          retDict = self._escapeValues( attrValue )
          if not retDict['OK']:
            self.log.warn( 'buildCondition:', retDict['Message'] )
            raise Exception( retDict['Message'] )
          else:
            escapeInValues = retDict['Value']
            multiValue = ', '.join( escapeInValues )
            condition = ' %s %s %s IN ( %s )' % ( condition,
                                                  conjunction,
                                                  attrName,
                                                  multiValue )
            conjunction = "AND"
        else:
          # single value -> equality clause
          retDict = self._escapeValues( [ attrValue ] )
          if not retDict['OK']:
            self.log.warn( 'buildCondition:', retDict['Message'] )
            raise Exception( retDict['Message'] )
          else:
            escapeInValue = retDict['Value'][0]
            condition = ' %s %s %s = %s' % ( condition,
                                             conjunction,
                                             attrName,
                                             escapeInValue )
            conjunction = "AND"
    if timeStamp:
      timeStamp = _quotedList( [timeStamp] )
      if not timeStamp:
        error = 'Invalid timeStamp argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      # newer/older bound the timeStamp column: newer -> >=, older -> <
      if newer:
        retDict = self._escapeValues( [ newer ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s >= %s' % ( condition,
                                            conjunction,
                                            timeStamp,
                                            escapeInValue )
          conjunction = "AND"
      if older:
        retDict = self._escapeValues( [ older ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s < %s' % ( condition,
                                           conjunction,
                                           timeStamp,
                                           escapeInValue )
    # per-field >= bounds
    if isinstance( greater, dict ):
      for attrName, attrValue in greater.iteritems():
        attrName = _quotedList( [attrName] )
        if not attrName:
          error = 'Invalid greater argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        retDict = self._escapeValues( [ attrValue ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s >= %s' % ( condition,
                                            conjunction,
                                            attrName,
                                            escapeInValue )
          conjunction = "AND"
    # per-field < bounds
    if isinstance( smaller, dict ):
      for attrName, attrValue in smaller.iteritems():
        attrName = _quotedList( [attrName] )
        if not attrName:
          error = 'Invalid smaller argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
        retDict = self._escapeValues( [ attrValue ] )
        if not retDict['OK']:
          self.log.warn( 'buildCondition:', retDict['Message'] )
          raise Exception( retDict['Message'] )
        else:
          escapeInValue = retDict['Value'][0]
          condition = ' %s %s %s < %s' % ( condition,
                                           conjunction,
                                           attrName,
                                           escapeInValue )
          conjunction = "AND"
    # ORDER BY: each attribute may carry an optional ':ASC' / ':DESC' suffix
    orderList = []
    orderAttrList = orderAttribute
    if not isinstance( orderAttrList, list ):
      orderAttrList = [ orderAttribute ]
    for orderAttr in orderAttrList:
      if orderAttr is None:
        continue
      if not isinstance( orderAttr, basestring ):
        error = 'Invalid orderAttribute argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      orderField = _quotedList( orderAttr.split( ':' )[:1] )
      if not orderField:
        error = 'Invalid orderAttribute argument'
        self.log.warn( 'buildCondition:', error )
        raise Exception( error )
      if len( orderAttr.split( ':' ) ) == 2:
        orderType = orderAttr.split( ':' )[1].upper()
        if orderType in [ 'ASC', 'DESC']:
          orderList.append( '%s %s' % ( orderField, orderType ) )
        else:
          error = 'Invalid orderAttribute argument'
          self.log.warn( 'buildCondition:', error )
          raise Exception( error )
      else:
        orderList.append( orderAttr )
    if orderList:
      condition = "%s ORDER BY %s" % ( condition, ', '.join( orderList ) )
    if limit:
      if offset:
        condition = "%s LIMIT %d OFFSET %d" % ( condition, limit, offset )
      else:
        condition = "%s LIMIT %d" % ( condition, limit )
    return condition
#############################################################################
def getFields( self, tableName, outFields = None,
               condDict = None,
               limit = False, conn = None,
               older = None, newer = None,
               timeStamp = None, orderAttribute = None,
               greater = None, smaller = None ):
    """
    Select "outFields" from "tableName" restricted by condDict and the
    optional filters.

    :param outFields: list of columns; None selects '*'
    :param limit: a plain row count or a ( limit, offset ) pair
    :return: S_OK( tuple of matching rows ) or S_ERROR
    """
    table = _quotedList( [tableName] )
    if not table:
      error = 'Invalid tableName argument'
      self.log.warn( 'getFields:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    selectFields = '*'
    if outFields:
      selectFields = _quotedList( outFields )
      if selectFields is None:
        error = 'Invalid outFields arguments'
        self.log.warn( 'getFields:', error )
        return S_ERROR( DErrno.EMYSQL, error )
    self.log.verbose( 'getFields:', 'selecting fields %s from table %s.' % ( selectFields, table ) )
    if condDict is None:
      condDict = {}
    try:
      # limit may be a scalar or a ( limit, offset ) sequence; subscripting
      # a scalar raises TypeError, which selects the scalar branch
      try:
        myLimit, myOffset = limit[0], limit[1]
      except TypeError:
        myLimit, myOffset = limit, None
      condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
                                       timeStamp = timeStamp, orderAttribute = orderAttribute, limit = myLimit,
                                       greater = greater, smaller = smaller, offset = myOffset )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    return self._query( 'SELECT %s FROM %s %s' %
                        ( selectFields, table, condition ), conn, debug = True )
#############################################################################
def deleteEntries( self, tableName,
                   condDict = None,
                   limit = False, conn = None,
                   older = None, newer = None,
                   timeStamp = None, orderAttribute = None,
                   greater = None, smaller = None ):
    """
    Delete the rows of *tableName* matching condDict and the optional
    filters.  If limit is not False the given limit is applied.
    String type values are appropriately escaped by buildCondition.

    :return: S_OK( number of deleted rows ) or S_ERROR
    """
    quotedTable = _quotedList( [tableName] )
    if not quotedTable:
      error = 'Invalid tableName argument'
      self.log.warn( 'deleteEntries:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    self.log.verbose( 'deleteEntries:', 'deleting rows from table %s.' % quotedTable )
    try:
      condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
                                       timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
                                       greater = greater, smaller = smaller )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    return self._update( 'DELETE FROM %s %s' % ( quotedTable, condition ), conn, debug = True )
#############################################################################
def updateFields( self, tableName, updateFields = None, updateValues = None,
                  condDict = None,
                  limit = False, conn = None,
                  updateDict = None,
                  older = None, newer = None,
                  timeStamp = None, orderAttribute = None,
                  greater = None, smaller = None ):
    """
    Update "updateFields" from "tableName" with "updateValues".
    updateDict alternative way to provide the updateFields and updateValues
    N records can match the condition
    return S_OK( number of updated rows )
    if limit is not False, the given limit is set
    String type values will be appropriately escaped.
    """
    # nothing requested is not an error
    if not updateFields and not updateDict:
      return S_OK( 0 )
    table = _quotedList( [tableName] )
    if not table:
      error = 'Invalid tableName argument'
      self.log.warn( 'updateFields:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    # validate the parallel lists before merging in updateDict
    retDict = _checkFields( updateFields, updateValues )
    if not retDict['OK']:
      error = 'Mismatch between updateFields and updateValues.'
      self.log.warn( 'updateFields:', error )
      return S_ERROR( DErrno.EMYSQL, error )
    if updateFields is None:
      updateFields = []
      updateValues = []
    if updateDict:
      if not isinstance( updateDict, dict ):
        error = 'updateDict must be a of Type DictType'
        self.log.warn( 'updateFields:', error )
        return S_ERROR( DErrno.EMYSQL, error )
      try:
        # merge the dict form into the parallel-list form
        updateFields += updateDict.keys()
        updateValues += [updateDict[k] for k in updateDict.keys()]
      except TypeError:
        error = 'updateFields and updateValues must be a list'
        self.log.warn( 'updateFields:', error )
        return S_ERROR( DErrno.EMYSQL, error )
    updateValues = self._escapeValues( updateValues )
    if not updateValues['OK']:
      self.log.warn( 'updateFields:', updateValues['Message'] )
      return updateValues
    updateValues = updateValues['Value']
    self.log.verbose( 'updateFields:', 'updating fields %s from table %s.' %( ', '.join( updateFields ), table ) )
    try:
      condition = self.buildCondition( condDict = condDict, older = older, newer = newer,
                                       timeStamp = timeStamp, orderAttribute = orderAttribute, limit = limit,
                                       greater = greater, smaller = smaller )
    except Exception as x:
      return S_ERROR( DErrno.EMYSQL, x )
    # `field` = escaped_value pairs for the SET clause
    updateString = ','.join( ['%s = %s' % ( _quotedList( [updateFields[k]] ),
                                            updateValues[k] ) for k in range( len( updateFields ) ) ] )
    return self._update( 'UPDATE %s SET %s %s' %
                         ( table, updateString, condition ), conn, debug = True )
#############################################################################
def insertFields( self, tableName, inFields = None, inValues = None, conn = None, inDict = None ):
"""
Insert a new row in "tableName" assigning the values "inValues" to the
fields "inFields".
String type values will be appropriately escaped.
"""
table = _quotedList( [tableName] )
if not table:
error = 'Invalid tableName argument'
self.log.warn( 'insertFields:', error )
return S_ERROR( DErrno.EMYSQL, error )
retDict = _checkFields( inFields, inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
if inFields is None:
inFields = []
inValues = []
if inDict:
if not isinstance( inDict, dict ):
error = 'inDict must be a of Type DictType'
self.log.warn( 'insertFields:', error )
return S_ERROR( DErrno.EMYSQL, error )
try:
inFields += inDict.keys()
inValues += [inDict[k] for k in inDict.keys()]
except TypeError:
error = 'inFields and inValues must be a list'
self.log.warn( 'insertFields:', error )
return S_ERROR( DErrno.EMYSQL, error )
inFieldString = _quotedList( inFields )
if inFieldString is None:
error = 'Invalid inFields arguments'
self.log.warn( 'insertFields:', error )
return S_ERROR( DErrno.EMYSQL, error )
inFieldString = '( %s )' % inFieldString
retDict = self._escapeValues( inValues )
if not retDict['OK']:
self.log.warn( 'insertFields:', retDict['Message'] )
return retDict
inValueString = ', '.join( retDict['Value'] )
inValueString = '( %s )' % inValueString
self.log.verbose( 'insertFields:', 'inserting %s into table %s'
% ( inFieldString, table ) )
return self._update( 'INSERT INTO %s %s VALUES %s' %
( table, inFieldString, inValueString ), conn, debug = True )
def executeStoredProcedure( self, packageName, parameters, outputIds ):
conDict = self._getConnection()
if not conDict['OK']:
return conDict
connection = conDict['Value']
cursor = connection.cursor()
try:
cursor.callproc( packageName, parameters )
row = []
for oId in outputIds:
resName = "@_%s_%s" % ( packageName, oId )
cursor.execute( "SELECT %s" % resName )
row.append( cursor.fetchone()[0] )
retDict = S_OK( row )
except Exception as x:
retDict = self._except( '_query', x, 'Execution failed.' )
connection.rollback()
try:
cursor.close()
except Exception:
pass
return retDict
# For the procedures that execute a select without storing the result
def executeStoredProcedureWithCursor( self, packageName, parameters ):
conDict = self._getConnection()
if not conDict['OK']:
return conDict
connection = conDict['Value']
cursor = connection.cursor()
try:
# execStr = "call %s(%s);" % ( packageName, ",".join( map( str, parameters ) ) )
execStr = "call %s(%s);" % ( packageName, ",".join( ["\"%s\"" % param if isinstance( param, basestring ) else str( param ) for param in parameters] ) )
cursor.execute( execStr )
rows = cursor.fetchall()
retDict = S_OK( rows )
except Exception as x:
retDict = self._except( '_query', x, 'Execution failed.' )
connection.rollback()
try:
cursor.close()
except Exception:
pass
return retDict
| arrabito/DIRAC | Core/Utilities/MySQL.py | Python | gpl-3.0 | 54,673 | [
"DIRAC"
] | a59c32457fe342805a39ee905ebb537e19d327e1d9949fac85d2d61557688d6e |
# Placeholder because KDTree moved
# Remove this in version 1.0
__all__ = ['NeighborSearch']
import warnings
with warnings.catch_warnings():
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(('KDTree has been removed in 0.11. Instead you can use the '
'BioPython or scikit-learn implementation directly. The '
'"AtomNeighborSearch" class is still available in the '
'NeighborSearch module which is moved to MDAnalysis.lib.NeighborSearch.'
'This KDTree module will be removed in the 1.0 release.'),
DeprecationWarning)
from .lib import NeighborSearch
| alejob/mdanalysis | package/MDAnalysis/KDTree.py | Python | gpl-2.0 | 671 | [
"Biopython",
"MDAnalysis"
] | d8e1d4bc5b289a53d5e185f6e5fd747ad28a788d6b6418c97ad09c18fe597d0e |
from pyibex import *
from pyibex.thickset import *
from pyibex.geometry import CtcPolar
from vibes import vibes
import math
import numpy as np
vibes.beginDrawing()
class ThickRotation(ThickTest):
def __init__(self, th1, th2, y):
ThickTest.__init__(self, 2)
self.th = Interval(th1, th2)
self.y = IntervalVector(y) # IntervalVector([[1,3], [4,6]])
fthin = Function("x1", "x2", "(x1*cos(%s) - x2*sin(%s), x1*sin(%s) + x2*cos(%s))"% ((Interval(th1, th2), )*4 ) )
self.thinTest = ThinfIn(fthin, y)
self.ctcpolar = CtcPolar()
self.rho, self.theta = Interval(1, 100) , Interval(0).inflate(2*math.pi/3.)
self.ctcpolar.contract(y[0], y[1], self.rho, self.theta)
def test(self, X):
b = self.thinTest.test(X)
if is_singleton(b):
return b
# penombra
# tmp = self.ctcpolar.RTfromXY(X[0], X[1])
# rho, theta = tmp[0], tmp[1]
rho, theta = Interval(1, 100) , Interval(0).inflate(2*math.pi/3.)
self.ctcpolar.contract(X[0], X[1], rho, theta)
print(rho, theta, self.rho, self.theta)
titv = ThickInterval(theta + self.th.lb(), theta + self.th.ub())
if rho.is_subset(self.rho):
if titv.superset().is_subset(self.theta):
return IN
elif rho.is_disjoint(self.rho) or titv.superset().is_disjoint(self.theta):
return OUT
# titv = ThickInterval(theta + self.th.lb(), theta + self.th.ub())
b1 = titv.intersects(self.theta)
b2 = titv.isNotInclude(self.theta)
# b1 = isThickIntersect(theta + self.th.lb(), theta + self.th.ub(), self.theta)
# b2 = isThickNotInclude(theta + self.th.lb(), theta + self.th.ub(), self.theta)
if b1 and b2:
return MAYBE
return UNK
ctcpolar = CtcPolar()
th1 = math.pi/4.
th2 = math.pi/3.
def flb_polar(X0):
rho, theta = Interval(1, 100) , Interval(0).inflate(2*math.pi/3.)
# print(rho, theta, X0)
ctcpolar.contract(X0[0], X0[1], rho, theta)
theta = theta + th1
X = IntervalVector(2, [-100,100])
ctcpolar.contract(X[0], X[1], rho, theta)
# print(rho, theta, X)
return X
def fub_polar(X0):
rho, theta = Interval(1, 100) , Interval(0).inflate(2*math.pi/3.)
# print(rho, theta, X0)
ctcpolar.contract(X0[0], X0[1], rho, theta)
theta = theta + th2
X = IntervalVector(2, [-100,100])
ctcpolar.contract(X[0], X[1], rho, theta)
# print(rho, theta, X)
return X
flb = Function("x1", "x2", "(x1*cos(%f) - x2*sin(%f), x1*sin(%f) + x2*cos(%f))"% ((th1, )*4 ) )
fub = Function("x1", "x2", "(x1*cos(%f) - x2*sin(%f), x1*sin(%f) + x2*cos(%f))"% ((th2, )*4 ) )
print(flb)
fthin = Function("x1", "x2", "(x1*cos(%s) - x2*sin(%s), x1*sin(%s) + x2*cos(%s))"% ((Interval(th1, th2), )*4 ) )
thinTest = ThinfIn(fthin, IntervalVector([[1,3], [4,6]]))
# test1 = ThickfIn(flb_polar, flb_polar, IntervalVector([[1,3], [4,6]]))
# test2 = ThickfIn(fub_polar, fub_polar, IntervalVector([[1,3], [4,6]]))
# test = ThickOr([test1, test2])
test = ThickRotation(th1, th2, IntervalVector([[1,3], [4,6]]))
P1 = ThickPaving(IntervalVector(2, [-20,20]), test, 0.05)
P1.visit(ToVibes(10000, "test"))
R1 = np.array([[np.cos(th1), np.sin(th1)], [-np.sin(th1), np.cos(th1)]])
R2 = np.array([[np.cos(th2), np.sin(th2)], [-np.sin(th2), np.cos(th2)]])
vibes.selectFigure('test')
for x in np.linspace(1.,3.,10):
for y in np.linspace(4.,6.,10):
v = np.array([x,y])
v1 = R1.dot(v)
v2 = R2.dot(v)
# print(x,y)
vibes.drawCircle(x,y,0.05, '[g]')
vibes.drawCircle(v1[0],v1[1],0.05, '[b]')
vibes.drawCircle(v2[0],v2[1],0.05, '[orange]')
# vibes.drawBox(1,3,4,6, '[k]')
# R1 = np.array([[np.cos(-th1), np.sin(-th1)], [-np.sin(-th1), np.cos(-th1)]])
# R2 = np.array([[np.cos(-th2), np.sin(-th2)], [-np.sin(-th2), np.cos(-th2)]])
# vibes.selectFigure('test')
# for x in np.linspace(5.,6.,10):
# for y in np.linspace(1.,2.,10):
# v = np.array([x,y])
# v1 = R1.dot(v)
# v2 = R2.dot(v)
# # print(x,y)
# vibes.drawCircle(x,y,0.05, '[g]')
# vibes.drawCircle(v1[0],v1[1],0.05, '[b]')
# vibes.drawCircle(v2[0],v2[1],0.05, '[orange]')
| benEnsta/pyIbex | pyibex/thickset/examples/thickTransform/ex2_rotbox.py | Python | lgpl-3.0 | 4,039 | [
"VisIt"
] | b86acd690e7d9727e2865f66ddfaa851247eefc25fdaaf97405e1f7047e9182f |
import datetime
import time
import zlib
import hashlib
import redis
import re
import mongoengine as mongo
import random
import requests
import HTMLParser
from collections import defaultdict
from pprint import pprint
from BeautifulSoup import BeautifulSoup
from mongoengine.queryset import Q
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
from django.core.mail import EmailMultiAlternatives
from apps.reader.models import UserSubscription, RUserStory
from apps.analyzer.models import MClassifierFeed, MClassifierAuthor, MClassifierTag, MClassifierTitle
from apps.analyzer.models import apply_classifier_titles, apply_classifier_feeds, apply_classifier_authors, apply_classifier_tags
from apps.rss_feeds.models import Feed, MStory
from apps.rss_feeds.text_importer import TextImporter
from apps.profile.models import Profile, MSentEmail
from vendor import facebook
from vendor import tweepy
from vendor import appdotnet
from vendor import pynliner
from utils import log as logging
from utils import json_functions as json
from utils.feed_functions import relative_timesince, chunks
from utils.story_functions import truncate_chars, strip_tags, linkify, image_size
from utils.scrubber import SelectiveScriptScrubber
from utils import s3_utils
from StringIO import StringIO
RECOMMENDATIONS_LIMIT = 5
IGNORE_IMAGE_SOURCES = [
"http://feeds.feedburner.com"
]
class MRequestInvite(mongo.Document):
email = mongo.EmailField()
request_date = mongo.DateTimeField(default=datetime.datetime.now)
invite_sent = mongo.BooleanField(default=False)
invite_sent_date = mongo.DateTimeField()
meta = {
'collection': 'social_invites',
'allow_inheritance': False,
}
def __unicode__(self):
return "%s%s" % (self.email, '*' if self.invite_sent else '')
@classmethod
def blast(cls):
invites = cls.objects.filter(email_sent=None)
print ' ---> Found %s invites...' % invites.count()
for invite in invites:
try:
invite.send_email()
except:
print ' ***> Could not send invite to: %s. Deleting.' % invite.username
invite.delete()
def send_email(self):
user = User.objects.filter(username__iexact=self.username)
if not user:
user = User.objects.filter(email__iexact=self.username)
if user:
user = user[0]
email = user.email or self.username
else:
user = {
'username': self.username,
'profile': {
'autologin_url': '/',
}
}
email = self.username
params = {
'user': user,
}
text = render_to_string('mail/email_social_beta.txt', params)
html = render_to_string('mail/email_social_beta.xhtml', params)
subject = "Psst, you're in..."
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['<%s>' % (email)])
msg.attach_alternative(html, "text/html")
msg.send()
self.email_sent = True
self.save()
logging.debug(" ---> ~BB~FM~SBSending email for social beta: %s" % self.username)
class MSocialProfile(mongo.Document):
user_id = mongo.IntField(unique=True)
username = mongo.StringField(max_length=30, unique=True)
email = mongo.StringField()
bio = mongo.StringField(max_length=160)
blurblog_title = mongo.StringField(max_length=256)
custom_bgcolor = mongo.StringField(max_length=50)
custom_css = mongo.StringField()
photo_url = mongo.StringField()
photo_service = mongo.StringField()
location = mongo.StringField(max_length=40)
website = mongo.StringField(max_length=200)
bb_permalink_direct = mongo.BooleanField()
subscription_count = mongo.IntField(default=0)
shared_stories_count = mongo.IntField(default=0)
following_count = mongo.IntField(default=0)
follower_count = mongo.IntField(default=0)
following_user_ids = mongo.ListField(mongo.IntField())
follower_user_ids = mongo.ListField(mongo.IntField())
unfollowed_user_ids = mongo.ListField(mongo.IntField())
requested_follow_user_ids = mongo.ListField(mongo.IntField())
popular_publishers = mongo.StringField()
stories_last_month = mongo.IntField(default=0)
average_stories_per_month = mongo.IntField(default=0)
story_count_history = mongo.ListField()
feed_classifier_counts = mongo.DictField()
favicon_color = mongo.StringField(max_length=6)
protected = mongo.BooleanField()
private = mongo.BooleanField()
meta = {
'collection': 'social_profile',
'indexes': ['user_id', 'following_user_ids', 'follower_user_ids', 'unfollowed_user_ids', 'requested_follow_user_ids'],
'allow_inheritance': False,
'index_drop_dups': True,
}
def __unicode__(self):
return "%s [%s] following %s/%s, shared %s" % (self.username, self.user_id,
self.following_count, self.follower_count, self.shared_stories_count)
@classmethod
def get_user(cls, user_id):
profile, created = cls.objects.get_or_create(user_id=user_id)
if created:
profile.save()
return profile
def save(self, *args, **kwargs):
if not self.username:
self.import_user_fields()
if not self.subscription_count:
self.count_follows(skip_save=True)
if self.bio and len(self.bio) > MSocialProfile.bio.max_length:
self.bio = self.bio[:80]
if self.bio:
self.bio = strip_tags(self.bio)
if self.website:
self.website = strip_tags(self.website)
if self.location:
self.location = strip_tags(self.location)
if self.custom_css:
self.custom_css = strip_tags(self.custom_css)
super(MSocialProfile, self).save(*args, **kwargs)
if self.user_id not in self.following_user_ids:
self.follow_user(self.user_id, force=True)
self.count_follows()
return self
@property
def blurblog_url(self):
return "http://%s.%s" % (
self.username_slug,
Site.objects.get_current().domain.replace('www.', ''))
@property
def blurblog_rss(self):
return "%s%s" % (self.blurblog_url, reverse('shared-stories-rss-feed',
kwargs={'user_id': self.user_id,
'username': self.username_slug}))
def find_stories(self, query, offset=0, limit=25):
stories_db = MSharedStory.objects(
Q(user_id=self.user_id) &
(Q(story_title__icontains=query) |
Q(story_author_name__icontains=query) |
Q(story_tags__icontains=query))
).order_by('-shared_date')[offset:offset+limit]
stories = Feed.format_stories(stories_db)
return stories
def recommended_users(self):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
following_key = "F:%s:F" % (self.user_id)
social_follow_key = "FF:%s:F" % (self.user_id)
profile_user_ids = []
# Find potential twitter/fb friends
services = MSocialServices.objects.get(user_id=self.user_id)
facebook_user_ids = [u.user_id for u in
MSocialServices.objects.filter(facebook_uid__in=services.facebook_friend_ids).only('user_id')]
twitter_user_ids = [u.user_id for u in
MSocialServices.objects.filter(twitter_uid__in=services.twitter_friend_ids).only('user_id')]
social_user_ids = facebook_user_ids + twitter_user_ids
# Find users not currently followed by this user
r.delete(social_follow_key)
nonfriend_user_ids = []
if social_user_ids:
r.sadd(social_follow_key, *social_user_ids)
nonfriend_user_ids = r.sdiff(social_follow_key, following_key)
profile_user_ids = [int(f) for f in nonfriend_user_ids]
r.delete(social_follow_key)
# Not enough? Grab popular users.
if len(nonfriend_user_ids) < RECOMMENDATIONS_LIMIT:
homepage_user = User.objects.get(username='popular')
suggested_users_list = r.sdiff("F:%s:F" % homepage_user.pk, following_key)
suggested_users_list = [int(f) for f in suggested_users_list]
suggested_user_ids = []
slots_left = min(len(suggested_users_list), RECOMMENDATIONS_LIMIT - len(nonfriend_user_ids))
for slot in range(slots_left):
suggested_user_ids.append(random.choice(suggested_users_list))
profile_user_ids.extend(suggested_user_ids)
# Sort by shared story count
profiles = MSocialProfile.profiles(profile_user_ids).order_by('-shared_stories_count')[:RECOMMENDATIONS_LIMIT]
return profiles
@property
def username_slug(self):
return slugify(self.username)
def count_stories(self):
# Popular Publishers
self.save_popular_publishers()
def save_popular_publishers(self, feed_publishers=None):
if not feed_publishers:
publishers = defaultdict(int)
for story in MSharedStory.objects(user_id=self.user_id).only('story_feed_id')[:500]:
publishers[story.story_feed_id] += 1
feed_titles = dict((f.id, f.feed_title)
for f in Feed.objects.filter(pk__in=publishers.keys()).only('id', 'feed_title'))
feed_publishers = sorted([{'id': k, 'feed_title': feed_titles[k], 'story_count': v}
for k, v in publishers.items()
if k in feed_titles],
key=lambda f: f['story_count'],
reverse=True)[:20]
popular_publishers = json.encode(feed_publishers)
if len(popular_publishers) < 1023:
self.popular_publishers = popular_publishers
self.save()
return
if len(popular_publishers) > 1:
self.save_popular_publishers(feed_publishers=feed_publishers[:-1])
@classmethod
def profile(cls, user_id, include_follows=True):
profile = cls.get_user(user_id)
return profile.canonical(include_follows=True)
@classmethod
def profiles(cls, user_ids):
profiles = cls.objects.filter(user_id__in=user_ids)
return profiles
@classmethod
def profile_feeds(cls, user_ids):
profiles = cls.objects.filter(user_id__in=user_ids)
profiles = dict((p.user_id, p.feed()) for p in profiles)
return profiles
@classmethod
def sync_all_redis(cls):
for profile in cls.objects.all():
profile.sync_redis(force=True)
def sync_redis(self, force=False):
self.following_user_ids = list(set(self.following_user_ids))
self.save()
for user_id in self.following_user_ids:
self.follow_user(user_id, force=force)
self.follow_user(self.user_id)
@property
def title(self):
return self.blurblog_title if self.blurblog_title else self.username + "'s blurblog"
def feed(self):
params = self.canonical(compact=True)
params.update({
'feed_title': self.title,
'page_url': reverse('load-social-page', kwargs={'user_id': self.user_id, 'username': self.username_slug}),
'shared_stories_count': self.shared_stories_count,
})
return params
def page(self):
params = self.canonical(include_follows=True)
params.update({
'feed_title': self.title,
'custom_css': self.custom_css,
})
return params
@property
def profile_photo_url(self):
if self.photo_url:
return self.photo_url
return settings.MEDIA_URL + 'img/reader/default_profile_photo.png'
@property
def large_photo_url(self):
photo_url = self.email_photo_url
if 'graph.facebook.com' in photo_url:
return photo_url + '?type=large'
elif 'twimg' in photo_url:
return photo_url.replace('_normal', '')
elif '/avatars/' in photo_url:
return photo_url.replace('thumbnail_', 'large_')
return photo_url
@property
def email_photo_url(self):
if self.photo_url:
if self.photo_url.startswith('//'):
self.photo_url = 'http:' + self.photo_url
return self.photo_url
domain = Site.objects.get_current().domain
return 'http://' + domain + settings.MEDIA_URL + 'img/reader/default_profile_photo.png'
def canonical(self, compact=False, include_follows=False, common_follows_with_user=None,
include_settings=False, include_following_user=None):
domain = Site.objects.get_current().domain
params = {
'id': 'social:%s' % self.user_id,
'user_id': self.user_id,
'username': self.username,
'photo_url': self.email_photo_url,
'large_photo_url': self.large_photo_url,
'location': self.location,
'num_subscribers': self.follower_count,
'feed_title': self.title,
'feed_address': "http://%s%s" % (domain, reverse('shared-stories-rss-feed',
kwargs={'user_id': self.user_id, 'username': self.username_slug})),
'feed_link': self.blurblog_url,
'protected': self.protected,
'private': self.private,
}
if not compact:
params.update({
'large_photo_url': self.large_photo_url,
'bio': self.bio,
'website': self.website,
'shared_stories_count': self.shared_stories_count,
'following_count': self.following_count,
'follower_count': self.follower_count,
'popular_publishers': json.decode(self.popular_publishers),
'stories_last_month': self.stories_last_month,
'average_stories_per_month': self.average_stories_per_month,
})
if include_settings:
params.update({
'custom_css': self.custom_css,
'custom_bgcolor': self.custom_bgcolor,
'bb_permalink_direct': self.bb_permalink_direct,
})
if include_follows:
params.update({
'photo_service': self.photo_service,
'following_user_ids': self.following_user_ids_without_self[:48],
'follower_user_ids': self.follower_user_ids_without_self[:48],
})
if common_follows_with_user:
FOLLOWERS_LIMIT = 128
with_user = MSocialProfile.get_user(common_follows_with_user)
followers_youknow, followers_everybody = with_user.common_follows(self.user_id, direction='followers')
following_youknow, following_everybody = with_user.common_follows(self.user_id, direction='following')
params['followers_youknow'] = followers_youknow[:FOLLOWERS_LIMIT]
params['followers_everybody'] = followers_everybody[:FOLLOWERS_LIMIT]
params['following_youknow'] = following_youknow[:FOLLOWERS_LIMIT]
params['following_everybody'] = following_everybody[:FOLLOWERS_LIMIT]
params['requested_follow'] = common_follows_with_user in self.requested_follow_user_ids
if include_following_user or common_follows_with_user:
if not include_following_user:
include_following_user = common_follows_with_user
if include_following_user != self.user_id:
params['followed_by_you'] = bool(self.is_followed_by_user(include_following_user))
params['following_you'] = self.is_following_user(include_following_user)
return params
@property
def following_user_ids_without_self(self):
if self.user_id in self.following_user_ids:
return [u for u in self.following_user_ids if u != self.user_id]
return self.following_user_ids
@property
def follower_user_ids_without_self(self):
if self.user_id in self.follower_user_ids:
return [u for u in self.follower_user_ids if u != self.user_id]
return self.follower_user_ids
def import_user_fields(self, skip_save=False):
user = User.objects.get(pk=self.user_id)
self.username = user.username
self.email = user.email
def count_follows(self, skip_save=False):
self.subscription_count = UserSubscription.objects.filter(user__pk=self.user_id).count()
self.shared_stories_count = MSharedStory.objects.filter(user_id=self.user_id).count()
self.following_count = len(self.following_user_ids_without_self)
self.follower_count = len(self.follower_user_ids_without_self)
if not skip_save:
self.save()
def follow_user(self, user_id, check_unfollowed=False, force=False):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if check_unfollowed and user_id in self.unfollowed_user_ids:
return
if self.user_id == user_id:
followee = self
else:
followee = MSocialProfile.get_user(user_id)
logging.debug(" ---> ~FB~SB%s~SN (%s) following %s" % (self.username, self.user_id, user_id))
if not followee.protected or force:
if user_id not in self.following_user_ids:
self.following_user_ids.append(user_id)
elif not force:
return
if user_id in self.unfollowed_user_ids:
self.unfollowed_user_ids.remove(user_id)
self.count_follows()
self.save()
if followee.protected and user_id != self.user_id and not force:
if self.user_id not in followee.requested_follow_user_ids:
followee.requested_follow_user_ids.append(self.user_id)
MFollowRequest.add(self.user_id, user_id)
elif self.user_id not in followee.follower_user_ids:
followee.follower_user_ids.append(self.user_id)
followee.count_follows()
followee.save()
if followee.protected and user_id != self.user_id and not force:
from apps.social.tasks import EmailFollowRequest
EmailFollowRequest.apply_async(kwargs=dict(follower_user_id=self.user_id,
followee_user_id=user_id),
countdown=settings.SECONDS_TO_DELAY_CELERY_EMAILS)
return
following_key = "F:%s:F" % (self.user_id)
r.sadd(following_key, user_id)
follower_key = "F:%s:f" % (user_id)
r.sadd(follower_key, self.user_id)
if user_id != self.user_id:
MInteraction.new_follow(follower_user_id=self.user_id, followee_user_id=user_id)
MActivity.new_follow(follower_user_id=self.user_id, followee_user_id=user_id)
socialsub, _ = MSocialSubscription.objects.get_or_create(user_id=self.user_id,
subscription_user_id=user_id)
socialsub.needs_unread_recalc = True
socialsub.save()
MFollowRequest.remove(self.user_id, user_id)
if not force:
from apps.social.tasks import EmailNewFollower
EmailNewFollower.apply_async(kwargs=dict(follower_user_id=self.user_id,
followee_user_id=user_id),
countdown=settings.SECONDS_TO_DELAY_CELERY_EMAILS)
return socialsub
def is_following_user(self, user_id):
# XXX TODO: Outsource to redis
return user_id in self.following_user_ids
def is_followed_by_user(self, user_id):
# XXX TODO: Outsource to redis
return user_id in self.follower_user_ids
def unfollow_user(self, user_id):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
if not isinstance(user_id, int):
user_id = int(user_id)
if user_id == self.user_id:
# Only unfollow other people, not yourself.
return
if user_id in self.following_user_ids:
self.following_user_ids.remove(user_id)
if user_id not in self.unfollowed_user_ids:
self.unfollowed_user_ids.append(user_id)
self.count_follows()
self.save()
followee = MSocialProfile.get_user(user_id)
if self.user_id in followee.follower_user_ids:
followee.follower_user_ids.remove(self.user_id)
followee.count_follows()
followee.save()
if self.user_id in followee.requested_follow_user_ids:
followee.requested_follow_user_ids.remove(self.user_id)
followee.count_follows()
followee.save()
MFollowRequest.remove(self.user_id, user_id)
following_key = "F:%s:F" % (self.user_id)
r.srem(following_key, user_id)
follower_key = "F:%s:f" % (user_id)
r.srem(follower_key, self.user_id)
try:
MSocialSubscription.objects.get(user_id=self.user_id, subscription_user_id=user_id).delete()
except MSocialSubscription.DoesNotExist:
return False
def common_follows(self, user_id, direction='followers'):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
my_followers = "F:%s:%s" % (self.user_id, 'F' if direction == 'followers' else 'F')
their_followers = "F:%s:%s" % (user_id, 'f' if direction == 'followers' else 'F')
follows_inter = r.sinter(their_followers, my_followers)
follows_diff = r.sdiff(their_followers, my_followers)
follows_inter = [int(f) for f in follows_inter]
follows_diff = [int(f) for f in follows_diff]
if user_id in follows_inter:
follows_inter.remove(user_id)
if user_id in follows_diff:
follows_diff.remove(user_id)
return follows_inter, follows_diff
def send_email_for_new_follower(self, follower_user_id):
user = User.objects.get(pk=self.user_id)
if follower_user_id not in self.follower_user_ids:
logging.user(user, "~FMNo longer being followed by %s" % follower_user_id)
return
if not user.email:
logging.user(user, "~FMNo email to send to, skipping.")
return
elif not user.profile.send_emails:
logging.user(user, "~FMDisabled emails, skipping.")
return
if self.user_id == follower_user_id:
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=user.pk,
sending_user_id=follower_user_id,
email_type='new_follower')
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
for email in emails_sent:
if email.date_sent > day_ago:
logging.user(user, "~SK~FMNot sending new follower email, already sent before. NBD.")
return
follower_profile = MSocialProfile.get_user(follower_user_id)
common_followers, _ = self.common_follows(follower_user_id, direction='followers')
common_followings, _ = self.common_follows(follower_user_id, direction='following')
if self.user_id in common_followers:
common_followers.remove(self.user_id)
if self.user_id in common_followings:
common_followings.remove(self.user_id)
common_followers = MSocialProfile.profiles(common_followers)
common_followings = MSocialProfile.profiles(common_followings)
data = {
'user': user,
'follower_profile': follower_profile,
'common_followers': common_followers,
'common_followings': common_followings,
}
text = render_to_string('mail/email_new_follower.txt', data)
html = render_to_string('mail/email_new_follower.xhtml', data)
subject = "%s is now following your Blurblog on NewsBlur!" % follower_profile.username
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user.username, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
MSentEmail.record(receiver_user_id=user.pk, sending_user_id=follower_user_id,
email_type='new_follower')
logging.user(user, "~BB~FM~SBSending email for new follower: %s" % follower_profile.username)
def send_email_for_follow_request(self, follower_user_id):
user = User.objects.get(pk=self.user_id)
if follower_user_id not in self.requested_follow_user_ids:
logging.user(user, "~FMNo longer being followed by %s" % follower_user_id)
return
if not user.email:
logging.user(user, "~FMNo email to send to, skipping.")
return
elif not user.profile.send_emails:
logging.user(user, "~FMDisabled emails, skipping.")
return
if self.user_id == follower_user_id:
return
emails_sent = MSentEmail.objects.filter(receiver_user_id=user.pk,
sending_user_id=follower_user_id,
email_type='follow_request')
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
for email in emails_sent:
if email.date_sent > day_ago:
logging.user(user, "~SK~FMNot sending follow request email, already sent before. NBD.")
return
follower_profile = MSocialProfile.get_user(follower_user_id)
common_followers, _ = self.common_follows(follower_user_id, direction='followers')
common_followings, _ = self.common_follows(follower_user_id, direction='following')
if self.user_id in common_followers:
common_followers.remove(self.user_id)
if self.user_id in common_followings:
common_followings.remove(self.user_id)
common_followers = MSocialProfile.profiles(common_followers)
common_followings = MSocialProfile.profiles(common_followings)
data = {
'user': user,
'follower_profile': follower_profile,
'common_followers': common_followers,
'common_followings': common_followings,
}
text = render_to_string('mail/email_follow_request.txt', data)
html = render_to_string('mail/email_follow_request.xhtml', data)
subject = "%s has requested to follow your Blurblog on NewsBlur" % follower_profile.username
msg = EmailMultiAlternatives(subject, text,
from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
to=['%s <%s>' % (user.username, user.email)])
msg.attach_alternative(html, "text/html")
msg.send()
MSentEmail.record(receiver_user_id=user.pk, sending_user_id=follower_user_id,
email_type='follow_request')
logging.user(user, "~BB~FM~SBSending email for follow request: %s" % follower_profile.username)
def save_feed_story_history_statistics(self):
"""
Fills in missing months between earlier occurances and now.
Save format: [('YYYY-MM, #), ...]
Example output: [(2010-12, 123), (2011-01, 146)]
"""
now = datetime.datetime.utcnow()
min_year = now.year
total = 0
month_count = 0
# Count stories, aggregate by year and month. Map Reduce!
map_f = """
function() {
var date = (this.shared_date.getFullYear()) + "-" + (this.shared_date.getMonth()+1);
emit(date, 1);
}
"""
reduce_f = """
function(key, values) {
var total = 0;
for (var i=0; i < values.length; i++) {
total += values[i];
}
return total;
}
"""
dates = {}
res = MSharedStory.objects(user_id=self.user_id).map_reduce(map_f, reduce_f, output='inline')
for r in res:
dates[r.key] = r.value
year = int(re.findall(r"(\d{4})-\d{1,2}", r.key)[0])
if year < min_year:
min_year = year
# Assemble a list with 0's filled in for missing months,
# trimming left and right 0's.
months = []
start = False
for year in range(min_year, now.year+1):
for month in range(1, 12+1):
if datetime.datetime(year, month, 1) < now:
key = u'%s-%s' % (year, month)
if dates.get(key) or start:
start = True
months.append((key, dates.get(key, 0)))
total += dates.get(key, 0)
month_count += 1
self.story_count_history = months
self.average_stories_per_month = total / max(1, month_count)
self.save()
def save_classifier_counts(self):
def calculate_scores(cls, facet):
map_f = """
function() {
emit(this["%s"], {
pos: this.score>0 ? this.score : 0,
neg: this.score<0 ? Math.abs(this.score) : 0
});
}
""" % (facet)
reduce_f = """
function(key, values) {
var result = {pos: 0, neg: 0};
values.forEach(function(value) {
result.pos += value.pos;
result.neg += value.neg;
});
return result;
}
"""
scores = []
res = cls.objects(social_user_id=self.user_id).map_reduce(map_f, reduce_f, output='inline')
for r in res:
facet_values = dict([(k, int(v)) for k,v in r.value.iteritems()])
facet_values[facet] = r.key
scores.append(facet_values)
scores = sorted(scores, key=lambda v: v['neg'] - v['pos'])
return scores
scores = {}
for cls, facet in [(MClassifierTitle, 'title'),
(MClassifierAuthor, 'author'),
(MClassifierTag, 'tag'),
(MClassifierFeed, 'feed_id')]:
scores[facet] = calculate_scores(cls, facet)
if facet == 'feed_id' and scores[facet]:
scores['feed'] = scores[facet]
del scores['feed_id']
elif not scores[facet]:
del scores[facet]
if scores:
self.feed_classifier_counts = scores
self.save()
class MSocialSubscription(mongo.Document):
    """A user's subscription to another user's shared-stories feed (blurblog).

    Tracks per-subscription read state and cached unread counts, keyed by
    (user_id, subscription_user_id).
    """
    # Computed once at import time; shared as the default read horizon below.
    UNREAD_CUTOFF = datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)

    user_id = mongo.IntField()
    subscription_user_id = mongo.IntField(unique_with='user_id')
    # BUGFIX: pass the callable, not its result. The original used
    # default=datetime.datetime.utcnow(), which evaluated once at import and
    # stamped every later document with the process start time. mongoengine
    # accepts a callable and invokes it per-document.
    follow_date = mongo.DateTimeField(default=datetime.datetime.utcnow)
    last_read_date = mongo.DateTimeField(default=UNREAD_CUTOFF)
    mark_read_date = mongo.DateTimeField(default=UNREAD_CUTOFF)
    unread_count_neutral = mongo.IntField(default=0)
    unread_count_positive = mongo.IntField(default=0)
    unread_count_negative = mongo.IntField(default=0)
    unread_count_updated = mongo.DateTimeField()   # when counts were last recomputed
    oldest_unread_story_date = mongo.DateTimeField()
    needs_unread_recalc = mongo.BooleanField(default=False)  # dirty flag for calculate_feed_scores
    feed_opens = mongo.IntField(default=0)
    is_trained = mongo.BooleanField(default=False)

    meta = {
        'collection': 'social_subscription',
        'indexes': [('user_id', 'subscription_user_id')],
        'allow_inheritance': False,
    }
def __unicode__(self):
    """Render as "Socialsub follower:followee" for debugging/logging."""
    follower = User.objects.get(pk=self.user_id)
    followee = User.objects.get(pk=self.subscription_user_id)
    return "Socialsub %s:%s" % (follower, followee)
@classmethod
def feeds(cls, user_id=None, subscription_user_id=None, calculate_all_scores=False,
          update_counts=False, *args, **kwargs):
    """Return the social feeds a user follows, merged with follower profiles.

    Each entry combines the subscription's unread counts (canonical()) with
    the followee's profile feed info.  Followees with no shared stories are
    omitted.  Optionally recounts scores (all, or only stale ones).
    """
    params = {
        'user_id': user_id,
    }
    if subscription_user_id:
        params["subscription_user_id"] = subscription_user_id
    social_subs = cls.objects.filter(**params)

    social_feeds = []
    if social_subs:
        if calculate_all_scores:
            for s in social_subs: s.calculate_feed_scores()

        # Fetch user profiles of subscriptions
        social_user_ids = [sub.subscription_user_id for sub in social_subs]
        social_profiles = MSocialProfile.profile_feeds(social_user_ids)
        for social_sub in social_subs:
            user_id = social_sub.subscription_user_id
            # Hide followees who have shared nothing.
            if social_profiles[user_id]['shared_stories_count'] <= 0:
                continue
            if update_counts and social_sub.needs_unread_recalc:
                social_sub.calculate_feed_scores()

            # Combine subscription read counts with feed/user info
            feed = dict(social_sub.canonical().items() + social_profiles[user_id].items())
            social_feeds.append(feed)

    return social_feeds
@classmethod
def feeds_with_updated_counts(cls, user, social_feed_ids=None):
    """Return {"social:<id>": unread-count dict} for a user's social subs.

    Stale subscriptions (dirty flag set, or counts older than the user's
    unread cutoff) are recounted before reporting.  When social_feed_ids
    is given ("social:123" strings), restricts to those and attaches each
    followee's shared_stories_count.
    """
    feeds = {}

    # Get social subscriptions for user
    user_subs = cls.objects.filter(user_id=user.pk)
    if social_feed_ids:
        social_user_ids = [int(f.replace('social:', '')) for f in social_feed_ids]
        user_subs = user_subs.filter(subscription_user_id__in=social_user_ids)
        profiles = MSocialProfile.objects.filter(user_id__in=social_user_ids)
        profiles = dict((p.user_id, p) for p in profiles)

    for i, sub in enumerate(user_subs):
        # Count unreads if subscription is stale.
        if (sub.needs_unread_recalc or
            (sub.unread_count_updated and
             sub.unread_count_updated < user.profile.unread_cutoff) or
            (sub.oldest_unread_story_date and
             sub.oldest_unread_story_date < user.profile.unread_cutoff)):
            sub = sub.calculate_feed_scores(force=True, silent=True)

        feed_id = "social:%s" % sub.subscription_user_id
        feeds[feed_id] = {
            'ps': sub.unread_count_positive,
            'nt': sub.unread_count_neutral,
            'ng': sub.unread_count_negative,
            'id': feed_id,
        }
        if social_feed_ids and sub.subscription_user_id in profiles:
            feeds[feed_id]['shared_stories_count'] = profiles[sub.subscription_user_id].shared_stories_count

    return feeds
def canonical(self):
    """Serialize this subscription's counts and flags for API output."""
    return dict(user_id=self.user_id,
                subscription_user_id=self.subscription_user_id,
                nt=self.unread_count_neutral,
                ps=self.unread_count_positive,
                ng=self.unread_count_negative,
                is_trained=self.is_trained,
                feed_opens=self.feed_opens)
@classmethod
def subs_for_users(cls, user_id, subscription_user_ids=None, read_filter="unread"):
    """Return user_id's social subscriptions, optionally limited to a set of
    followees; with read_filter="unread", only subs that have unreads."""
    qs = cls.objects
    if read_filter == "unread":
        # Keep only subscriptions with at least one neutral or positive unread.
        qs = qs.filter(Q(unread_count_neutral__gt=0) |
                       Q(unread_count_positive__gt=0))
    if subscription_user_ids:
        qs = qs.filter(user_id=user_id,
                       subscription_user_id__in=subscription_user_ids)
    else:
        qs = qs.filter(user_id=user_id)
    # Only the fields score calculation needs.
    return qs.only('subscription_user_id', 'mark_read_date', 'is_trained')
@classmethod
def story_hashes(cls, user_id, relative_user_id, subscription_user_ids=None, socialsubs=None,
                 read_filter="unread", order="newest",
                 include_timestamps=False, group_by_user=True, cutoff_date=None):
    """Fetch (un)read story hashes across social subscriptions from redis.

    Returns a dict {subscription_user_id: [hashes]} when group_by_user,
    otherwise a flat list (with (hash, timestamp) pairs if
    include_timestamps).  Redis keys: B:<uid> share set, zB:<uid> sorted by
    share time, RS:* read sets, UB:/zUB: scratch diffs/intersections.
    """
    r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
    pipeline = r.pipeline()
    story_hashes = {} if group_by_user else []

    if not socialsubs:
        socialsubs = cls.subs_for_users(relative_user_id,
                                        subscription_user_ids=subscription_user_ids,
                                        read_filter=read_filter)
    subscription_user_ids = [sub.subscription_user_id for sub in socialsubs]
    if not subscription_user_ids:
        return story_hashes

    read_dates = dict((us.subscription_user_id,
                       int(us.mark_read_date.strftime('%s'))) for us in socialsubs)
    # A day of slack so freshly-shared stories aren't excluded by clock skew.
    current_time = int(time.time() + 60*60*24)
    if not cutoff_date:
        cutoff_date = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
    unread_timestamp = int(time.mktime(cutoff_date.timetuple()))-1000
    feed_counter = 0

    # Batch 20 followees per pipeline round-trip.
    for sub_user_id_group in chunks(subscription_user_ids, 20):
        pipeline = r.pipeline()
        for sub_user_id in sub_user_id_group:
            stories_key = 'B:%s' % (sub_user_id)
            sorted_stories_key = 'zB:%s' % (sub_user_id)
            read_stories_key = 'RS:%s' % (user_id)
            read_social_stories_key = 'RS:%s:B:%s' % (user_id, sub_user_id)
            unread_stories_key = 'UB:%s:%s' % (user_id, sub_user_id)
            sorted_stories_key = 'zB:%s' % (sub_user_id)
            unread_ranked_stories_key = 'zUB:%s:%s' % (user_id, sub_user_id)
            expire_unread_stories_key = False

            max_score = current_time
            if read_filter == 'unread':
                # +1 for the intersection b/w zF and F, which carries an implicit score of 1.
                min_score = read_dates[sub_user_id] + 1
                # Unread = shared minus globally-read minus read-in-this-blurblog.
                pipeline.sdiffstore(unread_stories_key, stories_key, read_stories_key)
                pipeline.sdiffstore(unread_stories_key, unread_stories_key, read_social_stories_key)
                expire_unread_stories_key = True
            else:
                min_score = unread_timestamp
                unread_stories_key = stories_key

            if order == 'oldest':
                byscorefunc = pipeline.zrangebyscore
            else:
                byscorefunc = pipeline.zrevrangebyscore
                # zrevrangebyscore takes (max, min) — swap the bounds.
                min_score, max_score = max_score, min_score

            pipeline.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])
            byscorefunc(unread_ranked_stories_key, min_score, max_score, withscores=include_timestamps)
            # Scratch keys are single-use; delete in the same pipeline.
            pipeline.delete(unread_ranked_stories_key)
            if expire_unread_stories_key:
                pipeline.delete(unread_stories_key)

        results = pipeline.execute()

        # Only the byscorefunc results are lists; skip the sdiffstore/
        # zinterstore/delete return values interleaved in `results`.
        for hashes in results:
            if not isinstance(hashes, list): continue
            if group_by_user:
                story_hashes[subscription_user_ids[feed_counter]] = hashes
                feed_counter += 1
            else:
                story_hashes.extend(hashes)

    return story_hashes
def get_stories(self, offset=0, limit=6, order='newest', read_filter='all',
                withscores=False, hashes_only=False, cutoff_date=None,
                mark_read_complement=False):
    """Page through this subscription's story hashes from redis.

    Returns a list of hashes (or (hash, timestamp) pairs if withscores),
    ordered by share date.  mark_read_complement selects stories *older*
    than the mark-read point (used when marking a feed read).
    """
    r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
    ignore_user_stories = False

    stories_key = 'B:%s' % (self.subscription_user_id)
    read_stories_key = 'RS:%s' % (self.user_id)
    read_social_stories_key = 'RS:%s:B:%s' % (self.user_id, self.subscription_user_id)
    unread_stories_key = 'UB:%s:%s' % (self.user_id, self.subscription_user_id)

    if not r.exists(stories_key):
        # Followee has shared nothing.
        return []
    elif read_filter != 'unread' or not r.exists(read_stories_key):
        # Nothing to subtract; rank the full share set directly.
        ignore_user_stories = True
        unread_stories_key = stories_key
    else:
        # Unread = shared minus globally-read minus read-in-this-blurblog.
        r.sdiffstore(unread_stories_key, stories_key, read_stories_key)
        r.sdiffstore(unread_stories_key, unread_stories_key, read_social_stories_key)

    sorted_stories_key = 'zB:%s' % (self.subscription_user_id)
    unread_ranked_stories_key = 'z%sUB:%s:%s' % ('h' if hashes_only else '',
                                                 self.user_id, self.subscription_user_id)
    r.zinterstore(unread_ranked_stories_key, [sorted_stories_key, unread_stories_key])

    now = datetime.datetime.now()
    # A day of slack so just-shared stories are included.
    current_time = int(time.time() + 60*60*24)
    mark_read_time = int(time.mktime(self.mark_read_date.timetuple())) + 1
    if cutoff_date:
        mark_read_time = int(time.mktime(cutoff_date.timetuple())) + 1

    if order == 'oldest':
        byscorefunc = r.zrangebyscore
        min_score = mark_read_time
        max_score = current_time
    else: # newest
        byscorefunc = r.zrevrangebyscore
        min_score = current_time
        if mark_read_complement:
            # Select the *already old* side: from mark-read down to cutoff.
            min_score = mark_read_time
        now = datetime.datetime.now()
        unread_cutoff = cutoff_date
        if not unread_cutoff:
            unread_cutoff = now - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
        max_score = int(time.mktime(unread_cutoff.timetuple()))-1

    story_ids = byscorefunc(unread_ranked_stories_key, min_score,
                            max_score, start=offset, num=limit,
                            withscores=withscores)

    if withscores:
        # Scores come back as floats; normalize to int epoch seconds.
        story_ids = [(s[0], int(s[1])) for s in story_ids]

    # Keep the ranked scratch key warm for an hour for follow-up pages.
    r.expire(unread_ranked_stories_key, 1*60*60)

    if not ignore_user_stories:
        r.delete(unread_stories_key)

    return story_ids
@classmethod
def feed_stories(cls, user_id, social_user_ids, offset=0, limit=6,
                 order='newest', read_filter='all', relative_user_id=None, cache=True,
                 socialsubs=None, cutoff_date=None):
    """Assemble a merged river of stories across several social feeds.

    Returns (story_hashes, story_dates, unread_story_hashes).  Results are
    staged in temp redis sorted sets (zU:/zhU:) and reused for subsequent
    pages (offset > 0) when cache is on.
    """
    rt = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_TEMP_POOL)

    if not relative_user_id:
        relative_user_id = user_id

    if order == 'oldest':
        range_func = rt.zrange
    else:
        range_func = rt.zrevrange

    if not isinstance(social_user_ids, list):
        social_user_ids = [social_user_ids]

    ranked_stories_keys = 'zU:%s:social' % (user_id)
    unread_ranked_stories_keys = 'zhU:%s:social' % (user_id)
    if (offset and cache and
        rt.exists(ranked_stories_keys) and
        rt.exists(unread_ranked_stories_keys)):
        # Later page of an already-built river: serve straight from cache.
        story_hashes_and_dates = range_func(ranked_stories_keys, offset, limit, withscores=True)
        if not story_hashes_and_dates:
            return [], [], []
        story_hashes, story_dates = zip(*story_hashes_and_dates)
        if read_filter == "unread":
            unread_story_hashes = story_hashes
        else:
            unread_story_hashes = range_func(unread_ranked_stories_keys, 0, offset+limit)
        return story_hashes, story_dates, unread_story_hashes
    else:
        # First page (or cache off): rebuild the staged sets from scratch.
        rt.delete(ranked_stories_keys)
        rt.delete(unread_ranked_stories_keys)

    story_hashes = cls.story_hashes(user_id, relative_user_id,
                                    subscription_user_ids=social_user_ids,
                                    read_filter=read_filter, order=order,
                                    include_timestamps=True,
                                    group_by_user=False,
                                    socialsubs=socialsubs,
                                    cutoff_date=cutoff_date)
    if not story_hashes:
        return [], [], []

    pipeline = rt.pipeline()
    for story_hash_group in chunks(story_hashes, 100):
        # (hash, timestamp) pairs become zadd member/score kwargs.
        pipeline.zadd(ranked_stories_keys, **dict(story_hash_group))
    pipeline.execute()
    story_hashes_and_dates = range_func(ranked_stories_keys, offset, limit, withscores=True)
    if not story_hashes_and_dates:
        return [], [], []
    story_hashes, story_dates = zip(*story_hashes_and_dates)

    if read_filter == "unread":
        # Everything fetched is unread; mirror the ranked set for page reuse.
        unread_feed_story_hashes = story_hashes
        rt.zunionstore(unread_ranked_stories_keys, [ranked_stories_keys])
    else:
        # Separately compute the unread subset for annotation.
        unread_story_hashes = cls.story_hashes(user_id, relative_user_id,
                                               subscription_user_ids=social_user_ids,
                                               read_filter="unread", order=order,
                                               include_timestamps=True,
                                               group_by_user=False,
                                               socialsubs=socialsubs,
                                               cutoff_date=cutoff_date)
        if unread_story_hashes:
            pipeline = rt.pipeline()
            for unread_story_hash_group in chunks(unread_story_hashes, 100):
                pipeline.zadd(unread_ranked_stories_keys, **dict(unread_story_hash_group))
            pipeline.execute()
        unread_feed_story_hashes = range_func(unread_ranked_stories_keys, offset, limit)

    # Cache the staged river for an hour of paging.
    rt.expire(ranked_stories_keys, 60*60)
    rt.expire(unread_ranked_stories_keys, 60*60)

    return story_hashes, story_dates, unread_feed_story_hashes
def mark_story_ids_as_read(self, story_hashes, feed_id=None, mark_all_read=False, request=None):
    """Mark shared stories read, rippling to other social subs and the
    original site subscription.  Returns {'code': 0, 'payload': hashes}."""
    data = dict(code=0, payload=story_hashes)
    r = redis.Redis(connection_pool=settings.REDIS_POOL)

    if not request:
        request = User.objects.get(pk=self.user_id)

    if not self.needs_unread_recalc and not mark_all_read:
        self.needs_unread_recalc = True
        self.save()

    sub_username = MSocialProfile.get_user(self.subscription_user_id).username

    if len(story_hashes) > 1:
        logging.user(request, "~FYRead %s stories in social subscription: %s" % (len(story_hashes), sub_username))
    else:
        logging.user(request, "~FYRead story in social subscription: %s" % (sub_username))

    for story_hash in set(story_hashes):
        # NOTE(review): once feed_id is derived from the first hash below, it
        # stays set for later iterations, so ensure_story_hash is then called
        # with that feed_id for every remaining hash — presumably harmless
        # because full "feed:guid" hashes pass through unchanged; confirm.
        if feed_id is not None:
            story_hash = MStory.ensure_story_hash(story_hash, story_feed_id=feed_id)

        if feed_id is None:
            feed_id, _ = MStory.split_story_hash(story_hash)

        # Find other social feeds with this story to update their counts
        friend_key = "F:%s:F" % (self.user_id)
        share_key = "S:%s" % (story_hash)
        friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
        RUserStory.mark_read(self.user_id, feed_id, story_hash, social_user_ids=friends_with_shares,
                             aggregated=mark_all_read)

        if self.user_id in friends_with_shares:
            friends_with_shares.remove(self.user_id)
        if friends_with_shares:
            # Flag each friend's subscription for an unread recount.
            socialsubs = MSocialSubscription.objects.filter(
                user_id=self.user_id,
                subscription_user_id__in=friends_with_shares)
            for socialsub in socialsubs:
                if not socialsub.needs_unread_recalc and not mark_all_read:
                    socialsub.needs_unread_recalc = True
                    socialsub.save()

        # Also count on original subscription
        usersubs = UserSubscription.objects.filter(user=self.user_id, feed=feed_id)
        if usersubs:
            usersub = usersubs[0]
            if not usersub.needs_unread_recalc:
                usersub.needs_unread_recalc = True
                usersub.save()

    return data
@classmethod
def mark_unsub_story_ids_as_read(cls, user_id, social_user_id, story_ids, feed_id=None,
                                 request=None):
    """Mark stories read from a blurblog the user does NOT subscribe to
    (e.g. the global feed).  Silently skips ids with no matching share."""
    data = dict(code=0, payload=story_ids)
    r = redis.Redis(connection_pool=settings.REDIS_POOL)

    if not request:
        request = User.objects.get(pk=user_id)

    if len(story_ids) > 1:
        logging.user(request, "~FYRead %s social stories from global" % (len(story_ids)))
    else:
        logging.user(request, "~FYRead social story from global")

    for story_id in set(story_ids):
        try:
            story = MSharedStory.objects.get(user_id=social_user_id,
                                             story_guid=story_id)
        except MSharedStory.DoesNotExist:
            # Share disappeared (or never existed); nothing to mark.
            continue

        # Find other social feeds with this story to update their counts
        friend_key = "F:%s:F" % (user_id)
        share_key = "S:%s" % (story.story_hash)
        friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
        RUserStory.mark_read(user_id, story.story_feed_id, story.story_hash,
                             social_user_ids=friends_with_shares)

        # Also count on original subscription
        usersubs = UserSubscription.objects.filter(user=user_id, feed=story.story_feed_id)
        if usersubs:
            usersub = usersubs[0]
            if not usersub.needs_unread_recalc:
                usersub.needs_unread_recalc = True
                usersub.save()
            # XXX TODO: Real-time notification, just for this user

    return data
def mark_feed_read(self, cutoff_date=None):
    """Mark this whole social feed as read up to cutoff_date.

    Without a cutoff, the latest recent share sets the read horizon; if
    there are no recent shares the counts are simply zeroed.
    """
    user_profile = Profile.objects.get(user_id=self.user_id)
    recount = True

    if cutoff_date:
        # +1s so the story *at* the cutoff counts as read.
        cutoff_date = cutoff_date + datetime.timedelta(seconds=1)
    else:
        # Use the latest story to get last read time.
        latest_shared_story = MSharedStory.objects(user_id=self.subscription_user_id,
                                                   shared_date__gte=user_profile.unread_cutoff
                                                   ).order_by('-shared_date').only('shared_date').first()
        if latest_shared_story:
            cutoff_date = latest_shared_story['shared_date'] + datetime.timedelta(seconds=1)
        else:
            # No recent shares at all; nothing needs recounting.
            cutoff_date = datetime.datetime.utcnow()
            recount = False

    self.last_read_date = cutoff_date
    self.mark_read_date = cutoff_date
    self.oldest_unread_story_date = cutoff_date
    if not recount:
        self.unread_count_negative = 0
        self.unread_count_positive = 0
        self.unread_count_neutral = 0
        self.unread_count_updated = datetime.datetime.utcnow()
        self.needs_unread_recalc = False
    else:
        self.needs_unread_recalc = True

        # Manually mark all shared stories as read.
        unread_story_hashes = self.get_stories(read_filter='unread', limit=500, hashes_only=True,
                                               mark_read_complement=True)
        self.mark_story_ids_as_read(unread_story_hashes, mark_all_read=True)

    self.save()
def calculate_feed_scores(self, force=False, silent=False):
    """Recount positive/neutral/negative unreads for this subscription.

    Applies the user's classifiers (feed/author/title/tag, both blurblog-
    and feed-scoped) to each unread shared story, caches the counts on the
    document, and returns self.  No-op unless needs_unread_recalc or force.
    """
    if not self.needs_unread_recalc and not force:
        return self

    now = datetime.datetime.now()
    user_profile = Profile.objects.get(user_id=self.user_id)

    if user_profile.last_seen_on < user_profile.unread_cutoff:
        # User hasn't visited within the unread window; skip the work.
        # if not silent:
        #     logging.info(' ---> [%s] SKIPPING Computing scores: %s (1 week+)' % (self.user, self.feed))
        return self

    feed_scores = dict(negative=0, neutral=0, positive=0)

    # Two weeks in age. If mark_read_date is older, mark old stories as read.
    date_delta = user_profile.unread_cutoff
    if date_delta < self.mark_read_date:
        date_delta = self.mark_read_date
    else:
        self.mark_read_date = date_delta

    unread_story_hashes = self.get_stories(read_filter='unread', limit=500, hashes_only=True,
                                           cutoff_date=user_profile.unread_cutoff)
    stories_db = MSharedStory.objects(user_id=self.subscription_user_id,
                                      story_hash__in=unread_story_hashes)
    # Collect originating feeds so feed-scoped classifiers can be merged in.
    story_feed_ids = set()
    for s in stories_db:
        story_feed_ids.add(s['story_feed_id'])
    story_feed_ids = list(story_feed_ids)

    usersubs = UserSubscription.objects.filter(user__pk=self.user_id, feed__pk__in=story_feed_ids)
    usersubs_map = dict((sub.feed_id, sub) for sub in usersubs)

    oldest_unread_story_date = now
    unread_stories_db = []
    for story in stories_db:
        if story['story_hash'] not in unread_story_hashes:
            continue
        feed_id = story.story_feed_id
        # Skip stories already read via the original site subscription.
        if usersubs_map.get(feed_id) and story.shared_date < usersubs_map[feed_id].mark_read_date:
            continue
        unread_stories_db.append(story)
        if story.shared_date < oldest_unread_story_date:
            oldest_unread_story_date = story.shared_date
    stories = Feed.format_stories(unread_stories_db)

    classifier_feeds = list(MClassifierFeed.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
    classifier_authors = list(MClassifierAuthor.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
    classifier_titles = list(MClassifierTitle.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))
    classifier_tags = list(MClassifierTag.objects(user_id=self.user_id, social_user_id=self.subscription_user_id))

    # Merge with feed specific classifiers
    if story_feed_ids:
        classifier_feeds = classifier_feeds + list(MClassifierFeed.objects(user_id=self.user_id,
                                                                           feed_id__in=story_feed_ids))
        classifier_authors = classifier_authors + list(MClassifierAuthor.objects(user_id=self.user_id,
                                                                                 feed_id__in=story_feed_ids))
        classifier_titles = classifier_titles + list(MClassifierTitle.objects(user_id=self.user_id,
                                                                              feed_id__in=story_feed_ids))
        classifier_tags = classifier_tags + list(MClassifierTag.objects(user_id=self.user_id,
                                                                        feed_id__in=story_feed_ids))

    for story in stories:
        scores = {
            'feed'   : apply_classifier_feeds(classifier_feeds, story['story_feed_id'],
                                              social_user_ids=self.subscription_user_id),
            'author' : apply_classifier_authors(classifier_authors, story),
            'tags'   : apply_classifier_tags(classifier_tags, story),
            'title'  : apply_classifier_titles(classifier_titles, story),
        }
        max_score = max(scores['author'], scores['tags'], scores['title'])
        min_score = min(scores['author'], scores['tags'], scores['title'])

        # Story-level classifiers take precedence over the feed classifier.
        if max_score > 0:
            feed_scores['positive'] += 1
        elif min_score < 0:
            feed_scores['negative'] += 1
        else:
            if scores['feed'] > 0:
                feed_scores['positive'] += 1
            elif scores['feed'] < 0:
                feed_scores['negative'] += 1
            else:
                feed_scores['neutral'] += 1

    self.unread_count_positive = feed_scores['positive']
    self.unread_count_neutral = feed_scores['neutral']
    self.unread_count_negative = feed_scores['negative']
    self.unread_count_updated = datetime.datetime.now()
    self.oldest_unread_story_date = oldest_unread_story_date
    self.needs_unread_recalc = False

    self.save()

    # Nothing left worth reading: advance the read pointer too.
    if (self.unread_count_positive == 0 and
        self.unread_count_neutral == 0):
        self.mark_feed_read()

    if not silent:
        logging.info(' ---> [%s] Computing social scores: %s (%s/%s/%s)' % (user_profile, self.subscription_user_id, feed_scores['negative'], feed_scores['neutral'], feed_scores['positive']))

    return self
@classmethod
def mark_dirty_sharing_story(cls, user_id, story_feed_id, story_guid_hash):
    """Flag for recount each of user_id's social subs whose followee shared
    this story.  Returns the affected subs, or None if nobody shared it."""
    r = redis.Redis(connection_pool=settings.REDIS_POOL)

    friends_key = "F:%s:F" % (user_id)
    share_key = "S:%s:%s" % (story_feed_id, story_guid_hash)
    # Friends of user_id who also shared this story.
    sharing_friend_ids = [int(uid) for uid in r.sinter(friends_key, share_key)]
    if not sharing_friend_ids:
        return None

    dirty_subs = cls.objects.filter(user_id=user_id,
                                    subscription_user_id__in=sharing_friend_ids)
    for dirty_sub in dirty_subs:
        dirty_sub.needs_unread_recalc = True
        dirty_sub.save()

    return dirty_subs
class MCommentReply(mongo.EmbeddedDocument):
    """A reply to a shared-story comment; embedded in MSharedStory.replies."""
    reply_id = mongo.ObjectIdField()              # doubles as the document id (see meta)
    user_id = mongo.IntField()                    # author of the reply
    publish_date = mongo.DateTimeField()
    comments = mongo.StringField()                # reply body (linkified HTML)
    email_sent = mongo.BooleanField(default=False)
    liking_users = mongo.ListField(mongo.IntField())

    def canonical(self):
        """Serialize the reply for API output, with a humanized date."""
        reply = {
            'reply_id': self.reply_id,
            'user_id': self.user_id,
            'publish_date': relative_timesince(self.publish_date),
            'date': self.publish_date,
            'comments': self.comments,
        }
        return reply

    meta = {
        'ordering': ['publish_date'],
        'id_field': 'reply_id',
        'allow_inheritance': False,
    }
class MSharedStory(mongo.Document):
    """A story a user shared to their blurblog, with comments and replies.

    Content fields ending in ``_z`` hold zlib-compressed copies; the plain
    counterparts are emptied on save (see save()).
    """
    user_id = mongo.IntField()                      # sharer
    shared_date = mongo.DateTimeField()
    comments = mongo.StringField()
    has_comments = mongo.BooleanField(default=False)
    has_replies = mongo.BooleanField(default=False)
    replies = mongo.ListField(mongo.EmbeddedDocumentField(MCommentReply))
    source_user_id = mongo.IntField()               # who this was re-shared from
    story_hash = mongo.StringField()                # "feed_id:guid_hash", set on save
    story_feed_id = mongo.IntField()
    story_date = mongo.DateTimeField()
    story_title = mongo.StringField(max_length=1024)
    story_content = mongo.StringField()             # cleared on save once compressed
    story_content_z = mongo.BinaryField()           # zlib-compressed story_content
    story_original_content = mongo.StringField()
    story_original_content_z = mongo.BinaryField()
    original_text_z = mongo.BinaryField()
    story_content_type = mongo.StringField(max_length=255)
    story_author_name = mongo.StringField()
    story_permalink = mongo.StringField()
    story_guid = mongo.StringField(unique_with=('user_id',))
    story_guid_hash = mongo.StringField(max_length=6)   # sha1(guid)[:6]
    image_urls = mongo.ListField(mongo.StringField(max_length=1024))
    story_tags = mongo.ListField(mongo.StringField(max_length=250))
    posted_to_services = mongo.ListField(mongo.StringField(max_length=20))
    mute_email_users = mongo.ListField(mongo.IntField())    # users opted out of reply emails
    liking_users = mongo.ListField(mongo.IntField())
    emailed_reshare = mongo.BooleanField(default=False)
    emailed_replies = mongo.ListField(mongo.ObjectIdField())
    image_count = mongo.IntField()
    image_sizes = mongo.ListField(mongo.DictField())

    meta = {
        'collection': 'shared_stories',
        'indexes': [('user_id', '-shared_date'), ('user_id', 'story_feed_id'),
                    'shared_date', 'story_guid', 'story_feed_id', 'story_hash'],
        'index_drop_dups': True,
        'ordering': ['-shared_date'],
        'allow_inheritance': False,
    }
def __unicode__(self):
    """Render as "username: title (feed_id): comments" for debugging."""
    sharer = User.objects.get(pk=self.user_id)
    separator = ': ' if self.has_comments else ''
    return "%s: %s (%s)%s%s" % (sharer.username,
                                self.decoded_story_title[:20],
                                self.story_feed_id,
                                separator,
                                self.comments[:20])
@property
def guid_hash(self):
    """First six hex characters of the SHA-1 of the story guid."""
    digest = hashlib.sha1(self.story_guid).hexdigest()
    return digest[:6]
@property
def feed_guid_hash(self):
    """Story hash in "feed_id:guid_hash" form; missing feed id becomes "0"."""
    feed_part = self.story_feed_id or "0"
    return "%s:%s" % (feed_part, self.guid_hash)
@property
def decoded_story_title(self):
    """Story title with HTML entities (&amp; etc.) unescaped (Python 2 HTMLParser)."""
    h = HTMLParser.HTMLParser()
    return h.unescape(self.story_title)
def canonical(self):
    """Serialize the share for API output, decompressing stored content."""
    inflated_content = self.story_content_z and zlib.decompress(self.story_content_z)
    return {
        "user_id": self.user_id,
        "shared_date": self.shared_date,
        "story_title": self.story_title,
        "story_content": inflated_content,
        "comments": self.comments,
    }
def save(self, *args, **kwargs):
    """Scrub and compress content, derive hashes, then sync redis/activity.

    Side effects after the mongo write: recounts the author's follow
    stats, mirrors the share into redis, and records an activity item.
    Returns self.
    """
    scrubber = SelectiveScriptScrubber()

    if self.story_content:
        # Sanitize scripts, then store compressed and drop the plain copy.
        self.story_content = scrubber.scrub(self.story_content)
        self.story_content_z = zlib.compress(self.story_content)
        self.story_content = None
    if self.story_original_content:
        self.story_original_content_z = zlib.compress(self.story_original_content)
        self.story_original_content = None

    self.story_guid_hash = hashlib.sha1(self.story_guid).hexdigest()[:6]
    self.story_title = strip_tags(self.story_title)
    self.story_hash = self.feed_guid_hash

    # Comments and replies are stored as linkified, tag-stripped HTML.
    self.comments = linkify(strip_tags(self.comments))
    for reply in self.replies:
        reply.comments = linkify(strip_tags(reply.comments))

    self.shared_date = self.shared_date or datetime.datetime.utcnow()
    self.has_replies = bool(len(self.replies))

    super(MSharedStory, self).save(*args, **kwargs)

    author = MSocialProfile.get_user(self.user_id)
    author.count_follows()

    self.sync_redis()

    MActivity.new_shared_story(user_id=self.user_id, source_user_id=self.source_user_id,
                               story_title=self.story_title,
                               comments=self.comments, story_feed_id=self.story_feed_id,
                               story_id=self.story_guid, share_date=self.shared_date)
    return self
def delete(self, *args, **kwargs):
    """Remove the activity record and redis mirrors before deleting the doc."""
    MActivity.remove_shared_story(user_id=self.user_id, story_feed_id=self.story_feed_id,
                                  story_id=self.story_guid)

    self.remove_from_redis()

    super(MSharedStory, self).delete(*args, **kwargs)
def unshare_story(self):
    """Flag followers' subscriptions for a recount, then delete this share."""
    follower_subs = MSocialSubscription.objects.filter(subscription_user_id=self.user_id,
                                                       needs_unread_recalc=False)
    for follower_sub in follower_subs:
        follower_sub.needs_unread_recalc = True
        follower_sub.save()

    self.delete()
@classmethod
def feed_quota(cls, user_id, feed_id, days=1, quota=1):
    """True if the feed already hit its share quota in the trailing window.

    NOTE(review): user_id is accepted but never used in the filter, so this
    counts shares of the feed by *all* users — confirm that's intended.
    """
    day_ago = datetime.datetime.now()-datetime.timedelta(days=days)
    shared_count = cls.objects.filter(shared_date__gte=day_ago, story_feed_id=feed_id).count()

    return shared_count >= quota
@classmethod
def count_potential_spammers(cls, days=1):
    """Tally recent shares per user per feed and pretty-print the table.

    Returns {user_id: {feed_id: share_count}} for shares within `days`.
    """
    since = datetime.datetime.now() - datetime.timedelta(days=days)
    recent_shares = cls.objects.filter(shared_date__gte=since)

    per_user = defaultdict(lambda: defaultdict(int))
    for share in recent_shares:
        per_user[share.user_id][share.story_feed_id] += 1

    # Collapse the nested defaultdicts into plain dicts for display/return.
    users = dict((uid, dict(feed_counts)) for uid, feed_counts in per_user.items())
    pprint(users)

    return users
@classmethod
def get_shared_stories_from_site(cls, feed_id, user_id, story_url, limit=3):
    """Look up shares of a story URL: the user's own, everyone else's, and
    up to `limit` other recent shares from the same feed.

    Returns (your_story_or_None, same_stories, other_stories); the latter
    two are lists of plain dicts ready for serialization.
    """
    your_story = cls.objects.filter(story_feed_id=feed_id,
                                    story_permalink=story_url,
                                    user_id=user_id).limit(1).first()
    same_stories = cls.objects.filter(story_feed_id=feed_id,
                                      story_permalink=story_url,
                                      user_id__ne=user_id
                                      ).order_by('-shared_date')

    same_stories = [{
        "user_id": story.user_id,
        "comments": story.comments,
        "relative_date": relative_timesince(story.shared_date),
        "blurblog_permalink": story.blurblog_permalink(),
    } for story in same_stories]

    other_stories = []
    if feed_id:
        # Different stories from the same feed, newest first.
        other_stories = cls.objects.filter(story_feed_id=feed_id,
                                           story_permalink__ne=story_url
                                           ).order_by('-shared_date').limit(limit)
        other_stories = [{
            "user_id": story.user_id,
            "story_title": story.story_title,
            "story_permalink": story.story_permalink,
            "comments": story.comments,
            "relative_date": relative_timesince(story.shared_date),
            "blurblog_permalink": story.blurblog_permalink(),
        } for story in other_stories]

    return your_story, same_stories, other_stories
def set_source_user_id(self, source_user_id):
    """Attribute this share to the origin of its re-share chain.

    Walks parent shares' source links to the earliest sharer (cycle-safe),
    saves the attribution if it changed, and records a reshare interaction.
    """
    if source_user_id == self.user_id:
        return

    def find_source(source_user_id, seen_user_ids):
        # Follow source_user_id links upward; seen_user_ids breaks cycles.
        parent_shared_story = MSharedStory.objects.filter(user_id=source_user_id,
                                                          story_guid=self.story_guid,
                                                          story_feed_id=self.story_feed_id).limit(1)
        if parent_shared_story and parent_shared_story[0].source_user_id:
            user_id = parent_shared_story[0].source_user_id
            if user_id in seen_user_ids:
                return source_user_id
            else:
                seen_user_ids.append(user_id)
                return find_source(user_id, seen_user_ids)
        else:
            return source_user_id

    if source_user_id:
        source_user_id = find_source(source_user_id, [])
        if source_user_id == self.user_id:
            # Chain loops back to the sharer themselves; no attribution.
            return
        elif not self.source_user_id or source_user_id != self.source_user_id:
            self.source_user_id = source_user_id
            logging.debug(" ---> Re-share from %s." % source_user_id)
            self.save()

            MInteraction.new_reshared_story(user_id=self.source_user_id,
                                            reshare_user_id=self.user_id,
                                            comments=self.comments,
                                            story_title=self.story_title,
                                            story_feed_id=self.story_feed_id,
                                            story_id=self.story_guid)
def mute_for_user(self, user_id):
    """Stop emailing user_id about replies on this share (idempotent)."""
    if user_id in self.mute_email_users:
        return
    self.mute_email_users.append(user_id)
    self.save()
@classmethod
def switch_feed(cls, original_feed_id, duplicate_feed_id):
    """Repoint shares of a duplicate feed at the canonical feed id."""
    dupe_shares = cls.objects.filter(story_feed_id=duplicate_feed_id)
    logging.info(" ---> %s shared stories" % dupe_shares.count())

    for share in dupe_shares:
        share.story_feed_id = original_feed_id
        share.save()
@classmethod
def collect_popular_stories(cls, cutoff=None, days=None, shared_feed_ids=None):
    """Map-reduce recent shares into stories shared at least `cutoff` times.

    Excludes feeds in shared_feed_ids and stories whose titles have fewer
    than 5 ASCII-ish characters.  Returns ({story_hash: info}, cutoff).
    """
    if not days:
        days = 3
    if not cutoff:
        cutoff = 6
    if not shared_feed_ids:
        shared_feed_ids = []
    # shared_stories_count = sum(json.decode(MStatistics.get('stories_shared')))
    # cutoff = cutoff or max(math.floor(.025 * shared_stories_count), 3)
    today = datetime.datetime.now() - datetime.timedelta(days=days)

    map_f = """
        function() {
            emit(this.story_hash, {
                'story_hash': this.story_hash,
                'feed_id': this.story_feed_id,
                'title': this.story_title,
                'count': 1
            });
        }
    """
    reduce_f = """
        function(key, values) {
            var r = {'story_hash': key, 'count': 0};
            for (var i=0; i < values.length; i++) {
                r.feed_id = values[i].feed_id;
                r.title = values[i].title;
                r.count += values[i].count;
            }
            return r;
        }
    """
    # Drop under-shared stories, excluded feeds, and mostly-non-ASCII titles.
    finalize_f = """
        function(key, value) {
            if (value.count >= %(cutoff)s && [%(shared_feed_ids)s].indexOf(value.feed_id) == -1) {
                var english_title = value.title.replace(/[^\\062-\\177]/g, "");
                if (english_title.length < 5) return;

                return value;
            }
        }
    """ % {'cutoff': cutoff, 'shared_feed_ids': ', '.join(shared_feed_ids)}
    res = cls.objects(shared_date__gte=today).map_reduce(map_f, reduce_f,
                                                         finalize_f=finalize_f,
                                                         output='inline')
    stories = dict([(r.key, r.value) for r in res if r.value])
    return stories, cutoff
@classmethod
def share_popular_stories(cls, cutoff=None, days=None, interactive=True):
    """Re-share trending stories from the "popular" account.

    Copies each qualifying story (see collect_popular_stories) into a share
    by the popular user, optionally confirming each via raw_input, posts new
    shares to Twitter, and dirties followers' counts.  Returns count shared.
    """
    publish_new_stories = False
    popular_profile = MSocialProfile.objects.get(username='popular')
    popular_user = User.objects.get(pk=popular_profile.user_id)
    week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
    # One share per feed per week from the popular account.
    shared_feed_ids = [str(s.story_feed_id)
                       for s in MSharedStory.objects(user_id=popular_profile.user_id,
                                                     shared_date__gte=week_ago).only('story_feed_id')]
    shared_stories_today, cutoff = cls.collect_popular_stories(cutoff=cutoff, days=days,
                                                               shared_feed_ids=shared_feed_ids)
    shared = 0

    for story_hash, story_info in shared_stories_today.items():
        story, _ = MStory.find_story(story_info['feed_id'], story_info['story_hash'])
        if not story:
            logging.user(popular_user, "~FRPopular stories, story not found: %s" % story_info)
            continue
        if story.story_feed_id in shared_feed_ids:
            logging.user(popular_user, "~FRPopular stories, story feed just shared: %s" % story_info)
            continue

        if interactive:
            feed = Feed.get_by_id(story.story_feed_id)
            accept_story = raw_input("%s / %s [Y/n]: " % (story.decoded_story_title, feed.title))
            if accept_story in ['n', 'N']: continue

        # Clone the story document minus user-specific fields.
        story_db = dict([(k, v) for k, v in story._data.items()
                         if k is not None and v is not None])
        story_db.pop('user_id', None)
        story_db.pop('id', None)
        story_db.pop('comments', None)
        story_db.pop('replies', None)
        story_db['has_comments'] = False
        story_db['has_replies'] = False
        story_db['shared_date'] = datetime.datetime.now()
        story_values = {
            'user_id': popular_profile.user_id,
            'story_guid': story_db['story_guid'],
            'defaults': story_db,
        }
        shared_story, created = MSharedStory.objects.get_or_create(**story_values)
        if created:
            shared_story.post_to_service('twitter')
            shared += 1
            shared_feed_ids.append(story.story_feed_id)
            publish_new_stories = True
            logging.user(popular_user, "~FCSharing: ~SB~FM%s (%s shares, %s min)" % (
                story.decoded_story_title[:50],
                story_info['count'],
                cutoff))

    if publish_new_stories:
        # Force followers to recount, then push a real-time update.
        socialsubs = MSocialSubscription.objects.filter(subscription_user_id=popular_user.pk)
        for socialsub in socialsubs:
            socialsub.needs_unread_recalc = True
            socialsub.save()
        shared_story.publish_update_to_subscribers()

    return shared
@staticmethod
def check_shared_story_hashes(user_id, story_hashes, r=None):
    """Filter story_hashes down to those user_id has already shared."""
    if not r:
        r = redis.Redis(connection_pool=settings.REDIS_POOL)
    pipeline = r.pipeline()

    for candidate_hash in story_hashes:
        feed_id, guid_hash = MStory.split_story_hash(candidate_hash)
        pipeline.sismember("S:%s:%s" % (feed_id, guid_hash), user_id)
    membership = pipeline.execute()

    return [h for i, h in enumerate(story_hashes) if membership[i]]
@classmethod
def sync_all_redis(cls, drop=False):
    """Rebuild the redis mirrors (share/comment sets and blurblog hashes)
    for every shared story; with drop=True, wipe the C:/S: keys first."""
    r = redis.Redis(connection_pool=settings.REDIS_POOL)
    h = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
    # h2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
    if drop:
        for key_name in ["C", "S"]:
            keys = r.keys("%s:*" % key_name)
            print " ---> Removing %s keys named %s:*" % (len(keys), key_name)
            for key in keys:
                r.delete(key)
    for story in cls.objects.all():
        story.sync_redis_shares(r=r)
        story.sync_redis_story(r=h)
def sync_redis(self):
self.sync_redis_shares()
self.sync_redis_story()
def sync_redis_shares(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_POOL)
share_key = "S:%s:%s" % (self.story_feed_id, self.guid_hash)
comment_key = "C:%s:%s" % (self.story_feed_id, self.guid_hash)
r.sadd(share_key, self.user_id)
if self.has_comments:
r.sadd(comment_key, self.user_id)
else:
r.srem(comment_key, self.user_id)
    def sync_redis_story(self, r=None):
        """Register this share in the sharing user's redis story-hash keys:
        the plain set 'B:<user>' and the sorted set 'zB:<user>' (scored by
        share time), refreshing both keys' expiry windows."""
        if not r:
            r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
        # if not r2:
        #     r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
        r.sadd('B:%s' % self.user_id, self.feed_guid_hash)
        # r2.sadd('B:%s' % self.user_id, self.feed_guid_hash)
        # NOTE(review): (member, score) positional order is the legacy
        # redis-py zadd signature; modern redis-py expects a mapping dict.
        # Leave as-is unless the redis-py dependency is upgraded.
        r.zadd('zB:%s' % self.user_id, self.feed_guid_hash,
               time.mktime(self.shared_date.timetuple()))
        # r2.zadd('zB:%s' % self.user_id, self.feed_guid_hash,
        #         time.mktime(self.shared_date.timetuple()))
        r.expire('B:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
        # r2.expire('B:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
        r.expire('zB:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
        # r2.expire('zB:%s' % self.user_id, settings.DAYS_OF_STORY_HASHES*24*60*60)
def remove_from_redis(self):
r = redis.Redis(connection_pool=settings.REDIS_POOL)
share_key = "S:%s:%s" % (self.story_feed_id, self.guid_hash)
r.srem(share_key, self.user_id)
comment_key = "C:%s:%s" % (self.story_feed_id, self.guid_hash)
r.srem(comment_key, self.user_id)
h = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# h2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
h.srem('B:%s' % self.user_id, self.feed_guid_hash)
# h2.srem('B:%s' % self.user_id, self.feed_guid_hash)
h.zrem('zB:%s' % self.user_id, self.feed_guid_hash)
# h2.zrem('zB:%s' % self.user_id, self.feed_guid_hash)
def publish_update_to_subscribers(self):
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
feed_id = "social:%s" % self.user_id
listeners_count = r.publish(feed_id, 'story:new')
if listeners_count:
logging.debug(" ---> ~FMPublished to %s subscribers" % (listeners_count))
except redis.ConnectionError:
logging.debug(" ***> ~BMRedis is unavailable for real-time.")
def comments_with_author(self):
comments = {
'id': self.id,
'user_id': self.user_id,
'comments': self.comments,
'shared_date': relative_timesince(self.shared_date),
'date': self.shared_date,
'replies': [reply.canonical() for reply in self.replies],
'liking_users': self.liking_users and list(self.liking_users),
'source_user_id': self.source_user_id,
}
return comments
def comment_with_author_and_profiles(self):
comment = self.comments_with_author()
profile_user_ids = set([comment['user_id']])
reply_user_ids = [reply['user_id'] for reply in comment['replies']]
profile_user_ids = profile_user_ids.union(reply_user_ids)
profile_user_ids = profile_user_ids.union(comment['liking_users'])
if comment['source_user_id']:
profile_user_ids.add(comment['source_user_id'])
profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
profiles = [profile.canonical(compact=True) for profile in profiles]
return comment, profiles
    @classmethod
    def stories_with_comments_and_profiles(cls, stories, user_id, check_all=False):
        """Annotate each story dict with its social state and return
        (stories, profiles).

        For every story, fills in friend/public comment lists, comment and
        share counts split by friendship with *user_id*, and the sharer id
        lists. *profiles* is the compact profile list for every user id
        referenced. When check_all is False, redis is only consulted for
        stories that already carry a nonzero comment_count / share_count.
        """
        r = redis.Redis(connection_pool=settings.REDIS_POOL)
        # Redis set of user_id's friends; used to split commenters/sharers.
        friend_key = "F:%s:F" % (user_id)
        profile_user_ids = set()
        for story in stories:
            story['friend_comments'] = []
            story['public_comments'] = []
            story['reply_count'] = 0
            if check_all or story['comment_count']:
                comment_key = "C:%s:%s" % (story['story_feed_id'], story['guid_hash'])
                story['comment_count'] = r.scard(comment_key)
                friends_with_comments = [int(f) for f in r.sinter(comment_key, friend_key)]
                sharer_user_ids = [int(f) for f in r.smembers(comment_key)]
                shared_stories = []
                if sharer_user_ids:
                    params = {
                        'story_hash': story['story_hash'],
                        'user_id__in': sharer_user_ids,
                    }
                    shared_stories = cls.objects.filter(**params)
                # Bucket each commented share as friend vs public and collect
                # every referenced user id for the profile query below.
                for shared_story in shared_stories:
                    comments = shared_story.comments_with_author()
                    story['reply_count'] += len(comments['replies'])
                    if shared_story.user_id in friends_with_comments:
                        story['friend_comments'].append(comments)
                    else:
                        story['public_comments'].append(comments)
                    if comments.get('source_user_id'):
                        profile_user_ids.add(comments['source_user_id'])
                    if comments.get('liking_users'):
                        profile_user_ids = profile_user_ids.union(comments['liking_users'])
                all_comments = story['friend_comments'] + story['public_comments']
                profile_user_ids = profile_user_ids.union([reply['user_id']
                                                           for c in all_comments
                                                           for reply in c['replies']])
                if story.get('source_user_id'):
                    profile_user_ids.add(story['source_user_id'])
                story['comment_count_friends'] = len(friends_with_comments)
                story['comment_count_public'] = story['comment_count'] - len(friends_with_comments)
            if check_all or story['share_count']:
                share_key = "S:%s:%s" % (story['story_feed_id'], story['guid_hash'])
                story['share_count'] = r.scard(share_key)
                friends_with_shares = [int(f) for f in r.sinter(share_key, friend_key)]
                nonfriend_user_ids = [int(f) for f in r.sdiff(share_key, friend_key)]
                profile_user_ids.update(nonfriend_user_ids)
                profile_user_ids.update(friends_with_shares)
                story['commented_by_public'] = [c['user_id'] for c in story['public_comments']]
                story['commented_by_friends'] = [c['user_id'] for c in story['friend_comments']]
                # "Shared by" excludes anyone who also commented.
                story['shared_by_public'] = list(set(nonfriend_user_ids) -
                                                 set(story['commented_by_public']))
                story['shared_by_friends'] = list(set(friends_with_shares) -
                                                  set(story['commented_by_friends']))
                story['share_count_public'] = story['share_count'] - len(friends_with_shares)
                story['share_count_friends'] = len(friends_with_shares)
                story['friend_user_ids'] = list(set(story['commented_by_friends'] + story['shared_by_friends']))
                story['public_user_ids'] = list(set(story['commented_by_public'] + story['shared_by_public']))
                if not story['share_user_ids']:
                    story['share_user_ids'] = story['friend_user_ids'] + story['public_user_ids']
                if story.get('source_user_id'):
                    profile_user_ids.add(story['source_user_id'])
        profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
        profiles = [profile.canonical(compact=True) for profile in profiles]
        # Toss public comments by private profiles
        profiles_dict = dict((profile['user_id'], profile) for profile in profiles)
        for story in stories:
            commented_by_public = story.get('commented_by_public') or [c['user_id'] for c in story['public_comments']]
            for user_id in commented_by_public:
                if profiles_dict[user_id]['private']:
                    story['public_comments'] = [c for c in story['public_comments'] if c['user_id'] != user_id]
                    story['comment_count_public'] -= 1
        return stories, profiles
@staticmethod
def attach_users_to_stories(stories, profiles):
profiles = dict([(p['user_id'], p) for p in profiles])
for s, story in enumerate(stories):
for u, user_id in enumerate(story['shared_by_friends']):
if user_id not in profiles: continue
stories[s]['shared_by_friends'][u] = profiles[user_id]
for u, user_id in enumerate(story['shared_by_public']):
if user_id not in profiles: continue
stories[s]['shared_by_public'][u] = profiles[user_id]
for comment_set in ['friend_comments', 'public_comments']:
for c, comment in enumerate(story[comment_set]):
if comment['user_id'] not in profiles: continue
stories[s][comment_set][c]['user'] = profiles[comment['user_id']]
if comment['source_user_id'] and comment['source_user_id'] in profiles:
stories[s][comment_set][c]['source_user'] = profiles[comment['source_user_id']]
for r, reply in enumerate(comment['replies']):
if reply['user_id'] not in profiles: continue
stories[s][comment_set][c]['replies'][r]['user'] = profiles[reply['user_id']]
stories[s][comment_set][c]['liking_user_ids'] = list(comment['liking_users'])
for u, user_id in enumerate(comment['liking_users']):
if user_id not in profiles: continue
stories[s][comment_set][c]['liking_users'][u] = profiles[user_id]
return stories
@staticmethod
def attach_users_to_comment(comment, profiles):
profiles = dict([(p['user_id'], p) for p in profiles])
if comment['user_id'] not in profiles: return comment
comment['user'] = profiles[comment['user_id']]
if comment['source_user_id']:
comment['source_user'] = profiles[comment['source_user_id']]
for r, reply in enumerate(comment['replies']):
if reply['user_id'] not in profiles: continue
comment['replies'][r]['user'] = profiles[reply['user_id']]
comment['liking_user_ids'] = list(comment['liking_users'])
for u, user_id in enumerate(comment['liking_users']):
if user_id not in profiles: continue
comment['liking_users'][u] = profiles[user_id]
return comment
def add_liking_user(self, user_id):
if user_id not in self.liking_users:
self.liking_users.append(user_id)
self.save()
def remove_liking_user(self, user_id):
if user_id in self.liking_users:
self.liking_users.remove(user_id)
self.save()
def blurblog_permalink(self):
profile = MSocialProfile.get_user(self.user_id)
return "%s/story/%s/%s" % (
profile.blurblog_url,
slugify(self.story_title)[:20],
self.guid_hash[:6]
)
def generate_post_to_service_message(self, truncate=None, include_url=True):
message = strip_tags(self.comments)
if not message or len(message) < 1:
message = self.decoded_story_title
if include_url and truncate:
message = truncate_chars(message, truncate - 18 - 30)
feed = Feed.get_by_id(self.story_feed_id)
if feed:
if truncate:
message += " (%s)" % truncate_chars(feed.feed_title, 18)
else:
message += " (%s)" % truncate_chars(feed.feed_title, 30)
if include_url:
message += " " + self.blurblog_permalink()
elif include_url:
if truncate:
message = truncate_chars(message, truncate - 14)
message += " " + self.blurblog_permalink()
return message
def post_to_service(self, service):
user = User.objects.get(pk=self.user_id)
if service in self.posted_to_services:
logging.user(user, "~BM~FRAlready posted to %s." % (service))
return
posted = False
social_service = MSocialServices.objects.get(user_id=self.user_id)
message = self.generate_post_to_service_message()
logging.user(user, "~BM~FGPosting to %s: ~SB%s" % (service, message))
if service == 'twitter':
posted = social_service.post_to_twitter(self)
elif service == 'facebook':
posted = social_service.post_to_facebook(self)
elif service == 'appdotnet':
posted = social_service.post_to_appdotnet(self)
if posted:
self.posted_to_services.append(service)
self.save()
def notify_user_ids(self, include_parent=True):
user_ids = set()
for reply in self.replies:
if reply.user_id not in self.mute_email_users:
user_ids.add(reply.user_id)
if include_parent and self.user_id not in self.mute_email_users:
user_ids.add(self.user_id)
return list(user_ids)
def reply_for_id(self, reply_id):
for reply in self.replies:
if reply.reply_id == reply_id:
return reply
    def send_emails_for_new_reply(self, reply_id):
        """Email everyone on this comment thread about the reply *reply_id*.

        Recipients are the share's notify list minus muted users and the
        reply's own author. Each reply is emailed at most once (tracked in
        emailed_replies). Recipients without an email address or with
        emails disabled are skipped.
        """
        if reply_id in self.emailed_replies:
            logging.debug(" ***> Already sent reply email: %s on %s" % (reply_id, self))
            return
        reply = self.reply_for_id(reply_id)
        if not reply:
            logging.debug(" ***> Reply doesn't exist: %s on %s" % (reply_id, self))
            return
        notify_user_ids = self.notify_user_ids()
        # Never notify the reply's own author about their own reply.
        if reply.user_id in notify_user_ids:
            notify_user_ids.remove(reply.user_id)
        reply_user = User.objects.get(pk=reply.user_id)
        reply_user_profile = MSocialProfile.get_user(reply.user_id)
        sent_emails = 0
        story_feed = Feed.get_by_id(self.story_feed_id)
        # Build the comment payload with compact profiles attached, as the
        # email templates expect.
        comment = self.comments_with_author()
        profile_user_ids = set([comment['user_id']])
        reply_user_ids = list(r['user_id'] for r in comment['replies'])
        profile_user_ids = profile_user_ids.union(reply_user_ids)
        if self.source_user_id:
            profile_user_ids.add(self.source_user_id)
        profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
        profiles = [profile.canonical(compact=True) for profile in profiles]
        comment = MSharedStory.attach_users_to_comment(comment, profiles)
        for user_id in notify_user_ids:
            user = User.objects.get(pk=user_id)
            if not user.email or not user.profile.send_emails:
                if not user.email:
                    logging.user(user, "~FMNo email to send to, skipping.")
                elif not user.profile.send_emails:
                    logging.user(user, "~FMDisabled emails, skipping.")
                continue
            # Per-recipient unsubscribe link, keyed by their secret token.
            mute_url = "http://%s%s" % (
                Site.objects.get_current().domain,
                reverse('social-mute-story', kwargs={
                    'secret_token': user.profile.secret_token,
                    'shared_story_id': self.id,
                })
            )
            data = {
                'reply_user_profile': reply_user_profile,
                'comment': comment,
                'shared_story': self,
                'story_feed': story_feed,
                'mute_url': mute_url,
            }
            story_title = self.decoded_story_title.replace('\n', ' ')
            text = render_to_string('mail/email_reply.txt', data)
            html = pynliner.fromString(render_to_string('mail/email_reply.xhtml', data))
            subject = "%s replied to you on \"%s\" on NewsBlur" % (reply_user.username, story_title)
            msg = EmailMultiAlternatives(subject, text,
                                         from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
                                         to=['%s <%s>' % (user.username, user.email)])
            msg.attach_alternative(html, "text/html")
            msg.send()
            sent_emails += 1
        logging.user(reply_user, "~BB~FM~SBSending %s/%s email%s for new reply: %s" % (
            sent_emails, len(notify_user_ids),
            '' if len(notify_user_ids) == 1 else 's',
            self.decoded_story_title[:30]))
        # Record the reply as emailed so retries don't double-send.
        self.emailed_replies.append(reply.reply_id)
        self.save()
    def send_email_for_reshare(self):
        """Email the original sharer that this share re-shared their story.

        Sent at most once (tracked by emailed_reshare); skipped when the
        original sharer has no email address or has emails disabled.
        """
        if self.emailed_reshare:
            logging.debug(" ***> Already sent reply email: %s" % self)
            return
        reshare_user = User.objects.get(pk=self.user_id)
        reshare_user_profile = MSocialProfile.get_user(self.user_id)
        original_user = User.objects.get(pk=self.source_user_id)
        original_shared_story = MSharedStory.objects.get(user_id=self.source_user_id,
                                                         story_guid=self.story_guid)
        if not original_user.email or not original_user.profile.send_emails:
            if not original_user.email:
                logging.user(original_user, "~FMNo email to send to, skipping.")
            elif not original_user.profile.send_emails:
                logging.user(original_user, "~FMDisabled emails, skipping.")
            return
        story_feed = Feed.get_by_id(self.story_feed_id)
        # Build the comment payload with compact profiles attached, as the
        # email template expects.
        comment = self.comments_with_author()
        profile_user_ids = set([comment['user_id']])
        reply_user_ids = [reply['user_id'] for reply in comment['replies']]
        profile_user_ids = profile_user_ids.union(reply_user_ids)
        if self.source_user_id:
            profile_user_ids.add(self.source_user_id)
        profiles = MSocialProfile.objects.filter(user_id__in=list(profile_user_ids))
        profiles = [profile.canonical(compact=True) for profile in profiles]
        comment = MSharedStory.attach_users_to_comment(comment, profiles)
        # Unsubscribe link keyed to the original sharer's secret token.
        mute_url = "http://%s%s" % (
            Site.objects.get_current().domain,
            reverse('social-mute-story', kwargs={
                'secret_token': original_user.profile.secret_token,
                'shared_story_id': original_shared_story.id,
            })
        )
        data = {
            'comment': comment,
            'shared_story': self,
            'reshare_user_profile': reshare_user_profile,
            'original_shared_story': original_shared_story,
            'story_feed': story_feed,
            'mute_url': mute_url,
        }
        story_title = self.decoded_story_title.replace('\n', ' ')
        text = render_to_string('mail/email_reshare.txt', data)
        html = pynliner.fromString(render_to_string('mail/email_reshare.xhtml', data))
        subject = "%s re-shared \"%s\" from you on NewsBlur" % (reshare_user.username, story_title)
        msg = EmailMultiAlternatives(subject, text,
                                     from_email='NewsBlur <%s>' % settings.HELLO_EMAIL,
                                     to=['%s <%s>' % (original_user.username, original_user.email)])
        msg.attach_alternative(html, "text/html")
        msg.send()
        # Mark as emailed before logging so a logging failure can't re-send.
        self.emailed_reshare = True
        self.save()
        logging.user(reshare_user, "~BB~FM~SBSending %s email for story re-share: %s" % (
            original_user.username,
            self.decoded_story_title[:30]))
    def calculate_image_sizes(self, force=False):
        """Measure the dimensions of up to 10 images referenced by this
        story's content, cache them on the document largest-area first, and
        return the list. A cached result is returned unless *force*."""
        if not self.story_content_z:
            return
        if not force and self.image_count:
            return self.image_sizes
        headers = {
            'User-Agent': 'NewsBlur Image Fetcher - %s '
                          '(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_1) '
                          'AppleWebKit/534.48.3 (KHTML, like Gecko) Version/5.1 '
                          'Safari/534.48.3)' % (
                settings.NEWSBLUR_URL
            ),
        }
        soup = BeautifulSoup(zlib.decompress(self.story_content_z))
        image_sources = [img.get('src') for img in soup.findAll('img')]
        image_sizes = []
        for image_source in image_sources[:10]:
            if any(ignore in image_source for ignore in IGNORE_IMAGE_SOURCES):
                continue
            req = requests.get(image_source, headers=headers, stream=True)
            # NOTE(review): only the first 30 bytes are handed to image_size,
            # presumably enough for a header-only dimension parse -- but
            # .content on a stream=True response still downloads the whole
            # body; confirm against the image_size helper.
            datastream = StringIO(req.content[:30])
            _, width, height = image_size(datastream)
            # Skip tracking pixels and tiny icons.
            if width <= 16 or height <= 16:
                continue
            image_sizes.append({'src': image_source, 'size': (width, height)})
        if image_sizes:
            image_sizes = sorted(image_sizes, key=lambda i: i['size'][0] * i['size'][1],
                                 reverse=True)
            self.image_sizes = image_sizes
            self.image_count = len(image_sizes)
            self.save()
        logging.debug(" ---> ~SN~FGFetched image sizes on shared story: ~SB%s images" % self.image_count)
        return image_sizes
def fetch_original_text(self, force=False, request=None):
original_text_z = self.original_text_z
feed = Feed.get_by_id(self.story_feed_id)
if not original_text_z or force:
ti = TextImporter(self, feed, request=request)
original_text = ti.fetch()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
class MSocialServices(mongo.Document):
    """Per-user connections to external services (Twitter, Facebook,
    App.net): OAuth credentials, cached friend-id lists, avatar URLs, and
    sync-in-progress flags. Also tracks uploaded/gravatar photo sources."""
    user_id               = mongo.IntField()
    autofollow            = mongo.BooleanField(default=True)
    twitter_uid           = mongo.StringField()
    twitter_access_key    = mongo.StringField()
    twitter_access_secret = mongo.StringField()
    twitter_friend_ids    = mongo.ListField(mongo.StringField())
    twitter_picture_url   = mongo.StringField()
    twitter_username      = mongo.StringField()
    twitter_refresh_date  = mongo.DateTimeField()
    facebook_uid          = mongo.StringField()
    facebook_access_token = mongo.StringField()
    facebook_friend_ids   = mongo.ListField(mongo.StringField())
    facebook_picture_url  = mongo.StringField()
    facebook_refresh_date = mongo.DateTimeField()
    appdotnet_uid         = mongo.StringField()
    appdotnet_access_token= mongo.StringField()
    appdotnet_friend_ids  = mongo.ListField(mongo.StringField())
    appdotnet_picture_url = mongo.StringField()
    appdotnet_refresh_date= mongo.DateTimeField()
    upload_picture_url    = mongo.StringField()
    syncing_twitter       = mongo.BooleanField(default=False)
    syncing_facebook      = mongo.BooleanField(default=False)
    syncing_appdotnet     = mongo.BooleanField(default=False)

    meta = {
        'collection': 'social_services',
        'indexes': ['user_id', 'twitter_friend_ids', 'facebook_friend_ids', 'twitter_uid', 'facebook_uid', 'appdotnet_uid'],
        'allow_inheritance': False,
    }

    def __unicode__(self):
        """Debug string: username plus the connected services' uids."""
        user = User.objects.get(pk=self.user_id)
        return "%s (Twitter: %s, FB: %s, ADN: %s)" % (user.username, self.twitter_uid, self.facebook_uid, self.appdotnet_uid)

    def canonical(self):
        """Serialize connection state for the API: per-service uid, photo
        URL and syncing flag, plus gravatar and uploaded-photo URLs."""
        user = User.objects.get(pk=self.user_id)
        return {
            'twitter': {
                'twitter_username': self.twitter_username,
                'twitter_picture_url': self.twitter_picture_url,
                'twitter_uid': self.twitter_uid,
                'syncing': self.syncing_twitter,
            },
            'facebook': {
                'facebook_uid': self.facebook_uid,
                'facebook_picture_url': self.facebook_picture_url,
                'syncing': self.syncing_facebook,
            },
            'appdotnet': {
                'appdotnet_uid': self.appdotnet_uid,
                'appdotnet_picture_url': self.appdotnet_picture_url,
                'syncing': self.syncing_appdotnet,
            },
            'gravatar': {
                'gravatar_picture_url': "https://www.gravatar.com/avatar/" + \
                                        hashlib.md5(user.email.lower()).hexdigest()
            },
            'upload': {
                'upload_picture_url': self.upload_picture_url
            }
        }

    @classmethod
    def get_user(cls, user_id):
        """Fetch-or-create the services doc for user_id, deduplicating any
        accidental duplicates (keeps the first, deletes the rest)."""
        try:
            profile, created = cls.objects.get_or_create(user_id=user_id)
        except cls.MultipleObjectsReturned:
            dupes = cls.objects.filter(user_id=user_id)
            logging.debug(" ---> ~FRDeleting dupe social services. %s found." % dupes.count())
            for dupe in dupes[1:]:
                dupe.delete()
            profile = dupes[0]
            created = False
        if created:
            profile.save()
        return profile

    @classmethod
    def profile(cls, user_id):
        """Return the serialized (canonical) services dict for user_id."""
        profile = cls.get_user(user_id=user_id)
        return profile.canonical()

    def save_uploaded_photo(self, photo):
        """Store an uploaded avatar file in S3 and record its thumbnail URL.
        Returns the URL on success, or a falsy value on failure."""
        photo_body = photo.read()
        filename = photo.name
        s3 = s3_utils.S3Store()
        image_name = s3.save_profile_picture(self.user_id, filename, photo_body)
        if image_name:
            self.upload_picture_url = "https://s3.amazonaws.com/%s/avatars/%s/thumbnail_%s" % (
                settings.S3_AVATARS_BUCKET_NAME,
                self.user_id,
                image_name,
            )
            self.save()
        return image_name and self.upload_picture_url

    def twitter_api(self):
        """Build an authenticated tweepy API client for this user."""
        twitter_consumer_key = settings.TWITTER_CONSUMER_KEY
        twitter_consumer_secret = settings.TWITTER_CONSUMER_SECRET
        auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
        auth.set_access_token(self.twitter_access_key, self.twitter_access_secret)
        api = tweepy.API(auth)
        return api

    def facebook_api(self):
        """Build an authenticated Facebook Graph API client."""
        graph = facebook.GraphAPI(self.facebook_access_token)
        return graph

    def appdotnet_api(self):
        """Build an authenticated App.net API client."""
        adn_api = appdotnet.Appdotnet(access_token=self.appdotnet_access_token)
        return adn_api

    def sync_twitter_friends(self):
        """Import the user's Twitter profile and friend list, update their
        social profile from it, and auto-follow friends on NewsBlur."""
        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMTwitter import starting...")

        api = self.twitter_api()
        if not api:
            logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: no api access.")
            self.syncing_twitter = False
            self.save()
            return

        twitter_user = api.me()
        self.twitter_picture_url = twitter_user.profile_image_url_https
        self.twitter_username = twitter_user.screen_name
        # NOTE(review): the declared field is twitter_refresh_date; this sets
        # an undeclared twitter_refreshed_date attribute, so the refresh
        # timestamp is never persisted -- confirm and fix.
        self.twitter_refreshed_date = datetime.datetime.utcnow()
        self.syncing_twitter = False
        self.save()

        profile = MSocialProfile.get_user(self.user_id)
        # Only fill profile fields the user hasn't already set.
        profile.location = profile.location or twitter_user.location
        profile.bio = profile.bio or twitter_user.description
        profile.website = profile.website or twitter_user.url
        profile.save()
        profile.count_follows()

        if not profile.photo_url or not profile.photo_service:
            self.set_photo('twitter')

        try:
            friend_ids = list(unicode(friend.id) for friend in tweepy.Cursor(api.friends).items())
        except tweepy.TweepError, e:
            logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: %s" % e)
            return
        if not friend_ids:
            logging.user(user, "~BG~FMTwitter import ~SBfailed~SN: no friend_ids.")
        self.twitter_friend_ids = friend_ids
        self.save()

        following = self.follow_twitter_friends()

        if not following:
            logging.user(user, "~BG~FMTwitter import finished.")

    def follow_twitter_friends(self):
        """Auto-follow the user's Twitter friends who are already on
        NewsBlur. Returns the list of followed user ids."""
        social_profile = MSocialProfile.get_user(self.user_id)
        following = []
        followers = 0

        if not self.autofollow:
            return following

        # Follow any friends already on NewsBlur
        user_social_services = MSocialServices.objects.filter(twitter_uid__in=self.twitter_friend_ids)
        for user_social_service in user_social_services:
            followee_user_id = user_social_service.user_id
            socialsub = social_profile.follow_user(followee_user_id)
            if socialsub:
                following.append(followee_user_id)

        # Friends already on NewsBlur should follow back
        # NOTE(review): the follow-back block is disabled, so `followers`
        # below is always 0 in the log line.
        # following_users = MSocialServices.objects.filter(twitter_friend_ids__contains=self.twitter_uid)
        # for following_user in following_users:
        #     if following_user.autofollow:
        #         following_user_profile = MSocialProfile.get_user(following_user.user_id)
        #         following_user_profile.follow_user(self.user_id, check_unfollowed=True)
        #         followers += 1

        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMTwitter import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.twitter_friend_ids), len(following), followers))

        return following

    def sync_facebook_friends(self):
        """Import the user's Facebook friends and profile details, then
        auto-follow friends already on NewsBlur."""
        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMFacebook import starting...")

        graph = self.facebook_api()
        if not graph:
            logging.user(user, "~BG~FMFacebook import ~SBfailed~SN: no api access.")
            self.syncing_facebook = False
            self.save()
            return

        friends = graph.get_connections("me", "friends")
        if not friends:
            logging.user(user, "~BG~FMFacebook import ~SBfailed~SN: no friend_ids.")
            self.syncing_facebook = False
            self.save()
            return

        facebook_friend_ids = [unicode(friend["id"]) for friend in friends["data"]]
        self.facebook_friend_ids = facebook_friend_ids
        self.facebook_refresh_date = datetime.datetime.utcnow()
        self.facebook_picture_url = "//graph.facebook.com/%s/picture" % self.facebook_uid
        self.syncing_facebook = False
        self.save()

        facebook_user = graph.request('me', args={'fields':'website,bio,location'})
        profile = MSocialProfile.get_user(self.user_id)
        # Only fill profile fields the user hasn't already set.
        profile.location = profile.location or (facebook_user.get('location') and facebook_user['location']['name'])
        profile.bio = profile.bio or facebook_user.get('bio')
        if not profile.website and facebook_user.get('website'):
            profile.website = facebook_user.get('website').split()[0]
        profile.save()
        profile.count_follows()
        if not profile.photo_url or not profile.photo_service:
            self.set_photo('facebook')

        self.follow_facebook_friends()

    def follow_facebook_friends(self):
        """Auto-follow the user's Facebook friends who are already on
        NewsBlur. Returns the list of followed user ids."""
        social_profile = MSocialProfile.get_user(self.user_id)
        following = []
        followers = 0

        if not self.autofollow:
            return following

        # Follow any friends already on NewsBlur
        user_social_services = MSocialServices.objects.filter(facebook_uid__in=self.facebook_friend_ids)
        for user_social_service in user_social_services:
            followee_user_id = user_social_service.user_id
            socialsub = social_profile.follow_user(followee_user_id)
            if socialsub:
                following.append(followee_user_id)

        # Friends already on NewsBlur should follow back
        # NOTE(review): disabled, so `followers` is always 0 in the log line.
        # following_users = MSocialServices.objects.filter(facebook_friend_ids__contains=self.facebook_uid)
        # for following_user in following_users:
        #     if following_user.autofollow:
        #         following_user_profile = MSocialProfile.get_user(following_user.user_id)
        #         following_user_profile.follow_user(self.user_id, check_unfollowed=True)
        #         followers += 1

        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMFacebook import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.facebook_friend_ids), len(following), followers))

        return following

    def sync_appdotnet_friends(self):
        """Import the user's App.net profile and (paginated) friend list,
        then auto-follow friends already on NewsBlur."""
        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMApp.net import starting...")

        api = self.appdotnet_api()
        if not api:
            logging.user(user, "~BG~FMApp.net import ~SBfailed~SN: no api access.")
            self.syncing_appdotnet = False
            self.save()
            return

        # Page through the following list using the cursor ids in meta.
        friend_ids = []
        has_more_friends = True
        before_id = None
        since_id = None
        while has_more_friends:
            friends_resp = api.getUserFollowingIds(self.appdotnet_uid,
                                                   before_id=before_id,
                                                   since_id=since_id)
            friends = json.decode(friends_resp)
            before_id = friends['meta'].get('min_id')
            since_id = friends['meta'].get('max_id')
            has_more_friends = friends['meta'].get('more')
            friend_ids.extend([fid for fid in friends['data']])

        if not friend_ids:
            logging.user(user, "~BG~FMApp.net import ~SBfailed~SN: no friend_ids.")
            self.syncing_appdotnet = False
            self.save()
            return

        adn_user = json.decode(api.getUser(self.appdotnet_uid))['data']
        self.appdotnet_picture_url = adn_user['avatar_image']['url']
        # NOTE(review): no appdotnet_username field is declared on this
        # document, so this value is never persisted -- confirm and fix.
        self.appdotnet_username = adn_user['username']
        self.appdotnet_friend_ids = friend_ids
        # NOTE(review): the declared field is appdotnet_refresh_date; this
        # sets an undeclared attribute and is never persisted.
        self.appdotnet_refreshed_date = datetime.datetime.utcnow()
        self.syncing_appdotnet = False
        self.save()

        profile = MSocialProfile.get_user(self.user_id)
        profile.bio = profile.bio or adn_user['description']['text']
        profile.save()
        profile.count_follows()

        if not profile.photo_url or not profile.photo_service:
            self.set_photo('appdotnet')

        self.follow_appdotnet_friends()

    def follow_appdotnet_friends(self):
        """Auto-follow the user's App.net friends who are already on
        NewsBlur. Returns the list of followed user ids."""
        social_profile = MSocialProfile.get_user(self.user_id)
        following = []
        followers = 0

        if not self.autofollow:
            return following

        # Follow any friends already on NewsBlur
        user_social_services = MSocialServices.objects.filter(appdotnet_uid__in=self.appdotnet_friend_ids)
        for user_social_service in user_social_services:
            followee_user_id = user_social_service.user_id
            socialsub = social_profile.follow_user(followee_user_id)
            if socialsub:
                following.append(followee_user_id)

        # Friends already on NewsBlur should follow back
        # NOTE(review): disabled, so `followers` is always 0 in the log line.
        # following_users = MSocialServices.objects.filter(appdotnet_friend_ids__contains=self.appdotnet_uid)
        # for following_user in following_users:
        #     if following_user.autofollow:
        #         following_user_profile = MSocialProfile.get_user(following_user.user_id)
        #         following_user_profile.follow_user(self.user_id, check_unfollowed=True)
        #         followers += 1

        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~BG~FMApp.net import: %s users, now following ~SB%s~SN with ~SB%s~SN follower-backs" % (len(self.appdotnet_friend_ids), len(following), followers))

        return following

    def disconnect_twitter(self):
        """Forget the Twitter uid (tokens and friend ids are kept)."""
        self.twitter_uid = None
        self.save()

    def disconnect_facebook(self):
        """Forget the Facebook uid (tokens and friend ids are kept)."""
        self.facebook_uid = None
        self.save()

    def disconnect_appdotnet(self):
        """Forget the App.net uid (tokens and friend ids are kept)."""
        self.appdotnet_uid = None
        self.save()

    def set_photo(self, service):
        """Point the user's profile photo at *service* ('twitter',
        'facebook', 'upload', 'gravatar', or 'nothing' to clear it) and
        return the updated profile."""
        profile = MSocialProfile.get_user(self.user_id)
        if service == 'nothing':
            service = None

        profile.photo_service = service
        if not service:
            profile.photo_url = None
        elif service == 'twitter':
            profile.photo_url = self.twitter_picture_url
        elif service == 'facebook':
            profile.photo_url = self.facebook_picture_url
        elif service == 'upload':
            profile.photo_url = self.upload_picture_url
        elif service == 'gravatar':
            user = User.objects.get(pk=self.user_id)
            profile.photo_url = "https://www.gravatar.com/avatar/" + \
                                hashlib.md5(user.email).hexdigest()
        profile.save()
        return profile

    @classmethod
    def sync_all_twitter_photos(cls, days=14):
        """Refresh the Twitter avatar for every user who shared a story in
        the last *days* days and whose photo source is Twitter."""
        week_ago = datetime.datetime.now() - datetime.timedelta(days=days)
        shares = MSharedStory.objects.filter(shared_date__gte=week_ago)
        sharers = sorted(set([s.user_id for s in shares]))
        print " ---> %s sharing user_ids" % len(sorted(sharers))

        for user_id in sharers:
            profile = MSocialProfile.objects.get(user_id=user_id)
            if not profile.photo_service == 'twitter': continue
            ss = MSocialServices.objects.get(user_id=user_id)
            try:
                ss.sync_twitter_photo()
                print " ---> Syncing %s" % user_id
            except Exception, e:
                print " ***> Exception on %s: %s" % (user_id, e)

    def sync_twitter_photo(self):
        """Re-fetch the user's Twitter avatar; on API failure, fall back to
        no profile photo."""
        profile = MSocialProfile.get_user(self.user_id)

        if profile.photo_service != "twitter":
            return

        user = User.objects.get(pk=self.user_id)
        logging.user(user, "~FCSyncing Twitter profile photo...")

        try:
            api = self.twitter_api()
            me = api.me()
        except tweepy.TweepError, e:
            logging.user(user, "~FRException (%s): ~FCsetting to blank profile photo" % e)
            self.twitter_picture_url = None
            self.set_photo("nothing")
            return

        self.twitter_picture_url = me.profile_image_url_https
        self.save()
        self.set_photo('twitter')

    def post_to_twitter(self, shared_story):
        """Tweet the shared story (140-char budget). Returns True on
        success, None on failure."""
        message = shared_story.generate_post_to_service_message(truncate=140)

        try:
            api = self.twitter_api()
            api.update_status(status=message)
        except tweepy.TweepError, e:
            print e
            return

        return True

    def post_to_facebook(self, shared_story):
        """Post the shared story as an Open Graph share on the user's wall,
        using the first 1KB of story content as the description. Returns
        True on success, None on failure."""
        message = shared_story.generate_post_to_service_message(include_url=False)
        shared_story.calculate_image_sizes()
        content = zlib.decompress(shared_story.story_content_z)[:1024]

        try:
            api = self.facebook_api()
            # api.put_wall_post(message=message)
            api.put_object('me', '%s:share' % settings.FACEBOOK_NAMESPACE,
                           link=shared_story.blurblog_permalink(),
                           type="link",
                           name=shared_story.decoded_story_title,
                           description=content,
                           website=shared_story.blurblog_permalink(),
                           message=message,
                           )
        except facebook.GraphAPIError, e:
            print e
            return

        return True

    def post_to_appdotnet(self, shared_story):
        """Post the shared story to App.net (256-char budget) with a link
        annotation. Returns True on success, None on failure."""
        message = shared_story.generate_post_to_service_message(truncate=256)

        try:
            api = self.appdotnet_api()
            api.createPost(text=message, links=[{
                'text': shared_story.decoded_story_title,
                'url': shared_story.blurblog_permalink()
            }])
        except Exception, e:
            print e
            return

        return True
class MInteraction(mongo.Document):
    """An activity-feed item on a user's dashboard: something another user
    did that involves them (e.g. a follow, comment reply, or reshare)."""
    user_id      = mongo.IntField()          # recipient of the interaction
    date         = mongo.DateTimeField(default=datetime.datetime.now)
    category     = mongo.StringField()       # interaction type
    title        = mongo.StringField()
    content      = mongo.StringField()
    with_user_id = mongo.IntField()          # counterpart user, if any
    feed_id      = mongo.DynamicField()      # may be an int or a "social:" id
    story_feed_id= mongo.IntField()
    content_id   = mongo.StringField()

    meta = {
        'collection': 'interactions',
        'indexes': [('user_id', '-date'), 'category', 'with_user_id'],
        'allow_inheritance': False,
        'index_drop_dups': True,
        'ordering': ['-date'],               # newest first by default
    }
def __unicode__(self):
user = User.objects.get(pk=self.user_id)
with_user = self.with_user_id and User.objects.get(pk=self.with_user_id)
return "<%s> %s on %s: %s - %s" % (user.username, with_user and with_user.username, self.date,
self.category, self.content and self.content[:20])
def canonical(self):
return {
'date': self.date,
'category': self.category,
'title': self.title,
'content': self.content,
'with_user_id': self.with_user_id,
'feed_id': self.feed_id,
'story_feed_id': self.story_feed_id,
'content_id': self.content_id,
}
@classmethod
def publish_update_to_subscribers(self, user_id):
user = User.objects.get(pk=user_id)
try:
r = redis.Redis(connection_pool=settings.REDIS_POOL)
listeners_count = r.publish(user.username, 'interaction:new')
if listeners_count:
logging.debug(" ---> ~FMPublished to %s subscribers" % (listeners_count))
except redis.ConnectionError:
logging.debug(" ***> ~BMRedis is unavailable for real-time.")
@classmethod
def user(cls, user_id, page=1, limit=None, categories=None):
user_profile = Profile.objects.get(user=user_id)
dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
page = max(1, page)
limit = int(limit) if limit else 4
offset = (page-1) * limit
interactions_db = cls.objects.filter(user_id=user_id)
if categories:
interactions_db = interactions_db.filter(category__in=categories)
interactions_db = interactions_db[offset:offset+limit+1]
has_next_page = len(interactions_db) > limit
interactions_db = interactions_db[offset:offset+limit]
with_user_ids = [i.with_user_id for i in interactions_db if i.with_user_id]
social_profiles = dict((p.user_id, p) for p in MSocialProfile.objects.filter(user_id__in=with_user_ids))
interactions = []
for interaction_db in interactions_db:
interaction = interaction_db.canonical()
social_profile = social_profiles.get(interaction_db.with_user_id)
if social_profile:
interaction['photo_url'] = social_profile.profile_photo_url
interaction['with_user'] = social_profiles.get(interaction_db.with_user_id)
interaction['time_since'] = relative_timesince(interaction_db.date)
interaction['date'] = interaction_db.date
interaction['is_new'] = interaction_db.date > dashboard_date
interactions.append(interaction)
return interactions, has_next_page
@classmethod
def user_unread_count(cls, user_id):
user_profile = Profile.objects.get(user=user_id)
dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
interactions_count = cls.objects.filter(user_id=user_id, date__gte=dashboard_date).count()
return interactions_count
@classmethod
def new_follow(cls, follower_user_id, followee_user_id):
params = {
'user_id': followee_user_id,
'with_user_id': follower_user_id,
'category': 'follow',
}
try:
cls.objects.get_or_create(**params)
except cls.MultipleObjectsReturned:
dupes = cls.objects.filter(**params).order_by('-date')
logging.debug(" ---> ~FRDeleting dupe follow interactions. %s found." % dupes.count())
for dupe in dupes[1:]:
dupe.delete()
cls.publish_update_to_subscribers(followee_user_id)
@classmethod
def new_comment_reply(cls, user_id, reply_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % user_id,
'story_feed_id': story_feed_id,
'title': story_title,
'content_id': story_id,
}
if original_message:
params['content'] = original_message
original = cls.objects.filter(**params).limit(1)
if original:
original = original[0]
original.content = linkify(strip_tags(reply_content))
original.save()
else:
original_message = None
if not original_message:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
@classmethod
def remove_comment_reply(cls, user_id, reply_user_id, reply_content, story_id, story_feed_id):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'comment_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
cls.publish_update_to_subscribers(user_id)
@classmethod
def new_comment_like(cls, liking_user_id, comment_user_id, story_id, story_title, comments):
cls.objects.get_or_create(user_id=comment_user_id,
with_user_id=liking_user_id,
category="comment_like",
feed_id="social:%s" % comment_user_id,
content_id=story_id,
defaults={
"title": story_title,
"content": comments,
})
cls.publish_update_to_subscribers(comment_user_id)
@classmethod
def new_reply_reply(cls, user_id, comment_user_id, reply_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'reply_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'title': story_title,
'content_id': story_id,
}
if original_message:
params['content'] = original_message
original = cls.objects.filter(**params).limit(1)
if original:
original = original[0]
original.content = reply_content
original.save()
else:
original_message = None
if not original_message:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
@classmethod
def remove_reply_reply(cls, user_id, comment_user_id, reply_user_id, reply_content, story_id, story_feed_id):
params = {
'user_id': user_id,
'with_user_id': reply_user_id,
'category': 'reply_reply',
'content': linkify(strip_tags(reply_content)),
'feed_id': "social:%s" % comment_user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
original = cls.objects.filter(**params)
original.delete()
cls.publish_update_to_subscribers(user_id)
@classmethod
def new_reshared_story(cls, user_id, reshare_user_id, comments, story_title, story_feed_id, story_id, original_comments=None):
params = {
'user_id': user_id,
'with_user_id': reshare_user_id,
'category': 'story_reshare',
'content': comments,
'title': story_title,
'feed_id': "social:%s" % reshare_user_id,
'story_feed_id': story_feed_id,
'content_id': story_id,
}
if original_comments:
params['content'] = original_comments
original = cls.objects.filter(**params).limit(1)
if original:
interaction = original[0]
interaction.content = comments
interaction.save()
else:
original_comments = None
if not original_comments:
cls.objects.create(**params)
cls.publish_update_to_subscribers(user_id)
class MActivity(mongo.Document):
    """A row in a user's Activities feed: something this user *did*
    (subscribed, followed, starred, shared, replied, liked, signed up).
    """
    user_id = mongo.IntField()          # the acting user / owner of this feed
    date = mongo.DateTimeField(default=datetime.datetime.now)
    category = mongo.StringField()      # 'star', 'feedsub', 'follow', 'sharedstory', ...
    title = mongo.StringField()
    content = mongo.StringField()
    with_user_id = mongo.IntField()     # counterpart user, when the action involves one
    feed_id = mongo.DynamicField()      # int feed id or a "social:<user_id>" string
    story_feed_id = mongo.IntField()
    content_id = mongo.StringField()

    meta = {
        'collection': 'activities',
        'indexes': [('user_id', '-date'), 'category', 'with_user_id'],
        'allow_inheritance': False,
        'index_drop_dups': True,
        'ordering': ['-date'],
    }

    def __unicode__(self):
        user = User.objects.get(pk=self.user_id)
        return "<%s> %s - %s" % (user.username, self.category, self.content and self.content[:20])

    def canonical(self):
        """Return a plain-dict representation for API/JSON output; falls back
        between feed_id/story_feed_id and with_user_id/user_id so older rows
        still render."""
        return {
            'date': self.date,
            'category': self.category,
            'title': self.title,
            'content': self.content,
            'user_id': self.user_id,
            'with_user_id': self.with_user_id or self.user_id,
            'feed_id': self.feed_id or self.story_feed_id,
            'story_feed_id': self.story_feed_id or self.feed_id,
            'content_id': self.content_id,
        }

    @classmethod
    def user(cls, user_id, page=1, limit=4, public=False, categories=None):
        """Return (activities, has_next_page) for one page of a user's feed.

        `public=True` hides private categories ('star', 'feedsub').
        """
        user_profile = Profile.objects.get(user=user_id)
        dashboard_date = user_profile.dashboard_date or user_profile.last_seen_on
        page = max(1, page)
        limit = int(limit)
        offset = (page-1) * limit

        activities_db = cls.objects.filter(user_id=user_id)
        if categories:
            activities_db = activities_db.filter(category__in=categories)
        if public:
            activities_db = activities_db.filter(category__nin=['star', 'feedsub'])

        # Fetch one row past the page to detect a next page, then trim.
        # (Materialize the slice once instead of re-slicing the queryset,
        # which relied on MongoEngine applying slices as absolute skip/limit.)
        activities_db = list(activities_db[offset:offset+limit+1])
        has_next_page = len(activities_db) > limit
        activities_db = activities_db[:limit]

        with_user_ids = [a.with_user_id for a in activities_db if a.with_user_id]
        social_profiles = dict((p.user_id, p) for p in MSocialProfile.objects.filter(user_id__in=with_user_ids))
        activities = []
        for activity_db in activities_db:
            activity = activity_db.canonical()
            activity['date'] = activity_db.date
            activity['time_since'] = relative_timesince(activity_db.date)
            social_profile = social_profiles.get(activity_db.with_user_id)
            if social_profile:
                activity['photo_url'] = social_profile.profile_photo_url
            activity['is_new'] = activity_db.date > dashboard_date
            activity['with_user'] = social_profiles.get(activity_db.with_user_id or activity_db.user_id)
            activities.append(activity)

        return activities, has_next_page

    @classmethod
    def new_starred_story(cls, user_id, story_title, story_feed_id, story_id):
        """Record (idempotently) that the user starred a story."""
        cls.objects.get_or_create(user_id=user_id,
                                  category='star',
                                  story_feed_id=story_feed_id,
                                  content_id=story_id,
                                  defaults=dict(content=story_title))

    @classmethod
    def remove_starred_story(cls, user_id, story_feed_id, story_id):
        """Delete the activity created when the user starred this story."""
        params = {
            'user_id': user_id,
            'category': 'star',
            'story_feed_id': story_feed_id,
            'content_id': story_id,
        }
        original = cls.objects.filter(**params)
        original.delete()

    @classmethod
    def new_feed_subscription(cls, user_id, feed_id, feed_title):
        """Record (idempotently) a feed subscription, de-duplicating extras."""
        params = {
            "user_id": user_id,
            "category": 'feedsub',
            "feed_id": feed_id,
        }
        try:
            cls.objects.get_or_create(defaults=dict(content=feed_title), **params)
        except cls.MultipleObjectsReturned:
            dupes = cls.objects.filter(**params).order_by('-date')
            logging.debug(" ---> ~FRDeleting dupe feed subscription activities. %s found." % dupes.count())
            # keep the newest record, drop the rest
            for dupe in dupes[1:]:
                dupe.delete()

    @classmethod
    def new_follow(cls, follower_user_id, followee_user_id):
        """Record (idempotently) that the user followed someone."""
        params = {
            'user_id': follower_user_id,
            'with_user_id': followee_user_id,
            'category': 'follow',
        }
        try:
            cls.objects.get_or_create(**params)
        except cls.MultipleObjectsReturned:
            dupes = cls.objects.filter(**params).order_by('-date')
            logging.debug(" ---> ~FRDeleting dupe follow activities. %s found." % dupes.count())
            for dupe in dupes[1:]:
                dupe.delete()

    @classmethod
    def new_comment_reply(cls, user_id, comment_user_id, reply_content, story_id, story_feed_id, story_title=None, original_message=None):
        """Record a comment reply made by the user; when `original_message` is
        given this is an edit and the existing record is updated in place."""
        params = {
            'user_id': user_id,
            'with_user_id': comment_user_id,
            'category': 'comment_reply',
            'content': linkify(strip_tags(reply_content)),
            'feed_id': "social:%s" % comment_user_id,
            'story_feed_id': story_feed_id,
            'title': story_title,
            'content_id': story_id,
        }
        if original_message:
            params['content'] = original_message
            original = cls.objects.filter(**params).limit(1)
            if original:
                original = original[0]
                original.content = linkify(strip_tags(reply_content))
                original.save()
            else:
                original_message = None

        if not original_message:
            cls.objects.create(**params)

    @classmethod
    def remove_comment_reply(cls, user_id, comment_user_id, reply_content, story_id, story_feed_id):
        """Delete the activity created for a (now removed) comment reply."""
        params = {
            'user_id': user_id,
            'with_user_id': comment_user_id,
            'category': 'comment_reply',
            'content': linkify(strip_tags(reply_content)),
            'feed_id': "social:%s" % comment_user_id,
            'story_feed_id': story_feed_id,
            'content_id': story_id,
        }
        original = cls.objects.filter(**params)
        original.delete()

    @classmethod
    def new_comment_like(cls, liking_user_id, comment_user_id, story_id, story_title, comments):
        """Record (idempotently) that the user liked someone's comment."""
        cls.objects.get_or_create(user_id=liking_user_id,
                                  with_user_id=comment_user_id,
                                  category="comment_like",
                                  feed_id="social:%s" % comment_user_id,
                                  content_id=story_id,
                                  defaults={
                                      "title": story_title,
                                      "content": comments,
                                  })

    @classmethod
    def new_shared_story(cls, user_id, source_user_id, story_title, comments, story_feed_id, story_id, share_date=None):
        """Record that the user shared a story, de-duplicating and keeping the
        record's comments/source/date in sync on re-shares."""
        data = {
            "user_id": user_id,
            "category": 'sharedstory',
            "feed_id": "social:%s" % user_id,
            "story_feed_id": story_feed_id,
            "content_id": story_id,
        }

        try:
            a, _ = cls.objects.get_or_create(defaults={
                                                 'with_user_id': source_user_id,
                                                 'title': story_title,
                                                 'content': comments,
                                             }, **data)
        except cls.MultipleObjectsReturned:
            dupes = cls.objects.filter(**data)
            logging.debug(" ---> ~FRDeleting dupe shared story activities. %s found." % dupes.count())
            a = dupes[0]
            for dupe in dupes[1:]:
                dupe.delete()

        if a.content != comments:
            a.content = comments
            a.save()
        if source_user_id and a.with_user_id != source_user_id:
            # This used to assign `a.source_user_id`, a field MActivity never
            # declares, so the update was silently dropped; store the value in
            # the field the guard above actually checks.
            a.with_user_id = source_user_id
            a.save()
        if share_date:
            a.date = share_date
            a.save()

    @classmethod
    def remove_shared_story(cls, user_id, story_feed_id, story_id):
        """Delete the activity (or duplicate activities) for an unshared story."""
        params = dict(user_id=user_id,
                      category='sharedstory',
                      feed_id="social:%s" % user_id,
                      story_feed_id=story_feed_id,
                      content_id=story_id)
        try:
            a = cls.objects.get(**params)
        except cls.DoesNotExist:
            return
        except cls.MultipleObjectsReturned:
            a = cls.objects.filter(**params)

        a.delete()

    @classmethod
    def new_signup(cls, user_id):
        """Record (idempotently) that the user signed up."""
        cls.objects.get_or_create(user_id=user_id,
                                  with_user_id=user_id,
                                  category="signup")
class MFollowRequest(mongo.Document):
    """A pending request from `follower_user_id` to follow `followee_user_id`.

    `unique_with` guarantees at most one outstanding request per user pair.
    """
    follower_user_id = mongo.IntField(unique_with='followee_user_id')
    followee_user_id = mongo.IntField()
    date = mongo.DateTimeField(default=datetime.datetime.now)

    meta = {
        'collection': 'follow_request',
        'indexes': ['follower_user_id', 'followee_user_id'],
        'ordering': ['-date'],
        'allow_inheritance': False,
        'index_drop_dups': True,
    }

    @classmethod
    def add(cls, follower_user_id, followee_user_id):
        # get_or_create keeps repeated requests idempotent
        cls.objects.get_or_create(follower_user_id=follower_user_id,
                                  followee_user_id=followee_user_id)

    @classmethod
    def remove(cls, follower_user_id, followee_user_id):
        # Delete whatever matches; a no-op if no request exists.
        cls.objects.filter(follower_user_id=follower_user_id,
                           followee_user_id=followee_user_id).delete()
| eric-stanley/NewsBlur | apps/social/models.py | Python | mit | 137,635 | [
"BLAST"
] | 33248716eb7a1ddb22b017a8bbfc11e48163d0eae9df14b3441d1696d1d056bb |
__author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
"""
EngineTemplate
Implemented by EngineBokeh, EnglineMatplotlib and EnginePlotly to do underlying plotting
"""
import abc
from math import log10, floor
import numpy
import pandas
import datetime
from chartpy.style import Style
from chartpy.chartconstants import ChartConstants
cc = ChartConstants()  # shared chart constants (fonts, palettes, defaults) used by every engine

# compatible with Python 2 *and* 3: a metaclass-built stand-in for abc.ABC
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class EngineTemplate(ABC):
    """Abstract base class for the chart engines (Bokeh, matplotlib, vispy, ...).

    Subclasses implement `plot_chart`; the helpers here provide the shared
    plumbing: bar-chart proxy indices, linewidth selection, splitting a
    DataFrame into per-column subplots, auto-generated output filenames, and
    min/max computation for normalizing colormaps.
    """

    def init(self):
        return

    @abc.abstractmethod
    def plot_chart(self, data_frame, style, type):
        """Render `data_frame` according to `style`; implemented per engine."""
        return

    def get_time_stamp(self):
        """Return the current time as a filesystem-safe string (no ':', ' ' or '.')."""
        return str(datetime.datetime.now()).replace(':', '-').replace(' ', '-').replace(".", "-")

    def get_bar_indices(self, data_frame, style, chart_type, bar_ind):
        """Work out whether the plot contains bar series and, if so, substitute
        the proxy bar indices for the x-axis.

        Returns (xd, bar_ind, has_bar, no_of_bars), where `has_bar` is one of
        'no-bar', 'barv' (vertical bars) or 'barh' (horizontal bars).
        """
        has_bar = 'no-bar'
        xd = data_frame.index
        no_of_bars = len(data_frame.columns)

        if style.chart_type is not None:
            if isinstance(style.chart_type, list):
                if 'bar' in style.chart_type:
                    xd = bar_ind
                    no_of_bars = style.chart_type.count('bar')
                    has_bar = 'barv'
                elif 'stacked' in style.chart_type:
                    xd = bar_ind
                    no_of_bars = 1
                    has_bar = 'barv'
            elif 'bar' == style.chart_type:
                xd = bar_ind
                has_bar = 'barv'
            elif 'barh' == style.chart_type:
                xd = bar_ind
                has_bar = 'barh'
            elif 'stacked' == style.chart_type:
                xd = bar_ind
                has_bar = 'barh'
        else:
            if chart_type == 'bar' or chart_type == 'stacked':
                xd = bar_ind
                has_bar = 'barv'

        return xd, bar_ind, has_bar, no_of_bars

    def assign(self, structure, field, default):
        """Return getattr(structure, field) when present, else `default`."""
        if hasattr(structure, field): default = getattr(structure, field)

        return default

    def assign_list(self, style, field, list):
        """Return getattr(style, field) coerced to a list of str when present,
        else `list` unchanged."""
        if hasattr(style, field):
            list = [str(x) for x in getattr(style, field)]

        return list

    def get_linewidth(self, label, linewidth_1, linewidth_2, linewidth_2_series):
        """Return the secondary linewidth for series named in `linewidth_2_series`,
        otherwise the primary linewidth."""
        if label in linewidth_2_series:
            return linewidth_2

        return linewidth_1

    def round_to_1(self, x):
        """Round `x` to one significant figure."""
        return round(x, -int(floor(log10(x))))

    def split_data_frame_to_list(self, data_frame, style):
        """Split `data_frame` into one single-column DataFrame per column when
        style.subplots is set; otherwise wrap the input in a one-element list."""
        data_frame_list = []

        if isinstance(data_frame, list):
            data_frame_list = data_frame
        else:
            if style.subplots == True and isinstance(data_frame, pandas.DataFrame):
                for col in data_frame.columns:
                    data_frame_list.append(
                        pandas.DataFrame(index=data_frame.index, columns=[col], data=data_frame[col]))
            else:
                data_frame_list.append(data_frame)

        return data_frame_list

    def generate_file_names(self, style, engine):
        """Fill in style.html_file_output / style.file_output with timestamped
        names unless the user supplied explicit, non-auto-generated ones."""
        if style.html_file_output is not None and not (style.auto_generate_html_filename):
            pass
        else:
            style.html_file_output = (self.get_time_stamp() + "-" + engine + ".html")
            style.auto_generate_html_filename = True

        if style.file_output is not None and not (style.auto_generate_filename):
            pass
        else:
            style.file_output = (self.get_time_stamp() + "-" + engine + ".png")
            style.auto_generate_filename = True

        return style

    def get_max_min_dataframes(self, data_frame_list):
        """Gets minimum and maximum values for a series of dataframes. Can be particularly useful for adjusting colormaps
        for lightness/darkness.

        Parameters
        ----------
        data_frame_list : DataFrame (list)
            DataFrames to be checked

        Returns
        -------
        float, float
            Minimum and maximum values
        """
        if not (isinstance(data_frame_list, list)):
            data_frame_list = [data_frame_list]

        import sys

        minz = sys.float_info.max
        # was sys.float_info.min, which is the smallest *positive* float, so
        # the running maximum could never drop below ~2e-308 and all-negative
        # data produced a bogus maximum
        maxz = -sys.float_info.max

        for data_frame in data_frame_list:
            minz_1 = data_frame.min(axis=0).min()
            maxz_1 = data_frame.max(axis=0).max()

            # `x != numpy.nan` is always True (NaN compares unequal to
            # everything, including itself); use isnan to really skip NaNs
            if not numpy.isnan(minz_1):
                minz = min(minz, minz_1)

            if not numpy.isnan(maxz_1):
                maxz = max(maxz, maxz_1)

        return minz, maxz

    def get_max_min_x_axis(self, data_frame_list):
        """Gets minimum and maximum values for the x_axis. Can be particularly useful for adjusting colormaps
        for lightness/darkness.

        Parameters
        ----------
        data_frame_list : DataFrame (list)
            DataFrames to be checked

        Returns
        -------
        obj, obj
            Minimum and maximum values
        """
        minz = data_frame_list[0].index[0]
        maxz = data_frame_list[0].index[-1]

        for data_frame in data_frame_list:
            minz_1 = data_frame.index[0]
            maxz_1 = data_frame.index[-1]

            # pandas.isnull handles NaT/None index values as well as float NaN
            # (the previous `!= numpy.nan` test was always True)
            if not pandas.isnull(minz_1):
                minz = min(minz, minz_1)

            if not pandas.isnull(maxz_1):
                maxz = max(maxz, maxz_1)

        return minz, maxz
#######################################################################################################################
try:
from bokeh.plotting import figure, output_file, show, gridplot, save
from bokeh.models import Range1d
from bokeh.charts import HeatMap # TODO deprecated need to redo
except:
pass
class EngineBokeh(EngineTemplate):
    """Chart engine backed by Bokeh.

    Renders the DataFrame(s) to an HTML file (and optionally a browser),
    supporting line, bar, scatter and (legacy) heatmap chart types.
    """

    def plot_chart(self, data_frame, style, chart_type):
        """Render `data_frame` with Bokeh according to `style`.

        `chart_type` may be a single string or a list with one entry per
        column.  Bokeh-environment setup failures (notebook mode, file
        output) are deliberately swallowed so plotting stays best-effort.
        """
        cm = ColorMaster()

        # NOTE(review): a positive scale factor is shrunk by 2/3 — presumably
        # to compensate for Bokeh's sizing vs the other engines; confirm
        if style.scale_factor > 0:
            scale_factor = abs(style.scale_factor) * 2 / 3
        else:
            scale_factor = abs(style.scale_factor)

        try:
            if style.bokeh_plot_mode == "offline_jupyter":
                from bokeh.io import output_notebook
                output_notebook()
        except:
            pass

        try:
            style = self.generate_file_names(style, 'bokeh')

            output_file(style.html_file_output)
        except:
            pass

        data_frame_list = self.split_data_frame_to_list(data_frame, style)

        plot_list = []

        # total height is shared equally between the stacked subplots
        plot_width = int((style.width * scale_factor))
        plot_height = int((style.height * scale_factor) / len(data_frame_list))

        for data_frame in data_frame_list:
            bar_ind = numpy.arange(1, len(data_frame.index) + 1)

            xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind)

            separate_chart = False

            # pick the figure flavor from the chart type / x-axis type
            if chart_type == 'heatmap':
                # TODO
                p1 = HeatMap(data_frame,
                             title='Random', plot_width=plot_width, plot_height=plot_height)

                separate_chart = True

            # if has a vertical bar than categorical x-axis
            elif has_bar == 'barv':
                p1 = figure(
                    plot_width=plot_width,
                    plot_height=plot_height,
                    x_range=[str(x).replace(':', '.') for x in data_frame.index]
                )

                from math import pi

                p1.xaxis.major_label_orientation = pi / 2
            elif type(data_frame.index) == pandas.Timestamp or (
                    type(xd[0]) == pandas.Timestamp and type(xd[-1]) == pandas.Timestamp) \
                    or type(data_frame.index) == pandas.DatetimeIndex:
                p1 = figure(
                    x_axis_type="datetime",
                    plot_width=plot_width,
                    plot_height=plot_height,
                    # x_range=(xd[0], xd[-1]) # at present Bokeh doesn't like to set limits with datetime, hopefully will change!
                )

            # otherwise numerical axis
            else:
                p1 = figure(
                    plot_width=plot_width,
                    plot_height=plot_height,
                    x_range=(xd[0], xd[-1])
                )

            # set the fonts
            p1.axis.major_label_text_font_size = str(10) + "pt"
            p1.axis.major_label_text_font = cc.bokeh_font
            p1.axis.major_label_text_font_style = cc.bokeh_font_style

            p1.xaxis.axis_label_text_font_size = str(10) + "pt"
            p1.xaxis.axis_label_text_font = cc.bokeh_font
            p1.xaxis.axis_label_text_font_style = cc.bokeh_font_style
            p1.xaxis.axis_label = style.x_title
            p1.xaxis.visible = style.x_axis_showgrid

            p1.yaxis.axis_label_text_font_size = str(10) + "pt"
            p1.yaxis.axis_label_text_font = cc.bokeh_font
            p1.yaxis.axis_label_text_font_style = cc.bokeh_font_style
            p1.yaxis.axis_label = style.y_title
            p1.yaxis.visible = style.y_axis_showgrid

            p1.legend.location = "top_left"
            p1.legend.label_text_font_size = str(10) + "pt"
            p1.legend.label_text_font = cc.bokeh_font
            p1.legend.label_text_font_style = cc.bokeh_font_style
            p1.legend.background_fill_alpha = 0.75
            p1.legend.border_line_width = 0

            # set chart outline
            p1.outline_line_width = 0

            # Plot.title.text
            p1.title.text_font_size = str(14) + "pt"
            p1.title.text_font = cc.bokeh_font

            # TODO fix label
            # if style.display_source_label:
            #     p1.text([30 * scale_factor, 30 * scale_factor], [0, 0], text = [style.brand_label],
            #             text_font_size = str(10 * scale_factor) + "pt", text_align = "left",
            #             text_font = GraphistyleConstants().bokeh_font)

            color_spec = cm.create_color_list(style, data_frame)
            import matplotlib

            bar_space = 0.2
            bar_width = (1 - bar_space) / (no_of_bars)
            bar_index = 0

            has_bar = 'no-bar'

            if not (separate_chart):

                # plot each series in the dataframe separately
                for i in range(0, len(data_frame.columns)):
                    label = str(data_frame.columns[i])
                    glyph_name = 'glpyh' + str(i)

                    # set chart type which can differ for each time series
                    if isinstance(chart_type, list):
                        chart_type_ord = chart_type[i]
                    else:
                        chart_type_ord = chart_type

                    # get the color
                    if color_spec[i] is None:
                        color_spec[i] = self.get_color_list(i)

                    # colors may arrive as RGB tuples; convert to hex for Bokeh
                    try:
                        color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i])
                    except:
                        pass

                    yd = data_frame.iloc[:, i]

                    # plot each time series as appropriate line, scatter etc.
                    if chart_type_ord == 'line':
                        linewidth_t = self.get_linewidth(label,
                                                         style.linewidth, style.linewidth_2, style.linewidth_2_series)

                        if linewidth_t is None: linewidth_t = 1

                        if style.display_legend:
                            p1.line(xd, yd, color=color_spec[i], line_width=linewidth_t, name=glyph_name,
                                    legend=label,
                                    )
                        else:
                            p1.line(xd, data_frame.iloc[:, i], color=color_spec[i], line_width=linewidth_t,
                                    name=glyph_name)

                    elif (chart_type_ord == 'bar'):
                        # allocate horizontal space for each bar series side by side
                        bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(1, len(bar_ind) + 1)]
                        bar_pos_right = [x + bar_width for x in bar_pos]

                        if style.display_legend:
                            p1.quad(top=yd, bottom=0 * yd, left=bar_pos, right=bar_pos_right, color=color_spec[i],
                                    legend=label)
                        else:
                            p1.quad(top=yd, bottom=0 * yd, left=bar_pos, right=bar_pos_right, color=color_spec[i])

                        bar_index = bar_index + 1
                        bar_ind = bar_ind + bar_width
                    elif (chart_type_ord == 'barh'):
                        # TODO
                        pass
                    elif chart_type_ord == 'scatter':
                        linewidth_t = self.get_linewidth(label,
                                                         style.linewidth, style.linewidth_2, style.linewidth_2_series)

                        if linewidth_t is None: linewidth_t = 1

                        if style.display_legend:
                            p1.circle(xd, yd, color=color_spec[i], line_width=linewidth_t, name=glyph_name,
                                      legend=label,
                                      )
                        else:
                            p1.circle(xd, yd, color=color_spec[i], line_width=linewidth_t, name=glyph_name)

            p1.grid.grid_line_alpha = 0.3

            # p1.min_border_left = -40
            # p1.min_border_right = 0
            # p1.min_border_top = 0
            # p1.min_border_bottom = 0
            p1.min_border = -50

            plot_list.append(p1)

        # stack all the subplots vertically into one grid
        p_final = gridplot(plot_list, ncols=1)

        try:
            p_final.title.text = style.title
        except:
            pass

        if style.silent_display:
            save(p_final)
        else:
            show(p_final)  # open a browser

    def get_color_list(self, i):
        """Cycle through the Bokeh palette defined in ChartConstants."""
        color_palette = cc.bokeh_palette

        return color_palette[i % len(color_palette)]

    def generic_settings(self):
        return
######################################################################################################################
# TODO bqplot interface not implemented yet
try:
from IPython.display import display
from bqplot import (
OrdinalScale, LinearScale, Bars, Lines, Axis, Figure
)
except:
pass
class EngineBqplot(EngineTemplate):
    """Placeholder engine for bqplot; the rendering interface is not yet
    implemented."""

    def plot_chart(self, data_frame, style, chart_type):
        # bqplot rendering not implemented yet
        pass

    def get_color_list(self, i):
        """Cycle through the Bokeh palette defined in ChartConstants."""
        palette = cc.bokeh_palette

        return palette[i % len(palette)]

    def generic_settings(self):
        return
#######################################################################################################################
# vispy based plots
try:
from vispy import plot as vp
except:
pass
class EngineVisPy(EngineTemplate):
    """Chart engine backed by vispy (GPU-accelerated plotting).

    Only line charts are currently implemented; bar/barh/scatter/surface are
    stubbed out as TODO.
    """

    def plot_chart(self, data_frame, style, chart_type):
        """Render `data_frame` with vispy according to `style`."""
        cm = ColorMaster()

        scale_factor = abs(style.scale_factor)

        try:
            if style.vispy_plot_mode == "offline_jupyter":
                pass
        except:
            pass

        try:
            style = self.generate_file_names(style, 'vispy')
        except:
            pass

        data_frame_list = self.split_data_frame_to_list(data_frame, style)

        plot_list = []

        # total height is shared equally between the stacked subplots
        plot_width = int((style.width * scale_factor))
        plot_height = int((style.height * scale_factor) / len(data_frame_list))

        fig = vp.Fig(size=(plot_width, plot_height), show=False, title=style.title)

        min_x, max_x = self.get_max_min_x_axis(data_frame_list=data_frame_list)

        for data_frame in data_frame_list:
            bar_ind = numpy.arange(1, len(data_frame.index) + 1)

            # vispy can't draw Date labels, so drop a 'Date' index and fall
            # back to positional x values
            if data_frame.index.name == 'Date':
                data_frame = data_frame.copy()
                data_frame = data_frame.reset_index()
                data_frame = data_frame.drop(['Date'], axis=1)

            xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind)

            xd = data_frame.index

            # make the x-axis float as a temporary fix, vispy can't handle Date labels
            separate_chart = False

            # axis properties
            color_spec = cm.create_color_list(style, data_frame)

            import matplotlib

            bar_space = 0.2
            bar_width = (1 - bar_space) / (no_of_bars)
            bar_index = 0

            separate_chart = False

            if chart_type == 'surface':
                # TODO
                separate_chart = True

            has_bar = 'no-bar'

            if not (separate_chart):

                # plot each series in the dataframe separately
                for i in range(0, len(data_frame.columns)):
                    label = str(data_frame.columns[i])
                    glyph_name = 'glpyh' + str(i)

                    # set chart type which can differ for each time series
                    if isinstance(chart_type, list):
                        chart_type_ord = chart_type[i]
                    else:
                        chart_type_ord = chart_type

                    # get the color
                    if color_spec[i] is None:
                        color_spec[i] = self.get_color_list(i)

                    # colors may arrive as RGB tuples; convert to hex
                    try:
                        color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i])
                    except:
                        pass

                    yd = data_frame.iloc[:, i]

                    # plot each time series as appropriate line, scatter etc.
                    if chart_type_ord == 'line':
                        fig[0, 0].plot(np.array((xd, yd)).T, marker_size=0, color=color_spec[i])
                        # fig[0, 0].view.camera.set_range(x=(min_x, max_x))
                        # TODO
                        pass
                    elif (chart_type_ord == 'bar'):
                        # TODO
                        pass
                    elif (chart_type_ord == 'barh'):
                        # TODO
                        pass
                    elif chart_type_ord == 'scatter':
                        # TODO
                        pass

        if style.silent_display:
            pass
        else:
            if style.save_fig:
                import vispy.io as io

                io.write_png(style.file_output, fig.render())

            fig.show(run=True)

            # print(min_x); print(max_x)
            # fig[0, 0].view.camera.set_range(x=(min_x, max_x))

    def get_color_list(self, i):
        """Cycle through the Bokeh palette defined in ChartConstants."""
        color_palette = cc.bokeh_palette

        return color_palette[i % len(color_palette)]

    def generic_settings(self):
        return
#######################################################################################################################
# matplotlib based libraries
from datetime import timedelta
import numpy as np
try:
import matplotlib
import matplotlib.pyplot as plt
except:
pass
try:
from mpl_toolkits.mplot3d import Axes3D # need to import in order to do 3D plots (even if not called)
except:
pass
# Later version of Pandas will need to register converters
try:
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
except:
pass
try:
from matplotlib.dates import YearLocator, MonthLocator, DayLocator, HourLocator, MinuteLocator
from matplotlib.ticker import MultipleLocator
except:
pass
class EngineMatplotlib(EngineTemplate):
def plot_chart(self, data_frame, style, chart_type):
self.apply_style_sheet(style)
if style.xkcd:
plt.xkcd()
# create figure & add a subplot
fig = plt.figure(figsize=((style.width * abs(style.scale_factor)) / style.dpi,
(style.height * abs(style.scale_factor)) / style.dpi), dpi=style.dpi)
# matplotlib 1.5
try:
cyc = matplotlib.rcParams['axes.prop_cycle']
color_cycle = [x['color'] for x in cyc]
except KeyError:
# pre 1.5
pass
# color_cycle = matplotlib.rcParams['axes.color_cycle']
cm = ColorMaster()
data_frame_list = self.split_data_frame_to_list(data_frame, style)
subplot_no = 1
first_ax = None
movie_frame = []
ordinal = 0
minz, maxz = self.get_max_min_dataframes(data_frame_list=data_frame_list)
for data_frame in data_frame_list:
bar_ind = np.arange(0, len(data_frame.index))
# for bar charts, create a proxy x-axis (then relabel)
xd, bar_ind, has_bar, no_of_bars = self.get_bar_indices(data_frame, style, chart_type, bar_ind)
try:
xd = xd.to_pydatetime()
except:
pass
ax, ax2, subplot_no, ordinal = self._create_subplot(fig, chart_type, style, subplot_no, first_ax, ordinal)
# for stacked bar
yoff_pos = np.zeros(len(data_frame.index.values)) # the bottom values for stacked bar chart
yoff_neg = np.zeros(len(data_frame.index.values)) # the bottom values for stacked bar chart
zeros = np.zeros(len(data_frame.index.values))
# for bar chart
bar_space = 0.2
bar_width = (1 - bar_space) / (no_of_bars)
bar_index = 0
try:
has_matrix = 'no'
if not (isinstance(chart_type, list)):
ax_temp = ax
# get all the correct colors (and construct gradients if necessary eg. from 'blues')
color = style.color
if style.color == []:
color = cc.chartfactory_default_colormap
else:
if isinstance(style.color, list):
color = style.color[subplot_no - 1]
if chart_type == 'heatmap':
ax_temp.set_frame_on(False)
# weird hack, otherwise comes out all inverted!
data_frame = data_frame.iloc[::-1]
if style.normalize_colormap:
movie_frame.append(
ax_temp.pcolor(data_frame.values, cmap=color, alpha=0.8, vmax=maxz, vmin=minz))
else:
movie_frame.append(ax_temp.pcolor(data_frame.values, cmap=color, alpha=0.8))
has_matrix = '2d-matrix'
elif chart_type == 'surface':
# TODO still very early alpha
X, Y = np.meshgrid(range(0, len(data_frame.columns)), range(0, len(data_frame.index)))
Z = data_frame.values
if style.normalize_colormap:
movie_frame.append(ax_temp.plot_surface(X, Y, Z, cmap=color, rstride=1, cstride=1,
vmax=maxz, vmin=minz))
else:
movie_frame.append(ax_temp.plot_surface(X, Y, Z, cmap=color, rstride=1, cstride=1))
has_matrix = '3d-matrix'
if (has_matrix == 'no'):
# Plot the lines (using custom palettes as appropriate)
color_spec = cm.create_color_list(style, data_frame)
# Some lines we should exclude from the color and use the default palette
for i in range(0, len(data_frame.columns.values)):
if isinstance(chart_type, list):
chart_type_ord = chart_type[i]
else:
chart_type_ord = chart_type
label = str(data_frame.columns[i])
ax_temp = self.get_axis(ax, ax2, label, style.y_axis_2_series)
yd = data_frame.iloc[:, i]
if color_spec[i] is None:
color_spec[i] = color_cycle[i % len(color_cycle)]
if (chart_type_ord == 'line'):
linewidth_t = self.get_linewidth(label,
style.linewidth, style.linewidth_2,
style.linewidth_2_series)
if linewidth_t is None: linewidth_t = matplotlib.rcParams['axes.linewidth']
movie_frame.append(ax_temp.plot(xd, yd, label=label, color=color_spec[i],
linewidth=linewidth_t), )
elif (chart_type_ord == 'bar'):
# for multiple bars we need to allocate space properly
bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0, len(bar_ind))]
movie_frame.append(ax_temp.bar(bar_pos, yd, bar_width, label=label, color=color_spec[i]))
bar_index = bar_index + 1
elif (chart_type_ord == 'barh'):
# for multiple bars we need to allocate space properly
bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0, len(bar_ind))]
movie_frame.append(ax_temp.barh(bar_pos, yd, bar_width, label=label, color=color_spec[i]))
bar_index = bar_index + 1
elif (chart_type_ord == 'stacked'):
bar_pos = [k - (1 - bar_space) / 2. + bar_index * bar_width for k in range(0, len(bar_ind))]
yoff = np.where(yd > 0, yoff_pos, yoff_neg)
movie_frame.append(ax_temp.bar(bar_pos, yd, label=label, color=color_spec[i], bottom=yoff))
yoff_pos = yoff_pos + np.maximum(yd, zeros)
yoff_neg = yoff_neg + np.minimum(yd, zeros)
# bar_index = bar_index + 1
elif (chart_type_ord == 'scatter'):
movie_frame.append(ax_temp.scatter(xd, yd, label=label, color=color_spec[i]))
if style.line_of_best_fit is True:
self.trendline(ax_temp, xd.values, yd.values, order=1, color=color_spec[i], alpha=1,
scale_factor=abs(style.scale_factor))
# format X axis
self.format_x_axis(ax_temp, data_frame, style, has_bar, bar_ind, bar_width, has_matrix)
except Exception as e:
pass
# print(str(e))
self._create_legend(ax, ax2, style)
try:
ax_temp.set_zlim(minz, maxz)
except:
pass
anim = None
# Should we animate the figure?
if style.animate_figure:
if style.animate_titles is None:
titles = range(1, len(data_frame_list) + 1)
else:
titles = style.animate_titles
# Initialization function: weirdly need to plot the last one (otherwise get ghosting!)
def init():
return [movie_frame[-1]]
def update(i):
fig.canvas.set_window_title(str(titles[i]))
return [movie_frame[i]]
import matplotlib.animation as animation
try:
anim = animation.FuncAnimation(plt.gcf(), update, interval=style.animate_frame_ms, blit=True,
frames=len(data_frame_list),
init_func=init, repeat=True)
except Exception as e:
print(str(e))
# fig.autofmt_xdate()
try:
style = self.generate_file_names(style, 'matplotlib')
if style.save_fig:
# TODO get save movie file to work in GIF and MP4 (hangs currently on these)
# install FFMPEG with: conda install --channel https://conda.anaconda.org/conda-forge ffmpeg
if style.animate_figure:
pass
file = style.file_output.upper()
# if '.GIF' in file:
# anim.save(style.file_output, writer='imagemagick', fps=5, dpi=80)
# print('GIF saved')
# FFwriter = animation.FFMpegWriter()
# plt.rcParams['animation.ffmpeg_path'] = 'c:\\ffmpeg\\bin\\ffmpeg.exe'
# Writer = animation.writers['ffmpeg']
# writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
# anim.save('test.mp4', writer=writer)
plt.savefig(style.file_output, transparent=False)
except Exception as e:
print(str(e))
####### various matplotlib converters are unstable
# convert to D3 format with mpld3
try:
# output matplotlib charts externally to D3 based libraries
import mpld3
if style.display_mpld3 == True:
mpld3.save_d3_html(fig, style.html_file_output)
mpld3.show(fig)
except:
pass
# FRAGILE! convert to Bokeh format
# better to use direct Bokeh renderer
try:
if (style.convert_matplotlib_to_bokeh == True):
from bokeh.plotting import output_file, show
from bokeh import mpl
output_file(style.html_file_output)
show(mpl.to_bokeh())
except:
pass
# FRAGILE! convert matplotlib chart to Plotly format
# recommend using AdapterCufflinks instead to directly plot to Plotly
try:
import plotly.plotly as py
import plotly
import plotly.tools as tls
if style.convert_matplotlib_to_plotly == True:
plotly.tools.set_credentials_file(username=style.plotly_username,
api_key=style.plotly_api_key)
py_fig = tls.mpl_to_plotly(fig, strip_style=True)
plot_url = py.plot_mpl(py_fig, filename=style.plotly_url)
except:
pass
# display in matplotlib window (or clear from pyplot)
try:
if cc.chartfactory_silent_display == True:
plt.close(fig)
return fig
elif style.silent_display == False:
if not (style.block_new_plots):
# TODO
pass
plt.show()
else:
plt.close(fig)
return fig
except:
pass
def apply_style_sheet(self, style):
    """Reset matplotlib to its defaults, then apply the requested style sheet.

    The sheet name in style.style_sheet is first looked up in the ChartPy
    style-sheet registry (cc.chartfactory_style_sheet); if that lookup or
    application fails for any reason, the name is passed straight to
    matplotlib as a built-in style. Font size is then rescaled by the
    style's scale factor and axis offset notation is disabled.
    """
    # Start from a clean slate so earlier plots don't leak rcParams
    matplotlib.rcdefaults()

    # First search ChartPy styles, then fall back to matplotlib's own
    try:
        plt.style.use(cc.chartfactory_style_sheet[style.style_sheet])
    except:
        plt.style.use(style.style_sheet)

    # Scale the font size according to the (absolute) scale factor
    scaled_font_size = matplotlib.rcParams['font.size'] * abs(style.scale_factor)
    matplotlib.rcParams.update({'font.size': scaled_font_size})

    # Do not use offsets/scientific notation on axes
    matplotlib.rcParams.update({'axes.formatter.useoffset': False})
# Format the x-axis of a matplotlib chart. Dispatches on chart flavour:
# matrix/heatmap charts, vertical/horizontal bar charts, and (by default)
# time-series or plain numeric axes with density-appropriate locators.
def format_x_axis(self, ax, data_frame, style, has_bar, bar_ind, bar_width, has_matrix):
# Matrix-style charts: centre ticks in each cell and annotate every value
if has_matrix == '2d-matrix' or has_matrix == '3d-matrix':
x_bar_ind = np.arange(0, len(data_frame.columns))
y_bar_ind = np.arange(0, len(data_frame.index))
offset = 0.5
ax.set_xticks(x_bar_ind + offset)
ax.set_xlim([0, len(x_bar_ind)])
ax.set_yticks(y_bar_ind + offset)
ax.set_ylim([0, len(y_bar_ind)])
plt.setp(plt.yticks()[1], rotation=90)
ax.set_xticklabels(data_frame.columns, minor=False)
ax.set_yticklabels(data_frame.index, minor=False)
ax.plot([], [])
# Write the (rounded) cell value at the centre of each cell
for x in range(len(data_frame.index)):
for y in range(len(data_frame.columns)):
plt.text(x + offset, y + offset, '%.0f' % data_frame.iloc[x, y],
horizontalalignment='center',
verticalalignment='center',
)
return
# Vertical bar charts: centre ticks under the bars
if has_bar == 'barv':
# NOTE(review): string comparison of versions works for '2.x' > '1.9' but is
# fragile in general — consider a proper version parse
if matplotlib.__version__ > '1.9':
offset = bar_width / 2.0 # for matplotlib 2
else:
offset = 0
ax.set_xticks(bar_ind - offset)
ax.set_xticklabels(data_frame.index)
ax.set_xlim([-1, len(bar_ind)])
# if lots of labels make text smaller and rotate
if len(bar_ind) > 6:
plt.setp(plt.xticks()[1], rotation=90)
# plt.gca().tight_layout()
# matplotlib.rcParams.update({'figure.autolayout': True})
# plt.gcf().subplots_adjust(bottom=5)
import matplotlib.dates as mdates
if style.date_formatter is not None:
# NOTE(review): myFmt is built but never applied to the axis — looks like
# dead code; confirm whether ax.xaxis.set_major_formatter(myFmt) was intended
myFmt = mdates.DateFormatter(style.date_formatter)
plt.tight_layout()
# ax.tick_params(axis='x', labelsize=matplotlib.rcParams['font.size'] * 0.5)
return
# Horizontal bar charts: category labels go on the y-axis instead
elif has_bar == 'barh':
ax.set_yticks(bar_ind)
ax.set_yticklabels(data_frame.index)
ax.set_ylim([-1, len(bar_ind)])
# if lots of labels make text smaller and rotate
if len(bar_ind) > 6:
# plt.setp(plt.yticks()[1])
# plt.gca().tight_layout()
# matplotlib.rcParams.update({'figure.autolayout': True})
# plt.gcf().subplots_adjust(bottom=5)
import matplotlib.dates as mdates
if style.date_formatter is not None:
ax.format_ydata = mdates.DateFormatter(style.date_formatter)
plt.tight_layout()
# ax.tick_params(axis='x', labelsize=matplotlib.rcParams['font.size'] * 0.5)
return
# format X axis
dates = data_frame.index
# scaling for time series plots with hours and minutes only (and no dates)
if hasattr(data_frame.index[0], 'hour') and not (hasattr(data_frame.index[0], 'month')):
ax.xaxis.set_major_locator(MultipleLocator(86400. / 3.))
ax.xaxis.set_minor_locator(MultipleLocator(86400. / 24.))
ax.grid(b=style.x_axis_showgrid, which='minor', color='w', linewidth=0.5)
# TODO have more refined way of formating time series x-axis!
# scaling for time series plots with dates too
else:
# to handle dates
try:
dates = dates.to_pydatetime()
diff = data_frame.index[-1] - data_frame.index[0]
import matplotlib.dates as md
if style.date_formatter is not None:
# from matplotlib.ticker import Formatter
#
# class MyFormatter(Formatter):
# def __init__(self, dates, fmt='%H:%M'):
# self.dates = dates
# self.fmt = fmt
#
# def __call__(self, x, pos=0):
# 'Return the label for time x at position pos'
# ind = int(round(x))
# if ind >= len(self.dates) or ind < 0: return ''
#
# return self.dates[ind].strftime(self.fmt)
#
# formatter = MyFormatter(dates)
# ax.xaxis.set_major_formatter(formatter)
ax.xaxis.set_major_formatter(md.DateFormatter(style.date_formatter))
# Otherwise pick tick spacing appropriate to the span of the data:
# minutes for intraday, hours for a few days, days/months/years beyond
elif diff < timedelta(days=4):
date_formatter = '%H:%M'
xfmt = md.DateFormatter(date_formatter)
ax.xaxis.set_major_formatter(xfmt)
if diff < timedelta(minutes=20):
ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=2))
ax.xaxis.set_minor_locator(MinuteLocator(interval=1))
elif diff < timedelta(hours=1):
ax.xaxis.set_major_locator(MinuteLocator(byminute=range(60), interval=5))
ax.xaxis.set_minor_locator(MinuteLocator(interval=2))
elif diff < timedelta(hours=6):
locator = HourLocator(interval=1)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_minor_locator(MinuteLocator(interval=30))
elif diff < timedelta(days=3):
ax.xaxis.set_major_locator(HourLocator(interval=6))
ax.xaxis.set_minor_locator(HourLocator(interval=1))
elif diff < timedelta(days=10):
locator = DayLocator(interval=2)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y'))
day_locator = DayLocator(interval=1)
ax.xaxis.set_minor_locator(day_locator)
elif diff < timedelta(days=40):
locator = DayLocator(interval=10)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%d %b %y'))
day_locator = DayLocator(interval=1)
ax.xaxis.set_minor_locator(day_locator)
elif diff < timedelta(days=365 * 0.5):
locator = MonthLocator(bymonthday=1, interval=2)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%b %y'))
months_locator = MonthLocator(interval=1)
ax.xaxis.set_minor_locator(months_locator)
elif diff < timedelta(days=365 * 2):
locator = MonthLocator(bymonthday=1, interval=3)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%b %y'))
months_locator = MonthLocator(interval=1)
ax.xaxis.set_minor_locator(months_locator)
elif diff < timedelta(days=365 * 5):
locator = YearLocator()
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%Y'))
else:
# Very long span: roughly one tick per five years
years = floor(diff.days / 365.0 / 5.0)
locator = YearLocator(years)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(md.DateFormatter('%Y'))
if matplotlib.__version__ > '1.9':
max = dates.max()
min = dates.min()
plt.xlim(min, max)
except:
try:
# otherwise we have integers, rather than dates
# TODO needs smarter more generalised mapping of dates
max = dates.max()
min = dates.min()
big_step = self.round_to_1((max - min) / 10)
small_step = big_step / 5
ax.xaxis.set_major_locator(MultipleLocator(big_step))
ax.xaxis.set_minor_locator(MultipleLocator(small_step))
plt.xlim(min, max)
except:
pass
def get_axis(self, ax, ax2, label, y_axis_2_series):
    """Return the secondary axis when label belongs to y_axis_2_series, else the primary."""
    return ax2 if label in y_axis_2_series else ax
def trendline(self, ax, xd, yd, order=1, color='red', alpha=1, Rval=False, scale_factor=1):
    """Plot a line of best fit for (xd, yd) on the given matplotlib axes.

    Parameters
    ----------
    ax : matplotlib axes to draw on
    xd, yd : array-like of floats; NaNs are treated as 0 for the fit
    order : polynomial order of the fit (coefficients beyond order 2 are
        ignored when evaluating the plotted endpoints)
    color, alpha : line appearance
    Rval : when True, return the R^2 of the fit instead of annotating it
    scale_factor : scales the annotation font size/offset

    Returns
    -------
    float R^2 when Rval is True, otherwise None (annotation drawn on ax).
    """
    # BUG FIX: the original did xd[np.isnan(xd)] = 0 in place, which mutated
    # the caller's arrays (callers pass Series.values, so the underlying
    # DataFrame data was clobbered). Replace NaNs on copies instead.
    xd = np.where(np.isnan(xd), 0, xd)
    yd = np.where(np.isnan(yd), 0, yd)

    # Calculate trendline coefficients (highest power first)
    coeffs = np.polyfit(xd, yd, order)

    intercept = coeffs[-1]
    slope = coeffs[-2]
    power = coeffs[0] if order == 2 else 0

    # Evaluate the fit at the two extremes of x only (sufficient for a line;
    # a quadratic is drawn as a chord between its endpoint values, as before)
    minxd = np.min(xd)
    maxxd = np.max(xd)

    xl = np.array([minxd, maxxd])
    yl = power * xl ** 2 + slope * xl + intercept

    # Plot trendline
    ax.plot(xl, yl, color=color, alpha=alpha)

    # Calculate R squared (explained sum of squares over total sum of squares)
    p = np.poly1d(coeffs)

    ybar = np.sum(yd) / len(yd)
    ssreg = np.sum((p(xd) - ybar) ** 2)
    sstot = np.sum((yd - ybar) ** 2)
    Rsqr = ssreg / sstot

    if Rval == False:
        text = 'R^2 = %0.2f, m = %0.4f, c = %0.4f' % (Rsqr, slope, intercept)

        ax.annotate(text, xy=(1, 1), xycoords='axes fraction', fontsize=8 * abs(scale_factor),
                    xytext=(-5 * abs(scale_factor), 10 * abs(scale_factor)), textcoords='offset points',
                    ha='right', va='top')

        # Plot R^2 value
        # ax.text(0.65, 0.95, text, fontsize = 10 * scale_factor,
        # ha= 'left',
        # va = 'top', transform = ax.transAxes)
        pass
    else:
        # Return the R^2 value:
        return Rsqr
def _create_brand_label(self, ax, anno, scale_factor):
    """Stamp the brand annotation in a rounded box above the top-right of the axes."""
    box_style = dict(boxstyle="round,pad=0.0", facecolor=cc.chartfactory_brand_color)
    text_offset = (0 * abs(scale_factor), 15 * abs(scale_factor))

    ax.annotate(anno, xy=(1, 1), xycoords='axes fraction',
                fontsize=10 * abs(scale_factor), color='white',
                xytext=text_offset, textcoords='offset points',
                va="center", ha="center",
                bbox=box_style)
# Create (and configure) the next matplotlib subplot for the figure.
# Returns (ax, ax2, subplot_no, ordinal): the new axes, a twin y-axis (or []
# when no secondary series are requested), the incremented subplot counter
# and the incremented ordinal.
def _create_subplot(self, fig, chart_type, style, subplot_no, first_ax, ordinal):
if style.title is not None:
fig.suptitle(style.title, fontsize=14 * abs(style.scale_factor))
# Surface charts need a 3d projection; everything else is 2d
chart_projection = '2d'
if not (isinstance(chart_type, list)):
if chart_type == 'surface': chart_projection = '3d'
# Single-plot mode: one full-figure axes
if style.subplots == False and first_ax is None:
if chart_projection == '3d':
ax = fig.add_subplot(111, projection=chart_projection)
else:
ax = fig.add_subplot(111)
else:
# Subplot mode: stack plots in a 2x1 grid, optionally sharing the x-axis
if first_ax is None:
if chart_projection == '3d':
ax = fig.add_subplot(2, 1, subplot_no, projection=chart_projection)
else:
ax = fig.add_subplot(2, 1, subplot_no)
first_ax = ax
if style.share_subplot_x:
if chart_projection == '3d':
ax = fig.add_subplot(2, 1, subplot_no, sharex=first_ax, projection=chart_projection)
else:
ax = fig.add_subplot(2, 1, subplot_no, sharex=first_ax)
else:
if chart_projection == '3d':
ax = fig.add_subplot(2, 1, subplot_no, projection=chart_projection)
else:
ax = fig.add_subplot(2, 1, subplot_no)
subplot_no = subplot_no + 1
if style.x_title != '': ax.set_xlabel(style.x_title)
if style.y_title != '': ax.set_ylabel(style.y_title)
# NOTE(review): these unconditional pyplot calls duplicate the guarded
# set_xlabel/set_ylabel above — confirm whether both are needed
plt.xlabel(style.x_title)
plt.ylabel(style.y_title)
# format Y axis
y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
ax.yaxis.set_major_formatter(y_formatter)
# create a second y axis if necessary
ax2 = []
ax.xaxis.grid(style.x_axis_showgrid)
ax.yaxis.grid(style.y_axis_showgrid)
if style.y_axis_2_series != []:
ax2 = ax.twinx()
# set grid for second y axis
ax2.yaxis.grid(style.y_axis_2_showgrid)
return ax, ax2, subplot_no, ordinal + 1
def _create_legend(self, ax, ax2, style):
    """Add legends (plus optional source/brand annotations) to the axes.

    When a secondary y-axis is present (ax2 != []) the two legends are
    placed in opposite corners. If style.display_legend is False, any
    legends that were created are removed again.
    """
    # Optional "Source: ..." annotation in the bottom-right corner
    if style.display_source_label == True and style.source is not None:
        ax.annotate('Source: ' + style.source, xy=(1, 0), xycoords='axes fraction',
                    fontsize=7 * abs(style.scale_factor),
                    xytext=(-5 * abs(style.scale_factor), 10 * abs(style.scale_factor)), textcoords='offset points',
                    ha='right', va='top', color=style.source_color)

    if style.display_brand_label == True:
        self._create_brand_label(ax, anno=style.brand_label, scale_factor=abs(style.scale_factor))

    leg = []
    leg2 = []

    loc = 'best'

    # if we have two y-axis then make sure legends are in opposite corners
    if ax2 != []: loc = 2

    try:
        leg = ax.legend(loc=loc, prop={'size': 10 * abs(style.scale_factor)})
        leg.get_frame().set_linewidth(0.0)
        leg.get_frame().set_alpha(0)

        if ax2 != []:
            leg2 = ax2.legend(loc=1, prop={'size': 10 * abs(style.scale_factor)})
            leg2.get_frame().set_linewidth(0.0)
            leg2.get_frame().set_alpha(0)
    except:
        pass

    try:
        if style.display_legend is False:
            if leg != []: leg.remove()
            # BUG FIX: was "leg.remove()" — removed the primary legend twice
            # and left the secondary-axis legend in place
            if leg2 != []: leg2.remove()
    except:
        pass
#######################################################################################################################
cf = None
try:
import plotly # JavaScript based plotting library with Python connector
import plotly.offline as py_offline
import cufflinks as cf
plotly.tools.set_config_file(plotly_domain='https://type-here.com',
world_readable=cc.plotly_world_readable,
sharing=cc.plotly_sharing)
except:
pass
# Requires plotly 4
try:
from plotly.graph_objects import Figure
import plotly.graph_objects as go
except:
pass
# For online plotting (optional)
try:
import chart_studio.plotly as py_online
except:
pass
try:
import base64
# plotly.utils.memoize = memoize
except:
pass
class EnginePlotly(EngineTemplate):
def start_orca(self, path=None):
    """Start the orca server used by Plotly for static-image export.

    orca is a separate Plotly application for converting Plotly figures to
    PNG format. Note, kaleido (https://github.com/plotly/Kaleido) is now
    the recommended, easier-to-install alternative.

    Parameters
    ----------
    path : str, optional
        Filesystem path to the orca executable; when given it is registered
        with plotly before the server is started.
    """
    orca_config = plotly.io.orca.config

    if path is not None:
        orca_config.executable = path

    plotly.io.orca.ensure_server()
# Build one or more Plotly figures from data_frame (a DataFrame, a list of
# DataFrames, or a pre-built Plotly Figure) — mostly via the Cufflinks
# DataFrame.iplot helper — apply styling/legend/axis overrides, optionally
# combine into subplots or an animation, then hand off to publish_plot.
def plot_chart(self, data_frame, style, chart_type):
# Special case if we have a pre-created Plotly object
if isinstance(data_frame, Figure):
return self.publish_plot(data_frame, style)
mode = 'lines'
if style is None: style = Style()
marker_size = 1
x = '';
y = '';
z = ''
scale = 1
try:
# Adjust sizing if offline_html format
if (style.plotly_plot_mode == 'offline_html' and style.scale_factor > 0):
scale = float(2.0 / 3.0)
except:
pass
# Check other plots implemented by Cufflinks
cm = ColorMaster()
# Create figure
data_frame_list = self.split_data_frame_to_list(data_frame, style)
fig_list = []
cols = []
# If we provide a list of Figures this will get ignored
try:
for data_frame in data_frame_list:
cols.append(data_frame.columns)
cols = list(numpy.array(cols).flat)
# Get all the correct colors (and construct gradients if necessary eg. from 'Blues')
# need to change to strings for cufflinks
color_list = cm.create_color_list(style, [], cols=cols)
color_spec = []
# If no colors are specified then just use our default color set from chart constants
if color_list == [None] * len(color_list):
color_spec = [None] * len(color_list)
for i in range(0, len(color_list)):
# Get the color
if color_spec[i] is None:
color_spec[i] = self.get_color_list(i)
try:
color_spec[i] = matplotlib.colors.rgb2hex(color_spec[i])
except:
pass
else:
# Otherwise assume all the colors are rgba
for color in color_list:
color = 'rgba' + str(color)
color_spec.append(color)
except Exception as e:
pass
start = 0
title_list = style.title
if not(isinstance(title_list, list)):
title_list = [style.title] * len(data_frame_list)
# Go through each data_frame in the list and plot
for i in range(0, len(data_frame_list)):
data_frame = data_frame_list[i]
title = title_list[i]
if isinstance(data_frame, Figure):
fig = data_frame
else:
if style.drop_na:
data_frame = data_frame.dropna()
if isinstance(chart_type, list):
chart_type_ord = chart_type[i]
else:
chart_type_ord = chart_type
end = start + len(data_frame.columns)
# NOTE(review): slice upper bound is start + end == 2*start + len(columns);
# color_spec[start:end] was presumably intended — harmless only because
# Python slicing clamps out-of-range bounds; confirm
color_spec1 = color_spec[start:start + end]
start = end
# Special call for choropleth (uses Plotly API directly)
# Special case for map/choropleth which has yet to be implemented in Cufflinks
# will likely remove this in the future
if chart_type_ord == 'choropleth':
for col in data_frame.columns:
try:
data_frame[col] = data_frame[col].astype(str)
except:
pass
if style.color != []:
color = style.color
else:
color = [[0.0, 'rgb(242,240,247)'], [0.2, 'rgb(218,218,235)'], [0.4, 'rgb(188,189,220)'], \
[0.6, 'rgb(158,154,200)'], [0.8, 'rgb(117,107,177)'], [1.0, 'rgb(84,39,143)']]
text = ''
# NOTE(review): checks for lowercase 'text' column but reads 'Text' —
# case mismatch means one of the two is wrong; confirm expected schema
if 'text' in data_frame.columns:
text = data_frame['Text']
data = [dict(
type='choropleth',
colorscale=color,
autocolorscale=False,
locations=data_frame['Code'],
z=data_frame[style.plotly_choropleth_field].astype(float),
locationmode=style.plotly_location_mode,
text=text,
marker=dict(
line=dict(
color='rgb(255,255,255)',
width=1
)
),
colorbar=dict(
title=style.units
)
)]
layout = dict(
title=title,
geo=dict(
scope=style.plotly_scope,
projection=dict(type=style.plotly_projection),
showlakes=True,
lakecolor='rgb(255, 255, 255)',
),
)
fig = dict(data=data, layout=layout)
# Otherwise underlying Cufflinks library underneath
elif style.plotly_helper == 'cufflinks':
# NOTE: we use cufflinks library, which simplifies plotting DataFrames in plotly
if chart_type_ord == 'surface':
fig = data_frame.iplot(kind=chart_type,
title=title,
xTitle=style.x_title,
yTitle=style.y_title,
zTitle=style.z_title,
x=x, y=y, z=z,
mode=mode,
size=marker_size,
sharing=style.plotly_sharing,
theme=style.plotly_theme,
bestfit=style.line_of_best_fit,
legend=style.display_legend,
colorscale=style.color,
dimensions=(style.width * abs(style.scale_factor) * scale,
style.height * abs(style.scale_factor) * scale),
asFigure=True)
# Setting axis is different with a surface
if style.x_axis_range is not None:
fig.update_layout(scene=dict(xaxis=dict(range=style.x_axis_range)))
# NOTE(review): the y and z range branches below also update scene.xaxis —
# presumably should be yaxis/zaxis respectively; confirm
if style.y_axis_range is not None:
fig.update_layout(scene=dict(xaxis=dict(range=style.y_axis_range)))
if style.z_axis_range is not None:
fig.update_layout(scene=dict(xaxis=dict(range=style.z_axis_range)))
elif chart_type_ord == 'heatmap':
fig = data_frame.iplot(kind=chart_type,
title=title,
xTitle=style.x_title,
yTitle=style.y_title,
x=x, y=y,
mode=mode,
size=marker_size,
sharing=style.plotly_sharing,
theme=style.plotly_theme,
bestfit=style.line_of_best_fit,
legend=style.display_legend,
colorscale=style.color,
dimensions=(style.width * abs(style.scale_factor) * scale,
style.height * abs(style.scale_factor) * scale),
asFigure=True)
# Otherwise we have a line plot (or similar such as a scatter plot, or bar chart etc)
else:
# Translate the chart-type alias into a Cufflinks kind + trace mode
full_line = style.connect_line_gaps
if chart_type_ord == 'line':
full_line = True
# chart_type_ord = 'scatter'
mode = 'lines'
elif chart_type_ord in ['dash', 'dashdot', 'dot']:
chart_type_ord = 'scatter'
elif chart_type_ord == 'line+markers':
full_line = True
chart_type_ord = 'line'
mode = 'lines+markers'
marker_size = 5
elif chart_type_ord == 'scatter':
mode = 'markers'
marker_size = 5
elif chart_type_ord == 'bubble':
chart_type_ord = 'scatter'
mode = 'markers'
# TODO check this!
# Can have issues calling cufflinks with a theme which is None, so split up the cases
if style.plotly_theme is None:
plotly_theme = 'pearl'
else:
plotly_theme = style.plotly_theme
m = 0
y_axis_2_series = [x for x in style.y_axis_2_series if x in data_frame.columns]
vspan = None
if style.x_shade_dates is not None:
vspan = {'x0': data_frame.index[0].strftime("%Y-%m-%d"),
'x1': data_frame.index[-1].strftime("%Y-%m-%d"), 'color': 'rgba(30,30,30,0.3)',
'fill': True, 'opacity': .4}
# Sometimes Plotly has issues generating figures in dash, so if fails first, try again
# NOTE(review): the retry mechanism is disabled — "if True:" replaced a
# "try:" whose except clause is commented out below, so the lines after
# "break" are unreachable and reference an undefined name e; confirm
# whether the try/except retry should be restored
while m < 10:
if True:
if vspan is None:
fig = data_frame.iplot(kind=chart_type_ord,
title=title,
xTitle=style.x_title,
yTitle=style.y_title,
x=x, y=y, z=z,
subplots=False,
sharing=style.plotly_sharing,
mode=mode,
secondary_y=y_axis_2_series,
size=marker_size,
theme=plotly_theme,
colorscale='dflt',
bestfit=style.line_of_best_fit,
legend=style.display_legend,
width=style.linewidth,
color=color_spec1,
dimensions=(style.width * abs(style.scale_factor) * scale,
style.height * abs(style.scale_factor) * scale),
asFigure=True)
else:
fig = data_frame.iplot(kind=chart_type_ord,
title=title,
xTitle=style.x_title,
yTitle=style.y_title,
x=x, y=y, z=z,
subplots=False,
sharing=style.plotly_sharing,
mode=mode,
secondary_y=y_axis_2_series,
size=marker_size,
theme=plotly_theme,
colorscale='dflt',
bestfit=style.line_of_best_fit,
legend=style.display_legend,
width=style.linewidth,
color=color_spec1,
dimensions=(style.width * abs(style.scale_factor) * scale,
style.height * abs(style.scale_factor) * scale),
vspan=vspan,
asFigure=True)
m = 10;
break
#except Exception as e:
print("Will attempt to re-render: " + str(e))
import time
time.sleep(0.3)
m = m + 1
# For lines set the property of connectgaps (cannot specify directly in cufflinks)
# NOTE(review): this loop clobbers the axis variable z defined above, and
# is duplicated by the k loop immediately below; confirm which is intended
if full_line:
for z in range(0, len(fig['data'])):
fig['data'][z].connectgaps = style.connect_line_gaps
for k in range(0, len(fig['data'])):
if full_line:
fig['data'][k].connectgaps = style.connect_line_gaps
if style.line_shape != None:
if isinstance(style.line_shape, str):
line_shape = [style.line_shape] * len(fig['data'])
else:
line_shape = style.line_shape
for k in range(0, len(fig['data'])):
fig['data'][k].line.shape = line_shape[k]
# Switch scatter traces to WebGL rendering when requested
if style.plotly_webgl:
for k in range(0, len(fig['data'])):
if fig['data'][k].type == 'scatter':
fig['data'][k].type = 'scattergl'
if style.stackgroup is not None:
if isinstance(style.stackgroup, list):
stackgroup = style.stackgroup
else:
stackgroup = ['A'] * len(fig['data'])
for k in range(0, len(fig['data'])):
fig['data'][k].stackgroup = stackgroup[k]
# Use plotly express (not implemented yet)
elif style.plotly_helper == 'plotly_express':
# TODO
pass
# Common properties
# Override other properties, which cannot be set directly by cufflinks/or you want to reset later
if style.title is not None:
try:
fig.update_layout(title=style.title)
except:
pass
# Add second y axis title
if style.y_2_title is not None:
if style.y_2_title != '':
try:
fig['layout'].update(yaxis2=dict(title=style.y_2_title))
except:
pass
if style.x_axis_range is not None:
try:
fig['layout'].update(xaxis=dict(range=style.x_axis_range, autorange=False))
except:
pass
if style.y_axis_range is not None:
try:
fig['layout'].update(yaxis=dict(range=style.y_axis_range, autorange=False))
except:
pass
if style.y_axis_2_range is not None:
try:
fig['layout'].update(yaxis2=dict(range=style.y_axis_2_range, autorange=False))
except:
pass
if style.z_axis_range is not None:
try:
fig['layout'].update(zaxis=dict(range=style.z_axis_range, autorange=False))
except:
pass
if style.font_family is not None:
try:
fig.update_layout(font_family=style.font_family)
except:
pass
if style.x_axis_type is not None:
try:
fig.update_xaxes(type=style.x_axis_type)
except:
pass
if style.y_axis_type is not None:
try:
fig.update_yaxes(type=style.y_axis_type)
except:
pass
if style.x_dtick is not None:
try:
fig.update_layout(xaxis=dict(tickmode='linear', dtick=style.x_dtick))
except:
pass
if style.y_dtick is not None:
try:
fig.update_layout(yaxis=dict(tickmode='linear', dtick=style.y_dtick))
except:
pass
# Add shaded regions
fig = self._multi_shade(fig, style)
# Legend Properties
if style.legend_x_anchor is not None:
try: fig.update_layout(legend=dict(xanchor=style.legend_x_anchor))
except: pass
if style.legend_y_anchor is not None:
try: fig.update_layout(legend=dict(yanchor=style.legend_y_anchor))
except: pass
if style.legend_x_pos is not None:
try: fig.update_layout(legend=dict(x=style.legend_x_pos))
except: pass
if style.legend_y_pos is not None:
try: fig.update_layout(legend=dict(y=style.legend_y_pos))
except: pass
if style.legend_bgcolor is not None:
try: fig.update_layout(legend=dict(bgcolor=style.legend_bgcolor))
except: pass
if style.legend_orientation is not None:
try: fig.update_layout(legend=dict(orientation=style.legend_orientation))
except: pass
if style.barmode is not None:
try: fig.update_layout(barmode=style.barmode)
except: pass
fig_list.append(fig)
#### Plotted all the lines
# Create subplots if more than one figure
if len(fig_list) > 1 and style.animate_figure == False and style.subplots == True:
from plotly.subplots import make_subplots
fig = make_subplots(rows=len(fig_list), cols=1)
# layout = fig_list[0]['layout']
# fig.layout = fig_list[0].layout
# for k, v in list(layout.items()):
# if 'xaxis' not in k and 'yaxis' not in k:
# fig['layout'].update({k: v})
for i, f in enumerate(fig_list):
fig.append_trace(f.data[0], row=i+1, col=1)
#fig = cf.subplots(fig_list, base_layout=fig_list[0].to_dict()['layout'], shape=(len(fig_list), 1),
# shared_xaxes=False, shared_yaxes=False)
if not(isinstance(style.title, list)):
fig['layout'].update(title=style.title)
fig['layout'].update(width=style.width * abs(style.scale_factor))
fig['layout'].update(height=style.height * abs(style.scale_factor))
# Animation: first figure becomes the base, every figure becomes a frame
elif style.animate_figure:
fig = fig_list[0]
# Add buttons to play/pause
fig["layout"]["updatemenus"] = [
{
"buttons": [
{
"args": [None, {"frame": {"duration": style.animate_frame_ms, "redraw": True},
"fromcurrent": True, "transition": {"duration": style.animate_frame_ms,
"easing": "quadratic-in-out"}}],
"label": "Play",
"method": "animate"
},
{
"args": [[None], {"frame": {"duration": 0, "redraw": True},
"mode": "immediate",
"transition": {"duration": 0}}],
"label": "Pause",
"method": "animate"
}
],
"direction": "left",
"pad": {"r": 10, "t": 87},
"showactive": False,
"type": "buttons",
"x": 0.1,
"xanchor": "right",
"y": 0,
"yanchor": "top"
}
]
if style.animate_titles is not None:
animate_titles = style.animate_titles
else:
animate_titles = list(range(0, len(fig_list)))
# Add an animation frame for each data frame
frames = []
for fig_temp, title_temp in zip(fig_list, animate_titles):
frames.append(go.Frame(data=fig_temp['data'],
name=str(title_temp),
layout=go.Layout(title=str(title_temp))))
fig.update(frames=frames)
# Add a slider, with the frame labels
sliders_dict = {
"active": 0,
"yanchor": "top",
"xanchor": "left",
"currentvalue": {
"visible": True,
"xanchor": "right"
},
"transition": {"duration": style.animate_frame_ms, "easing": "cubic-in-out"},
"pad": {"b": 10, "t": 50},
"len": 0.9,
"x": 0.1,
"y": 0,
"steps": []
}
for i in range(0, len(fig_list)):
slider_step = {"args": [
[animate_titles[i]],
{"frame" : {"duration": style.animate_frame_ms, "redraw": True},
"mode" : "immediate",
"transition" : {"duration": style.animate_frame_ms}}
],
"label" : str(animate_titles[i]),
"method" : "animate"}
sliders_dict["steps"].append(slider_step)
fig["layout"]["sliders"] = [sliders_dict]
#else:
# Add an animation frame for each data frame
# fig.update(frames=[go.Frame(data=fig_temp['data']) for fig_temp in fig_list])
else:
fig = fig_list[0]
fig.update(dict(layout=dict(legend=dict(
x=0.05,
y=1
))))
# Adjust margins
if style.thin_margin:
fig.update(dict(layout=dict(margin=go.layout.Margin(
l=20,
r=20,
b=40,
t=40,
pad=0
))))
# Change background color
fig.update(dict(layout=dict(paper_bgcolor='rgba(0,0,0,0)')))
fig.update(dict(layout=dict(plot_bgcolor='rgba(0,0,0,0)')))
# Deal with grids
if (not(style.x_axis_showgrid)): fig.update(dict(layout=dict(xaxis=dict(showgrid=style.x_axis_showgrid))))
if (not(style.y_axis_showgrid)): fig.update(dict(layout=dict(yaxis=dict(showgrid=style.y_axis_showgrid))))
if (not(style.y_axis_2_showgrid)): fig.update(
dict(layout=dict(yaxis2=dict(showgrid=style.y_axis_2_showgrid))))
# Override properties, which cannot be set directly by cufflinks
# For the type of line (ie. line or scatter)
# For making the lined dashed, dotted etc.
if style.subplots == False and isinstance(chart_type, list):
for j in range(0, len(fig['data'])):
mode = None;
dash = None;
line_shape = None;
if chart_type[j] == 'line':
mode = 'lines'
elif chart_type[j] == 'line+markers':
mode = 'lines+markers'
elif chart_type[j] == 'scatter':
mode = 'markers'
elif chart_type[j] in ['dash', 'dashdot', 'dot']:
dash = chart_type[j]
mode = 'lines'
elif chart_type[j] in ['hv', 'vh', 'vhv', 'spline', 'linear']:
line_shape = chart_type[j]
mode = 'lines'
elif chart_type[j] == 'bubble':
mode = 'markers'
# Marker sizes scaled by the matching bubble series (NaNs -> 0)
bubble_series = style.bubble_series[cols[j]]
bubble_series = bubble_series.fillna(0)
# dash = chart_type[j]
# data_frame[bubble_series.name] = bubble_series
scale = float(bubble_series.max())
fig['data'][j].marker.size = \
(style.bubble_size_scalar * (bubble_series.values / scale)).tolist()
if mode is not None:
fig['data'][j].mode = mode
if dash is not None:
fig['data'][j].line.dash = dash
if line_shape is not None:
fig['data'][j].line.shape = line_shape
# If candlestick specified add that (needed to be appended on top of the Plotly figure's data
if style.candlestick_series is not None and not(style.plotly_webgl):
# self.logger.debug("About to create candlesticks")
if isinstance(style.candlestick_series, Figure):
fig_candle = style.candlestick_series
else:
# from plotly.tools import FigureFactory as FF
fig_candle = create_candlestick(style.candlestick_series['open'],
style.candlestick_series['high'],
style.candlestick_series['low'],
style.candlestick_series['close'],
dates=style.candlestick_series['close'].index
)
if style.candlestick_increasing_color is not None:
# Increasing
fig_candle['data'][0].fillcolor = cm.get_color_code(style.candlestick_increasing_color)
fig_candle['data'][0].line.color = cm.get_color_code(style.candlestick_increasing_line_color)
if style.candlestick_decreasing_color is not None:
# Decreasing
fig_candle['data'][1].fillcolor = cm.get_color_code(style.candlestick_decreasing_color)
fig_candle['data'][1].line.color = cm.get_color_code(style.candlestick_decreasing_line_color)
try:
# Append the data to the existing Plotly figure, plotted earlier
fig.data.append(fig_candle.data[0]);
fig.data.append(fig_candle.data[1])
except:
# Later version of Plotly
fig.add_trace(fig_candle.data[0])
fig.add_trace(fig_candle.data[1])
# Overlay other Plotly figures on top of
if style.overlay_fig is not None:
for d in style.overlay_fig.data:
fig.add_trace(d)
x_y_line_list = []
# fig.layout.yrange
# add x-line:
for x_y_line in style.x_y_line:
start = x_y_line[0]
finish = x_y_line[1]
x_y_line_list.append(
{
'type': 'line',
'x0': start[0],
'y0': start[1],
'x1': finish[0],
'y1': finish[1],
'line': {
'color': 'black',
'width': 0.5,
'dash': 'dot',
},
}
)
# x_y_line_list = [{
# 'type': 'line',
# 'x0': 1,
# 'y0': 0,
# 'x1': 1,
# 'y1': 2,
# 'line': {
# 'color': 'rgb(55, 128, 191)',
# 'width': 3,
# },
# }]
if len(x_y_line_list) > 0:
fig.layout.shapes = x_y_line_list
# publish the plot (depending on the output mode eg. to HTML file/Jupyter notebook)
# also return as a Figure object for plotting by a web server app (eg. Flask/Dash)
return self.publish_plot(fig, style)
def _multi_shade(self, fig, style):
    """Replace the single vspan shape in a Plotly figure with one shaded
    region per date pair in style.x_shade_dates.

    The first shape in fig['layout']['shapes'] (created earlier by the
    cufflinks vspan option) is used as a template; its line color is set to
    transparent rgba(0,0,0,0) so only the fill shows.
    """
    import copy

    if style.x_shade_dates is None:
        return fig

    # Accept either a [starts, ends] pair or a {start: end} mapping
    if isinstance(style.x_shade_dates, list):
        region_starts = style.x_shade_dates[0]
        region_ends = style.x_shade_dates[1]
    else:
        region_starts = list(style.x_shade_dates.keys())
        region_ends = list(style.x_shade_dates.values())

    # Template shape produced by cufflinks' vspan()
    template_shape = fig['layout']['shapes'][0]

    # Clone the template once per region, adjusting the bounds and hiding the outline
    region_shapes = []

    for idx in range(0, len(region_starts)):
        shape = copy.deepcopy(template_shape)
        shape['x0'] = region_starts[idx]
        shape['x1'] = region_ends[idx]
        shape['line']['color'] = 'rgba(0,0,0,0)'
        region_shapes.append(shape)

    # Swap the single template shape for the full set of regions
    fig['layout']['shapes'] = tuple(region_shapes)

    return fig
    def publish_plot(self, fig, style):
        """Render or emit a finished Plotly figure according to style.plotly_plot_mode.

        The branches below write HTML/PNG files, upload to Plotly online,
        return HTML/PNG/SVG fragments for embedding (eg. by a Flask/Dash app),
        or draw inline in a Jupyter notebook. Returns the Figure object for
        most modes; the offline_*_div / offline_image_* modes instead return
        an HTML string or raw image bytes.
        """
        # change background color
        fig.update(dict(layout=dict(paper_bgcolor='rgba(0,0,0,0)')))
        fig.update(dict(layout=dict(plot_bgcolor='rgba(0,0,0,0)')))
        if style is None: style = Style()
        # fills in the output file names (style.file_output etc.) used below
        style = self.generate_file_names(style, 'plotly')
        if style.plotly_plot_mode == 'dash':
            # Dash consumes the returned Figure object directly; nothing to do
            pass
        elif style.plotly_plot_mode == 'online':
            plotly.tools.set_credentials_file(username=style.plotly_username, api_key=style.plotly_api_key)
            py_online.plot(fig, filename=style.plotly_url,
                           world_readable=style.plotly_world_readable,
                           auto_open=not (style.silent_display),
                           asImage=style.plotly_as_image)
        elif style.plotly_plot_mode == 'offline_html':
            py_offline.plot(fig, filename=style.html_file_output, auto_open=not(style.silent_display))
        elif style.plotly_plot_mode == 'offline_png':
            # Needs orca
            fig.write_image(style.file_output)
        elif style.plotly_plot_mode == 'offline_embed_js_div':
            return py_offline.plot(fig, include_plotlyjs=True, output_type='div')  # HTML string
        elif style.plotly_plot_mode == 'offline_div':
            # caller must supply plotly.js on the page themselves
            return py_offline.plot(fig, include_plotlyjs=False, output_type='div')  # HTML string
        elif style.plotly_plot_mode == 'offline_image_png_bytes':
            return plotly.io.to_image(fig, format='png')  # PNG as bytes
        elif style.plotly_plot_mode == 'offline_image_png_in_html':
            return '<img src="data:image/png;base64,' + \
                   base64.b64encode(plotly.io.to_image(fig, format='png')).decode(
                       'utf8') + '">'  # PNG as bytes in HTML image
        elif style.plotly_plot_mode == 'offline_image_svg_in_html':
            return '<img src="data:image/svg;base64,' + \
                   base64.b64encode(plotly.io.to_image(fig, format='svg')).decode(
                       'utf8') + '">'  # SVG as bytes in HTML image
        # can display in HTML as <img src="data:image/png;base64,[ENCODED STRING GOES HERE]">
        elif style.plotly_plot_mode == 'offline_jupyter':
            # plot in IPython notebook
            py_offline.init_notebook_mode()
            py_offline.iplot(fig)
        elif style.plotly_plot_mode == 'offline_jupyter_connected':
            # plot in IPython notebook
            py_offline.init_notebook_mode(connected=True)
            py_offline.iplot(fig)
        # plotly.offline.plot(fig, filename=style.file_output, format='png',
        #                     width=style.width * style.scale_factor, height=style.height * style.scale_factor)
        elif style.plotly_plot_mode != 'dash':
            # fallback: best-effort save through the online image API;
            # failures are deliberately ignored
            try:
                py_online.image.save_as(fig, filename=style.file_output, format='png',
                                        width=style.width * abs(style.scale_factor),
                                        height=style.height * abs(style.scale_factor))
            except:
                pass
        return fig
def get_color_list(self, i):
color_palette = cc.plotly_palette
return color_palette[i % len(color_palette)]
#######################################################################################################################
# Create color lists to be used in plots
class ColorMaster(object):
    """Builds per-series colour assignments for chart plotting."""

    def create_color_list(self, style, data_frame, cols=None):
        """Return a colour for every column, honouring secondary-axis series."""
        if cols is None:
            cols = data_frame.columns
        # primary palette covers everything not routed to color_2
        # (gradients are constructed if necessary, eg. from 'blues')
        primary = self.construct_color(style, 'color', len(cols) - len(style.color_2_series))
        secondary = self.construct_color(style, 'color_2', len(style.color_2_series))
        return self.assign_color(cols, primary, secondary,
                                 style.exclude_from_color, style.color_2_series)

    def construct_color(self, style, color_field_name, no_of_entries):
        """Resolve a style colour field into a list of colour specs.

        A list value is used verbatim; any other value is treated as a
        matplotlib colormap name and expanded into no_of_entries steps.
        """
        chosen = []
        if hasattr(style, color_field_name):
            field = getattr(style, color_field_name, chosen)
            if isinstance(field, list):
                chosen = field
            else:
                try:
                    chosen = self.create_colormap(no_of_entries, field)
                except:
                    pass
        return chosen

    def exclude_from_color(self, style):
        """Normalise style.exclude_from_color into a list of strings."""
        if not (isinstance(style.exclude_from_color, list)):
            style.exclude_from_color = [style.exclude_from_color]
        return [str(entry) for entry in style.exclude_from_color]

    def assign_color(self, labels, color, color_2, exclude_from_color,
                     color_2_series):
        """Pick a colour spec (or None) for each label.

        Labels in exclude_from_color get None; labels in color_2_series
        consume colours from color_2; everything else consumes from color.
        """
        assigned = []
        primary_index = 0
        secondary_index = 0
        # all comparisons are done on string labels
        for label in [str(entry) for entry in labels]:
            spec = None
            if label in exclude_from_color:
                spec = None
            elif label in color_2_series:
                if color_2 != []:
                    spec = self.get_color_code(color_2[secondary_index])
                    secondary_index = secondary_index + 1
            else:
                if color != []:
                    spec = self.get_color_code(color[primary_index])
                    primary_index = primary_index + 1
            try:
                # normalise to an RGBA tuple when matplotlib recognises it
                spec = matplotlib.colors.colorConverter.to_rgba(spec)
            except:
                pass
            assigned.append(spec)
        return assigned

    def get_color_code(self, code):
        """Map friendly colour names onto chartfactory overrides."""
        overrides = cc.chartfactory_color_overwrites
        return overrides.get(code, code)

    def create_colormap(self, num_colors, map_name):
        """Sample num_colors evenly spaced colours from a matplotlib colormap."""
        ## matplotlib ref for colors: http://matplotlib.org/examples/color/colormaps_reference.html
        cmap = matplotlib.cm.get_cmap(name=map_name)
        return [cmap(1. * step / num_colors) for step in range(num_colors)]
########################################################################################################################
## faster version of Plotly's candlestick drawing module (assumes NumPy) ###############################################
from plotly.figure_factory import utils
from plotly.figure_factory._ohlc import (_DEFAULT_INCREASING_COLOR,
_DEFAULT_DECREASING_COLOR,
validate_ohlc)
def make_increasing_candle(open, high, low, close, dates, **kwargs):
    """
    Build the box trace for the increasing (close > open) candlesticks.

    Keeping increasing and decreasing traces separate lets kwargs (such as
    colour) be passed independently to either side when direction is set to
    'increasing' or 'decreasing' in FigureFactory.create_candlestick().

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects. Default: None
    :param kwargs: kwargs to be passed to the increasing trace via
        plotly.graph_objs.Scatter.
    :rtype (list): single-element list holding the increasing box trace.
    """
    incr_x, incr_y = _Candlestick(
        open, high, low, close, dates, **kwargs).get_candle_increase()

    # fill colour mirrors an explicit line colour, otherwise the default
    if 'line' in kwargs:
        kwargs.setdefault('fillcolor', kwargs['line']['color'])
    else:
        kwargs.setdefault('fillcolor', _DEFAULT_INCREASING_COLOR)
    # only show a legend entry when the caller named the trace
    kwargs.setdefault('showlegend', 'name' in kwargs)
    kwargs.setdefault('name', 'Increasing')
    kwargs.setdefault('line', dict(color=_DEFAULT_INCREASING_COLOR))

    return [dict(type='box',
                 x=incr_x,
                 y=incr_y,
                 whiskerwidth=0,
                 boxpoints=False,
                 **kwargs)]
def make_decreasing_candle(open, high, low, close, dates, **kwargs):
    """
    Build the box trace for the decreasing (close <= open) candlesticks.

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects. Default: None
    :param kwargs: kwargs to be passed to the decreasing trace via
        plotly.graph_objs.Scatter.
    :rtype (list): single-element list holding the decreasing box trace.
    """
    decr_x, decr_y = _Candlestick(
        open, high, low, close, dates, **kwargs).get_candle_decrease()

    # fill colour mirrors an explicit line colour, otherwise the default
    if 'line' in kwargs:
        kwargs.setdefault('fillcolor', kwargs['line']['color'])
    else:
        kwargs.setdefault('fillcolor', _DEFAULT_DECREASING_COLOR)
    # decreasing candles never get their own legend entry by default
    kwargs.setdefault('showlegend', False)
    kwargs.setdefault('line', dict(color=_DEFAULT_DECREASING_COLOR))
    kwargs.setdefault('name', 'Decreasing')

    return [dict(type='box',
                 x=decr_x,
                 y=decr_y,
                 whiskerwidth=0,
                 boxpoints=False,
                 **kwargs)]
def create_candlestick(open, high, low, close, dates=None, direction='both',
                       **kwargs):
    """
    BETA function that creates a candlestick chart

    :param (list) open: opening values
    :param (list) high: high values
    :param (list) low: low values
    :param (list) close: closing values
    :param (list) dates: list of datetime objects. Default: None
    :param (string) direction: 'increasing' returns only the candlesticks
        whose close value is greater than their open value; 'decreasing'
        returns only those whose close value is less than or equal to their
        open value; 'both' (the default, and any other value) returns both
        sets of candlesticks.
    :param kwargs: kwargs passed through plotly.graph_objs.Scatter.
        These kwargs describe other attributes about the ohlc Scatter trace
        such as the color or the legend name. For more information on valid
        kwargs call help(plotly.graph_objs.Scatter)
    :rtype (dict): returns a representation of candlestick chart figure.

    Example (simple candlestick chart from a Pandas DataFrame):
    ```
    fig = create_candlestick(df.Open, df.High, df.Low, df.Close, dates=df.index)
    py.plot(fig, filename='finance/aapl-candlestick', validate=False)
    ```
    Increasing and decreasing sides can be styled independently by calling
    this function twice with direction='increasing' / 'decreasing' and
    extending one figure's data with the other's.
    """
    # NOTE: input-length validation (utils.validate_equal_length /
    # validate_ohlc) is deliberately skipped here for speed.
    if direction == 'increasing':
        data = make_increasing_candle(open, high, low, close, dates, **kwargs)
    elif direction == 'decreasing':
        data = make_decreasing_candle(open, high, low, close, dates, **kwargs)
    else:
        # any other value (normally 'both') plots the full candle set
        data = (make_increasing_candle(open, high, low, close, dates, **kwargs) +
                make_decreasing_candle(open, high, low, close, dates, **kwargs))

    return go.Figure(data=data, layout=go.Layout())
class _Candlestick(object):
    """
    Splits OHLC series into the x/y point lists of increasing and
    decreasing box traces.

    Refer to FigureFactory.create_candlestick() for docstring.
    """
    def __init__(self, open, high, low, close, dates, **kwargs):
        # assume pandas-style inputs whose .values attribute yields NumPy
        # arrays (much quicker than ordinary Python sequences)
        self.open = open.values
        self.high = high.values
        self.low = low.values
        self.close = close.values
        # the x axis defaults to the positional index when no dates are given
        if dates is not None:
            self.x = dates
        else:
            self.x = [x for x in range(len(self.open))]
        # NOTE: a redundant call to self.get_candle_increase() used to live
        # here; its result was discarded and the method is side-effect free,
        # so it has been removed to avoid computing every trace twice.

    def get_candle_increase(self):
        """
        Separate increasing data from decreasing data.
        The data is increasing when close value > open value
        and decreasing when the close value <= open value.

        Returns (increase_x, increase_y) for the increasing candles only.
        """
        increase_y = []
        increase_x = []
        for index in range(len(self.open)):
            if self.close[index] > self.open[index]:
                # six y-points per candle: low, open, close x3 (box body), high
                increase_y.append(self.low[index])
                increase_y.append(self.open[index])
                increase_y.append(self.close[index])
                increase_y.append(self.close[index])
                increase_y.append(self.close[index])
                increase_y.append(self.high[index])
                increase_x.append(self.x[index])

        # repeat each x six times so it pairs with the six y-points
        increase_x = [[x, x, x, x, x, x] for x in increase_x]
        increase_x = utils.flatten(increase_x)

        return increase_x, increase_y

    def get_candle_decrease(self):
        """
        Return (decrease_x, decrease_y) for the decreasing candles only
        (those where close value <= open value).
        """
        decrease_y = []
        decrease_x = []
        for index in range(len(self.open)):
            if self.close[index] <= self.open[index]:
                # six y-points per candle: low, open, close x3 (box body), high
                decrease_y.append(self.low[index])
                decrease_y.append(self.open[index])
                decrease_y.append(self.close[index])
                decrease_y.append(self.close[index])
                decrease_y.append(self.close[index])
                decrease_y.append(self.high[index])
                decrease_x.append(self.x[index])

        # repeat each x six times so it pairs with the six y-points
        decrease_x = [[x, x, x, x, x, x] for x in decrease_x]
        decrease_x = utils.flatten(decrease_x)

        return decrease_x, decrease_y
| cuemacro/chartpy | chartpy/engine.py | Python | apache-2.0 | 96,504 | [
"ORCA"
] | 931c840cc2070103faf649834d1a110a87b5f2dc7f70f56f2472a2eb67b4dc25 |
# -*- coding: utf-8 -*-
# Documentation {{{1 #
########################
# Copyright (C) 2009-2013 Roman Zimbelmann <hut@lavabit.com>
# This configuration file is licensed under the same terms as ranger.
# ===================================================================
# This file contains ranger's commands.
# It's all in python; lines beginning with # are comments.
#
# Note that additional commands are automatically generated from the methods
# of the class ranger.core.actions.Actions.
#
# You can customize commands in the file ~/.config/ranger/commands.py.
# It has the same syntax as this file. In fact, you can just copy this
# file there with `ranger --copy-config=commands' and make your modifications.
# But make sure you update your configs when you update ranger.
#
# ===================================================================
# Every class defined here which is a subclass of `Command' will be used as a
# command in ranger. Several methods are defined to interface with ranger:
# execute(): called when the command is executed.
# cancel(): called when closing the console.
# tab(): called when <TAB> is pressed.
# quick(): called after each keypress.
#
# The return values for tab() can be either:
# None: There is no tab completion
# A string: Change the console to this string
# A list/tuple/generator: cycle through every item in it
#
# The return value for quick() can be:
# False: Nothing happens
# True: Execute the command afterwards
#
# The return value for execute() and cancel() doesn't matter.
#
# ===================================================================
# Commands have certain attributes and methods that facilitate parsing of
# the arguments:
#
# self.line: The whole line that was written in the console.
# self.args: A list of all (space-separated) arguments to the command.
# self.quantifier: If this command was mapped to the key "X" and
# the user pressed 6X, self.quantifier will be 6.
# self.arg(n): The n-th argument, or an empty string if it doesn't exist.
# self.rest(n): The n-th argument plus everything that followed. For example,
# If the command was "search foo bar a b c", rest(2) will be "bar a b c"
# self.start(n): Anything before the n-th argument. For example,
#      If the command was "search foo bar a b c", start(2) will be "search foo"
#
# ===================================================================
# And this is a little reference for common ranger functions and objects:
#
# self.fm: A reference to the "fm" object which contains most information
# about ranger.
# self.fm.notify(string): Print the given string on the screen.
# self.fm.notify(string, bad=True): Print the given string in RED.
# self.fm.reload_cwd(): Reload the current working directory.
# self.fm.thisdir: The current working directory. (A File object.)
# self.fm.thisfile: The current file. (A File object too.)
# self.fm.thistab.get_selection(): A list of all selected files.
# self.fm.execute_console(string): Execute the string as a ranger command.
# self.fm.open_console(string): Open the console with the given string
# already typed in for you.
# self.fm.move(direction): Moves the cursor in the given direction, which
# can be something like down=3, up=5, right=1, left=1, to=6, ...
#
# File objects (for example self.fm.thisfile) have these useful attributes and
# methods:
#
# cf.path: The path to the file.
# cf.basename: The base name only.
# cf.load_content(): Force a loading of the directories content (which
# obviously works with directories only)
# cf.is_directory: True/False depending on whether it's a directory.
#
# For advanced commands it is unavoidable to dive a bit into the source code
# of ranger.
# ===================================================================
from ranger.api.commands import *
import re
import os
import sys
# Custom commands {{{1 #
##########################
class vesta(Command):
    """Open the selected file with the VESTA structure viewer."""
    fileName = ""

    def getFileName(self):
        # fall back to the file under the cursor when none was set explicitly
        if self.fileName:
            return self.fileName
        return str(self.fm.thisfile.path)

    def setFileName(self, name):
        self.fileName = name

    def checkFileName(self):
        """Copy POSCAR-style files to a .vasp name so VESTA recognises them."""
        import re
        import shutil
        current = self.getFileName()
        if re.match(r".*POSCAR.+", current):
            renamed = current + ".vasp"
            shutil.copyfile(current, renamed)
            self.setFileName(renamed)

    def execute(self):
        self.checkFileName()
        self.fm.notify(self.getFileName())
        self.fm.execute_command("vesta " + self.getFileName(), flags="f")
# Default commands {{{1 #
###########################
class alias(Command):
    """:alias <newcommand> <oldcommand>
    Copies the oldcommand as newcommand.
    """
    context = 'browser'
    resolve_macros = False

    def execute(self):
        new_name = self.arg(1)
        old_command = self.arg(2)
        if new_name and old_command:
            self.fm.commands.alias(new_name, self.rest(2))
        else:
            self.fm.notify('Syntax: alias <newcommand> <oldcommand>', bad=True)
class cd(Command):
    """:cd [-r] <dirname>
    The cd command changes the directory.
    The command 'cd -' is equivalent to typing ``.
    Using the option "-r" will get you to the real path.
    """
    def execute(self):
        import os.path
        if self.arg(1) == '-r':
            # -r: resolve symlinks; if the target is a file, go to its parent
            self.shift()
            destination = os.path.realpath(self.rest(1))
            if os.path.isfile(destination):
                destination = os.path.dirname(destination)
        else:
            destination = self.rest(1)
        # bare ":cd" goes home
        if not destination:
            destination = '~'
        if destination == '-':
            # 'cd -' jumps to the previous directory via the backtick bookmark
            self.fm.enter_bookmark('`')
        else:
            self.fm.cd(destination)
    def tab(self):
        """Tab-complete directory names, with bookmark paths offered first."""
        import os
        from os.path import dirname, basename, expanduser, join
        cwd = self.fm.thisdir.path
        rel_dest = self.rest(1)
        # bookmarks whose path contains the typed text are candidates too
        bookmarks = [v.path for v in self.fm.bookmarks.dct.values()
                if rel_dest in v.path ]
        # expand the tilde into the user directory
        if rel_dest.startswith('~'):
            rel_dest = expanduser(rel_dest)
        # define some shortcuts
        abs_dest = join(cwd, rel_dest)
        abs_dirname = dirname(abs_dest)
        rel_basename = basename(rel_dest)
        rel_dirname = dirname(rel_dest)
        try:
            # are we at the end of a directory?
            if rel_dest.endswith('/') or rel_dest == '':
                _, dirnames, _ = next(os.walk(abs_dest))
            # are we in the middle of the filename?
            else:
                _, dirnames, _ = next(os.walk(abs_dirname))
                dirnames = [dn for dn in dirnames \
                        if dn.startswith(rel_basename)]
        except (OSError, StopIteration):
            # os.walk found nothing
            pass
        else:
            dirnames.sort()
            dirnames = bookmarks + dirnames
            # no results, return None
            if len(dirnames) == 0:
                return
            # one result. since it must be a directory, append a slash.
            if len(dirnames) == 1:
                return self.start(1) + join(rel_dirname, dirnames[0]) + '/'
            # more than one result. append no slash, so the user can
            # manually type in the slash to advance into that directory
            # (note: the generator variable 'dirname' shadows os.path.dirname)
            return (self.start(1) + join(rel_dirname, dirname) for dirname in dirnames)
class chain(Command):
    """:chain <command1>; <command2>; ...
    Calls multiple commands at once, separated by semicolons.
    """

    def execute(self):
        # run each semicolon-separated command through the console in order
        for single_command in self.rest(1).split(";"):
            self.fm.execute_console(single_command)
class shell(Command):
    """:shell [-<flags>] <command>
    Run a shell command. An optional first argument of the form -<flags>
    is stripped off and forwarded to fm.execute_command as execution flags.
    """
    escape_macros_for_shell = True
    def execute(self):
        # a first argument starting with '-' carries execution flags
        if self.arg(1) and self.arg(1)[0] == '-':
            flags = self.arg(1)[1:]
            command = self.rest(2)
        else:
            flags = ''
            command = self.rest(1)
        # flags with no command default to paging the current file
        if not command and 'p' in flags:
            command = 'cat %f'
        if command:
            if '%' in command:
                # expand ranger macros such as %f / %s (shell-escaped)
                command = self.fm.substitute_macros(command, escape=True)
            self.fm.execute_command(command, flags=flags)
    def tab(self):
        """Complete executables for the first word, file names afterwards."""
        from ranger.ext.get_executables import get_executables
        if self.arg(1) and self.arg(1)[0] == '-':
            command = self.rest(2)
        else:
            command = self.rest(1)
        # everything on the line before the command part (prompt and flags)
        start = self.line[0:len(self.line) - len(command)]
        try:
            position_of_last_space = command.rindex(" ")
        except ValueError:
            # still typing the program name: complete from $PATH executables
            return (start + program + ' ' for program \
                    in get_executables() if program.startswith(command))
        if position_of_last_space == len(command) - 1:
            # cursor is right after a space: offer the selection as argument
            selection = self.fm.thistab.get_selection()
            if len(selection) == 1:
                return self.line + selection[0].shell_escaped_basename + ' '
            else:
                return self.line + '%s '
        else:
            # complete a partially typed file name from the current directory
            before_word, start_of_word = self.line.rsplit(' ', 1)
            return (before_word + ' ' + file.shell_escaped_basename \
                    for file in self.fm.thisdir.files \
                    if file.shell_escaped_basename.startswith(start_of_word))
class open_with(Command):
    """:open_with [<app>] [<flags>] [<mode>]
    Open the current selection with the given application, execution
    flags and mode. All three arguments are optional; see
    _get_app_flags_mode for how they are told apart.
    """
    def execute(self):
        app, flags, mode = self._get_app_flags_mode(self.rest(1))
        self.fm.execute_file(
                files = [f for f in self.fm.thistab.get_selection()],
                app = app,
                flags = flags,
                mode = mode)
    def tab(self):
        return self._tab_through_executables()
    def _get_app_flags_mode(self, string):
        """Extracts the application, flags and mode from a string.
        examples:
        "mplayer f 1" => ("mplayer", "f", 1)
        "aunpack 4" => ("aunpack", "", 4)
        "p" => ("", "p", 0)
        "" => None
        """
        app = ''
        flags = ''
        mode = 0
        split = string.split()
        # classify each whitespace-separated token; an app name (anything that
        # is neither pure flags nor a number) can only be the first token,
        # while flags and mode may appear in either order after it
        if len(split) == 0:
            pass
        elif len(split) == 1:
            part = split[0]
            if self._is_app(part):
                app = part
            elif self._is_flags(part):
                flags = part
            elif self._is_mode(part):
                mode = part
        elif len(split) == 2:
            part0 = split[0]
            part1 = split[1]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                elif self._is_mode(part1):
                    mode = part1
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        elif len(split) >= 3:
            part0 = split[0]
            part1 = split[1]
            part2 = split[2]
            if self._is_app(part0):
                app = part0
                if self._is_flags(part1):
                    flags = part1
                    if self._is_mode(part2):
                        mode = part2
                elif self._is_mode(part1):
                    mode = part1
                    if self._is_flags(part2):
                        flags = part2
            elif self._is_flags(part0):
                flags = part0
                if self._is_mode(part1):
                    mode = part1
            elif self._is_mode(part0):
                mode = part0
                if self._is_flags(part1):
                    flags = part1
        return app, flags, int(mode)
    def _is_app(self, arg):
        # an app is anything that is neither a flag string nor a number
        return not self._is_flags(arg) and not arg.isdigit()
    def _is_flags(self, arg):
        from ranger.core.runner import ALLOWED_FLAGS
        return all(x in ALLOWED_FLAGS for x in arg)
    def _is_mode(self, arg):
        return all(x in '0123456789' for x in arg)
class set_(Command):
    """:set <option name>=<python expression>
    Gives an option a new value.
    """
    name = 'set'  # don't override the builtin set class

    def execute(self):
        # parse_setting_line() extracts both name and value from the console
        # line, so the previous separate self.arg(1) read was dead code.
        name, value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(name, value)

    def tab(self):
        """Tab-complete option names, current values and booleans."""
        name, value, name_done = self.parse_setting_line()
        settings = self.fm.settings
        # nothing typed yet: offer every setting
        if not name:
            return sorted(self.firstpart + setting for setting in settings)
        # partially typed option name: offer the matching settings
        if not value and not name_done:
            return (self.firstpart + setting for setting in settings
                    if setting.startswith(name))
        # complete name, no value yet: suggest the current value
        if not value:
            return self.firstpart + str(settings[name])
        # boolean options complete to True/False
        if bool in settings.types_of(name):
            if 'true'.startswith(value.lower()):
                return self.firstpart + 'True'
            if 'false'.startswith(value.lower()):
                return self.firstpart + 'False'
class setlocal(set_):
    """:setlocal path=<python string> <option name>=<python expression>
    Gives an option a new value, local to the given path.
    """
    # matches an optional surrounding pair of quotes around the path value
    PATH_RE = re.compile(r'^\s*path="?(.*?)"?\s*$')

    def execute(self):
        import os.path
        # an explicit path=... first argument wins over the current directory
        match = self.PATH_RE.match(self.arg(1))
        if match:
            path = os.path.normpath(os.path.expanduser(match.group(1)))
            self.shift()
        elif self.fm.thisdir:
            path = self.fm.thisdir.path
        else:
            path = None
        if path:
            # parse_setting_line() extracts both name and value from the
            # console line, so the previous self.arg(1) read was dead code.
            name, value, _ = self.parse_setting_line()
            self.fm.set_option_from_string(name, value, localpath=path)
class setintag(setlocal):
    """:setintag <tag or tags> <option name>=<option value>
    Sets an option for directories that are tagged with a specific tag.
    """

    def execute(self):
        # first argument names the tag(s); the remainder is name=value
        tags = self.arg(1)
        self.shift()
        option_name, option_value, _ = self.parse_setting_line()
        self.fm.set_option_from_string(option_name, option_value, tags=tags)
class quit(Command):
    """:quit
    Closes the current tab. If there is only one tab, quit the program.
    """

    def execute(self):
        open_tabs = len(self.fm.tabs)
        # closing the last remaining tab means leaving the program
        if open_tabs <= 1:
            self.fm.exit()
        self.fm.tab_close()
class quitall(Command):
    """:quitall
    Quits the program immediately.
    """

    def execute(self):
        # unconditional shutdown, regardless of how many tabs are open
        self.fm.exit()
class quit_bang(quitall):
    """:quit!
    Quits the program immediately.
    """
    # same behaviour as quitall, registered under the vim-style name "quit!";
    # allow_abbrev=False means this name must be typed out in full
    name = 'quit!'
    allow_abbrev = False
class terminal(Command):
    """:terminal
    Spawns an "x-terminal-emulator" starting in the current directory.
    """

    def execute(self):
        import os
        from ranger.ext.get_executables import get_executables
        # preference order: $TERMCMD, $TERM, x-terminal-emulator, xterm
        command = os.environ.get('TERMCMD', os.environ.get('TERM'))
        for fallback in ('x-terminal-emulator', 'xterm'):
            if command not in get_executables():
                command = fallback
        self.fm.run(command, flags='f')
class delete(Command):
    """:delete
    Tries to delete the selection.
    "Selection" is defined as all the "marked files" (by default, you
    can mark files with space or v). If there are no marked files,
    use the "current file" (where the cursor is)
    When attempting to delete non-empty directories or multiple
    marked files, it will require a confirmation.
    """
    allow_abbrev = False
    def execute(self):
        import os
        if self.rest(1):
            self.fm.notify("Error: delete takes no arguments! It deletes "
                    "the selected file(s).", bad=True)
            return
        cwd = self.fm.thisdir
        cf = self.fm.thisfile
        if not cwd or not cf:
            self.fm.notify("Error: no file selected for deletion!", bad=True)
            return
        confirm = self.fm.settings.confirm_on_delete
        # "many files" means an explicit multi-selection, or a single
        # non-empty non-link directory (deleting it removes its contents too)
        many_files = (cwd.marked_items or (cf.is_directory and not cf.is_link \
                and len(os.listdir(cf.path)) > 0))
        if confirm != 'never' and (confirm != 'multiple' or many_files):
            # ask first; the deletion itself continues in _question_callback
            self.fm.ui.console.ask("Confirm deletion of: %s (y/N)" %
                ', '.join(f.basename for f in self.fm.thistab.get_selection()),
                self._question_callback, ('n', 'N', 'y', 'Y'))
        else:
            # no need for a confirmation, just delete
            self.fm.delete()
    def _question_callback(self, answer):
        # invoked asynchronously with the key the user pressed at the prompt
        if answer == 'y' or answer == 'Y':
            self.fm.delete()
class mark_tag(Command):
    """:mark_tag [<tags>]
    Mark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are marked.
    """
    # subclasses flip this to reuse execute() for unmarking
    do_mark = True

    def execute(self):
        directory = self.fm.thisdir
        wanted = self.rest(1).replace(" ", "")
        if not self.fm.tags:
            return
        known_tags = self.fm.tags.tags
        for entry in directory.files:
            key = entry.realpath
            if key not in known_tags:
                continue
            if not wanted or known_tags[key] in wanted:
                directory.mark_item(entry, val=self.do_mark)
        self.fm.ui.status.need_redraw = True
        self.fm.ui.need_redraw = True
class console(Command):
    """:console <command>
    Open the console with the given command.
    """

    def execute(self):
        position = None
        # an optional -p<N> first argument preselects the cursor position
        if self.arg(1)[0:2] == '-p':
            try:
                position = int(self.arg(1)[2:])
                self.shift()
            except ValueError:
                # not a number after -p: treat the argument as command text
                # (previously a bare except, which also swallowed
                # KeyboardInterrupt and the like)
                pass
        self.fm.open_console(self.rest(1), position=position)
class load_copy_buffer(Command):
    """:load_copy_buffer
    Load the copy buffer from confdir/copy_buffer
    """
    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        from ranger.container.file import File
        from os.path import exists
        # initialised before the try block: previously a failure inside
        # confpath() made the error message below raise NameError on fname
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            fobj = open(fname, 'r')
        except:
            return self.fm.notify("Cannot open %s" % \
                    (fname or self.copy_buffer_filename), bad=True)
        # one path per line; entries that no longer exist are skipped.
        # 'with' guarantees the handle is closed even if reading fails.
        with fobj:
            self.fm.copy_buffer = set(File(g) \
                    for g in fobj.read().split("\n") if exists(g))
        self.fm.ui.redraw_main_column()
class save_copy_buffer(Command):
    """:save_copy_buffer
    Save the copy buffer to confdir/copy_buffer
    """
    copy_buffer_filename = 'copy_buffer'

    def execute(self):
        fname = None
        try:
            fname = self.fm.confpath(self.copy_buffer_filename)
            fobj = open(fname, 'w')
        except:
            return self.fm.notify("Cannot open %s" % \
                    (fname or self.copy_buffer_filename), bad=True)
        # one absolute path per line; 'with' closes the handle even if the
        # write fails part-way through (the old code leaked it in that case)
        with fobj:
            fobj.write("\n".join(f.path for f in self.fm.copy_buffer))
class unmark_tag(mark_tag):
    """:unmark_tag [<tags>]
    Unmark all tags that are tagged with either of the given tags.
    When leaving out the tag argument, all tagged files are unmarked.
    """
    # reuses mark_tag.execute(); flipping do_mark makes it unmark instead
    do_mark = False
class mkdir(Command):
    """:mkdir <dirname>
    Creates a directory with the name <dirname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists
        from os import mkdir
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            mkdir(target)

    def tab(self):
        return self._tab_directory_content()
class touch(Command):
    """:touch <fname>
    Creates a file with the name <fname>.
    """

    def execute(self):
        from os.path import join, expanduser, lexists
        target = join(self.fm.thisdir.path, expanduser(self.rest(1)))
        if lexists(target):
            self.fm.notify("file/directory exists!", bad=True)
        else:
            # append mode creates the file without truncating anything
            open(target, 'a').close()

    def tab(self):
        return self._tab_directory_content()
class edit(Command):
    """:edit <filename>
    Opens the specified file in vim
    """

    def execute(self):
        # no argument given: edit the file under the cursor instead
        target = self.rest(1)
        if target:
            self.fm.edit_file(target)
        else:
            self.fm.edit_file(self.fm.thisfile.path)

    def tab(self):
        return self._tab_directory_content()
class eval_(Command):
    """:eval [-q] <python code>
    Evaluates the python code.
    `fm' is a reference to the FM instance.
    To display text, use the function `p'.
    Examples:
    :eval fm
    :eval len(fm.directories)
    :eval p("Hello World!")
    """
    name = 'eval'
    resolve_macros = False
    def execute(self):
        # -q suppresses printing the expression's result
        if self.arg(1) == '-q':
            code = self.rest(2)
            quiet = True
        else:
            code = self.rest(1)
            quiet = False
        import ranger
        # expose convenience names to the evaluated code as module globals
        global cmd, fm, p, quantifier
        fm = self.fm
        cmd = self.fm.execute_console
        p = fm.notify
        quantifier = self.quantifier
        try:
            try:
                # try as an expression first so the result can be displayed
                result = eval(code)
            except SyntaxError:
                # fall back to statements (assignments, loops, ...)
                exec(code)
            else:
                if result and not quiet:
                    p(result)
        except Exception as err:
            # show any runtime error in the status bar instead of crashing
            p(err)
class rename(Command):
    """:rename <newname>
    Changes the name of the currently highlighted file to <newname>
    """

    def execute(self):
        from ranger.container.file import File
        from os import access
        new_name = self.rest(1)
        if not new_name:
            return self.fm.notify('Syntax: rename <newname>', bad=True)
        if new_name == self.fm.thisfile.basename:
            return
        # refuse to clobber an existing file
        if access(new_name, os.F_OK):
            return self.fm.notify("Can't rename: file already exists!", bad=True)
        self.fm.rename(self.fm.thisfile, new_name)
        renamed = File(new_name)
        # keep the cursor on the renamed entry
        self.fm.thisdir.pointed_obj = renamed
        self.fm.thisfile = renamed

    def tab(self):
        return self._tab_directory_content()
class chmod(Command):
    """:chmod <octal number>
    Sets the permissions of the selection to the octal number.
    The octal number is between 0 and 777. The digits specify the
    permissions for the user, the group and others.
    A 1 permits execution, a 2 permits writing, a 4 permits reading.
    Add those numbers to combine them. So a 7 permits everything.
    """

    def execute(self):
        mode = self.rest(1)
        # Allow the mode to be supplied as a quantifier, e.g. "777:chmod".
        if not mode:
            mode = str(self.quantifier)
        try:
            mode = int(mode, 8)
            if mode < 0 or mode > 0o777:
                raise ValueError
        except ValueError:
            self.fm.notify("Need an octal number between 0 and 777!", bad=True)
            return

        # BUGFIX: the loop variable used to shadow the builtin `file`.
        for fobj in self.fm.thistab.get_selection():
            try:
                os.chmod(fobj.path, mode)
            except OSError as ex:
                # os.chmod only raises OSError; the old `except Exception`
                # was broader than necessary.
                self.fm.notify(ex)

        try:
            # reloading directory. maybe its better to reload the selected
            # files only.
            self.fm.thisdir.load_content()
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
class bulkrename(Command):
    """:bulkrename
    This command opens a list of selected files in an external editor.
    After you edit and save the file, it will generate a shell script
    which does bulk renaming according to the changes you did in the file.
    This shell script is opened in an editor for you to review.
    After you close it, it will be executed.
    """

    def execute(self):
        import sys
        import tempfile
        from ranger.container.file import File
        from ranger.ext.shell_escape import shell_escape as esc
        # On python 3 the tempfiles must be written as bytes.
        py3 = sys.version > "3"

        # Create and edit the file list
        filenames = [f.basename for f in self.fm.thistab.get_selection()]
        listfile = tempfile.NamedTemporaryFile()

        if py3:
            listfile.write("\n".join(filenames).encode("utf-8"))
        else:
            listfile.write("\n".join(filenames))
        listfile.flush()
        # Let the user edit the names, one per line.
        self.fm.execute_file([File(listfile.name)], app='editor')
        listfile.seek(0)
        if py3:
            new_filenames = listfile.read().decode("utf-8").split("\n")
        else:
            new_filenames = listfile.read().split("\n")
        listfile.close()
        # Line-by-line comparison against the original names; unchanged
        # lists mean there is nothing to do.
        if all(a == b for a, b in zip(filenames, new_filenames)):
            self.fm.notify("No renaming to be done!")
            return

        # Generate and execute script
        cmdfile = tempfile.NamedTemporaryFile()
        cmdfile.write(b"# This file will be executed when you close the editor.\n")
        cmdfile.write(b"# Please double-check everything, clear the file to abort.\n")
        # One shell-escaped `mv -vi --` line per name that actually changed.
        if py3:
            cmdfile.write("\n".join("mv -vi -- " + esc(old) + " " + esc(new) \
                for old, new in zip(filenames, new_filenames) \
                if old != new).encode("utf-8"))
        else:
            cmdfile.write("\n".join("mv -vi -- " + esc(old) + " " + esc(new) \
                for old, new in zip(filenames, new_filenames) if old != new))
        cmdfile.flush()
        # Show the generated script for review, then run it through sh.
        self.fm.execute_file([File(cmdfile.name)], app='editor')
        self.fm.run(['/bin/sh', cmdfile.name], flags='w')
        cmdfile.close()
class relink(Command):
    """:relink <newpath>
    Changes the linked path of the currently highlighted symlink to <newpath>
    """

    def execute(self):
        from ranger.container.file import File

        target = self.rest(1)
        cf = self.fm.thisfile

        if not target:
            return self.fm.notify('Syntax: relink <newpath>', bad=True)
        if not cf.is_link:
            return self.fm.notify('%s is not a symlink!' % cf.basename, bad=True)
        # Nothing to do when the link already points there.
        if target == os.readlink(cf.path):
            return

        try:
            # A symlink cannot be retargeted in place: remove and recreate.
            os.remove(cf.path)
            os.symlink(target, cf.path)
        except OSError as err:
            self.fm.notify(err)

        self.fm.reset()
        self.fm.thisdir.pointed_obj = cf
        self.fm.thisfile = cf

    def tab(self):
        if self.rest(1):
            return self._tab_directory_content()
        # Prefill the console with the link's current target.
        return self.line + os.readlink(self.fm.thisfile.path)
class help_(Command):
    """:help
    Display ranger's manual page.
    """

    name = 'help'

    def execute(self):
        # Quantifiers 1-3 dump internal tables instead of the man page.
        dumpers = {
            1: self.fm.dump_keybindings,
            2: self.fm.dump_commands,
            3: self.fm.dump_settings,
        }
        dumpers.get(self.quantifier, self.fm.display_help)()
class copymap(Command):
    """:copymap <keys> <newkeys1> [<newkeys2>...]
    Copies a "browser" keybinding from <keys> to <newkeys>
    """

    context = 'browser'

    def execute(self):
        # Need at least a source and one destination key sequence.
        if not (self.arg(1) and self.arg(2)):
            return self.fm.notify("Not enough arguments", bad=True)

        source = self.arg(1)
        for destination in self.args[2:]:
            self.fm.ui.keymaps.copy(self.context, source, destination)
class copypmap(copymap):
    """:copypmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "pager" keybinding from <keys> to <newkeys>
    """

    # Same behaviour as copymap, only within the "pager" keymap context.
    context = 'pager'
class copycmap(copymap):
    """:copycmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "console" keybinding from <keys> to <newkeys>
    """

    # Same behaviour as copymap, only within the "console" keymap context.
    context = 'console'
class copytmap(copymap):
    """:copytmap <keys> <newkeys1> [<newkeys2>...]
    Copies a "taskview" keybinding from <keys> to <newkeys>
    """

    # Same behaviour as copymap, only within the "taskview" keymap context.
    # (The docstring used to read ":copycmap" — a copy-paste typo.)
    context = 'taskview'
class unmap(Command):
    """:unmap <keys> [<keys2>, ...]
    Remove the given "browser" mappings
    """

    context = 'browser'

    def execute(self):
        # Each argument is one key sequence to unbind in this context.
        for arg in self.args[1:]:
            self.fm.ui.keymaps.unbind(self.context, arg)
class cunmap(unmap):
    """:cunmap <keys> [<keys2>, ...]
    Remove the given "console" mappings
    """

    # BUGFIX: this was 'browser', which made :cunmap remove *browser*
    # mappings instead of the console mappings the docstring promises
    # (compare punmap/tunmap below, which set their own contexts).
    context = 'console'
class punmap(unmap):
    """:punmap <keys> [<keys2>, ...]
    Remove the given "pager" mappings
    """

    # Same behaviour as unmap, only within the "pager" keymap context.
    context = 'pager'
class tunmap(unmap):
    """:tunmap <keys> [<keys2>, ...]
    Remove the given "taskview" mappings
    """

    # Same behaviour as unmap, only within the "taskview" keymap context.
    context = 'taskview'
class map_(Command):
    """:map <keysequence> <command>
    Maps a command to a keysequence in the "browser" context.
    Example:
    map j move down
    map J move down 10
    """

    name = 'map'
    context = 'browser'
    # The mapped command is stored verbatim; its macros are expanded when
    # the mapping fires, not when it is defined.
    resolve_macros = False

    def execute(self):
        # ROBUSTNESS: previously a missing argument silently bound an empty
        # key sequence; fail loudly instead (message matches copymap).
        if not self.arg(1):
            return self.fm.notify("Not enough arguments", bad=True)
        self.fm.ui.keymaps.bind(self.context, self.arg(1), self.rest(2))
class cmap(map_):
    """:cmap <keysequence> <command>
    Maps a command to a keysequence in the "console" context.
    Example:
    cmap <ESC> console_close
    cmap <C-x> console_type test
    """

    # Same behaviour as map, only within the "console" keymap context.
    context = 'console'
class tmap(map_):
    """:tmap <keysequence> <command>
    Maps a command to a keysequence in the "taskview" context.
    """

    # Same behaviour as map, only within the "taskview" keymap context.
    context = 'taskview'
class pmap(map_):
    """:pmap <keysequence> <command>
    Maps a command to a keysequence in the "pager" context.
    """

    # Same behaviour as map, only within the "pager" keymap context.
    context = 'pager'
class scout(Command):
    """:scout [-FLAGS] <pattern>
    Swiss army knife command for searching, traveling and filtering files.
    The command takes various flags as arguments which can be used to
    influence its behaviour:
    -a = automatically open a file on unambiguous match
    -e = open the selected file when pressing enter
    -f = filter files that match the current search pattern
    -g = interpret pattern as a glob pattern
    -i = ignore the letter case of the files
    -k = keep the console open when changing a directory with the command
    -l = letter skipping; e.g. allow "rdme" to match the file "readme"
    -m = mark the matching files after pressing enter
    -M = unmark the matching files after pressing enter
    -p = permanent filter: hide non-matching files after pressing enter
    -s = smart case; like -i unless pattern contains upper case letters
    -t = apply filter and search pattern as you type
    -v = inverts the match
    Multiple flags can be combined. For example, ":scout -gpt" would create
    a :filter-like command using globbing.
    """
    # One named constant per flag character so the code below can test
    # membership by name rather than by magic letter.
    AUTO_OPEN = 'a'
    OPEN_ON_ENTER = 'e'
    FILTER = 'f'
    SM_GLOB = 'g'
    IGNORE_CASE = 'i'
    KEEP_OPEN = 'k'
    SM_LETTERSKIP = 'l'
    MARK = 'm'
    UNMARK = 'M'
    PERM_FILTER = 'p'
    SM_REGEX = 'r'
    SMART_CASE = 's'
    AS_YOU_TYPE = 't'
    INVERT = 'v'

    def __init__(self, *args, **kws):
        Command.__init__(self, *args, **kws)
        # Cache for the compiled pattern; built lazily by _build_regex().
        self._regex = None
        self.flags, self.pattern = self.parse_flags()

    def execute(self):
        """Run once when the user presses enter in the console."""
        thisdir = self.fm.thisdir
        flags = self.flags
        pattern = self.pattern
        regex = self._build_regex()
        count = self._count(move=True)

        # Remember the search so `n`/`N` can repeat it.
        self.fm.thistab.last_search = regex
        self.fm.set_search_method(order="search")

        if self.MARK in flags or self.UNMARK in flags:
            # Whichever of -m/-M appears later in the flag string wins.
            value = flags.find(self.MARK) > flags.find(self.UNMARK)
            if self.FILTER in flags:
                # With -f the visible files already match, mark them all.
                for f in thisdir.files:
                    thisdir.mark_item(f, value)
            else:
                for f in thisdir.files:
                    if regex.search(f.basename):
                        thisdir.mark_item(f, value)

        if self.PERM_FILTER in flags:
            thisdir.filter = regex if pattern else None

        # clean up:
        self.cancel()

        if self.OPEN_ON_ENTER in flags or \
                self.AUTO_OPEN in flags and count == 1:
            if os.path.exists(pattern):
                self.fm.cd(pattern)
            else:
                self.fm.move(right=1)

        if self.KEEP_OPEN in flags and thisdir != self.fm.thisdir:
            # reopen the console:
            self.fm.open_console(self.line[0:-len(pattern)])

        if thisdir != self.fm.thisdir and pattern != "..":
            # Briefly ignore input after a directory change.
            self.fm.block_input(0.5)

    def cancel(self):
        # Drop the temporary (as-you-type) filter and refresh the listing.
        self.fm.thisdir.temporary_filter = None
        self.fm.thisdir.refilter()

    def quick(self):
        """Called on every keystroke while the console is open."""
        asyoutype = self.AS_YOU_TYPE in self.flags
        if self.FILTER in self.flags:
            self.fm.thisdir.temporary_filter = self._build_regex()
        if self.PERM_FILTER in self.flags and asyoutype:
            self.fm.thisdir.filter = self._build_regex()
        if self.FILTER in self.flags or self.PERM_FILTER in self.flags:
            self.fm.thisdir.refilter()
        # True closes the console (unambiguous auto-open match).
        if self._count(move=asyoutype) == 1 and self.AUTO_OPEN in self.flags:
            return True
        return False

    def tab(self):
        # <TAB> jumps the cursor to the next match.
        self._count(move=True, offset=1)

    def _build_regex(self):
        """Compile (and cache) the regex implied by the flags + pattern."""
        if self._regex is not None:
            return self._regex

        frmat = "%s"
        flags = self.flags
        pattern = self.pattern

        if pattern == ".":
            return re.compile("")

        # Handle carets at start and dollar signs at end separately
        if pattern.startswith('^'):
            pattern = pattern[1:]
            frmat = "^" + frmat
        if pattern.endswith('$'):
            pattern = pattern[:-1]
            frmat += "$"

        # Apply one of the search methods
        if self.SM_REGEX in flags:
            regex = pattern
        elif self.SM_GLOB in flags:
            regex = re.escape(pattern).replace("\\*", ".*").replace("\\?", ".")
        elif self.SM_LETTERSKIP in flags:
            regex = ".*".join(re.escape(c) for c in pattern)
        else:
            regex = re.escape(pattern)

        regex = frmat % regex

        # Invert regular expression if necessary
        if self.INVERT in flags:
            regex = "^(?:(?!%s).)*$" % regex

        # Compile Regular Expression
        # NOTE(review): re.LOCALE combined with a str pattern raises
        # ValueError on python >= 3.6; the bare except below silently
        # degrades every search to the empty pattern there — confirm
        # the targeted python versions.
        options = re.LOCALE | re.UNICODE
        if self.IGNORE_CASE in flags or self.SMART_CASE in flags and \
                pattern.islower():
            options |= re.IGNORECASE
        try:
            self._regex = re.compile(regex, options)
        except:
            self._regex = re.compile("")
        return self._regex

    def _count(self, move=False, offset=0):
        """Count matches starting at the cursor; optionally move to the
        first one.  Returns early with the count as soon as it exceeds 1.
        """
        from collections import deque
        count = 0
        cwd = self.fm.thisdir
        pattern = self.pattern

        if not pattern:
            return 0
        if pattern == '.':
            return 0
        if pattern == '..':
            return 1

        # Rotate the file list so iteration begins at the cursor.
        deq = deque(cwd.files)
        deq.rotate(-cwd.pointer - offset)
        i = offset
        regex = self._build_regex()
        for fsobj in deq:
            if regex.search(fsobj.basename):
                count += 1
                if move and count == 1:
                    cwd.move(to=(cwd.pointer + i) % len(cwd.files))
                    self.fm.thisfile = cwd.pointed_obj
                if count > 1:
                    return count
            i += 1

        # NOTE(review): returns a bool here (count == 1) but an int above;
        # callers only compare against 1, so this works, but it is subtle.
        return count == 1
class grep(Command):
    """:grep <string>
    Looks for a string in all marked files or directories
    """

    def execute(self):
        needle = self.rest(1)
        if not needle:
            return
        # Recursive, line-numbered grep over the selection, shown
        # through the pager ('p' flag).
        action = ['grep', '--line-number', '-e', needle, '-r']
        action.extend(f.path for f in self.fm.thistab.get_selection())
        self.fm.execute_command(action, flags='p')
# Version control commands
# --------------------------------
class stage(Command):
    """
    :stage
    Stage selected files for the corresponding version control system
    """

    def execute(self):
        from ranger.ext.vcs import VcsError

        filelist = [f.path for f in self.fm.thistab.get_selection()]
        # Mark the directory's VCS info as stale so it gets refreshed.
        self.fm.thisdir.vcs_outdated = True
        # for f in self.fm.thistab.get_selection():
        # f.vcs_outdated = True

        try:
            self.fm.thisdir.vcs.add(filelist)
        except VcsError:
            self.fm.notify("Could not stage files.")

        self.fm.reload_cwd()
class unstage(Command):
    """
    :unstage
    Unstage selected files for the corresponding version control system
    """

    def execute(self):
        from ranger.ext.vcs import VcsError

        filelist = [f.path for f in self.fm.thistab.get_selection()]
        # Mark the directory's VCS info as stale so it gets refreshed.
        self.fm.thisdir.vcs_outdated = True
        # for f in self.fm.thistab.get_selection():
        # f.vcs_outdated = True

        try:
            self.fm.thisdir.vcs.reset(filelist)
        except VcsError:
            self.fm.notify("Could not unstage files.")

        self.fm.reload_cwd()
class diff(Command):
    """
    :diff
    Displays a diff of selected files against the last committed version
    """

    def execute(self):
        import tempfile

        from ranger.ext.vcs import VcsError

        selection = self.fm.thistab.get_selection()
        if not selection:
            return
        paths = [fobj.path for fobj in selection]
        vcs = selection[0].vcs

        diff = vcs.get_raw_diff(filelist=paths)
        if not diff.strip():
            raise Exception("diff is empty")
        # Page the diff through $PAGER via a temporary file.
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(diff.encode('utf-8'))
        tmp.flush()
        pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
        self.fm.run([pager, tmp.name])
class log(Command):
    """
    :log
    Displays the log of the current repo or files
    """

    def execute(self):
        import tempfile

        from ranger.ext.vcs import VcsError

        selection = self.fm.thistab.get_selection()
        if not selection:
            return
        paths = [fobj.path for fobj in selection]

        raw_log = selection[0].vcs.get_raw_log(filelist=paths)
        # Page the log through $PAGER via a temporary file.
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(raw_log.encode('utf-8'))
        tmp.flush()
        pager = os.environ.get('PAGER', ranger.DEFAULT_PAGER)
        self.fm.run([pager, tmp.name])
# vim: fdm=marker
| alejandrogallo/dotfiles | link_config/ranger/commands.py | Python | unlicense | 38,023 | [
"VASP"
] | 55834f4b7e1307e5e1683845dc8ae1b9ee110a2a690f1696488ab4c00721af90 |
#!/usr/bin/env python
from multiprocessing import Pool
import time
import os
import sys
import argparse
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
from Bio.SeqUtils import GC
from Bio.SeqRecord import SeqRecord
#from Bio.Seq import Seq
#from Bio.Alphabet import generic_dna
# Copyright(C) 2015 David Ream
# Released under GPL version 3 licence. http://www.gnu.org/licenses/lgpl.html
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
    """Build the command-line interface and return the parsed arguments."""
    cli = argparse.ArgumentParser(description='Convert all genbank files found in a specified folder, and optionally a file containing the accessions that you wish to include, and create BLAST searchable databases from them.')

    cli.add_argument("-i", "--genbank_directory", dest="genbank_directory", metavar="DIRECTORY", default='./genomes/',
                     help="Folder containing all genbank files for use by the program.")

    cli.add_argument("-o", "--outfolder", dest="outfolder", metavar="DIRECTORY", default='./db/',
                     help="Folder where the BLAST searchable databases will be stored.")

    cli.add_argument("-f", "--filter", dest="filter", metavar="FILE", default='NONE',
                     help="File restrictiong which accession numbers this script will process. If no file is provided, filtering is not performed.")

    # Default worker count is the number of CPUs on this machine.
    cli.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default=os.sysconf("SC_NPROCESSORS_CONF"), type=int,
                     help="Number of processors that you want this script to run on. The default is every CPU that the system has.")

    # store_false with default True: passing -p switches to DNA mode.
    cli.add_argument("-p", "--protein", dest="protein", default=True, action='store_false',
                     help="Flag to toggle mode from protein to DNA sequences for use as database construction. The default is protein.")

    cli.add_argument("-q", "--quiet", dest="quiet", action="store_true", default=False,
                     help="Suppresses most program text outputs.")

    return cli.parse_args()
def check_options(parsed_args):
    """Validate the parsed command-line arguments.

    Exits the program when the genbank directory or the filter file does
    not exist.  Creates the output folder when missing and normalizes it to
    end with '/'.  Clamps the requested process count to [1, #CPUs].

    Returns:
        (genbank_directory, outfolder, filter_file, num_proc, do_protein,
        quiet) tuple of the validated values.
    """
    # check the genbank folder
    if os.path.isdir(parsed_args.genbank_directory):
        genbank_directory = parsed_args.genbank_directory
    else:
        print("The folder %s does not exist." % parsed_args.genbank_directory)
        sys.exit()

    # if the directory that the user specifies does not exist, then the program makes it for them.
    if not os.path.isdir(parsed_args.outfolder):
        os.makedirs(parsed_args.outfolder)

    outfolder = parsed_args.outfolder
    if outfolder[-1] != '/':
        outfolder = outfolder + '/'

    # Check the filter file
    if parsed_args.filter == 'NONE' or os.path.exists(parsed_args.filter):
        filter_file = parsed_args.filter
    else:
        print("The file %s does not exist." % parsed_args.filter)
        sys.exit()

    # section of code that deals determining the number of CPU cores that will be used by the program
    if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
        num_proc = os.sysconf("SC_NPROCESSORS_CONF")
    elif parsed_args.num_proc < 1:
        num_proc = 1
    else:
        num_proc = int(parsed_args.num_proc)

    do_protein = parsed_args.protein
    quiet = parsed_args.quiet

    #return infolder, outfolder, filter_file, num_proc, do_protein
    return genbank_directory, outfolder, filter_file, num_proc, do_protein, quiet
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def returnRecursiveDirFiles(root_dir):
result = []
for path, dir_name, flist in os.walk(root_dir):
for f in flist:
fname = os.path.join(path, f)
if os.path.isfile(fname) and '.gbk' in fname:
result.append(fname)
return result
#################################################################
#### so yeah.... not gonna do this right now... #####
#### I feel there is no reason to, but keeping #####
#### the code in here just in case i am wrong about that #####
#################################################################
# I need to do an analysis on gc content, and gc skew.
# currently this will return the organism gc, (mean, SD, varience) of the GC in coding regions
# the results will we retained in a file the calling program opens tab delimited
def GCAnalysis(NC, organism, gc_list, seq, outfile):
    """Append one tab-delimited GC-content summary line to ``outfile``.

    Arguments:
        NC (str): accession number of the record.
        organism (str): organism name.
        gc_list (list of float): per-CDS GC percentages.
        seq: the full genome sequence (anything Bio.SeqUtils.GC accepts).
        outfile (str): path of the tab-delimited report to append to.
    """
    # BUGFIX: numpy was referenced but never imported anywhere in this
    # module; import it locally since only this (currently unused) helper
    # needs it.
    import numpy

    organism_gc = "%3.2f" % GC(seq)
    mean = "%3.2f" % numpy.mean(gc_list)
    #mode = "%5.2f" % numpy.mode(gc_list)
    var = "%5.2f" % numpy.var(gc_list)
    std = "%5.2f" % numpy.std(gc_list)
    # BUGFIX: NEW_LINE was an undefined name, and the file handle was
    # never closed; use a literal newline and a context manager.
    with open(outfile, 'a') as handle:
        handle.write('\t'.join([NC, organism, organism_gc, mean, var, std]) + '\n')
# take the genbank file specified by genbank path, and save the customized result file in the db_directory folder
#def convert_genbank(genbank_path, db_directory, error_fname): #, gc_outfile = 'gc_analysis.txt'):
def convert_genbank(genbank_tuple):
    """Convert one genbank file into a fasta file plus a BLAST database.

    Arguments:
        genbank_tuple: packed (genbank_path, db_directory, error_fname,
            do_protein) tuple so this function can be mapped over a
            multiprocessing.Pool.

    Returns:
        (outpath, err_flag): path of the fasta file written (None when no
        output was produced) and whether a locus error was seen in the
        last examined feature.
    """
    genbank_path, db_directory, error_fname, do_protein = genbank_tuple
    record_list = []
    # Only the first record of the genbank file is used.
    seq_record = next(SeqIO.parse(open(genbank_path), "genbank"))
    print((seq_record.annotations))
    accession = seq_record.id
    organism = seq_record.annotations['organism'].replace(' ', '_')
    err_log = []
    gc_list = []  # no need for this right now, but leaving in

    # BUGFIX: err_flag used to be assigned only inside the loop, so a
    # record with no features raised NameError at the check below.
    err_flag = False

    # loop over the genbank file
    for fnum, feature in enumerate(seq_record.features):
        # NOTE(review): err_flag is reset per feature, so only the *last*
        # feature's error status decides whether output is written.  This
        # looks unintentional but is preserved here -- confirm with author.
        err_flag = False
        error_in_field = False
        if feature.type == 'CDS':
            try:
                start = int(feature.location.start)
                stop = int(feature.location.end)
            except Exception:
                # NOTE(review): start/stop keep their values from the
                # previous feature here; the record is dropped later via
                # error_in_field, but the GC list still gets an entry.
                error_in_field = True
            strand = feature.strand
            dna_seq = seq_record.seq[start:stop]

            gc = GC(dna_seq)
            gc_list.append(gc)
            gc = "%3.2f" % gc

            # Prefer the locus tag; fall back to the gene name.
            try:
                locus = feature.qualifiers['locus_tag'][0]
            except KeyError:
                try:
                    locus = feature.qualifiers['gene'][0]
                except KeyError:
                    locus = 'error'
                    print(("Error in the organism %s with NC # %s" % (organism, accession)))
                    err_flag = True
                    err_log.append([organism, accession])

            if do_protein:
                try:
                    if 'translation' in list(feature.qualifiers.keys()):
                        prot_seq = Seq(''.join(feature.qualifiers['translation']), IUPAC.protein)
                        # Header: accession|organism|locus|gene|start|stop|strand|gc
                        if 'gene' in feature.qualifiers:
                            gene = feature.qualifiers['gene'][0]
                            seq_rec_to_store = SeqRecord(prot_seq, id='|'.join([accession, organism, locus, gene, str(start), str(stop), str(strand), gc]).replace(' ', ''), description='')
                        else:
                            seq_rec_to_store = SeqRecord(prot_seq, id='|'.join([accession, organism, locus, 'unknown', str(start), str(stop), str(strand), gc]).replace(' ', ''), description='')
                    else:
                        print("This was not a protein sequence")
                        error_in_field = True
                except Exception:
                    print("Error in function convert_genbank(genbank_tuple) from the format_db.py script, unhandled error in the genbank parse.")
                    error_in_field = True
            else:
                # NOTE(review): DNA mode never builds seq_rec_to_store, so
                # the append below would fail (or reuse a stale record) --
                # the RNA/DNA path is effectively unimplemented.
                pass

            if not error_in_field:
                record_list.append(seq_rec_to_store)
            else:
                print("a record was omitted")

    # Append any locus errors to the shared error log.
    handle = open(error_fname, 'a')
    for i in err_log:
        handle.write('\t'.join(i) + '\n')
    handle.close()

    # BUGFIX: outpath was unbound (NameError at the return) when err_flag
    # was set; report None instead.
    outpath = None
    if not err_flag:
        outpath = db_directory + os.path.splitext(os.path.basename(genbank_path))[0] + '.ffc'
        out_handle = open(outpath, "w")
        SeqIO.write(record_list, out_handle, "fasta")
        out_handle.close()
        if do_protein:
            cmd = "makeblastdb -in %s -dbtype prot" % (outpath)
        else:
            # BUGFIX: the DNA branch used to build a *protein* database
            # too; nucleotide sequences need -dbtype nucl.
            cmd = "makeblastdb -in %s -dbtype nucl" % (outpath)
        os.system(cmd)
    return outpath, err_flag
def parallel_convert_genbank(file_list, outfolder, num_proc, do_protein, error_fname="./error_log.txt"):
    """Run convert_genbank over every file in file_list with a worker pool.

    Arguments:
        file_list (list of str): genbank files to convert.
        outfolder (str): directory where the databases will be written.
        num_proc (int): number of pool workers.
        do_protein (bool): protein (True) vs. nucleotide (False) mode.
        error_fname (str): path of the error log, recreated on every run.

    Returns:
        dict mapping each output path to its error flag (previously this
        dict was computed but silently discarded).
    """
    # Make sure that we have a new error log each time the program is run
    if os.path.isfile(error_fname):
        os.remove(error_fname)

    # Package the variables for the convert_genbank function so everything can be run in parallel
    tuple_list = [(i, outfolder, error_fname, do_protein) for i in file_list]
    pool = Pool(processes=num_proc)
    try:
        result = dict(pool.map(convert_genbank, tuple_list))
    finally:
        # BUGFIX: the pool was never closed, leaking worker processes.
        pool.close()
        pool.join()
    return result
def main():
    """Entry point: parse arguments, gather genbank files, convert them."""
    start = time.time()

    parsed_args = parser_code()

    #infolder, outfolder, filter_file, num_proc, do_protein = check_options(parsed_args)
    genbank_directory, outfolder, filter_file, num_proc, do_protein, quiet = check_options(parsed_args)

    #flist = returnRecursiveDirFiles(infolder)
    flist = returnRecursiveDirFiles(genbank_directory)

    if filter_file != 'NONE':
        # Keep only the files whose basename (minus extension) appears in
        # the filter file, one accession per line.
        filter_list = [i.strip() for i in open(filter_file).readlines()]
        file_list = [i for i in flist if i.split('/')[-1].split('.')[0] in filter_list]
    else:
        file_list = flist

    #print "do_protein", do_protein
    parallel_convert_genbank(file_list, outfolder, num_proc, do_protein)

    if not quiet:
        # Report total wall-clock runtime in seconds.
        print(time.time() - start)
# A successful command could look like this:
# ./format_db.py -f ./phylo_order.txt
# ./format_db.py -i /home/dave/Desktop/all_genbank -o ./db1/ -f ./phylo_order.txt
if __name__ == '__main__':
    main()
| nguyenngochuy91/Ancestral-Blocks-Reconstruction | format_db.py | Python | gpl-3.0 | 12,985 | [
"BLAST"
] | 19efdfe1e37a7599eb5501b7439d4e443302118bf5d14279dc979cd7f2e79035 |
"""
Functions to calculate the downstream water surface elevation by minimizing the
difference between flows calculated via the Manning Formula for discharge and
the historical peak flood values.
(https://en.wikipedia.org/wiki/Manning_formula)
(https://en.wikipedia.org/wiki/Volumetric_flow_rate)
Author:
Matthew A. Turner <maturner01@gmail.com>
Date:
19 April 2016
"""
from __future__ import print_function
import numpy as np
import os
import shutil
import subprocess
import time
from collections import namedtuple
from scipy.optimize import minimize_scalar
try:
from ripcas_dflow import ESRIAsc, Pol, ripcas, shear_mesh_to_asc, veg2n
except ImportError:
from .ripcas_dflow import ESRIAsc, Pol, ripcas, shear_mesh_to_asc, veg2n
class ModelRun(object):
    """
    A single coupled run. First DFLOW then RipCAS. CoupledRunSequence will
    encapsulate a series of coupled runs commencing with preparation of the
    initial vegetation map for DFLOW. For now, assume that the vegetation map
    is provided to the run_dflow method.
    """
    def __init__(self):  # , vegetation_ascii_path):
        """Initialize run-state flags; nothing is computed yet."""
        # have the boundary conditions been found?
        self.bc_converged = False

        # has ripcas been run yet?
        self.vegetation_ascii = None
        self.ripcas_has_run = False
        self.ripcas_directory = None

        # has DFLOW been run yet?
        self.dflow_has_run = False
        self.dflow_run_directory = None
        self.dflow_shear_output = None

        # generate boundry condition objects
        self.upstream_bc = BoundaryCondition()
        self.downstream_bc = BoundaryCondition()
        self.bc_solution_info = BoundarySolutionInfo()

    def calculate_bc(self, target_streamflow,
                     dbc_geometry_file, streambed_roughness, slope):
        """
        Arguments:
            target_streamflow (float): historical or other streamflow that
                will be used to drive DFLOW model; this calculation recovers
                an estimate for the Water Surface elevation (WS) for this given
                streamflow.
            dbc_geometry_file (str): path to the stream's cross-sectional
                geometry xyz file
            streambed_roughness (float): Manning's n-value for the streambed
            slope (float): slope taken for the reach

        Returns:
            (BoundaryCondition, BoundaryCondition): tuple of upstream and
                downstream BoundaryCondition instances
        """
        dbc_geometry = Pol.from_river_geometry_file(dbc_geometry_file)

        bc_solver = BoundaryConditionSolver(
            target_streamflow, dbc_geometry, streambed_roughness, slope
        )

        bc_solution = bc_solver.solve()

        self.bc_solution_info = bc_solution

        # Only a successful solve unlocks run_dflow().
        self.bc_converged = bc_solution.success

        # Downstream BC is the solved water surface elevation; upstream BC
        # is the streamflow it implies.
        self.downstream_bc.amplitude = bc_solution.ws_elev
        self.upstream_bc.amplitude = bc_solution.streamflow

        return (self.upstream_bc, self.downstream_bc)

    def run_dflow(self, dflow_run_directory, vegetation_map,
                  veg_roughness_shearres_lookup, streambed_roughness,
                  clobber=True, pbs_script_name='dflow_mpi.pbs',
                  dflow_run_fun=None):
        """
        Both input and output dflow files will go into the dflow_run_directory,
        but in input/ and output/ subdirectories.

        Arguments:
            dflow_run_directory (str): directory where DFLOW files should be
                put and where the dflow_run_fun will be run from
            vegetation_map (str): path to the input vegetation.pol file. This
                function assumes this has already been generated in the proper
                format b/c this seems like the best separation of
                responsibilities.
            clobber (bool): whether or not to overwrite dflow_run_directory if
                it exists
            pbs_script_name (str): name of .pbs script w/o directory
            dflow_run_fun (function): argument-free function to run DFLOW.
                Ex. `dflow_run_fun=f` where `f` defined by
                `def f: subprocess.call(['qsub', 'dflow_mpi.pbs'])`

        Returns:
            None
        """
        # Guard the required run order: calculate_bc -> run_dflow (once).
        if not self.bc_converged:
            raise RuntimeError(
                'Boundary conditions must be calculated before ' +
                'DFLOW can be run'
            )

        if self.dflow_has_run:
            raise RuntimeError(
                'DFLOW has already been run for this CoupledRun'
            )

        if os.path.exists(dflow_run_directory):

            if not clobber:
                raise RuntimeError(
                    'DFLOW has already been run for this CoupledRun'
                )

            shutil.rmtree(dflow_run_directory)

        self.dflow_run_directory = dflow_run_directory

        os.mkdir(dflow_run_directory)

        # write boundary conditions to file
        bc_up_path = os.path.join(dflow_run_directory,
                                  'boundriver_up_0001.cmp')

        bc_down_path = os.path.join(dflow_run_directory,
                                    'boundriverdown_0001.cmp')

        self.upstream_bc.write(bc_up_path)
        self.downstream_bc.write(bc_down_path)

        self.vegetation_ascii = ESRIAsc(vegetation_map)

        # Translate the vegetation map into Manning roughness (n.pol).
        veg_path = os.path.join(dflow_run_directory, 'n.pol')

        Pol.from_ascii(
            veg2n(self.vegetation_ascii,
                  veg_roughness_shearres_lookup,
                  streambed_roughness)
        ).write(veg_path)

        oj = os.path.join

        # Static DFLOW input files that get copied into the run directory.
        pbs_path = oj(dflow_run_directory, pbs_script_name)
        mdu_path = oj(dflow_run_directory, 'base.mdu')
        net_path = oj(dflow_run_directory, 'base_net.nc')
        ext_path = oj(dflow_run_directory, 'base.ext')
        brd_path = oj(dflow_run_directory, 'boundriverdown.pli')
        bru_path = oj(dflow_run_directory, 'boundriver_up.pli')

        # Where DFLOW will deposit the shear-stress map RipCAS consumes.
        self.dflow_shear_output =\
            os.path.join(dflow_run_directory,
                         'DFM_OUTPUT_base',
                         'base_map.nc')

        # Copy each template from the package data directory.
        with open(pbs_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'dflow_mpi.pbs'))
            s = open(p, 'r').read()
            f.write(s)

        with open(mdu_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'base.mdu'))
            s = open(p, 'r').read()
            f.write(s)

        with open(net_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'base_net.nc'))
            s = open(p, 'r').read()
            f.write(s)

        with open(ext_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'base.ext'))
            s = open(p, 'r').read()
            f.write(s)

        with open(brd_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'boundriverdown.pli'))
            s = open(p, 'r').read()
            f.write(s)

        with open(bru_path, 'w') as f:
            p = _join_data_dir(oj('dflow_inputs', 'boundriver_up.pli'))
            s = open(p, 'r').read()
            f.write(s)

        bkdir = os.getcwd()
        os.chdir(dflow_run_directory)

        if dflow_run_fun is None:

            # Dry run: fake the DFLOW output so run_ripcas can proceed.
            print('\n*****\nDry Run of DFLOW\n*****\n')

            os.chdir(bkdir)

            example_shear_path = 'jemez_r02_map.nc'
            if os.path.exists(example_shear_path):
                os.makedirs(os.path.dirname(self.dflow_shear_output))
                shutil.copyfile(example_shear_path, self.dflow_shear_output)
            else:
                print('Get you a copy of a DFLOW output, yo! ' +
                      'Can\'t run RipCAS without it!')

                with open('not_actually_output.nc', 'w') as f:
                    f.write('A FAKE NETCDF!!!')

            self.dflow_has_run = True

        else:

            # in the case of running a process on CARC, the ret is a Popen inst
            ret = dflow_run_fun()

            os.chdir(bkdir)
            self.dflow_has_run = True

            return ret

    def run_ripcas(self, zone_map_path, ripcas_required_data_path,
                   ripcas_directory, shear_asc=None, clobber=True):
        """Run RipCAS on the DFLOW shear output and write vegetation.asc.

        Arguments:
            zone_map_path (str): path to the zone map used by ripcas.
            ripcas_required_data_path (str): path to the ripcas lookup data.
            ripcas_directory (str): output directory for this ripcas run.
            shear_asc (ESRIAsc): optional pre-computed shear map; when None
                it is extracted from the DFLOW netCDF output.
            clobber (bool): overwrite ripcas_directory when it exists.

        Returns:
            (ESRIAsc) the updated vegetation map.
        """
        if not self.dflow_has_run:
            raise RuntimeError(
                'DFLOW must run before ripcas can be run'
            )

        if os.path.exists(ripcas_directory):

            if not clobber:
                raise RuntimeError(
                    'DFLOW has already been run for this CoupledRun'
                )

            shutil.rmtree(ripcas_directory)

        self.ripcas_directory = ripcas_directory

        os.mkdir(ripcas_directory)

        # Reuse the input vegetation grid's header for the shear grid.
        hdr = self.vegetation_ascii.header_dict()

        if shear_asc is None:
            shear_asc = shear_mesh_to_asc(self.dflow_shear_output, hdr)
        else:
            assert isinstance(shear_asc, ESRIAsc),\
                'shear_asc must be of type ESRIAsc if provided'

        shear_asc.write(
            os.path.join(self.dflow_run_directory, 'shear_out.asc')
        )

        output_veg_ascii = ripcas(
            self.vegetation_ascii, zone_map_path,
            shear_asc, ripcas_required_data_path
        )

        output_vegetation_path = os.path.join(
            ripcas_directory, 'vegetation.asc'
        )
        output_veg_ascii.write(output_vegetation_path)

        self.ripcas_has_run = True

        return output_veg_ascii
# Result of a boundary-condition solve: the water surface elevation, the
# streamflow it implies, the absolute error against the target streamflow,
# and whether the optimizer reported success.
BoundarySolutionInfo = namedtuple(
    'BoundarySolutionInfo', ['ws_elev', 'streamflow', 'error', 'success']
)
# Allow construction with any number of trailing fields omitted.
BoundarySolutionInfo.__new__.__defaults__ = (None, None, None, None)
class BoundaryConditionSolver:
    """Find the water surface elevation whose Manning-formula streamflow
    best matches a target (e.g. historical peak) streamflow.
    """

    def __init__(self,
                 historical_streamflow,
                 dbc_geometry,
                 streambed_roughness,
                 slope):
        # target streamflow to reproduce
        self.q_hist = historical_streamflow
        # cross-sectional geometry with x, y, z arrays (a Pol instance)
        self.geom = dbc_geometry
        # Manning's n-value of the streambed
        self.n = streambed_roughness
        self.slope = slope

    def solve(self):
        """Minimize |Q(ws_elev) - q_hist| over the geometry's elevation
        range; returns a BoundarySolutionInfo for the best elevation found.
        """
        def _streamflow_error(ws_elev):
            calc =\
                _calculate_streamflow(self.geom, self.n, ws_elev, self.slope)
            return abs(calc - self.q_hist)

        # generate initial guesses with wide-spaced points
        result = minimize_scalar(_streamflow_error,
                                 bounds=(self.geom.z.min(), self.geom.z.max()),
                                 method='bounded',
                                 options={'xatol': 1e-6, 'maxiter': 1000})

        return BoundarySolutionInfo(
            result.x,
            _calculate_streamflow(self.geom, self.n, result.x, self.slope),
            result.fun,
            result.success
        )
StreamflowTuple = namedtuple('StreamflowTuple', ['ws_elev', 'streamflow'])
def _calculate_streamflow(dbc_geometry, streambed_roughness,
                          water_surface_elevation, slope):
    """
    Estimate streamflow through a surveyed cross section at a given
    water-surface elevation using Manning's equation.

    The geometry's N points define N-1 segments. Each segment is
    classified by where the water surface sits relative to its two bed
    elevations, and contributes a cross-sectional area and wetted
    perimeter accordingly; dry segments contribute nothing.

    Arguments:
        dbc_geometry: object exposing x, y, z coordinate vectors of the
            surveyed cross section
        streambed_roughness (float): Manning's n
        water_surface_elevation (float): trial water-surface elevation
        slope (float): channel slope (rise over run)

    Returns:
        (float) estimated streamflow Q
    """
    x = dbc_geometry.x
    y = dbc_geometry.y
    z = dbc_geometry.z

    # Horizontal length of each of the N-1 segments.
    seg_len = np.sqrt(np.square(np.diff(x)) + np.square(np.diff(y)))

    # Higher and lower bed elevation at each segment's endpoints.
    z_hi = np.array([max(a, b) for a, b in zip(z[:-1], z[1:])])
    z_lo = np.array([min(a, b) for a, b in zip(z[:-1], z[1:])])

    ws = water_surface_elevation

    # Classify each segment: 'trapezoid' (fully submerged), 'triangle'
    # (partially submerged), or 'below' (dry); see _get_ws_location.
    locations = np.array(
        [_get_ws_location(ws, hi, lo) for hi, lo in zip(z_hi, z_lo)]
    )

    # Per-segment cross-sectional area and wetted perimeter.
    areas = np.zeros(len(locations))
    perims = np.zeros(len(locations))

    for idx, loc in enumerate(locations):
        hi = z_hi[idx]
        lo = z_lo[idx]
        width = seg_len[idx]
        if loc == 'triangle':
            # Partially submerged: triangular cross-sectional area.
            areas[idx] = 0.5 * (ws - lo) * width
            # Wetted perimeter is the hypotenuse of the submerged slice.
            run = ((ws - lo)/(hi - lo)) * width
            rise = ws - lo
            perims[idx] = np.sqrt(run**2.0 + rise**2.0)
        elif loc == 'trapezoid':
            # Fully submerged: trapezoidal area over the whole width.
            areas[idx] = 0.5 * width * (2*ws - hi - lo)
            perims[idx] = np.sqrt(width**2.0 + (hi - lo)**2.0)

    area_sum = sum(areas)
    wp_sum = sum(perims)

    # Manning's equation: Q = (1/n) * A * R^(2/3) * sqrt(S), with
    # hydraulic radius R = A / P.
    n_inv = (1.0/streambed_roughness)

    Q = n_inv * area_sum * (pow((area_sum/wp_sum), 2/3.0)) * np.sqrt(slope)

    return Q
def _get_ws_location(water_surface_elev, zmax, zmin):
"""
Return one of three values depending on the location of the water surface
relative to the elevations of the discretized cross-section points.
Vectorized below.
Returns:
(str) one of the following: 'below' if above zmax (and automatically
zmin), 'triangle' if between zmax and zmin, and 'trapezoid'
if below zmin. This corresponds to the geometry that will be used
to calculate the wetted perimeter and area of the induced polygon.
"""
if water_surface_elev > zmax:
return 'trapezoid'
elif water_surface_elev <= zmax and water_surface_elev > zmin:
return 'triangle'
else:
return 'below'
class BoundaryCondition:
    """
    Harmonic boundary-condition record for DFLOW: a (period, amplitude,
    phase) triple serialized in the DFLOW .bc text format.
    """

    def __init__(self,
                 period=0.0,  # (minutes)
                 amplitude=0.0,  # (ISO)
                 phase=0.0):  # (deg)
        self.period = period
        self.amplitude = amplitude
        self.phase = phase

    def write(self, out_path):
        """Serialize this boundary condition to the file at out_path."""
        with open(out_path, 'w') as f:
            f.write(repr(self))

    def __repr__(self):
        # Four fixed header lines followed by the space-separated values.
        header = [
            '* COLUMN=3',
            '* COLUMN1=Period (min) or Astronomical Componentname',
            '* COLUMN2=Amplitude (ISO)',
            '* COLUMN3=Phase (deg)',
        ]
        values = '{0} {1} {2}'.format(self.period, self.amplitude, self.phase)
        return '\n'.join(header + [values])
def mr_log(log_f, msg):
    """
    Append a timestamped message to the open log file and force it all
    the way to disk so progress survives an interrupted run.
    """
    stamp = time.asctime()
    log_f.write('[{0}] {1}'.format(stamp, msg))
    # flush() drains Python's buffer; fsync() makes the OS commit it.
    log_f.flush()
    os.fsync(log_f.fileno())
def modelrun_series(data_dir, initial_vegetation_map, vegzone_map,
                    veg_roughness_shearres_lookup, peak_flows_file,
                    geometry_file, streambed_roughness,
                    streambed_floodplain_roughness, streambed_slope,
                    dflow_run_fun=None, log_f=None, debug=False):
    '''
    Run a series of flow and succession models with peak flows given in
    peak_flows_file.

    Arguments:
        data_dir (str): write directory for modelrun series. Must exist
        initial_vegetation_map (str): location of year zero veg map
        vegzone_map (str): vegetation zone map location
        veg_roughness_shearres_lookup (str): Excel spreadsheet containing
            conversion from vegetation code to roughness value and vegetation
            code to shear stress resistance
        peak_flows_file (str): location of text file record of peak flows in
            cubic meters per second
        geometry_file (str): location of channel geometry at the downstream
            location for calculating streamflow
        streambed_roughness (float): streambed roughness in channel only; used
            when converting vegetation map to roughness map
        streambed_floodplain_roughness (float): an average roughness of
            stream channel and floodplain used in calculation of downstream
            boundary condition for DFLOW
        streambed_slope (float): rise over run of the channel used in
            calculation of downstream boundary condition for DFLOW
        dflow_run_fun (function): function delegate for the user to provide a
            custom way to run DFLOW. If none is given, defaults to
            submitting a PBS job as is done on CARC systems
        log_f (str): log file. if none is given, defaults to `data_dir`.log
            with dashes replacing slashes
        debug (bool): whether or not to run in debug mode. If running in debug
            mode, each DFLOW run returns fake data and
            each RipCAS run takes cord/data/shear_out.asc as input

    Returns:
        None
    '''
    # If no custom runner is given, default to a PBS job submission as
    # used on CARC systems.
    if dflow_run_fun is None:

        def dflow_fun():

            import subprocess

            # Send DFLOW run to the queue
            return subprocess.Popen(
                'qsub dflow_mpi.pbs', shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE
            )

    # NOTE(review): when dflow_run_fun IS provided, dflow_fun is never
    # defined, yet dflow_fun is what gets passed to mr.run_dflow below —
    # that path would raise NameError. Confirm whether dflow_fun should
    # fall back to dflow_run_fun.

    # Create a log file if none exists
    if log_f is None:
        # Build a log filename from the data dir, replacing path
        # separators with dashes (dropping any leading '/').
        first_char = data_dir[0]
        root_log_f = first_char if first_char != '/' else ''
        root_log_f += data_dir[1:].replace('/', '-')

        log_f = open(root_log_f + '.log', 'w')
    else:
        log_f = open(log_f, 'w')

    # Read the peak flows from the input file; the first line must be
    # the literal header 'Peak.Flood'.
    with open(peak_flows_file, 'r') as f:
        l0 = f.readline().strip()
        assert l0 == 'Peak.Flood', '{} not Peak.Flood'.format(l0)
        peak_flows = [float(l.strip()) for l in f.readlines()]

    # create a directory for global inputs
    inputs_dir = os.path.join(data_dir, 'inputs')

    # remove inputs directory if it already existed
    if os.path.isdir(inputs_dir):
        shutil.rmtree(inputs_dir)

    # create inputs directory
    os.mkdir(inputs_dir)

    # bring all input files into the input directory for provenance
    shutil.copy(initial_vegetation_map, inputs_dir)
    shutil.copy(vegzone_map, inputs_dir)
    shutil.copy(veg_roughness_shearres_lookup, inputs_dir)
    shutil.copy(peak_flows_file, inputs_dir)
    shutil.copy(geometry_file, inputs_dir)

    roughness_slope_path = os.path.join(inputs_dir, 'roughness_slope.txt')

    # create a text file with info on both streambed roughness and slope
    with open(roughness_slope_path, 'w') as f:
        f.write('roughness\tslope\n')
        f.write('%s\t%s\n' % (streambed_roughness, streambed_slope))

    # Iterate through all annual peak flows
    for flow_idx, flow in enumerate(peak_flows):

        # create a ModelRun object
        mr = ModelRun()

        # Run the boundary condition calculation method; produces the
        # upstream flow file and downstream stage file for DFLOW to use
        mr.calculate_bc(
            flow, geometry_file,
            streambed_floodplain_roughness, streambed_slope
        )

        # Enter information into log file
        mr_log(
            log_f, 'Boundary conditions for flow index {0} finished\n'.format(
                flow_idx
            )
        )

        # Create new directory for this annual flow iteration of DFLOW
        dflow_dir = os.path.join(data_dir, 'dflow-' + str(flow_idx))

        # Get veg map: year zero uses the initial map, later years use
        # the previous year's RipCAS output.
        if flow_idx == 0:
            veg_file = initial_vegetation_map
        else:
            # Take RipCAS outputs as DFLOW inputs from previous timestep
            veg_file = os.path.join(
                data_dir, 'ripcas-' + str(flow_idx - 1), 'vegetation.asc'
            )

        # Debug is for running on a local machine
        if debug:
            mr.run_dflow(dflow_dir, veg_file,
                         veg_roughness_shearres_lookup, streambed_roughness)
            job_id = 'debug'

        # If running on CARC
        else:
            # Send DFLOW run to CARC (see NameError note above regarding
            # dflow_fun when a custom dflow_run_fun was supplied)
            p_ref = mr.run_dflow(dflow_dir, veg_file,
                                 veg_roughness_shearres_lookup,
                                 streambed_roughness,
                                 dflow_run_fun=dflow_fun)

            # Extract the numeric job id from qsub's "<id>.<host>" output.
            # NOTE(review): under Python 3, communicate() returns bytes,
            # so split('.') would raise TypeError — confirm target Python
            # version or decode first.
            job_id = p_ref.communicate()[0].split('.')[0]

        # Enter run start in log file
        mr_log(log_f, 'Job ID {0} submitted for DFLOW run {1}\n'.format(
                job_id, flow_idx
            )
        )

        # check the status of the job by querying qstat; break loop when
        # job no longer exists, giving nonzero poll() value
        job_not_finished = True
        while job_not_finished:

            mr_log(
                log_f,
                'Job ID {0} not yet finished for DFLOW run {1}\n'.format(
                    job_id, flow_idx
                )
            )

            if debug:
                job_not_finished = False

            else:
                # NOTE(review): relies on a module-level `subprocess`
                # import (the only import visible here is local to
                # dflow_fun) — confirm it exists at the top of the file.
                p = subprocess.Popen(
                    'qstat ' + job_id, shell=True,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
                p.communicate()

                poll = p.poll()
                job_not_finished = poll == 0

                # Poll every ten minutes to avoid hammering the scheduler.
                time.sleep(600)

        mr_log(
            log_f, 'DFLOW run {0} finished, starting RipCAS\n'.format(
                flow_idx
            )
        )

        # Create a directory for this annual iteration of RipCAS
        ripcas_dir = os.path.join(data_dir, 'ripcas-' + str(flow_idx))

        # Debug is for running on a local machine
        if debug:
            # Use the canned shear output shipped with the package.
            p = _join_data_dir('shear_out.asc')
            mr.run_ripcas(vegzone_map, veg_roughness_shearres_lookup,
                          ripcas_dir, shear_asc=ESRIAsc(p))

        else:
            # if no explicit shear_asc is given, the method accesses
            # the dflow_shear_output attribute. XXX TODO this method will
            # need to be updated to build a shear_asc by stitching together
            # the partitioned files using stitch_partitioned_output
            # in cord/ripcas_dflow.py
            mr.run_ripcas(vegzone_map, veg_roughness_shearres_lookup,
                          ripcas_dir)

        # Note end of RipCAS in log file
        mr_log(log_f, 'RipCAS run {0} finished\n'.format(flow_idx))

    log_f.close()
def _join_data_dir(f):
'''
Join the filename, f, to the default data directory
'''
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, f)
| VirtualWatershed/CoRD | cord/modelrun.py | Python | bsd-3-clause | 22,297 | [
"NetCDF"
] | 6359f02afcdb3fba36ac282f2707865a2a09d397249303992535b81f27b7ec34 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.