text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
"""This module defines a linear response TDDFT-class.
"""
from math import sqrt
import sys
import numpy as np
from ase.units import Hartree
import _gpaw
import gpaw.mpi as mpi
MASTER = mpi.MASTER
from gpaw import debug
from gpaw.poisson import PoissonSolver
from gpaw.output import initialize_text_stream
from gpaw.lrtddft.excitation import Excitation, ExcitationList
from gpaw.lrtddft.kssingle import KSSingles
from gpaw.lrtddft.omega_matrix import OmegaMatrix
from gpaw.lrtddft.apmb import ApmB
##from gpaw.lrtddft.transition_density import TransitionDensity
from gpaw.utilities import packed_index
from gpaw.utilities.lapack import diagonalize
from gpaw.xc import XC
from gpaw.lrtddft.spectrum import spectrum
__all__ = ['LrTDDFT', 'photoabsorption_spectrum', 'spectrum']
class LrTDDFT(ExcitationList):
    """Linear Response TDDFT excitation class

    Input parameters:

    calculator:
      the calculator object after a ground state calculation

    nspins:
      number of spins considered in the calculation
      Note: Valid only for unpolarised ground state calculation

    eps:
      Minimal occupation difference for a transition (default 0.001)

    istart:
      First occupied state to consider
    jend:
      Last unoccupied state to consider

    xc:
      Exchange-Correlation approximation in the Kernel
    derivative_level:
      0: use Exc, 1: use vxc, 2: use fxc if available

    filename:
      read from a file
    """

    def __init__(self,
                 calculator=None,
                 nspins=None,
                 eps=0.001,
                 istart=0,
                 jend=None,
                 energy_range=None,
                 xc=None,
                 derivative_level=1,
                 numscale=0.00001,
                 txt=None,
                 filename=None,
                 finegrid=2,
                 force_ApmB=False,  # for tests
                 eh_comm=None  # parallelization over eh-pairs
                 ):
        # Filled in later by update()/read(); None means "not set yet".
        self.nspins = None
        self.istart = None
        self.jend = None

        if isinstance(calculator, str):
            # A string "calculator" is a filename: restore a saved object.
            ExcitationList.__init__(self, None, txt)
            return self.read(calculator)
        else:
            ExcitationList.__init__(self, calculator, txt)

        if filename is not None:
            # Initialise from file; the remaining arguments are ignored.
            return self.read(filename)

        self.filename = None
        self.calculator = None
        self.eps = None
        self.xc = None
        self.derivative_level = None
        self.numscale = numscale
        self.finegrid = finegrid
        self.force_ApmB = force_ApmB

        # Communicator for parallelisation over electron-hole pairs.
        if eh_comm is None:
            eh_comm = mpi.serial_comm
        elif isinstance(eh_comm, (mpi.world.__class__,
                                  mpi.serial_comm.__class__)):
            # Correct type already.
            pass
        else:
            # world should be a list of ranks:
            eh_comm = mpi.world.new_communicator(np.asarray(eh_comm))
        self.eh_comm = eh_comm

        if calculator is not None:
            # Make sure the ground state is fully converged before we
            # build the Kohn-Sham single excitations from it.
            calculator.converge_wave_functions()
            if calculator.density.nct_G is None:
                calculator.set_positions()
            self.update(calculator, nspins, eps,
                        istart, jend, energy_range,
                        xc, derivative_level, numscale)
def analyse(self, what=None, out=None, min=0.1):
    """Print info about the transitions.

    Parameters:
      1. what: I list of excitation indicees, None means all
      2. out : I where to send the output, None means sys.stdout
      3. min : I minimal contribution to list (0<min<1)
    """
    if what is None:
        what = range(len(self))
    elif isinstance(what, int):
        # single index given: wrap it in a list
        what = [what]
    if out is None:
        out = sys.stdout
    for i in what:
        # Python 2 print-chevron: send one analysis line per excitation
        # to the chosen stream.
        print >> out, str(i) + ':', self[i].analyse(min=min)
def update(self,
           calculator=None,
           nspins=None,
           eps=0.001,
           istart=0,
           jend=None,
           energy_range=None,
           xc=None,
           derivative_level=None,
           numscale=0.001):
    """Rebuild the Kohn-Sham singles and the Omega matrix.

    Does nothing if none of the construction parameters changed.
    """
    changed = False
    if self.calculator != calculator or \
       self.nspins != nspins or \
       self.eps != eps or \
       self.istart != istart or \
       self.jend != jend:
        changed = True
    if not changed:
        return

    # Remember the parameters the new matrices correspond to.
    self.calculator = calculator
    self.nspins = nspins
    self.eps = eps
    self.istart = istart
    self.jend = jend
    self.xc = xc
    self.derivative_level = derivative_level
    self.numscale = numscale
    self.kss = KSSingles(calculator=calculator,
                         nspins=nspins,
                         eps=eps,
                         istart=istart,
                         jend=jend,
                         energy_range=energy_range,
                         txt=self.txt)
    if not self.force_ApmB:
        Om = OmegaMatrix
        name = 'LrTDDFT'
        if self.xc:
            xc = XC(self.xc)
            # Hybrid functionals require the A+B/A-B formulation.
            if hasattr(xc, 'hybrid') and xc.hybrid > 0.0:
                Om = ApmB
                name = 'LrTDDFThyb'
    else:
        Om = ApmB
        name = 'LrTDDFThyb'
    self.Om = Om(self.calculator, self.kss,
                 self.xc, self.derivative_level, self.numscale,
                 finegrid=self.finegrid, eh_comm=self.eh_comm,
                 txt=self.txt)
    self.name = name
##    self.diagonalize()
def diagonalize(self, istart=None, jend=None, energy_range=None):
    """Diagonalize the Omega matrix and rebuild the excitation list."""
    # NOTE(review): this overwrites self.istart/self.jend with the
    # (possibly None) restriction arguments -- confirm that is intended.
    self.istart = istart
    self.jend = jend
    self.Om.diagonalize(istart, jend, energy_range)

    # remove old stuff
    while len(self):
        self.pop()

    # one LrTDDFTExcitation per eigenvalue/eigenvector pair
    for j in range(len(self.Om.kss)):
        self.append(LrTDDFTExcitation(self.Om, j))
def get_Om(self):
    """Return the underlying Omega matrix object."""
    return self.Om
def read(self, filename=None, fh=None):
    """Read myself from a file.

    Either 'filename' (possibly gzip compressed, recognised by the
    '.gz' suffix) or an already opened filehandle 'fh' may be given.
    """
    if fh is None:
        if filename.endswith('.gz'):
            try:
                import gzip
                f = gzip.open(filename)
            except:
                # fall back to plain text if gzip is unavailable/fails
                f = open(filename, 'r')
        else:
            f = open(filename, 'r')
        self.filename = filename
    else:
        f = fh
        self.filename = None

    # get my name
    s = f.readline().replace('\n', '')
    self.name = s.split()[1]

    self.xc = f.readline().replace('\n', '').split()[0]
    values = f.readline().split()
    self.eps = float(values[0])
    if len(values) > 1:
        self.derivative_level = int(values[1])
        self.numscale = float(values[2])
        self.finegrid = int(values[3])
    else:
        # old writing style, use old defaults
        self.numscale = 0.001

    self.kss = KSSingles(filehandle=f)
    # choose the matrix type according to the name written by write()
    if self.name == 'LrTDDFT':
        self.Om = OmegaMatrix(kss=self.kss, filehandle=f,
                              txt=self.txt)
    else:
        self.Om = ApmB(kss=self.kss, filehandle=f,
                       txt=self.txt)
    self.Om.Kss(self.kss)

    # check if already diagonalized
    p = f.tell()
    s = f.readline()
    if s != '# Eigenvalues\n':
        # go back to previous position
        f.seek(p)
    else:
        # load the eigenvalues
        n = int(f.readline().split()[0])
        for i in range(n):
            l = f.readline().split()
            E = float(l[0])
            me = [float(l[1]), float(l[2]), float(l[3])]
            self.append(LrTDDFTExcitation(e=E, m=me))
        # NOTE(review): the '# Eigenvectors' section written by write()
        # is not read back here -- confirm whether that is intentional.

    if fh is None:
        f.close()

    # update own variables
    self.istart = self.Om.fullkss.istart
    self.jend = self.Om.fullkss.jend
def singlets_triplets(self):
    """Split yourself into a singlet and triplet object.

    Returns the pair (singlet LrTDDFT, triplet LrTDDFT).
    """
    # BUG FIX: the previous positional calls predated the insertion of
    # the 'energy_range' parameter into __init__'s signature, so xc was
    # passed as energy_range, derivative_level as xc and numscale as
    # derivative_level.  Use keywords so the arguments cannot shift.
    slr = LrTDDFT(None, nspins=self.nspins, eps=self.eps,
                  istart=self.istart, jend=self.jend, xc=self.xc,
                  derivative_level=self.derivative_level,
                  numscale=self.numscale)
    tlr = LrTDDFT(None, nspins=self.nspins, eps=self.eps,
                  istart=self.istart, jend=self.jend, xc=self.xc,
                  derivative_level=self.derivative_level,
                  numscale=self.numscale)
    slr.Om, tlr.Om = self.Om.singlets_triplets()
    for lr in [slr, tlr]:
        lr.kss = lr.Om.fullkss
    return slr, tlr
def single_pole_approximation(self, i, j):
    """Return the excitation according to the
    single pole approximation. See e.g.:
    Grabo et al, Theochem 501 (2000) 353-367
    """
    for ij, kss in enumerate(self.kss):
        if kss.i == i and kss.j == j:
            # diagonal element of the Omega matrix, converted to eV
            return sqrt(self.Om.full[ij][ij]) * Hartree
    # NOTE(review): reached only when no (i, j) pair matched; 'ij' and
    # 'kss' then refer to the *last* transition of the loop, so this
    # fallback looks suspicious -- confirm the intended behaviour.
    return self.Om.full[ij][ij] / kss.energy * Hartree
def __str__(self):
    """Describe this excitation list and the KS singles it derives from."""
    parts = [ExcitationList.__str__(self),
             '# derived from:\n',
             self.kss.__str__()]
    return ''.join(parts)
def write(self, filename=None, fh=None):
    """Write current state to a file.

    'filename' is the filename. If the filename ends in .gz,
    the file is automatically saved in compressed gzip format.

    'fh' is a filehandle. This can be used to write into already
    opened files.
    """
    # Only the MPI master rank writes; all other ranks return silently.
    if mpi.rank == mpi.MASTER:
        if fh is None:
            if filename.endswith('.gz'):
                try:
                    import gzip
                    f = gzip.open(filename, 'wb')
                except:
                    # gzip unavailable: write uncompressed instead
                    f = open(filename, 'w')
            else:
                f = open(filename, 'w')
        else:
            f = fh

        # header: object name, then xc description, then the parameters
        f.write('# ' + self.name + '\n')
        xc = self.xc
        if xc is None:
            xc = 'RPA'
        if self.calculator is not None:
            xc += ' ' + self.calculator.get_xc_functional()
        f.write(xc + '\n')
        f.write('%g %d %g %d' % (self.eps, int(self.derivative_level),
                                 self.numscale, int(self.finegrid)) + '\n')
        self.kss.write(fh=f)
        self.Om.write(fh=f)

        # if already diagonalized, append eigenvalues and eigenvectors
        if len(self):
            f.write('# Eigenvalues\n')
            istart = self.istart
            if istart is None:
                istart = self.kss.istart
            jend = self.jend
            if jend is None:
                jend = self.kss.jend
            f.write('%d %d %d' % (len(self), istart, jend) + '\n')
            for ex in self:
                f.write(ex.outstring())
            f.write('# Eigenvectors\n')
            for ex in self:
                for w in ex.f:
                    f.write('%g ' % w)
                f.write('\n')

        if fh is None:
            f.close()
def d2Excdnsdnt(dup, ddn):
    """Second derivative of Exc polarised"""
    # 2x2 spin matrix of derivative grids, filled by the C extension.
    res = [[0, 0], [0, 0]]
    for ispin in range(2):
        for jspin in range(2):
            out = np.zeros(dup.shape)
            _gpaw.d2Excdnsdnt(dup, ddn, ispin, jspin, out)
            res[ispin][jspin] = out
    return res
def d2Excdn2(den):
    """Second derivative of Exc unpolarised"""
    out = np.zeros(den.shape)
    # the C extension fills 'out' in place
    _gpaw.d2Excdn2(den, out)
    return out
class LrTDDFTExcitation(Excitation):
    """A single linear-response excitation.

    Constructed either from a diagonalized Omega matrix (Om, i) or
    from an energy / transition-moment pair (e, m) read from file.
    """

    def __init__(self, Om=None, i=None, e=None, m=None):
        """Initialise from (Om, i) or from (e, m).

        Om: diagonalized Omega matrix object
        i:  index of the eigenvalue/eigenvector pair to take
        e:  excitation energy
        m:  transition moment vector
        """
        # define from the diagonalized Omega matrix
        if Om is not None:
            if i is None:
                raise RuntimeError
            ev = Om.eigenvalues[i]
            if ev < 0:
                # we reached an instability, mark it with a negative value
                self.energy = -sqrt(-ev)
            else:
                self.energy = sqrt(ev)
            self.f = Om.eigenvectors[i]
            self.kss = Om.kss
            # transition moment: eigenvector-weighted sum over KS singles
            self.me = 0.
            for f, k in zip(self.f, self.kss):
                self.me += f * k.me
            return

        # define from energy and matrix element
        if e is not None:
            if m is None:
                raise RuntimeError
            self.energy = e
            self.me = m
            return

        raise RuntimeError

    def density_change(self, paw):
        """get the density change associated with this transition"""
        raise NotImplementedError

    def outstring(self):
        """Return the one-line file representation: energy + moments."""
        # IDIOM FIX: renamed local 'str' -> 'out'; it shadowed the builtin.
        out = '%g ' % self.energy
        out += ' '
        for m in self.me:
            out += ' %g' % m
        out += '\n'
        return out

    def __str__(self):
        m2 = np.sum(self.me * self.me)
        m = sqrt(m2)
        if m > 0:
            # direction of the transition moment
            me = self.me / m
        else:
            me = self.me
        # IDIOM FIX: no local named 'str' (shadowed the builtin before)
        return "<LrTDDFTExcitation> om=%g[eV] |me|=%g (%.2f,%.2f,%.2f)" % \
            (self.energy * Hartree, m, me[0], me[1], me[2])

    def analyse(self, min=.1):
        """Return an analysis string of the excitation"""
        s = 'E=%.3f' % (self.energy * Hartree) + ' eV, f=%.3g' \
            % (self.get_oscillator_strength()[0]) + '\n'

        def sqr(x):
            return x * x
        spin = ['u', 'd']
        min2 = sqr(min)
        # start from the total weight and subtract the listed ones
        rest = np.sum(self.f ** 2)
        for f, k in zip(self.f, self.kss):
            f2 = sqr(f)
            if f2 > min2:
                s += ' %d->%d ' % (k.i, k.j) + spin[k.pspin] + ' '
                s += '%.3g \n' % f2
                rest -= f2
        s += ' rest=%.3g' % rest
        return s
def photoabsorption_spectrum(excitation_list, spectrum_file=None,
                             e_min=None, e_max=None, delta_e=None,
                             folding='Gauss', width=0.1, comment=None):
    """Uniform absorption spectrum interface

    Parameters:
    ================= ===================================================
    ``exlist``        ExcitationList
    ``spectrum_file`` File name for the output file, STDOUT if not given
    ``e_min``         min. energy, set to cover all energies if not given
    ``e_max``         max. energy, set to cover all energies if not given
    ``delta_e``       energy spacing
    ``energyunit``    Energy unit, default 'eV'
    ``folding``       Gauss (default) or Lorentz
    ``width``         folding width in terms of the chosen energyunit
    ================= ===================================================

    all energies in [eV]
    """
    # Thin wrapper: forward everything to the generic spectrum() writer,
    # fixing the energy unit to eV.
    spectrum(exlist=excitation_list, filename=spectrum_file,
             emin=e_min, emax=e_max,
             de=delta_e, energyunit='eV',
             folding=folding, width=width,
             comment=comment)
|
qsnake/gpaw
|
gpaw/lrtddft/__init__.py
|
Python
|
gpl-3.0
| 14,822
|
[
"ASE",
"GPAW"
] |
e3e72d7b386a02b245556ef6a5a8f3738f743a47193c0582eeb5c8ae8433cf30
|
#!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: HeaderTesting.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME HeaderTesting - a VTK style and validity checking utility
## .SECTION Description
## HeaderTesting is a script which checks the list of header files for
## validity based on VTK coding standard. It checks for proper super
## classes, number and style of include files, type macro, private
## copy constructor and assignment operator, broken constructors, and
## exsistence of PrintSelf method. This script should be run as a part
## of the dashboard checking of the Visualization Toolkit and related
## projects.
## .SECTION See Also
## http://www.vtk.org http://public.kitware.com/Dart/HTML/Index.shtml
## http://www.vtk.org/contribute.php#coding-standards
import sys
import re
import os
import stat
import string
# Get the path to the directory containing this script.
if __name__ == '__main__':
    selfpath = os.path.abspath(sys.path[0] or os.curdir)
else:
    selfpath = os.path.abspath(os.path.dirname(__file__))

# Load the list of names mangled by windows.h.
# (Presumably defines WindowsMangleRegEx used by CheckWindowsMangling
#  -- TODO confirm against WindowsMangleList.py.)
execfile(os.path.join(selfpath, 'WindowsMangleList.py'))

## If tested from dart, make sure to fix all the output strings
test_from_dart = 0
if os.environ.has_key("DART_TEST_FROM_DART"):  # Python 2 dict/env idiom
    test_from_dart = 1
## For backward compatibility
def StringEndsWith(str1, str2):
    """Return true when *str1* ends with *str2* (0 when it is too short)."""
    if len(str1) < len(str2):
        return 0
    # equivalent to comparing the tail slice against str2
    return str1.endswith(str2)
##
class TestVTKFiles:
    """Checks a VTK header file against the VTK coding standard."""

    def __init__(self):
        self.FileName = ""
        # error/warning flags plus dicts used as sets of unique messages
        self.ErrorValue = 0;
        self.Errors = {}
        self.WarningValue = 0;
        self.Warnings = {}
        self.FileLines = []
        self.Export = ""
        # headers a VTK header should not include directly
        self.UnnecessaryIncludes = [
            "stdio.h",
            "stdlib.h",
            "string.h",
            "iostream",
            "iostream.h",
            "strstream",
            "strstream.h",
            "fstream",
            "fstream.h",
            "windows.h"
            ]
        pass
def SetExport(self, export):
    """Set the expected export macro (e.g. VTK_COMMON_EXPORT)."""
    self.Export = export
def Print(self, text=""):
rtext = text
if test_from_dart:
rtext = string.replace(rtext, "<", "<")
rtext = string.replace(rtext, ">", ">")
print rtext
def Error(self, error):
    """Record an error message (deduplicated) and raise the error flag."""
    self.ErrorValue = 1
    self.Errors[error] = 1
    pass

def Warning(self, warning):
    """Record a warning message (deduplicated) and raise the warning flag."""
    self.WarningValue = 1
    self.Warnings[warning] = 1
    pass
def PrintErrors(self):
    """Print all collected error messages, if any."""
    if self.ErrorValue:
        self.Print( )
        self.Print( "There were errors:" )
        for a in self.Errors.keys():
            self.Print( "* %s" % a )

def PrintWarnings(self):
    """Print all collected warning messages, if any."""
    if self.WarningValue:
        self.Print( )
        self.Print( "There were warnings:" )
        for a in self.Warnings.keys():
            self.Print( "* %s" % a )
def TestFile(self, filename):
    """Load *filename* and return true when it should be tested.

    Returns false when the file carries a VTK-HeaderTest-Exclude marker;
    exits the script when the file cannot be read.
    """
    self.FileName = filename
    self.FileLines = []
    self.ClassName = ""
    self.ParentName = ""
    try:
        file = open(filename)
        self.FileLines = file.readlines()
        file.close()
    # IDIOM FIX: was a bare 'except:', which also swallowed
    # KeyboardInterrupt/SystemExit; only I/O failures are expected here.
    except (IOError, OSError):
        self.Print( "Problem reading file: %s" % filename )
        sys.exit(1)
    return not self.CheckExclude()
def CheckExclude(self):
    """Handle '// VTK-HeaderTest-Exclude: <file>' markers.

    Returns true when this file asked to be excluded from testing;
    flags exclusion lines naming a different file, and duplicates.
    """
    prefix = '// VTK-HeaderTest-Exclude:'
    exclude = 0
    for l in self.FileLines:
        if l.startswith(prefix):
            e = l[len(prefix):].strip()
            if e == os.path.basename(self.FileName):
                exclude += 1
            else:
                # marker must name this very file
                self.Error("Wrong exclusion: "+l.rstrip())
    if exclude > 1:
        self.Error("Multiple VTK-HeaderTest-Exclude lines")
    return exclude > 0
def CheckIncludes(self):
    """Check #include lines: at most one, portable (.h suffix),
    not in the unnecessary list, and including the parent header."""
    count = 0
    lines = []
    nplines = []
    unlines = []
    # matches '#include "x"' or '#include <x>', capturing the file name
    includere = "^\s*#\s*include\s*[\"<]([^>\"]+)"
    # lines containing a // comment are ignored
    ignincludere = ".*\/\/.*"
    regx = re.compile(includere)
    regx1 = re.compile(ignincludere)
    cc = 0
    includeparent = 0
    for a in self.FileLines:
        line = string.strip(a)
        rm = regx.match(line)
        if rm and not regx1.match(line):
            lines.append(" %4d: %s" % (cc, line))
            file = rm.group(1)
            if file == (self.ParentName + ".h"):
                includeparent = 1
            if not StringEndsWith(file, ".h"):
                nplines.append(" %4d: %s" % (cc, line))
            if file in self.UnnecessaryIncludes:
                unlines.append(" %4d: %s" % (cc, line))
        cc = cc + 1
    if len(lines) > 1:
        self.Print()
        self.Print( "File: %s has %d includes: " %
                    ( self.FileName, len(lines)) )
        for a in lines:
            self.Print( a )
        self.Error("Multiple includes")
    if len(nplines) > 0:
        self.Print( )
        self.Print( "File: %s has non-portable include(s): " % self.FileName )
        for a in nplines:
            self.Print( a )
        self.Error("Non-portable includes")
    if len(unlines) > 0:
        self.Print( )
        self.Print( "File: %s has unnecessary include(s): " % self.FileName )
        for a in unlines:
            self.Print( a )
        self.Error("Unnecessary includes")
    if not includeparent and self.ParentName:
        self.Print()
        self.Print( "File: %s does not include parent \"%s.h\"" %
                    ( self.FileName, self.ParentName ) )
        self.Error("Does not include parent")
    pass
def CheckParent(self):
    """Find the class declaration, record class/parent names, and
    verify exactly one class with the expected export macro."""
    # captures: (1) export macro, (2) class name, (3) parent name
    classre = "^class(\s+[^\s]*_EXPORT)?\s+(vtk[A-Z0-9_][^ :\n]*)\s*:\s*public\s+(vtk[^ \n\{]*)"
    cname = ""
    pname = ""
    classlines = []
    regx = re.compile(classre)
    cc = 0
    lastline = ""
    for a in self.FileLines:
        line = string.strip(a)
        rm = regx.match(line)
        if not rm and not cname:
            # declaration may be wrapped over two physical lines
            rm = regx.match(lastline + line)
        if rm:
            export = rm.group(1)
            export = string.strip(export)
            cname = rm.group(2)
            pname = rm.group(3)
            classlines.append(" %4d: %s" % (cc, line))
            if not export:
                self.Print("File: %s defines 1 class with no export macro:" % self.FileName)
                self.Print(" %4d: %s" % (cc, line))
                self.Error("No export macro")
            elif self.Export and self.Export != export:
                self.Print("File: %s defines 1 class with wrong export macro:" % self.FileName)
                self.Print(" %4d: %s" % (cc, line))
                self.Print(" The export macro should be: %s" % (self.Export))
                self.Error("Wrong export macro")
        cc = cc + 1
        lastline = a
    if len(classlines) > 1:
        self.Print()
        self.Print( "File: %s defines %d classes: " %
                    (self.FileName, len(classlines)) )
        for a in classlines:
            self.Print( a )
        self.Error("Multiple classes defined")
    if len(classlines) < 1:
        self.Print()
        self.Print( "File: %s does not define any classes" % self.FileName )
        self.Error("No class defined")
        return
    #self.Print( "Classname: %s ParentName: %s" % (cname, pname)
    self.ClassName = cname
    self.ParentName = pname
    pass
def CheckTypeMacro(self):
    """Verify the vtkTypeMacro(class, parent) declaration exists and
    matches the detected class and parent names."""
    count = 0
    lines = []
    oldlines = []
    # captures: (1) Abstract, (2) Revision, (3) class, (4) parent
    typere = "^\s*vtk(Abstract)?Type(Revision)*Macro\s*\(\s*(vtk[^ ,]+)\s*,\s*(vtk[^ \)]+)\s*\)\s*"
    # matches just the macro opener, for two-line declarations
    typesplitre = "^\s*vtk(Abstract)?Type(Revision)*Macro\s*\("
    regx = re.compile(typere)
    regxs = re.compile(typesplitre)
    cc = 0
    found = 0
    for a in range(len(self.FileLines)):
        line = string.strip(self.FileLines[a])
        rm = regx.match(line)
        if rm:
            found = 1
            if rm.group(2) == "Revision":
                # legacy vtkTypeRevisionMacro form
                oldlines.append(" %4d: %s" % (cc, line))
            cname = rm.group(3)
            pname = rm.group(4)
            if cname != self.ClassName or pname != self.ParentName:
                lines.append(" %4d: %s" % (cc, line))
        else:
            # Maybe it is in two lines
            rm = regxs.match(line)
            if rm:
                # NOTE(review): a+1 can index past the end if the macro
                # opener is the very last line -- confirm inputs.
                nline = line + " " + string.strip(self.FileLines[a+1])
                line = string.strip(nline)
                rm = regx.match(line)
                if rm:
                    found = 1
                    if rm.group(2) == "Revision":
                        oldlines.append(" %4d: %s" % (cc, line))
                    cname = rm.group(3)
                    pname = rm.group(4)
                    if cname != self.ClassName or pname != self.ParentName:
                        lines.append(" %4d: %s" % (cc, line))
        cc = cc + 1
    if len(lines) > 0:
        self.Print( "File: %s has broken type macro(s):" % self.FileName )
        for a in lines:
            self.Print( a )
        self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
                    (self.ClassName, self.ParentName) )
        self.Error("Broken type macro")
    if len(oldlines) > 0:
        self.Print( "File: %s has legacy type-revision macro(s):" % self.FileName )
        for a in oldlines:
            self.Print( a )
        self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
                    (self.ClassName, self.ParentName))
        self.Error("Legacy style type-revision macro")
    if not found:
        self.Print( "File: %s does not have type macro" % self.FileName )
        self.Print( "Should be:\n vtkTypeMacro(%s, %s)" %
                    (self.ClassName, self.ParentName))
        self.Error("No type macro")
    pass
def CheckForCopyAndAssignment(self):
    """Verify the class declares exactly one private copy constructor
    and one assignment operator, each marked '// Not implemented'."""
    if not self.ClassName:
        return
    count = 0
    lines = []
    oldlines = []
    copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % ( self.ClassName, self.ClassName)
    asgnoperator = "^\s*void\s*operator\s*=\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*[iI]mplemented(\.)*" % self.ClassName
    regx1 = re.compile(copyoperator)
    regx2 = re.compile(asgnoperator)
    foundcopy = 0
    foundasgn = 0
    for a in self.FileLines:
        line = string.strip(a)
        if regx1.match(line):
            foundcopy = foundcopy + 1
        if regx2.match(line):
            foundasgn = foundasgn + 1

    # The declarations may be wrapped over two physical lines; retry.
    lastline = ""
    if foundcopy < 1:
        for a in self.FileLines:
            line = string.strip(a)
            if regx1.match(lastline + line):
                foundcopy = foundcopy + 1
            lastline = a
    lastline = ""
    if foundasgn < 1:
        for a in self.FileLines:
            line = string.strip(a)
            if regx2.match(lastline + line):
                foundasgn = foundasgn + 1
            lastline = a

    if foundcopy < 1:
        self.Print( "File: %s does not define copy constructor" %
                    self.FileName )
        self.Print( "Should be:\n%s(const %s&); // Not implemented" %
                    (self.ClassName, self.ClassName) )
        self.Error("No private copy constructor")
    if foundcopy > 1:
        self.Print( "File: %s defines multiple copy constructors" %
                    self.FileName )
        self.Error("Multiple copy constructor")
    if foundasgn < 1:
        self.Print( "File: %s does not define assignment operator" %
                    self.FileName )
        self.Print( "Should be:\nvoid operator=(const %s&); // Not implemented"
                    % self.ClassName )
        self.Error("No private assignment operator")
    # BUG FIX: this previously tested 'foundcopy > 1' (copy/paste error),
    # so multiple assignment operators were never reported.
    if foundasgn > 1:
        self.Print( "File: %s defines multiple assignment operators" %
                    self.FileName )
        self.Error("Multiple assignment operators")
    pass
def CheckWeirdConstructors(self):
    """Flag constructors that take arguments; only the default
    constructor and the flagged private copy constructor are allowed."""
    count = 0
    lines = []
    oldlines = []
    # (1) captures the argument list of a constructor declaration
    constructor = "^\s*%s\s*\(([^ )]*)\)" % self.ClassName
    copyoperator = "^\s*%s\s*\(\s*const\s*%s\s*&\s*\)\s*;\s*\/\/\s*Not\s*implemented(\.)*" % ( self.ClassName, self.ClassName)
    regx1 = re.compile(constructor)
    regx2 = re.compile(copyoperator)
    cc = 0
    for a in self.FileLines:
        line = string.strip(a)
        rm = regx1.match(line)
        if rm:
            arg = string.strip(rm.group(1))
            # non-empty args and not the tolerated copy constructor
            if arg and not regx2.match(line):
                lines.append(" %4d: %s" % (cc, line))
        cc = cc + 1
    if len(lines) > 0:
        self.Print( "File: %s has weird constructor(s):" % self.FileName )
        for a in lines:
            self.Print( a )
        self.Print( "There should be only:\n %s();" % self.ClassName )
        self.Error("Weird constructor")
    pass
def CheckPrintSelf(self):
    """Warn when the class does not declare a PrintSelf method."""
    if not self.ClassName:
        return
    # non-virtual and virtual PrintSelf declaration patterns
    typere = "^\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
    newtypere = "^\s*virtual\s*void\s*PrintSelf\s*\(\s*ostream\s*&\s*os*\s*,\s*vtkIndent\s*indent\s*\)"
    regx1 = re.compile(typere)
    regx2 = re.compile(newtypere)
    found = 0
    oldstyle = 0
    for a in self.FileLines:
        line = string.strip(a)
        rm1 = regx1.match(line)
        rm2 = regx2.match(line)
        if rm1 or rm2:
            found = 1
            if rm1:
                # non-virtual match; tracked but never reported below
                oldstyle = 1
    if not found:
        self.Print( "File: %s does not define PrintSelf method:" %
                    self.FileName )
        self.Warning("No PrintSelf method")
    pass
def CheckWindowsMangling(self):
    """Flag method names that windows.h macros would mangle."""
    lines = []
    # WindowsMangleRegEx presumably comes from the execfile'd
    # WindowsMangleList.py -- TODO confirm.
    regx1 = WindowsMangleRegEx
    regx2 = re.compile("^.*VTK_LEGACY.*$")
    # This version will leave out comment lines but we probably do
    # not want to refer to mangled (hopefully deprecated) methods
    # in comments.
    # regx2 = re.compile("^(\s*//|\s*\*|.*VTK_LEGACY).*$")
    cc = 1
    for a in self.FileLines:
        line = string.strip(a)
        rm = regx1.match(line)
        if rm:
            arg = string.strip(rm.group(1))
            # legacy-marked lines are tolerated
            if arg and not regx2.match(line):
                lines.append(" %4d: %s" % (cc, line))
        cc = cc + 1
    if len(lines) > 0:
        self.Print( "File: %s has windows.h mangling violations:" % self.FileName )
        for a in lines:
            self.Print(a)
        self.Error("Windows Mangling Violation - choose another name that does not conflict.")
    pass
##
# Driver: parse the command line, walk the directory, run all checks.
test = TestVTKFiles()

## Check command line arguments
if len(sys.argv) < 2:
    print "Testing directory not specified..."
    print "Usage: %s <directory> [ exception(s) ]" % sys.argv[0]
    sys.exit(1)

dirname = sys.argv[1]
exceptions = sys.argv[2:]
# Optional second argument: an export macro of the form VTK..._EXPORT;
# when present, the remaining arguments are the exceptions.
if len(sys.argv) > 2:
    export = sys.argv[2]
    if export[:3] == "VTK" and export[len(export)-len("EXPORT"):] == "EXPORT":
        print "Use export macro: %s" % export
        exceptions = sys.argv[3:]
        test.SetExport(export)

## Traverse through the list of files
for a in os.listdir(dirname):
    ## Skip non-header files
    if not StringEndsWith(a, ".h"):
        continue
    ## Skip non-vtk files
    if not a.startswith('vtk'):
        continue
    ## Skip exceptions
    if a in exceptions:
        continue
    pathname = '%s/%s' % (dirname, a)
    if pathname in exceptions:
        continue
    mode = os.stat(pathname)[stat.ST_MODE]
    ## Skip directories
    if stat.S_ISDIR(mode):
        continue
    elif stat.S_ISREG(mode) and test.TestFile(pathname):
        ## Do all the tests
        test.CheckParent()
        test.CheckIncludes()
        test.CheckTypeMacro()
        test.CheckForCopyAndAssignment()
        test.CheckWeirdConstructors()
        test.CheckPrintSelf()
        test.CheckWindowsMangling()

## Summarize errors
test.PrintWarnings()
test.PrintErrors()
# exit status 1 when any error was recorded
sys.exit(test.ErrorValue)
|
ashray/VTK-EVM
|
Testing/Core/HeaderTesting.py
|
Python
|
bsd-3-clause
| 17,257
|
[
"VTK"
] |
e0b3befc5f5cb81e4cbaee4bf8840f7e42cdafde03f6f6a03fc1514e3bde7308
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Helper methods for generating gw input / and work flows.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "mjvansetten@gmail.com"
__date__ = "May 2014"
import time
import os
import ast
import copy
import math
import shutil
import numpy as np
from pymatgen.core.units import eV_to_Ha
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import OxidationStateRemovalTransformation, \
PrimitiveCellTransformation, SupercellTransformation
def now():
    """
    helper to return a time string
    """
    fmt = "%H:%M:%S %d/%m/%Y"
    return time.strftime(fmt)
def read_extra_abivars():
    """Read extra abinit variables from the 'extra_abivars' file.

    Returns the parsed dict, or an empty dict when the file is absent.
    Raises RuntimeError when the file does not contain a dict literal.
    """
    ea = {}
    if os.path.isfile('extra_abivars'):
        # RESOURCE FIX: the handle was never closed; use a context manager.
        with open('extra_abivars') as f:
            ea = ast.literal_eval(f.read())
        if not isinstance(ea, dict):
            raise RuntimeError
    return ea
def refine_structure(structure, symprec=1e-3):
    """Refine *structure*: strip oxidation states, symmetrize, reduce
    to the primitive cell, and ensure a right-handed lattice."""
    remove_ox = OxidationStateRemovalTransformation()
    structure = remove_ox.apply_transformation(structure)
    sym_finder = SpacegroupAnalyzer(structure=structure, symprec=symprec)
    structure = sym_finder.get_refined_structure()
    get_prim = PrimitiveCellTransformation()
    structure = get_prim.apply_transformation(structure)
    m = structure.lattice.matrix
    # scalar triple product; negative means a left-handed cell
    x_prod = np.dot(np.cross(m[0], m[1]), m[2])
    if x_prod < 0:
        print(x_prod)
        # swap the b and c axes to flip the handedness
        trans = SupercellTransformation(((1, 0, 0), (0, 0, 1), (0, 1, 0)))
        structure = trans.apply_transformation(structure)
        m = structure.lattice.matrix
        x_prod = np.dot(np.cross(m[0], m[1]), m[2])
        print(x_prod)
        if x_prod < 0:
            # still left-handed: give up rather than return a bad cell
            raise RuntimeError
    return structure
def s_name(structure):
    """Return a short name for *structure*: the reduced formula, with
    the structure's item id appended unless a file named 'old' exists."""
    formula = str(structure.composition.reduced_formula)
    if os.path.isfile('old'):
        return formula
    return formula + '_' + str(structure.item)
def clean(some_string, uppercase=False):
    """
    helper to clean up an input string
    """
    stripped = some_string.strip()
    return stripped.upper() if uppercase else stripped.lower()
def expand(tests, level):
    """Extend the convergence-test grid by one more wedge or range step.

    Even *level* values start a new ecuteps wedge and extend the nbands
    range; odd values do the opposite. Returns a deep-copied, extended
    version of *tests*.
    """
    from abipy.gw.codeinterfaces import get_all_ecuteps, get_all_nbands
    new_tests = copy.deepcopy(tests)
    for test in tests.keys():
        if test in get_all_ecuteps():
            ec = str(test)
            ec_range = tests[ec]['test_range']
            # step inferred from the last two grid points
            ec_step = ec_range[-1] - ec_range[-2]
            if int(level / 2) == level / 2:
                print('new ec wedge')
                # even level of grid extension > new ec wedge
                new_ec_range = (ec_range[-1] + int(level / 2 * ec_step),)
            else:
                print('new nb wedge')
                # odd level of grid extension > new nb wedge
                extension = tuple(range(ec_range[-1] + ec_step, ec_range[-1] + (1 + int((level - 1) / 2)) * ec_step, ec_step))
                new_ec_range = ec_range + extension
            new_tests[ec].update({'test_range': new_ec_range})
        if test in get_all_nbands():
            nb = str(test)
            nb_range = tests[nb]['test_range']
            nb_step = nb_range[-1] - nb_range[-2]
            print(nb_step)
            if int(level / 2) == level / 2:
                # even level of grid extension > new ec wedge
                extension = tuple(range(nb_range[-1] + nb_step, nb_range[-1] + (1 + int(level / 2)) * nb_step, nb_step))
                new_nb_range = nb_range + extension
            else:
                # odd level of grid extension > new nb wedge
                new_nb_range = (nb_range[-1] + int((level + 1) / 2 * nb_step),)
            new_tests[nb].update({'test_range': new_nb_range})
    print(new_tests)
    return new_tests
def print_gnuplot_header(filename, title='', mode='convplot', filetype='jpeg'):
    """Append a gnuplot header (terminal, title, axis labels) to *filename*.

    Only the 'convplot' mode writes anything; pass filetype=None to omit
    the 'set terminal' line. The file is opened in append mode so several
    plots can accumulate in one script.
    """
    xl = 'set xlabel "nbands"\n'
    yl = 'set ylabel "ecuteps (eV)"\n'
    zl = 'set zlabel "gap (eV)"\n'
    if mode == 'convplot':
        # RESOURCE FIX: the handle was never closed; use a context manager.
        with open(filename, mode='a') as f:
            if filetype is not None:
                f.write('set terminal ' + filetype + '\n')
            f.write('set title "' + title + '"\n')
            f.write(xl)
            f.write(yl)
            f.write(zl)
def read_grid_from_file(filename):
    """
    Read the results of a full set of calculations from file.

    Returns the parsed dict, or {'grid': 0, 'all_done': False} when the
    file is missing or unparsable.
    """
    try:
        # RESOURCE FIX: previously the handle leaked when literal_eval
        # raised SyntaxError; 'with' guarantees it is closed.
        with open(filename, mode='r') as f:
            full_res = ast.literal_eval(f.read())
    except SyntaxError:
        print('Problems reading ', filename)
        full_res = {'grid': 0, 'all_done': False}
    except (OSError, IOError):
        # missing file is an expected condition: return the default
        full_res = {'grid': 0, 'all_done': False}
    return full_res
def is_converged(hartree_parameters, structure, return_values=False):
    """Report whether the convergence study for *structure* succeeded.

    Reads '<name>.conv_res'. With return_values=True and convergence
    reached, returns the dict of non-zero, finite converged values
    (ecut/ecuteps rounded up to multiples of 4 when hartree_parameters);
    otherwise returns the boolean convergence flag.
    """
    filename = s_name(structure) + ".conv_res"
    to_return = {}
    try:
        f = open(filename, mode='r')
        conv_res = ast.literal_eval(f.read())
        f.close()
        # converged if any control flag is True
        converged = True if True in conv_res['control'].values() else False
    except (IOError, OSError, ValueError):
        if return_values:
            print('Inputfile ', filename, ' not found, the convergence calculation did not finish properly' \
                  ' or was not parsed ...')
        converged = False
        return converged
    if return_values and converged:
        if hartree_parameters:
            try:
                # convert eV -> Ha and round up to the next multiple of 4
                conv_res['values']['ecut'] = 4 * math.ceil(conv_res['values']['ecut'] * eV_to_Ha / 4)
            except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
                pass
            try:
                conv_res['values']['ecuteps'] = 4 * math.ceil(conv_res['values']['ecuteps'] * eV_to_Ha / 4)
            except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
                pass
        for k in conv_res['values'].keys():
            # skip unset (0) and unbounded (inf) entries
            if conv_res['values'][k] != 0 and conv_res['values'][k] != np.inf:
                to_return.update({k: conv_res['values'][k]})
        return to_return
    else:
        return converged
def store_conv_results(name, folder):
    """Archive the convergence result files for *name* into *folder*.

    The '.full_res' file is copied; the other data files are moved.
    """
    print("| Storing results for %s" % name)
    if not os.path.isdir(folder):
        os.mkdir(folder)
    shutil.copy(name + '.full_res', os.path.join(folder, name + '.full_res'))
    for data_file in ['conv_res', 'log', 'conv.log', 'str', 'fitdat', 'convdat', 'data']:
        try:
            os.rename(name + '.' + data_file, os.path.join(folder, name + '.' + data_file))
        except OSError:
            # optional files; silently skip the ones that do not exist
            pass
def add_gg_gap(structure):
    """Mark *structure* as having its band edges at the Gamma point
    and return it."""
    gamma = (0.0, 0.0, 0.0)
    structure.vbm_l = "G"
    structure.cbm_l = "G"
    structure.cbm = gamma
    structure.vbm = gamma
    return structure
|
rousseab/pymatgen
|
pymatgen/io/abinitio/helpers.py
|
Python
|
mit
| 6,823
|
[
"pymatgen"
] |
fcd0f0618054997c7ea2520c48397d59efe3e14a5f76d0b16446f23381f92fa3
|
## \example em/pdb2density.py
# A simple example showing how to simulate density from a protein.
# IMP uses a Gaussian smoothing kernel. see SampledDensityMap::resample for documentation.
#
import IMP.em
import IMP.core
import IMP.atom
import sys
IMP.setup_from_argv(sys.argv, "pdb2density")

m = IMP.Model()
# read protein (waters excluded by the selector)
sel = IMP.atom.NonWaterPDBSelector()
mh = IMP.atom.read_pdb(IMP.em.get_example_path("input.pdb"), m, sel)
# add radius info to each atom, otherwise the resampling would fail.
IMP.atom.add_radii(mh)
ps = IMP.core.get_leaves(mh)
# decide on resolution and spacing you would like to simulate to
resolution = 10.
apix = 1.5  # voxel spacing (presumably Angstrom/voxel)
dmap = IMP.em.particles2density(ps, resolution, apix)
# write out the map in the favorite format (xplor, mrc, em and spider are
# supported)
IMP.em.write_map(dmap, "example.mrc", IMP.em.MRCReaderWriter())
|
shanot/imp
|
modules/em/examples/pdb2density.py
|
Python
|
gpl-3.0
| 851
|
[
"Gaussian"
] |
bffb4288bf22348151913d852d2e2ba19cdb7f32727b883b1ecfff8f6f0e1680
|
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.thredds.logic.action as action
from ckanext.thredds import helpers
import ckan.logic.validators as val
from pylons import config
import urllib
import json
import datetime
class ThreddsPlugin(plugins.SingletonPlugin):
    """CKAN plugin providing a Thredds WMS resource view for NetCDF data."""
    plugins.implements(plugins.IConfigurer)
    plugins.implements(plugins.IResourceView, inherit=True)
    plugins.implements(plugins.IRoutes, inherit=True)
    plugins.implements(plugins.IActions)
    plugins.implements(plugins.ITemplateHelpers)
    plugins.implements(plugins.IPackageController, inherit=True)

    # IConfigurer
def update_config(self, config_):
    """Register this extension's templates, public files and resources."""
    toolkit.add_template_directory(config_, 'templates')
    toolkit.add_public_directory(config_, 'public')
    toolkit.add_resource('fanstatic', 'thredds')
    #toolkit.add_resource('public', 'thredds-public')
## IResourceView
def info(self):
    """Describe the Thredds WMS view and the schema of its edit form."""
    return {'name': 'thredds_wms_view',
            'title': plugins.toolkit._('Thredds WMS'),
            'icon': 'globe',
            'iframed': False,
            'requires_datastore': False,
            'default_title': plugins.toolkit._('View'),
            'preview_enabled': True,
            'schema': {
                # Anja,20.6.2018: Validators do not work correctly:
                # Do not allow float although they are supposed to do
                # 'minimum': [toolkit.get_validator('ignore_empty'), val.natural_number_validator],
                # 'maximum': [toolkit.get_validator('ignore_empty'), val.natural_number_validator],
                'minimum': [toolkit.get_validator('ignore_empty')],
                'maximum': [toolkit.get_validator('ignore_empty')],
                'num_colorbands': [toolkit.get_validator('ignore_empty'), val.is_positive_integer],
                'logscale': [toolkit.get_validator('ignore_empty'), val.boolean_validator],
                'default_layer': [toolkit.get_validator('ignore_empty')],
                'default_level': [toolkit.get_validator('ignore_empty')],
                'default_colormap': [toolkit.get_validator('ignore_empty')]
            }
            }
def can_view(self, data_dict):
resource = data_dict['resource']
format_lower = resource.get('format', '').lower()
if 'netcdf' in format_lower:
return True
else:
return False
    def view_template(self, context, data_dict):
        """Template rendered for the WMS view itself (IResourceView)."""
        return 'wms_view.html'
    def form_template(self, context, data_dict):
        """Template rendered for the view's configuration form (IResourceView)."""
        return 'wms_form.html'
    def setup_template_variables(self, context, data_dict):
        """Collect the variables handed to the WMS view templates.

        Resolves the resource to display (switching to the parent NetCDF
        resource when the current resource is a subset), gathers subset and
        spatial parameters, metadata, available layers and the vertical
        (pressure) dimension, and returns them as a single dict.
        """
        resource_id = data_dict['resource']['id']
        resource = data_dict['resource']
        #For subset
        subset_params =''
        #Anja 27.6.18 : Adapt_view to spatial extend
        if data_dict['package']['spatial']:
            spatial_params = data_dict['package']['spatial']
        else:
            spatial_params = ''
        # Check subset
        if '/subset/' in resource['url']:
            #Get original resource id
            package = data_dict['package']
            is_part_of_id = [d for d in package['relations'] if d['relation'] == 'is_part_of']
            if is_part_of_id:
                try:
                    variables = str(','.join([var['name'] for var in package['variables']]))
                except:
                    # NOTE(review): neither ``h`` nor ``redirect`` appears in
                    # this module's visible imports -- this error path would
                    # raise NameError; confirm against the full file.
                    h.flash_error('Thredds View was not possible as the variables of the package are not defined correctly.')
                    redirect(h.url_for(controller='package', action='resource_read',
                        id=resource['package_id'], resource_id=resource['id']))
                is_part_of_pkg = toolkit.get_action('package_show')(context, {'id': is_part_of_id[0]['id']})
                # get netcdf resource id from parent
                netcdf_resource = [res['id'] for res in is_part_of_pkg['resources'] if 'netcdf' in res['format'].lower()]
                if netcdf_resource:
                    resource_id = netcdf_resource[0]
                subset_params = helpers.get_query_params(package)
                spatial_params = package['spatial']
                if 'time_end' in subset_params and subset_params['time_end']: # Anja 14.6.18: Time not in Signal Change Indices
                    #End date will be excluded therefore increment it by one
                    corrected_end_time = subset_params['time_end']
                    date = datetime.datetime.strptime(corrected_end_time, '%Y-%m-%dT%H:%M:%S')
                    date += datetime.timedelta(days=1)
                    subset_params['time_end'] = str(date).replace(' ', 'T')
                    #Add Z ... (WMS expects UTC-suffixed ISO timestamps)
                    if not subset_params['time_start'].endswith('Z'):
                        subset_params['time_start'] = subset_params['time_start'] + 'Z'
                    if not subset_params['time_end'].endswith('Z'):
                        subset_params['time_end'] = subset_params['time_end'] + 'Z'
            else: # this should not happen
                # NOTE(review): ``variables`` is only bound inside the branch
                # above; reaching this path would raise NameError.
                subset_params ={}
                subset_params['var'] = variables
                spatial_params = package['spatial']
        '''
        IMPORTANT: The following After subset check of course
        '''
        #Anja, 5.7.2018 - check Vertical level; currently (July 2018) only pressure
        meta_data = {}
        try:
            meta_data = toolkit.get_action('thredds_get_metadata_info')(context, {'id': resource_id})
        except:
            pass
        #print json.dumps(meta_data,indent=4)
        #Anja, 5.7 - drop down for layers
        layers = []
        try:
            layers = toolkit.get_action('thredds_get_layers')(context, {'id': resource_id})
        except:
            pass
        #print json.dumps(layers,indent=4)
        if len(layers)>0:
            if 'children' in layers[0]:
                templ_layers = layers[0]['children']
            else:
                templ_layers = []
        else:
            templ_layers = []
        # time_included is a string flag ('True'/'False'), consumed as such
        # by the templates.
        if ('temporal_end' in meta_data):
            time_included = 'True'
        else:
            time_included = 'False'
        vertical_data ={}
        vertical_data['name'] =''
        vertical_data['values']=''
        vertical_data['units']=''
        if ('dimensions' in meta_data) and (len(meta_data['dimensions'])) > 3:
            for dim in meta_data['dimensions']:
                if dim['name'].lower()== "pressure":
                    vertical_data['name'] = dim['name']
                    # Create Select list (name/value pairs for the template's
                    # drop-down)
                    select_list= []
                    for v in dim['values']:
                        item={}
                        item['name'] = v
                        item['value'] = v
                        select_list.append(item)
                    vertical_data['values'] = select_list
                    vertical_data['units'] = dim['units']
        tpl_variables = {
            'resource_id': resource_id,
            'subset_params' : subset_params,
            'spatial_params' : spatial_params,
            'vertical_data': vertical_data,
            'vertical_level': data_dict['resource_view'].get('vertical_level', ''),
            'default_level': data_dict['resource_view'].get('default_level', ''),
            'minimum': data_dict['resource_view'].get('minimum', ''),
            'maximum': data_dict['resource_view'].get('maximum', ''),
            'num_colorbands': data_dict['resource_view'].get('num_colorbands', ''),
            'logscale': data_dict['resource_view'].get('logscale', ''),
            'layers':templ_layers,
            'default_layer': data_dict['resource_view'].get('default_layer', ''),
            'default_colormap': data_dict['resource_view'].get('default_colormap', ''),
            'time_included': time_included
        }
        return tpl_variables
# IRoutes
def before_map(self, map):
# image upload
map.connect('thredds', '/thredds/{service}/{catalog}/{res_id_1}/{res_id_2}/{res_id_3}',
controller='ckanext.thredds.controllers.proxy:ThreddsProxyController',
action='tds_proxy')
map.connect('thredds', '/thredds/{service}/{catalog}/{res_id_1}/{res_id_2}/{res_id_3}/{extra}',
controller='ckanext.thredds.controllers.proxy:ThreddsProxyController',
action='tds_proxy')
map.connect('subset_create', '/subset/{resource_id}/create',
controller='ckanext.thredds.controllers.subset:SubsetController',
action='subset_create')
map.connect('subset_download', '/subset/{resource_id}/download',
controller='ckanext.thredds.controllers.subset:SubsetController',
action='subset_download')
map.connect('subset_get', '/subset/{resource_id}/get/{location}/{file_type}',
controller='ckanext.thredds.controllers.subset:SubsetController',
action='subset_get')
return map
# IActions
def get_actions(self):
actions = {'thredds_get_layers': action.thredds_get_layers,
'thredds_get_layerdetails': action.thredds_get_layerdetails,
'subset_create': action.subset_create,
'thredds_get_metadata_info': action.thredds_get_metadata_info,
'thredds_get_minmax': action.thredds_get_minmax}
return actions
# ITemplateHelpers
def get_helpers(self):
return {
'get_parent_dataset': helpers.get_parent_dataset,
'get_public_children_datasets': helpers.get_public_children_datasets,
'check_subset_uniqueness': helpers.check_subset_uniqueness,
'get_queries_from_user': helpers.get_queries_from_user,
'get_query_params': helpers.get_query_params,
'check_if_res_can_create_subset': helpers.check_if_res_can_create_subset,
'get_current_datetime': helpers.get_current_datetime,
'spatial_to_coordinates': helpers.spatial_to_coordinates
}
# IPackageController
    def after_show(self, context, data_dict):
        """Strip the relationship listings from the package dict (IPackageController)."""
        # Fix for relationship problem
        data_dict.pop('relationships_as_object', None)
        data_dict.pop('relationships_as_subject', None)
        # NOTE(review): IPackageController.after_show implementations
        # conventionally return data_dict; the source visible here ends
        # without a return -- confirm against the full file.
|
ccca-dc/ckanext-thredds
|
ckanext/thredds/plugin.py
|
Python
|
gpl-3.0
| 10,660
|
[
"NetCDF"
] |
57030de8c1062210a166a6e5f9222165b37c2f1c618cab72aa42bf2bada7eaa5
|
# Copyright (C) 2010-2014, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT (see COPYING file)
import numpy as np
from . import _convolve
from . import morph
from .internal import _get_output, _normalize_sequence, _verify_is_floatingpoint_type, _as_floating_point_array
from ._filters import mode2int, modes, _check_mode
# Public names exported via ``from mahotas.convolve import *``.
__all__ = [
    'convolve',
    'convolve1d',
    'daubechies',
    'idaubechies',
    'haar',
    'ihaar',
    'median_filter',
    'rank_filter',
    'template_match',
    'gaussian_filter1d',
    'gaussian_filter',
    'wavelet_center',
    'wavelet_decenter',
    ]
def convolve(f, weights, mode='reflect', cval=0.0, out=None, output=None):
    '''
    convolved = convolve(f, weights, mode='reflect', cval=0.0, out={new array})

    Convolve `f` with `weights`.

    The computation runs in double precision to avoid over/underflow; the
    result is cast back to ``f.dtype``.

    Parameters
    ----------
    f : ndarray
        input. Any dimension is supported
    weights : ndarray
        weight filter. If not of the same dtype as `f`, it is cast
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `f` as well as be
        C-contiguous.

    Returns
    -------
    convolved : ndarray of same dtype as `f`
    '''
    if f.ndim != weights.ndim:
        raise ValueError('mahotas.convolve: `f` and `weights` must have the same dimensions')
    if f.dtype != weights.dtype:
        weights = weights.astype(f.dtype)
    result = _get_output(f, out, 'convolve', output=output)
    _check_mode(mode, cval, 'convolve')
    return _convolve.convolve(f, weights, result, mode2int[mode])
def convolve1d(f, weights, axis, mode='reflect', cval=0., out=None, output=None):
    '''
    convolved = convolve1d(f, weights, axis, mode='reflect', cval=0.0, out={new array})

    Convolution of `f` and `weights` along axis `axis`.

    Convolution is performed in `doubles` to avoid over/underflow, but the
    result is then cast to `f.dtype`.

    Parameters
    ----------
    f : ndarray
        input. Any dimension is supported
    weights : 1-D ndarray
        weight filter. If not of the same dtype as `f`, it is cast
    axis : int
        Axis along which to convolve
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `f` as well as be
        C-contiguous.

    Returns
    -------
    convolved : ndarray of same dtype as `f`

    See Also
    --------
    convolve : function
        generic convolution
    '''
    weights = np.asanyarray(weights)
    weights = weights.squeeze()
    if weights.ndim != 1:
        raise ValueError('mahotas.convolve1d: only 1-D sequences allowed')
    _check_mode(mode, cval, 'convolve1d')
    if f.flags.contiguous and len(weights) < f.shape[axis]:
        # Fast path: move `axis` last, collapse the array to 2-D for the
        # dedicated C 1-D convolution, then restore the original layout.
        # NOTE(review): this path forwards only `out` to _get_output, not
        # the deprecated `output` alias -- confirm intended.
        weights = weights.astype(np.double)
        indices = [a for a in range(f.ndim) if a != axis] + [axis]
        rindices = [indices.index(a) for a in range(f.ndim)]
        oshape = f.shape
        f = f.transpose(indices)
        tshape = f.shape
        f = f.reshape((f.shape[0],-1))
        out = _get_output(f, out, 'convolve1d')
        _convolve.convolve1d(f, weights, out, mode2int[mode])
        out = out.reshape(tshape)
        out = out.transpose(rindices)
        out = out.reshape(oshape)
        return out
    else:
        # General path: expand the 1-D filter into an f.ndim kernel with
        # singleton dimensions everywhere except `axis`, then delegate to
        # the n-dimensional convolve.
        index = [None] * f.ndim
        index[axis] = slice(0, None)
        weights = weights[tuple(index)]
        return convolve(f, weights, mode=mode, cval=cval, output=output)
def median_filter(f, Bc=None, mode='reflect', cval=0.0, out=None, output=None):
    '''
    median = median_filter(f, Bc={square}, mode='reflect', cval=0.0, out={np.empty(f.shape, f.dtype})

    Median filter

    Parameters
    ----------
    f : ndarray
        input. Any dimension is supported
    Bc : ndarray or int, optional
        Defines the neighbourhood, default is a square of side 3.
        An int ``n`` is interpreted as a square (hypercube) of side ``n``.
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `f` as well as be
        C-contiguous.

    Returns
    -------
    median : ndarray of same type and shape as ``f``
        median[i,j] is the median value of the points in f close to (i,j)
    '''
    if Bc is None:
        Bc = np.ones((3,) * len(f.shape), f.dtype)
    elif isinstance(Bc, (int, np.integer)):
        # The docstring advertises an int Bc; the previous code crashed on
        # ints because it unconditionally accessed Bc.dtype.
        Bc = np.ones((Bc,) * len(f.shape), f.dtype)
    elif f.dtype != Bc.dtype:
        Bc = Bc.astype(f.dtype)
    # the median is the middle element of the sorted neighbourhood, i.e.
    # the rank-(n//2) element
    rank = Bc.sum()//2
    output = _get_output(f, out, 'median_filter', output=output)
    _check_mode(mode, cval, 'median_filter')
    return _convolve.rank_filter(f, Bc, output, int(rank), mode2int[mode])
def rank_filter(f, Bc, rank, mode='reflect', cval=0.0, out=None, output=None):
    '''
    ranked = rank_filter(f, Bc, rank, mode='reflect', cval=0.0, out=None)

    Rank filter: ``ranked[i,j]`` is the ``rank``-th largest value within the
    neighbourhood of ``(i,j)`` defined by ``Bc``.

    Parameters
    ----------
    f : ndarray
        input. Any dimension is supported
    Bc : ndarray
        Defines the neighbourhood. Must be explicitly passed, no default.
    rank : integer
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `f` as well as be
        C-contiguous.

    Returns
    -------
    ranked : ndarray of same type and shape as ``f``
        ranked[i,j] is the ``rank``th value of the points in f close to (i,j)

    See Also
    --------
    median_filter : A special case of rank_filter
    '''
    Bc = morph.get_structuring_elem(f, Bc)
    result = _get_output(f, out, 'rank_filter', output=output)
    _check_mode(mode, cval, 'rank_filter')
    return _convolve.rank_filter(f, Bc, result, rank, mode2int[mode])
def template_match(f, template, mode='reflect', cval=0., out=None, output=None):
    '''
    match = template_match(f, template, mode='reflect', cval=0., out={np.empty_like(f)})

    Match a template against every position of `f`.

    ``match[i,j]`` is the squared euclidean distance between `template` and
    the same-sized window of `f` centred on ``(i,j)``.

    Parameters
    ----------
    f : ndarray
        input. Any dimension is supported
    template : ndarray
        Template to match. Must be explicitly passed, no default.
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `f` as well as be
        C-contiguous.

    Returns
    -------
    match : ndarray of same type and shape as ``f``
        match[i,j] is the squared euclidean distance between
        ``f[i-s0:i+s0,j-s1:j+s1]`` and ``template`` (for appropriately
        defined ``s0`` and ``s1``).
    '''
    template = template.astype(f.dtype)
    result = _get_output(f, out, 'template_match', output=output)
    _check_mode(mode, cval, 'template_match')
    return _convolve.template_match(f, template, result, mode2int[mode])
def gaussian_filter1d(array, sigma, axis=-1, order=0, mode='reflect', cval=0., out=None, output=None):
    """
    filtered = gaussian_filter1d(array, sigma, axis=-1, order=0, mode='reflect', cval=0., out={np.empty_like(array)})

    One-dimensional Gaussian filter.

    Parameters
    ----------
    array : ndarray
        input array of a floating-point type
    sigma : float
        standard deviation for Gaussian kernel (in pixel units)
    axis : int, optional
        axis to operate on
    order : {0, 1, 2, 3}, optional
        An order of 0 corresponds to convolution with a Gaussian
        kernel. An order of 1, 2, or 3 corresponds to convolution with
        the first, second or third derivatives of a Gaussian. Higher
        order derivatives are not implemented
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape and dtype as `array` as well as be
        C-contiguous.

    Returns
    -------
    filtered : ndarray
        Filtered version of `array`
    """
    _verify_is_floatingpoint_type(array, 'gaussian_filter1d')
    sigma = float(sigma)
    s2 = sigma*sigma
    # make the length of the filter equal to 4 times the standard
    # deviations:
    lw = int(4.0 * sigma + 0.5)
    # lw == 0 happens exactly when sigma < 0.125, which would produce an
    # empty kernel
    if lw <= 0:
        raise ValueError('mahotas.gaussian_filter1d: sigma must be greater or equal to 0.125 [1/8]')
    x = np.arange(2*lw+1, dtype=float)
    x -= lw
    weights = np.exp(x*x/(-2.*s2))
    weights /= np.sum(weights)
    # implement first, second and third order derivatives:
    if order == 0:
        pass
    elif order == 1 : # first derivative
        weights *= -x/s2
    elif order == 2: # second derivative
        weights *= (x*x/s2-1.)/s2
    elif order == 3: # third derivative
        weights *= (3.0 - x*x/s2)*x/(s2*s2)
    else:
        raise ValueError('mahotas.convolve.gaussian_filter1d: Order outside 0..3 not implemented')
    # Bug fix: the caller-supplied `out` used to be dropped here (only the
    # deprecated `output` alias was forwarded as `out`), so an explicit
    # output buffer was silently ignored. Forward both.
    return convolve1d(array, weights, axis, mode, cval, out=out, output=output)
def gaussian_filter(array, sigma, order=0, mode='reflect', cval=0., out=None, output=None):
    """
    filtered = gaussian_filter(array, sigma, order=0, mode='reflect', cval=0., out={np.empty_like(array)})

    Multi-dimensional Gaussian filter.

    Parameters
    ----------
    array : ndarray
        input array, any dimension is supported. If the array is an integer
        array, it will be converted to a double array.
    sigma : scalar or sequence of scalars
        standard deviation for Gaussian kernel. The standard
        deviations of the Gaussian filter are given for each axis as a
        sequence, or as a single number, in which case it is equal for
        all axes.
    order : {0, 1, 2, 3} or sequence from same set, optional
        The order of the filter along each axis is given as a sequence
        of integers, or as a single number. An order of 0 corresponds
        to convolution with a Gaussian kernel. An order of 1, 2, or 3
        corresponds to convolution with the first, second or third
        derivatives of a Gaussian. Higher order derivatives are not
        implemented
    mode : {'reflect' [default], 'nearest', 'wrap', 'mirror', 'constant', 'ignore'}
        How to handle borders
    cval : double, optional
        If `mode` is constant, which constant to use (default: 0.0)
    out : ndarray, optional
        Output array. Must have same shape as `array` as well as be
        C-contiguous. If `array` is an integer array, this must be a double
        array; otherwise, it must have the same type as `array`.

    Returns
    -------
    filtered : ndarray
        Filtered version of `array`

    Notes
    -----
    The multi-dimensional filter is implemented as a sequence of
    one-dimensional convolution filters. The intermediate arrays are
    stored in the same data type as the output. Therefore, for output
    types with a limited precision, the results may be imprecise
    because intermediate results may be stored with insufficient
    precision.
    """
    array = _as_floating_point_array(array)
    output = _get_output(array, out, 'gaussian_filter', output=output)
    orders = _normalize_sequence(array, order, 'gaussian_filter')
    sigmas = _normalize_sequence(array, sigma, 'gaussian_filter')
    output[...] = array[...]
    noutput = None
    # Separable filtering: one 1-D pass per axis, ping-ponging between two
    # buffers (the freshly written buffer becomes the next pass's input).
    # NOTE(review): depending on array.ndim parity the returned buffer may
    # not be the caller-supplied `out` array -- confirm intended.
    for axis in range(array.ndim):
        sigma = sigmas[axis]
        order = orders[axis]
        noutput = gaussian_filter1d(output, sigma, axis, order, mode, cval, noutput)
        output,noutput = noutput,output
    return output
def _wavelet_array(f, inline, func):
    # Prepare an image for an in-place wavelet transform: convert to a
    # floating-point array, require it to be 2-D, and (unless the caller
    # asked for inline operation) work on a private copy.
    f = _as_floating_point_array(f)
    if f.ndim != 2:
        raise ValueError('mahotas.convolve.%s: Only works for 2D images' % func)
    return f if inline else f.copy()
def _wavelet_center_compute(oshape, border=0, dtype=None, cval=0.0):
for c in range(1, 16+border):
nshape = 2**(np.floor(np.log2(oshape))+c)
delta = nshape - oshape
delta //= 2
if np.min(delta) <= border:
continue
position = []
for d,e in zip(delta, oshape):
position.append( slice(d, d + e) )
return nshape, position
def wavelet_center(f, border=0, dtype=float, cval=0.0):
    '''
    fc = wavelet_center(f, border=0, dtype=float, cval=0.0)

    ``fc`` is a centered version of ``f`` with a shape that is composed of
    powers of 2.

    Parameters
    ----------
    f : ndarray
        input image
    border : int, optional
        The border to use (default is no border)
    dtype : type, optional
        Type of ``fc``
    cval : float, optional
        Which value to fill the border with (default is 0)

    Returns
    -------
    fc : ndarray

    See Also
    --------
    wavelet_decenter : function
        Reverse function
    '''
    nshape, position = _wavelet_center_compute(f.shape, border)
    nimage = np.zeros(nshape, dtype=dtype)
    nimage += cval
    # index with a tuple of slices: indexing with a *list* of slices is an
    # error in modern numpy
    nimage[tuple(position)] = f
    return nimage
def wavelet_decenter(w, oshape, border=0):
    '''
    f = wavelet_decenter(w, oshape, border=0)

    Undoes the effect of ``wavelet_center``

    Parameters
    ----------
    w : ndarray
        Wavelet array
    oshape : tuple
        Desired shape
    border : int, optional
        The desired border. This **must** be the same value as was used for
        ``wavelet_center``

    Returns
    -------
    f : ndarray
        This will have shape ``oshape``

    See Also
    --------
    wavelet_center : function
        Forward function
    '''
    nshape, position = _wavelet_center_compute(oshape, border)
    # index with a tuple of slices: indexing with a *list* of slices is an
    # error in modern numpy
    return w[tuple(position)]
def haar(f, preserve_energy=True, inline=False):
    '''
    t = haar(f, preserve_energy=True, inline=False)

    Compute the 2-D Haar transform of `f`.

    The transform is applied separably: once along each axis (the second
    pass through the transposed view, in place).

    Parameters
    ----------
    f : 2-D ndarray
        Input image
    preserve_energy : bool, optional
        Whether to normalise the result so that energy is preserved (the
        default).
    inline : bool, optional
        Whether to write the results to the input image. By default, a new
        image is returned. Integer images are always converted to floating
        point and copied.

    See Also
    --------
    ihaar : function
        Reverse Haar transform
    '''
    f = _wavelet_array(f, inline, 'haar')
    _convolve.haar(f)
    _convolve.haar(f.T)
    if preserve_energy:
        # halving is exact in floating point, so *= 0.5 matches /= 2.0
        f *= 0.5
    return f
_daubechies_codes = [('D%s' % ci) for ci in range(2,21,2)]
def _daubechies_code(c):
try:
return _daubechies_codes.index(c)
except:
raise ValueError('mahotas.convolve: Known daubechies codes are {0}. You passed in {1}.'.format(_daubechies_codes, c))
def daubechies(f, code, inline=False):
    '''
    filtered = daubechies(f, code, inline=False)

    Compute the 2-D Daubechies wavelet transform of `f`.

    This function works best if the image sizes are powers of 2!

    Parameters
    ----------
    f : ndarray
        2-D image
    code : str
        One of 'D2', 'D4', ... 'D20'
    inline : bool, optional
        Whether to write the results to the input image. By default, a new
        image is returned. Integer images are always converted to floating
        point and copied.

    See Also
    --------
    haar : function
        Haar transform (equivalent to D2)
    '''
    f = _wavelet_array(f, inline, 'daubechies')
    ncode = _daubechies_code(code)
    # one in-place pass per axis (the second through the transposed view)
    _convolve.daubechies(f, ncode)
    _convolve.daubechies(f.T, ncode)
    return f
def idaubechies(f, code, inline=False):
    '''
    rfiltered = idaubechies(f, code, inline=False)

    Compute the inverse 2-D Daubechies wavelet transform of `f`.

    Parameters
    ----------
    f : ndarray
        2-D image
    code : str
        One of 'D2', 'D4', ... 'D20'
    inline : bool, optional
        Whether to write the results to the input image. By default, a new
        image is returned. Integer images are always converted to floating
        point and copied.

    See Also
    --------
    haar : function
        Haar transform (equivalent to D2)
    '''
    f = _wavelet_array(f, inline, 'idaubechies')
    ncode = _daubechies_code(code)
    # inverse passes run in the opposite axis order of the forward
    # transform: transposed view first, then the direct view
    _convolve.idaubechies(f.T, ncode)
    _convolve.idaubechies(f, ncode)
    return f
def ihaar(f, preserve_energy=True, inline=False):
    '''
    t = ihaar(f, preserve_energy=True, inline=False)

    Compute the inverse 2-D Haar transform.

    ``ihaar(haar(f))`` is more or less equal to ``f`` (equal, except for
    possible rounding issues).

    Parameters
    ----------
    f : 2-D ndarray
        Input image. If it is an integer image, it is converted to floating
        point (double).
    preserve_energy : bool, optional
        Whether to normalise the result so that energy is preserved (the
        default).
    inline : bool, optional
        Whether to write the results to the input image. By default, a new
        image is returned. Integer images are always converted to floating
        point and copied.

    Returns
    -------
    f : ndarray

    See Also
    --------
    haar : function
        Forward Haar transform
    '''
    f = _wavelet_array(f, inline, 'ihaar')
    # one in-place pass per axis (the second through the transposed view)
    _convolve.ihaar(f)
    _convolve.ihaar(f.T)
    if preserve_energy:
        # undo the forward transform's halving
        f *= 2.0
    return f
|
fabianvaccaro/pygums
|
pythonLibs/mahotas-1.1.0/mahotas/convolve.py
|
Python
|
gpl-2.0
| 18,219
|
[
"Gaussian"
] |
7928c5721e3ba6cb7ba0f56dc1074250bbdaefb161ccb263005171328fa8ba63
|
"""Functions to plot epochs data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from collections import deque
from functools import partial
import numpy as np
from ..utils import create_chunks, verbose
from ..io.pick import pick_types, channel_type
from ..fixes import Counter
from ..time_frequency import compute_epochs_psd
from .utils import tight_layout, _prepare_trellis, figure_nobar
from ..defaults import _handle_default
def plot_image_epochs(epochs, picks=None, sigma=0.3, vmin=None,
                      vmax=None, colorbar=True, order=None, show=True,
                      units=None, scalings=None, cmap='RdBu_r'):
    """Plot Event Related Potential / Fields image

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs
    picks : int | array-like of int | None
        The indices of the channels to consider. If None, all good
        data channels are plotted.
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis to apply in the image.
    vmin : float
        The min value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers
    vmax : float
        The max value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers
    colorbar : bool
        Display or not a colorbar
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int it should be of length
        the number of good epochs. If it's a callable the arguments
        passed are the times vector and the data as 2d array
        (data.shape[1] == len(times)
    show : bool
        Show figure if True.
    units : dict | None
        The units of the channel types used for axes lables. If None,
        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting.
        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15)`
    cmap : matplotlib colormap
        Colormap.

    Returns
    -------
    figs : the list of matplotlib figures
        One figure per channel displayed
    """
    from scipy import ndimage
    units = _handle_default('units', units)
    scalings = _handle_default('scalings', scalings)
    import matplotlib.pyplot as plt
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')
    if set(units.keys()) != set(scalings.keys()):
        raise ValueError('Scalings and units must have the same keys.')
    picks = np.atleast_1d(picks)
    evoked = epochs.average(picks)
    data = epochs.get_data()[:, picks, :]
    # NOTE(review): the vmin/vmax defaults below come from the *unscaled*
    # data, but the images/traces plotted later are scaled per channel
    # type -- confirm intended.
    if vmin is None:
        vmin = data.min()
    if vmax is None:
        vmax = data.max()
    figs = list()
    # one figure per picked channel: data swapped to (channel, epoch, time)
    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
        this_fig = plt.figure()
        figs.append(this_fig)
        ch_type = channel_type(epochs.info, idx)
        if ch_type not in scalings:
            # We know it's not in either scalings or units since keys match
            raise KeyError('%s type not in scalings and units' % ch_type)
        # in-place scaling (this_data is a view into `data`)
        this_data *= scalings[ch_type]
        this_order = order
        if callable(order):
            this_order = order(epochs.times, this_data)
        if this_order is not None:
            this_data = this_data[this_order]
        # smooth along the epoch axis for a cleaner image
        this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
        # layout: image on top (2/3), evoked trace below, colorbar on the right
        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
        im = plt.imshow(this_data,
                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
                                0, len(data)],
                        aspect='auto', origin='lower',
                        vmin=vmin, vmax=vmax, cmap=cmap)
        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
        if colorbar:
            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
        ax1.set_title(epochs.ch_names[idx])
        ax1.set_ylabel('Epochs')
        ax1.axis('auto')
        ax1.axis('tight')
        # mark the stimulus onset (t = 0)
        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
        ax2.plot(1e3 * evoked.times, scalings[ch_type] * evoked.data[i])
        ax2.set_xlabel('Time (ms)')
        ax2.set_ylabel(units[ch_type])
        ax2.set_ylim([vmin, vmax])
        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
        if colorbar:
            plt.colorbar(im, cax=ax3)
            tight_layout(fig=this_fig)
    if show:
        plt.show()
    return figs
def _drop_log_stats(drop_log, ignore=['IGNORED']):
"""
Parameters
----------
drop_log : list of lists
Epoch drop log from Epochs.drop_log.
ignore : list
The drop reasons to ignore.
Returns
-------
perc : float
Total percentage of epochs dropped.
"""
# XXX: This function should be moved to epochs.py after
# removal of perc return parameter in plot_drop_log()
if not isinstance(drop_log, list) or not isinstance(drop_log[0], list):
raise ValueError('drop_log must be a list of lists')
perc = 100 * np.mean([len(d) > 0 for d in drop_log
if not any(r in ignore for r in d)])
return perc
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
                  color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
                  show=True):
    """Show the channel stats based on a drop_log from Epochs

    Parameters
    ----------
    drop_log : list of lists
        Epoch drop log from Epochs.drop_log.
    threshold : float
        The percentage threshold to use to decide whether or not to
        plot. Default is zero (always plot).
    n_max_plot : int
        Maximum number of channels to show stats for.
    subject : str
        The subject name to use in the title of the plot.
    color : tuple | str
        Color to use for the bars.
    width : float
        Width of the bars.
    ignore : sequence
        The drop reasons to ignore. (An immutable default replaces the
        former mutable ``['IGNORED']`` default.)
    show : bool
        Show figure if True.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    perc = _drop_log_stats(drop_log, ignore)
    # how often each channel was responsible for a drop, excluding the
    # ignored reasons
    scores = Counter([ch for d in drop_log for ch in d if ch not in ignore])
    ch_names = np.array(list(scores.keys()))
    fig = plt.figure()
    if perc < threshold or len(ch_names) == 0:
        plt.text(0, 0, 'No drops')
        return fig
    counts = 100 * np.array(list(scores.values()), dtype=float) / len(drop_log)
    n_plot = min(n_max_plot, len(ch_names))
    # channels sorted by descending drop frequency
    order = np.flipud(np.argsort(counts))
    plt.title('%s: %0.1f%%' % (subject, perc))
    x = np.arange(n_plot)
    plt.bar(x, counts[order[:n_plot]], color=color, width=width)
    plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
               horizontalalignment='right')
    plt.tick_params(axis='x', which='major', labelsize=10)
    plt.ylabel('% of epochs rejected')
    plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
    plt.grid(True, axis='y')
    if show:
        plt.show()
    return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
                      title_str, axes_handler):
    """Redraw the per-epoch axes with new trace data (helper for plot_epochs).

    Updates each axis in `axes` with the traces of the corresponding epoch
    in `epoch_idx`, and applies or clears the grey 'rejected' colouring
    stored per axis under the current handler key (``axes_handler[0]``).
    """
    this = axes_handler[0]
    for ii, data_, ax in zip(epoch_idx, data, axes):
        # update good-channel lines in place
        [l.set_data(times, d) for l, d in zip(ax.lines, data_[good_ch_idx])]
        if bad_ch_idx is not None:
            bad_lines = [ax.lines[k] for k in bad_ch_idx]
            [l.set_data(times, d) for l, d in zip(bad_lines,
                                                  data_[bad_ch_idx])]
        if title_str is not None:
            ax.set_title(title_str % ii, fontsize=12)
        ax.set_ylim(data.min(), data.max())
        ax.set_yticks([])
        ax.set_xticks([])
        if vars(ax)[this]['reject'] is True:
            # memorizing reject: grey out all lines on a rejected epoch
            [l.set_color((0.8, 0.8, 0.8)) for l in ax.lines]
            ax.get_figure().canvas.draw()
        else:
            # forgetting previous reject: restore the default colours
            # (black for good channels, red for bad ones) if any *other*
            # handler key still marks this axis as rejected
            for k in axes_handler:
                if k == this:
                    continue
                if vars(ax).get(k, {}).get('reject', None) is True:
                    [l.set_color('k') for l in ax.lines[:len(good_ch_idx)]]
                    if bad_ch_idx is not None:
                        [l.set_color('r') for l in ax.lines[-len(bad_ch_idx):]]
                    ax.get_figure().canvas.draw()
                    break
def _epochs_navigation_onclick(event, params):
    """Handle clicks on the navigation buttons (back / next / reject-quit).

    Back/next rotate the page deques and redraw; reject-quit drops the
    marked epochs and closes the figures.
    """
    import matplotlib.pyplot as plt
    p = params
    here = None
    if event.inaxes == p['back'].ax:
        here = 1
    elif event.inaxes == p['next'].ax:
        here = -1
    elif event.inaxes == p['reject-quit'].ax:
        if p['reject_idx']:
            p['epochs'].drop_epochs(p['reject_idx'])
        plt.close(p['fig'])
        plt.close(event.inaxes.get_figure())
    if here is not None:
        # rotate both deques in lockstep so axis state stays matched to the
        # page being shown, then redraw the newly selected page
        p['idx_handler'].rotate(here)
        p['axes_handler'].rotate(here)
        this_idx = p['idx_handler'][0]
        _draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
                          p['data'][this_idx],
                          p['times'], p['axes'], p['title_str'],
                          p['axes_handler'])
        # XXX don't ask me why
        p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
    """Toggle the reject state of a single epoch when its axis is clicked.

    A first click greys the traces and records the epoch index in
    params['reject_idx']; a second click restores the colours (black for
    good channels, red for bad ones) and removes the index again.
    """
    reject_color = (0.8, 0.8, 0.8)
    ax = event.inaxes
    if event.inaxes is None:
        return
    p = params
    # per-axis state dict for the currently shown page
    here = vars(ax)[p['axes_handler'][0]]
    if here.get('reject', None) is False:
        # mark as rejected
        idx = here['idx']
        if idx not in p['reject_idx']:
            p['reject_idx'].append(idx)
            [l.set_color(reject_color) for l in ax.lines]
            here['reject'] = True
    elif here.get('reject', None) is True:
        # un-mark: restore default line colours
        idx = here['idx']
        if idx in p['reject_idx']:
            p['reject_idx'].pop(p['reject_idx'].index(idx))
            good_lines = [ax.lines[k] for k in p['good_ch_idx']]
            [l.set_color('k') for l in good_lines]
            if p['bad_ch_idx'] is not None:
                bad_lines = ax.lines[-len(p['bad_ch_idx']):]
                [l.set_color('r') for l in bad_lines]
            here['reject'] = False
    ax.get_figure().canvas.draw()
def plot_epochs(epochs, epoch_idx=None, picks=None, scalings=None,
                title_str='#%003i', show=True, block=False):
    """ Visualize single trials using Trellis plot.
    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object
    epoch_idx : array-like | int | None
        The epochs to visualize. If None, the first 20 epochs are shown.
        Defaults to None.
    picks : array-like of int | None
        Channels to be included. If None only good data channels are used.
        Defaults to None
    scalings : dict | None
        Scale factors for the traces. If None, defaults to:
        `dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4, emg=1e-3,
              ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)`
    title_str : None | str
        The string formatting to use for axes titles. If None, no titles
        will be shown. Defaults expand to ``#001, #002, ...``
    show : bool
        Show figure if True.
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for rejecting bad trials on the fly by clicking on a
        sub plot.
    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    scalings = _handle_default('scalings_plot_raw', scalings)
    # a single integer index is promoted to a one-element list
    if np.isscalar(epoch_idx):
        epoch_idx = [epoch_idx]
    if epoch_idx is None:
        # default: all epochs (paged 20 at a time below)
        n_events = len(epochs.events)
        epoch_idx = list(range(n_events))
    else:
        n_events = len(epoch_idx)
    epoch_idx = epoch_idx[:n_events]
    # pages of at most 20 epochs, navigated via deque rotation
    idx_handler = deque(create_chunks(epoch_idx, 20))
    if picks is None:
        if any('ICA' in k for k in epochs.ch_names):
            # ICA sources are stored as misc channels
            picks = pick_types(epochs.info, misc=True, ref_meg=False,
                               exclude=[])
        else:
            picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                               exclude=[])
    if len(picks) < 1:
        raise RuntimeError('No appropriate channels found. Please'
                           ' check your picks')
    # times in milliseconds for the x axis
    times = epochs.times * 1e3
    n_channels = epochs.info['nchan']
    types = [channel_type(epochs.info, idx) for idx in
             picks]
    # preallocation needed for min / max scaling
    data = np.zeros((len(epochs.events), n_channels, len(times)))
    for ii, epoch in enumerate(epochs.get_data()):
        for jj, (this_type, this_channel) in enumerate(zip(types, epoch)):
            # normalize each channel by its type-specific scale factor
            data[ii, jj] = this_channel / scalings[this_type]
    # NOTE(review): the next three lines largely repeat the computation
    # above, but here n_events is always the total event count, so a
    # user-supplied epoch_idx longer than the data gets truncated --
    # confirm whether both passes are intentional
    n_events = len(epochs.events)
    epoch_idx = epoch_idx[:n_events]
    idx_handler = deque(create_chunks(epoch_idx, 20))
    # handle bads
    bad_ch_idx = None
    ch_names = epochs.ch_names
    bads = epochs.info['bads']
    if any(ch_names[k] in bads for k in picks):
        # NOTE(review): bad_ch_idx indexes into the picked-channel list
        # while good_ch_idx filters raw pick values -- verify the two
        # index spaces are compatible where both are used
        ch_picked = [k for k in ch_names if ch_names.index(k) in picks]
        bad_ch_idx = [ch_picked.index(k) for k in bads if k in ch_names]
        good_ch_idx = [p for p in picks if p not in bad_ch_idx]
    else:
        good_ch_idx = np.arange(n_channels)
    # one small axes per epoch on the current page, max. 5 columns
    fig, axes = _prepare_trellis(len(data[idx_handler[0]]), max_col=5)
    axes_handler = deque(list(range(len(idx_handler))))
    for ii, data_, ax in zip(idx_handler[0], data[idx_handler[0]], axes):
        # good channels in black first, bad channels in red on top
        ax.plot(times, data_[good_ch_idx].T, color='k')
        if bad_ch_idx is not None:
            ax.plot(times, data_[bad_ch_idx].T, color='r')
        if title_str is not None:
            ax.set_title(title_str % ii, fontsize=12)
        # common y-limits across all epochs for comparability
        ax.set_ylim(data.min(), data.max())
        ax.set_yticks([])
        ax.set_xticks([])
        vars(ax)[axes_handler[0]] = {'idx': ii, 'reject': False}
    # initialize memory
    for this_view, this_inds in zip(axes_handler, idx_handler):
        for ii, ax in zip(this_inds, axes):
            # one state dict per (page, axes) pair, stored on the axes
            vars(ax)[this_view] = {'idx': ii, 'reject': False}
    tight_layout(fig=fig)
    # separate small figure holding the back / next / reject-quit buttons
    navigation = figure_nobar(figsize=(3, 1.5))
    from matplotlib import gridspec
    gs = gridspec.GridSpec(2, 2)
    ax1 = plt.subplot(gs[0, 0])
    ax2 = plt.subplot(gs[0, 1])
    ax3 = plt.subplot(gs[1, :])
    # shared state handed to both click callbacks below
    params = {
        'fig': fig,
        'idx_handler': idx_handler,
        'epochs': epochs,
        'picks': picks,
        'times': times,
        'scalings': scalings,
        'good_ch_idx': good_ch_idx,
        'bad_ch_idx': bad_ch_idx,
        'axes': axes,
        'back': mpl.widgets.Button(ax1, 'back'),
        'next': mpl.widgets.Button(ax2, 'next'),
        'reject-quit': mpl.widgets.Button(ax3, 'reject-quit'),
        'title_str': title_str,
        'reject_idx': [],
        'axes_handler': axes_handler,
        'data': data,
        'navigation': navigation,
    }
    fig.canvas.mpl_connect('button_press_event',
                           partial(_epochs_axes_onclick, params=params))
    navigation.canvas.mpl_connect('button_press_event',
                                  partial(_epochs_navigation_onclick,
                                          params=params))
    if show:
        plt.show(block=block)
    return fig
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, proj=False, n_fft=256,
                    picks=None, ax=None, color='black', area_mode='std',
                    area_alpha=0.33, n_overlap=0,
                    dB=True, n_jobs=1, show=True, verbose=None):
    """Plot the power spectral density across epochs
    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    picks : array-like of int | None
        List of channels to use.
    ax : instance of matplotlib Axes | None
        Axes to plot into. If None, axes will be created.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    n_overlap : int
        The number of points of overlap between blocks.
    dB : bool
        If True, transform data to decibels.
    n_jobs : int
        Number of jobs to run in parallel.
    show : bool
        Show figure if True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    Returns
    -------
    fig : instance of matplotlib figure
        Figure distributing one image per channel across sensor topography.
    """
    import matplotlib.pyplot as plt
    from .raw import _set_psd_plot_params
    # one (picks, title, axes) group per channel type
    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
        epochs.info, proj, picks, ax, area_mode)
    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
                                                ax_list)):
        psds, freqs = compute_epochs_psd(epochs, picks=picks, fmin=fmin,
                                         fmax=fmax, n_fft=n_fft,
                                         n_overlap=n_overlap, proj=proj,
                                         n_jobs=n_jobs)
        # Convert PSDs to dB
        if dB:
            psds = 10 * np.log10(psds)
            unit = 'dB'
        else:
            unit = 'power'
        # mean across epochs and channels
        psd_mean = np.mean(psds, axis=0).mean(axis=0)
        if area_mode == 'std':
            # std across channels
            psd_std = np.std(np.mean(psds, axis=0), axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            # min/max envelope across channels (of the epoch-mean PSD)
            hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
                          np.max(np.mean(psds, axis=0), axis=0))
        else:  # area_mode is None
            hyp_limits = None
        ax.plot(freqs, psd_mean, color=color)
        if hyp_limits is not None:
            # translucent band around the mean trace
            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)
        if make_label:
            # only label the outermost axes to avoid clutter
            if ii == len(picks_list) - 1:
                ax.set_xlabel('Freq (Hz)')
            if ii == len(picks_list) // 2:
                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
            ax.set_title(title)
            ax.set_xlim(freqs[0], freqs[-1])
    if make_label:
        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
    if show:
        plt.show()
    return fig
|
Odingod/mne-python
|
mne/viz/epochs.py
|
Python
|
bsd-3-clause
| 19,613
|
[
"Gaussian"
] |
4fca814025f5a96aa84fb8a14a9c859ecab1ca34f3cc734550f159ebe8d6594a
|
# -*- coding: utf-8 -*-
import unicodedata
import json
import arrow
import scrapy
from scrapyproject.showingspiders.showing_spider import ShowingSpider
from scrapyproject.items import (ShowingLoader, init_show_booking_loader)
from scrapyproject.utils import TohoUtil
class TohoV2Spider(ShowingSpider):
    """
    Toho site spider version 2.
    Improve crawling speed as we grab data from json api instead of site page.
    useful json api:
    theater list:
    https://hlo.tohotheater.jp/responsive/json/theater_list.json?_dc=1488106193
    movies showing now:
    https://hlo.tohotheater.jp/data_net/json/movie/TNPI3090.JSON
    movies coming soon:
    https://hlo.tohotheater.jp/data_net/json/movie/TNPI3080.JSON
    time schedule table:
    https://hlo.tohotheater.jp/net/schedule/TNPI3070J02.do?
    __type__=json&movie_cd=014174&vg_cd=028&term=99&seq_disp_term=7
    &site_cd=&enter_kbn=&_dc=1488106557
    detail schedule table for movie:
    https://hlo.tohotheater.jp/net/schedule/TNPI3070J01.do?
    __type__=json&movie_cd=014174&vg_cd=028&show_day=20170226
    &term=99&isMember=&site_cd=028&enter_kbn=&_dc=1488106558
    cinema schedule table:
    https://hlo.tohotheater.jp/net/schedule/TNPI3050J02.do?
    __type__=html&__useResultInfo__=no&vg_cd=076&show_day=20170226
    &term=99&isMember=&enter_kbn=&_dc=1488120297
    Visit page example:
    https://www.tohotheater.jp/theater/find.html
    https://hlo.tohotheater.jp/net/movie/TNPI3090J01.do
    https://hlo.tohotheater.jp/net/movie/TNPI3060J01.do?sakuhin_cd=014174
    https://hlo.tohotheater.jp/net/ticket/034/TNPI2040J03.do
    We will first crawl cinema list, then crawl each cinema's schedule data,
    and generate booking page urls to crawl exact booking number
    """
    name = "toho_v2"
    allowed_domains = ["hlo.tohotheater.jp", "www.tohotheater.jp"]
    start_urls = [
        'https://hlo.tohotheater.jp/responsive/json/theater_list.json'
    ]
    def parse(self, response):
        """
        crawl theater list data first

        Yields one scrapy.Request per cinema that passes the crawl
        filter; invalid or empty JSON aborts silently.
        """
        try:
            theater_list = json.loads(response.text)
        except json.JSONDecodeError:
            # malformed payload: nothing to crawl
            return
        if (not theater_list):
            return
        for curr_cinema in theater_list:
            cinema_name_list = self.get_cinema_name_list(curr_cinema)
            if not self.is_cinema_crawl(cinema_name_list):
                # skip cinemas not selected for this crawl
                continue
            site_cd = curr_cinema['VIT_GROUP_CD']
            show_day = self.date
            curr_cinema_url = self.generate_cinema_schedule_url(
                site_cd, show_day)
            request = scrapy.Request(curr_cinema_url,
                                     callback=self.parse_cinema)
            yield request
    def get_cinema_name_list(self, curr_cinema):
        # replace full width text before compare
        # (NFKC folds full-width characters to their ASCII equivalents)
        vit_group_nm = unicodedata.normalize('NFKC',
                                             curr_cinema['VIT_GROUP_NM'])
        theater_name = unicodedata.normalize('NFKC',
                                             curr_cinema['THEATER_NAME'])
        theater_name_english = unicodedata.normalize(
            'NFKC', curr_cinema['THEATER_NAME_ENGLISH'])
        site_name = unicodedata.normalize('NFKC', curr_cinema['SITE_NM'])
        return [vit_group_nm, theater_name, theater_name_english, site_name]
    def generate_cinema_schedule_url(self, site_cd, show_day):
        """
        json data url for single cinema, all movies of curr cinema
        """
        url = 'https://hlo.tohotheater.jp/net/schedule/TNPI3050J02.do?'\
              '__type__=html&__useResultInfo__=no'\
              '&vg_cd={site_cd}&show_day={show_day}&term=99'.format(
                  site_cd=site_cd, show_day=show_day)
        return url
    def parse_cinema(self, response):
        # some cinemas may not open and will return empty response
        try:
            schedule_data = json.loads(response.text)
        except json.JSONDecodeError:
            return
        if (not schedule_data):
            return
        # result_list collects both finished items and follow-up
        # requests; the parse_* helpers below append into it
        result_list = []
        for curr_cinema in schedule_data:
            showing_url_parameter = {}
            date_str = curr_cinema['showDay']['date']
            # dates are given as YYYYMMDD in Japan time (UTC+9)
            showing_url_parameter['show_day'] = arrow.get(
                date_str, 'YYYYMMDD').replace(tzinfo='UTC+9')
            for sub_cinema in curr_cinema['list']:
                self.parse_sub_cinema(
                    response, sub_cinema, showing_url_parameter, result_list)
        for result in result_list:
            if result:
                yield result
    def parse_sub_cinema(self, response, sub_cinema,
                         showing_url_parameter, result_list):
        # builds the per-cinema prototype item that all nested movie/
        # screen/showing loaders extend
        site_cd = sub_cinema['code']
        showing_url_parameter['site_cd'] = site_cd
        data_proto = ShowingLoader(response=response)
        data_proto.add_cinema_name(sub_cinema['name'])
        cinema_site = TohoUtil.generate_cinema_homepage_url(site_cd)
        data_proto.add_cinema_site(cinema_site, sub_cinema['name'])
        data_proto.add_value('source', self.name)
        for curr_movie in sub_cinema['list']:
            self.parse_movie(response, curr_movie, showing_url_parameter,
                             data_proto, result_list)
    def parse_movie(self, response, curr_movie,
                    showing_url_parameter, data_proto, result_list):
        """
        parse movie showing data
        movie may have different versions
        """
        movie_data_proto = ShowingLoader(response=response)
        # clone the cinema-level prototype before adding movie fields
        movie_data_proto.add_value(None, data_proto.load_item())
        movie_data_proto.add_title(
            title=curr_movie['name'], title_en=curr_movie['ename'])
        title_list = movie_data_proto.get_title_list()
        if not self.is_movie_crawl(title_list):
            return
        showing_url_parameter['movie_cd'] = curr_movie['code']
        for curr_screen in curr_movie['list']:
            self.parse_screen(response, curr_screen, showing_url_parameter,
                              movie_data_proto, result_list)
    def parse_screen(self, response, curr_screen,
                     showing_url_parameter, data_proto, result_list):
        showing_url_parameter['theater_cd'] = curr_screen['theaterCd']
        showing_url_parameter['screen_cd'] = curr_screen['code']
        screen_data_proto = ShowingLoader(response=response)
        screen_data_proto.add_value(None, data_proto.load_item())
        screen_data_proto.add_screen_name(curr_screen['ename'])
        for curr_showing in curr_screen['list']:
            # filter empty showing
            if not curr_showing['unsoldSeatInfo']:
                continue
            self.parse_showing(response, curr_showing, showing_url_parameter,
                               screen_data_proto, result_list)
    def parse_showing(self, response, curr_showing,
                      showing_url_parameter, data_proto, result_list):
        def parse_time(time_str):
            """
            ex. "24:40"
            """
            time = time_str.split(":")
            return (int(time[0]), int(time[1]))
        showing_url_parameter['showing_cd'] = curr_showing['code']
        showing_data_proto = ShowingLoader(response=response)
        showing_data_proto.add_value(None, data_proto.load_item())
        # time like 24:40 can not be directly parsed,
        # so we need to shift time properly
        # (presumably get_time_from_text rolls hours >= 24 to the next
        # day -- TODO confirm against its implementation)
        start_hour, start_minute = parse_time(curr_showing['showingStart'])
        showing_data_proto.add_value('start_time', self.get_time_from_text(
            start_hour, start_minute))
        end_hour, end_minute = parse_time(curr_showing['showingEnd'])
        showing_data_proto.add_value('end_time', self.get_time_from_text(
            end_hour, end_minute))
        showing_data_proto.add_value('seat_type', 'NormalSeat')
        # query screen number from database
        showing_data_proto.add_total_seat_count()
        # check whether need to continue crawl booking data or stop now
        if not self.crawl_booking_data:
            result_list.append(showing_data_proto.load_item())
            return
        booking_data_proto = init_show_booking_loader(response=response)
        booking_data_proto.add_value('showing', showing_data_proto.load_item())
        book_status = curr_showing['unsoldSeatInfo']['unsoldSeatStatus']
        booking_data_proto.add_book_status(book_status, util=TohoUtil)
        book_status = booking_data_proto.get_output_value('book_status')
        if book_status in ['SoldOut', 'NotSold']:
            # sold out or not sold
            # seat count is fully determined, no need to visit the
            # booking page
            total_seat_count = showing_data_proto.get_output_value(
                'total_seat_count')
            book_seat_count = (
                total_seat_count if book_status == 'SoldOut' else 0)
            booking_data_proto.add_value('book_seat_count', book_seat_count)
            booking_data_proto.add_time_data()
            result_list.append(booking_data_proto.load_item())
            return
        else:
            # normal, need to crawl book number on order page
            url = self.generate_showing_url(**showing_url_parameter)
            request = scrapy.Request(url,
                                     callback=self.parse_normal_showing)
            request.meta["data_proto"] = booking_data_proto.load_item()
            result_list.append(request)
    def generate_showing_url(self, site_cd, show_day, theater_cd, screen_cd,
                             movie_cd, showing_cd):
        """
        generate showing url from given data
        :param show_day: arrow object
        """
        # example: javascript:ScheduleUtils.purchaseTicket(
        #  "20170212", "076", "013132", "0761", "11", "2")
        # example: https://hlo.tohotheater.jp/net/ticket/076/TNPI2040J03.do
        # ?site_cd=076&jyoei_date=20170209&gekijyo_cd=0761&screen_cd=10
        # &sakuhin_cd=014183&pf_no=5&fnc=1&pageid=2000J01&enter_kbn=
        day_str = show_day.format('YYYYMMDD')
        return "https://hlo.tohotheater.jp/net/ticket/{site_cd}/"\
               "TNPI2040J03.do?site_cd={site_cd}&jyoei_date={jyoei_date}"\
               "&gekijyo_cd={gekijyo_cd}&screen_cd={screen_cd}"\
               "&sakuhin_cd={sakuhin_cd}&pf_no={pf_no}&fnc={fnc}"\
               "&pageid={pageid}&enter_kbn={enter_kbn}".format(
                   site_cd=site_cd, jyoei_date=day_str,
                   gekijyo_cd=theater_cd, screen_cd=screen_cd,
                   sakuhin_cd=movie_cd, pf_no=showing_cd,
                   fnc="1", pageid="2000J01", enter_kbn="")
    def parse_normal_showing(self, response):
        # count seats marked "purchased (unselectable)" on the booking page
        booked_seat_count = len(response.css('[alt~="購入済(選択不可)"]'))
        result = init_show_booking_loader(
            response=response, item=response.meta["data_proto"])
        result.add_value('book_seat_count', booked_seat_count)
        result.add_time_data()
        yield result.load_item()
|
gas1121/JapanCinemaStatusSpider
|
scrapyproject/showingspiders/toho_v2.py
|
Python
|
mit
| 10,931
|
[
"VisIt"
] |
90cc771a385cf9f40a4c0d38121951bedcee4fdb0e0c276fa1b240e94b180eec
|
import tempfile
import unittest
import PIL.Image
import pillowfight
class TestGaussian(unittest.TestCase):
    def test_gaussian(self):
        """Blur a sample image and compare against the reference output."""
        source = PIL.Image.open("tests/data/crappy_background.jpg")
        blurred = pillowfight.gaussian(source, sigma=20.0, nb_stddev=10)
        source.close()
        # beware of JPG compression
        self.assertEqual(blurred.mode, "RGB")
        # Round-trip the result through a temporary JPG so both sides of
        # the comparison carry identical compression artifacts.
        with tempfile.NamedTemporaryFile(suffix='.jpg') as tmpfile:
            blurred.save(tmpfile.name)
            blurred.close()
            reloaded = PIL.Image.open(tmpfile.name)
            reference = PIL.Image.open(
                "tests/data/crappy_background_gaussian.jpg"
            )
            self.assertEqual(reloaded.tobytes(), reference.tobytes())
            reference.close()
|
jflesch/libpillowfight
|
tests/tests_gaussian.py
|
Python
|
gpl-2.0
| 796
|
[
"Gaussian"
] |
222f7d9beb56be1a84a1e9c9398e66bd21801bb1af004045a37ff5cf67b5e7b4
|
# mako/parsetree.py
# Copyright 2006-2020 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""defines the parse tree components for Mako templates."""
import re
from mako import ast
from mako import compat
from mako import exceptions
from mako import filters
from mako import util
class Node(object):
    """Base class for all nodes in the Mako parse tree.

    Carries the position bookkeeping (source text, line, column,
    filename) that error reporting and code generation rely on.
    """

    def __init__(self, source, lineno, pos, filename):
        self.source = source
        self.lineno = lineno
        self.pos = pos
        self.filename = filename

    @property
    def exception_kwargs(self):
        """Keyword dict suitable for raising CompileExceptions."""
        return {
            "source": self.source,
            "lineno": self.lineno,
            "pos": self.pos,
            "filename": self.filename,
        }

    def get_children(self):
        """Return child nodes; leaf node types have none."""
        return []

    def accept_visitor(self, visitor):
        """Dispatch to ``visitor.visit<ClassName>`` if it exists.

        When the visitor lacks a matching method, fall back to visiting
        this node's children so traversal continues transparently.
        """

        def traverse(node):
            for child in node.get_children():
                child.accept_visitor(visitor)

        visit = getattr(visitor, "visit" + self.__class__.__name__, traverse)
        visit(self)
class TemplateNode(Node):
    """The root node: holds the template's top-level node list and the
    attributes gathered from any ``<%page>`` tag."""

    def __init__(self, filename):
        # the root spans the whole file, so source/lineno/pos are empty
        super(TemplateNode, self).__init__("", 0, 0, filename)
        self.nodes = []
        self.page_attributes = {}

    def get_children(self):
        return self.nodes

    def __repr__(self):
        page_repr = util.sorted_dict_repr(self.page_attributes)
        return "TemplateNode(%s, %r)" % (page_repr, self.nodes)
class ControlLine(Node):
    """A line-oriented Python control line or its end tag.

    e.g.::

        % if foo:
            (markup)
        % endif
    """

    has_loop_context = False

    def __init__(self, keyword, isend, text, **kwargs):
        super(ControlLine, self).__init__(**kwargs)
        self.text = text
        self.keyword = keyword
        self.isend = isend
        # primary keywords open a new block (vs. ternaries and end tags)
        self.is_primary = keyword in ("for", "if", "while", "try", "with")
        self.nodes = []
        if self.isend:
            # an end tag introduces no identifiers of its own
            self._declared_identifiers = []
            self._undeclared_identifiers = []
        else:
            code = ast.PythonFragment(text, **self.exception_kwargs)
            self._declared_identifiers = code.declared_identifiers
            self._undeclared_identifiers = code.undeclared_identifiers

    def get_children(self):
        return self.nodes

    def declared_identifiers(self):
        return self._declared_identifiers

    def undeclared_identifiers(self):
        return self._undeclared_identifiers

    def is_ternary(self, keyword):
        """Return True if ``keyword`` continues this control line
        (e.g. ``else``/``elif`` for ``if``, ``except`` for ``try``)."""
        continuations = {
            "if": set(["else", "elif"]),
            "try": set(["except", "finally"]),
            "for": set(["else"]),
        }
        return keyword in continuations.get(self.keyword, [])

    def __repr__(self):
        return "ControlLine(%r, %r, %r, %r)" % (
            self.keyword,
            self.text,
            self.isend,
            (self.lineno, self.pos),
        )
class Text(Node):
    """defines plain text in the template.

    A leaf node: the content is emitted verbatim at render time.
    """
    def __init__(self, content, **kwargs):
        super(Text, self).__init__(**kwargs)
        self.content = content
    def __repr__(self):
        return "Text(%r, %r)" % (self.content, (self.lineno, self.pos))
class Code(Node):
    """defines a Python code block, either inline or module level.

    e.g.::

        inline:
            <%
                x = 12
            %>

        module level:
            <%!
                import logger
            %>

    """
    def __init__(self, text, ismodule, **kwargs):
        self.text = text
        # ismodule: True for <%! %> blocks rendered at module scope
        self.ismodule = ismodule
        # parse eagerly so identifier errors surface at parse time
        self.code = ast.PythonCode(text, **self.exception_kwargs)
    def declared_identifiers(self):
        return self.code.declared_identifiers
    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers
    def __repr__(self):
        return "Code(%r, %r, %r)" % (
            self.text,
            self.ismodule,
            (self.lineno, self.pos),
        )
class Comment(Node):
    """defines a comment line.

        ## this is a comment

    A leaf node; comments produce no render-time output.
    """
    def __init__(self, text, **kwargs):
        super(Comment, self).__init__(**kwargs)
        self.text = text
    def __repr__(self):
        return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos))
class Expression(Node):
    """An inline expression substitution, e.g. ``${x+y}``, with an
    optional trailing filter list."""

    def __init__(self, text, escapes, **kwargs):
        super(Expression, self).__init__(**kwargs)
        self.text = text
        self.escapes = escapes
        self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs)
        self.code = ast.PythonCode(text, **self.exception_kwargs)

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        # TODO: make the "filter" shortcut list configurable at parse/gen time
        # names referenced by the escape filters, minus the built-in
        # filter names
        escape_names = self.escapes_code.undeclared_identifiers.difference(
            set(filters.DEFAULT_ESCAPES.keys())
        )
        # anything the expression itself declares is not "undeclared"
        return self.code.undeclared_identifiers.union(escape_names).difference(
            self.code.declared_identifiers
        )

    def __repr__(self):
        return "Expression(%r, %r, %r)" % (
            self.text,
            self.escapes_code.args,
            (self.lineno, self.pos),
        )
class _TagMeta(type):
    """metaclass to allow Tag to produce a subclass according to
    its keyword"""
    # maps tag keyword (e.g. "def", "include") -> registered Tag subclass
    _classmap = {}
    def __init__(cls, clsname, bases, dict_):
        # auto-register every subclass that declares a __keyword__
        if getattr(cls, "__keyword__", None) is not None:
            cls._classmap[cls.__keyword__] = cls
        super(_TagMeta, cls).__init__(clsname, bases, dict_)
    def __call__(cls, keyword, attributes, **kwargs):
        # "ns:defname" style keywords are namespace def calls, handled
        # by CallNamespaceTag rather than the registry
        if ":" in keyword:
            ns, defname = keyword.split(":")
            return type.__call__(
                CallNamespaceTag, ns, defname, attributes, **kwargs
            )
        try:
            cls = _TagMeta._classmap[keyword]
        except KeyError:
            raise exceptions.CompileException(
                "No such tag: '%s'" % keyword,
                source=kwargs["source"],
                lineno=kwargs["lineno"],
                pos=kwargs["pos"],
                filename=kwargs["filename"],
            )
        # instantiate the looked-up subclass regardless of which class
        # the constructor was invoked on
        return type.__call__(cls, keyword, attributes, **kwargs)
class Tag(compat.with_metaclass(_TagMeta, Node)):
    """abstract base class for tags.

    e.g.::

        <%sometag/>

        <%someothertag>
            stuff
        </%someothertag>

    """
    __keyword__ = None
    def __init__(
        self,
        keyword,
        attributes,
        expressions,
        nonexpressions,
        required,
        **kwargs
    ):
        r"""construct a new Tag instance.

        this constructor not called directly, and is only called
        by subclasses.

        :param keyword: the tag keyword

        :param attributes: raw dictionary of attribute key/value pairs

        :param expressions: a set of identifiers that are legal attributes,
         which can also contain embedded expressions

        :param nonexpressions: a set of identifiers that are legal
         attributes, which cannot contain embedded expressions

        :param required: a sequence of attribute names that must be
         present, otherwise a CompileException is raised

        :param \**kwargs:
         other arguments passed to the Node superclass (lineno, pos)

        """
        super(Tag, self).__init__(**kwargs)
        self.keyword = keyword
        self.attributes = attributes
        self._parse_attributes(expressions, nonexpressions)
        # validate required attributes after parsing
        missing = [r for r in required if r not in self.parsed_attributes]
        if len(missing):
            raise exceptions.CompileException(
                "Missing attribute(s): %s"
                % ",".join([repr(m) for m in missing]),
                **self.exception_kwargs
            )
        self.parent = None
        self.nodes = []
    def is_root(self):
        return self.parent is None
    def get_children(self):
        return self.nodes
    def _parse_attributes(self, expressions, nonexpressions):
        # Classify each raw attribute: expression attributes may contain
        # ${...} segments that are compiled to Python; nonexpression
        # attributes must be literal text.  Membership is tested with
        # ``in``, so subclasses must pass real tuples/sets here.
        undeclared_identifiers = set()
        self.parsed_attributes = {}
        for key in self.attributes:
            if key in expressions:
                expr = []
                # split the value into literal and ${...} pieces
                for x in re.compile(r"(\${.+?})", re.S).split(
                    self.attributes[key]
                ):
                    m = re.compile(r"^\${(.+?)}$", re.S).match(x)
                    if m:
                        code = ast.PythonCode(
                            m.group(1).rstrip(), **self.exception_kwargs
                        )
                        # we aren't discarding "declared_identifiers" here,
                        # which we do so that list comprehension-declared
                        # variables aren't counted.   As yet can't find a
                        # condition that requires it here.
                        undeclared_identifiers = undeclared_identifiers.union(
                            code.undeclared_identifiers
                        )
                        expr.append("(%s)" % m.group(1))
                    else:
                        if x:
                            expr.append(repr(x))
                # the parsed value is a Python expression string that
                # concatenates the pieces
                self.parsed_attributes[key] = " + ".join(expr) or repr("")
            elif key in nonexpressions:
                if re.search(r"\${.+?}", self.attributes[key]):
                    raise exceptions.CompileException(
                        "Attibute '%s' in tag '%s' does not allow embedded "
                        "expressions" % (key, self.keyword),
                        **self.exception_kwargs
                    )
                self.parsed_attributes[key] = repr(self.attributes[key])
            else:
                raise exceptions.CompileException(
                    "Invalid attribute for tag '%s': '%s'"
                    % (self.keyword, key),
                    **self.exception_kwargs
                )
        self.expression_undeclared_identifiers = undeclared_identifiers
    def declared_identifiers(self):
        return []
    def undeclared_identifiers(self):
        return self.expression_undeclared_identifiers
    def __repr__(self):
        return "%s(%r, %s, %r, %r)" % (
            self.__class__.__name__,
            self.keyword,
            util.sorted_dict_repr(self.attributes),
            (self.lineno, self.pos),
            self.nodes,
        )
class IncludeTag(Tag):
    """The ``<%include>`` tag; requires a ``file`` attribute."""

    __keyword__ = "include"

    def __init__(self, keyword, attributes, **kwargs):
        super(IncludeTag, self).__init__(
            keyword,
            attributes,
            ("file", "import", "args"),
            (),
            ("file",),
            **kwargs
        )
        # wrap the args string in a dummy call so it parses as Python
        self.page_args = ast.PythonCode(
            "__DUMMY(%s)" % attributes.get("args", ""), **self.exception_kwargs
        )

    def declared_identifiers(self):
        return []

    def undeclared_identifiers(self):
        # names used by the args expression, minus the dummy wrapper and
        # anything the args expression itself declares
        args_free = self.page_args.undeclared_identifiers.difference(
            set(["__DUMMY"])
        ).difference(self.page_args.declared_identifiers)
        return args_free.union(
            super(IncludeTag, self).undeclared_identifiers()
        )
class NamespaceTag(Tag):
    """The ``<%namespace>`` tag."""

    __keyword__ = "namespace"

    def __init__(self, keyword, attributes, **kwargs):
        super(NamespaceTag, self).__init__(
            keyword,
            attributes,
            ("file",),
            ("name", "inheritable", "import", "module"),
            (),
            **kwargs
        )
        # anonymous namespaces get a unique generated name
        self.name = attributes.get("name", "__anon_%s" % hex(abs(id(self))))
        # a namespace must be addressable either by name or by import
        if "name" not in attributes and "import" not in attributes:
            raise exceptions.CompileException(
                "'name' and/or 'import' attributes are required "
                "for <%namespace>",
                **self.exception_kwargs
            )
        # the source of the namespace is exactly one of file / module
        if "file" in attributes and "module" in attributes:
            raise exceptions.CompileException(
                "<%namespace> may only have one of 'file' or 'module'",
                **self.exception_kwargs
            )

    def declared_identifiers(self):
        return []
class TextTag(Tag):
    """The ``<%text>`` tag: renders its body as literal text, with an
    optional ``filter`` attribute."""

    __keyword__ = "text"

    def __init__(self, keyword, attributes, **kwargs):
        # BUG FIX: the nonexpressions argument was written as ("filter"),
        # which is just the string "filter" -- the base class tests
        # membership with ``key in nonexpressions``, so any substring key
        # (e.g. "ilt") was silently accepted as a valid attribute.  A
        # one-element tuple restricts it to exactly "filter".
        super(TextTag, self).__init__(
            keyword, attributes, (), ("filter",), (), **kwargs
        )
        self.filter_args = ast.ArgumentList(
            attributes.get("filter", ""), **self.exception_kwargs
        )

    def undeclared_identifiers(self):
        # names used by the filter expression, excluding the built-in
        # filter names, plus any from embedded expressions
        return self.filter_args.undeclared_identifiers.difference(
            filters.DEFAULT_ESCAPES.keys()
        ).union(self.expression_undeclared_identifiers)
class DefTag(Tag):
    """The ``<%def>`` tag: declares a callable template function."""

    __keyword__ = "def"

    def __init__(self, keyword, attributes, **kwargs):
        # cache_* attributes are accepted as expression attributes too
        expressions = ["buffered", "cached"] + [
            c for c in attributes if c.startswith("cache_")
        ]
        super(DefTag, self).__init__(
            keyword,
            attributes,
            expressions,
            ("name", "filter", "decorator"),
            ("name",),
            **kwargs
        )
        name = attributes["name"]
        if re.match(r"^[\w_]+$", name):
            # a bare identifier means the author omitted the signature
            raise exceptions.CompileException(
                "Missing parenthesis in %def", **self.exception_kwargs
            )
        self.function_decl = ast.FunctionDecl(
            "def " + name + ":pass", **self.exception_kwargs
        )
        self.name = self.function_decl.funcname
        self.decorator = attributes.get("decorator", "")
        self.filter_args = ast.ArgumentList(
            attributes.get("filter", ""), **self.exception_kwargs
        )

    is_anonymous = False
    is_block = False

    @property
    def funcname(self):
        return self.function_decl.funcname

    def get_argument_expressions(self, **kw):
        return self.function_decl.get_argument_expressions(**kw)

    def declared_identifiers(self):
        return self.function_decl.allargnames

    def undeclared_identifiers(self):
        # names referenced by default-argument expressions...
        default_names = set()
        for expr in self.function_decl.defaults:
            default_names.update(
                ast.PythonCode(
                    expr, **self.exception_kwargs
                ).undeclared_identifiers
            )
        # ...plus filter names (minus built-in filters) and embedded
        # expression names, minus the def's own argument names
        filter_names = self.filter_args.undeclared_identifiers.difference(
            filters.DEFAULT_ESCAPES.keys()
        )
        return (
            default_names.union(filter_names)
            .union(self.expression_undeclared_identifiers)
            .difference(self.function_decl.allargnames)
        )
class BlockTag(Tag):
    """The ``<%block>`` tag: a possibly-anonymous inline def that is
    rendered in place and may be overridden via inheritance."""
    __keyword__ = "block"
    def __init__(self, keyword, attributes, **kwargs):
        # cache_* attributes are accepted as expression attributes too
        expressions = ["buffered", "cached", "args"] + [
            c for c in attributes if c.startswith("cache_")
        ]
        super(BlockTag, self).__init__(
            keyword,
            attributes,
            expressions,
            ("name", "filter", "decorator"),
            (),
            **kwargs
        )
        name = attributes.get("name")
        # unlike <%def>, a block name must be a bare identifier
        if name and not re.match(r"^[\w_]+$", name):
            raise exceptions.CompileException(
                "%block may not specify an argument signature",
                **self.exception_kwargs
            )
        # args only make sense when the block can be called by name
        if not name and attributes.get("args", None):
            raise exceptions.CompileException(
                "Only named %blocks may specify args", **self.exception_kwargs
            )
        self.body_decl = ast.FunctionArgs(
            attributes.get("args", ""), **self.exception_kwargs
        )
        self.name = name
        self.decorator = attributes.get("decorator", "")
        self.filter_args = ast.ArgumentList(
            attributes.get("filter", ""), **self.exception_kwargs
        )
    is_block = True
    @property
    def is_anonymous(self):
        # blocks without a name render in place only
        return self.name is None
    @property
    def funcname(self):
        # anonymous blocks get a generated, line-number-based name
        return self.name or "__M_anon_%d" % (self.lineno,)
    def get_argument_expressions(self, **kw):
        return self.body_decl.get_argument_expressions(**kw)
    def declared_identifiers(self):
        return self.body_decl.allargnames
    def undeclared_identifiers(self):
        return (
            self.filter_args.undeclared_identifiers.difference(
                filters.DEFAULT_ESCAPES.keys()
            )
        ).union(self.expression_undeclared_identifiers)
class CallTag(Tag):
    """The ``<%call>`` tag: invokes a def, passing the tag body along."""

    __keyword__ = "call"

    def __init__(self, keyword, attributes, **kwargs):
        # BUG FIX: the expressions argument was written as ("args"),
        # which is just the string "args" -- the base class tests
        # membership with ``key in expressions``, so any substring key
        # (e.g. "rg") was misclassified as an expression attribute.  A
        # one-element tuple restricts it to exactly "args".
        super(CallTag, self).__init__(
            keyword, attributes, ("args",), ("expr",), ("expr",), **kwargs
        )
        self.expression = attributes["expr"]
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        # the body's argument list, e.g. <%call expr="..." args="x, y">
        self.body_decl = ast.FunctionArgs(
            attributes.get("args", ""), **self.exception_kwargs
        )

    def declared_identifiers(self):
        # names bound by the call expression plus the body's arguments
        return self.code.declared_identifiers.union(self.body_decl.allargnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.difference(
            self.code.declared_identifiers
        )
class CallNamespaceTag(Tag):
    """A ``<%ns:defname>`` call tag, produced by _TagMeta whenever a tag
    keyword contains a colon."""

    def __init__(self, namespace, defname, attributes, **kwargs):
        # every supplied attribute may be an expression here, plus "args"
        super(CallNamespaceTag, self).__init__(
            namespace + ":" + defname,
            attributes,
            tuple(attributes.keys()) + ("args",),
            (),
            (),
            **kwargs
        )
        # build the equivalent "ns.defname(k=v, ...)" call expression,
        # excluding the body-signature "args" attribute
        kw_pairs = ",".join(
            "%s=%s" % (key, val)
            for key, val in self.parsed_attributes.items()
            if key != "args"
        )
        self.expression = "%s.%s(%s)" % (namespace, defname, kw_pairs)
        self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
        self.body_decl = ast.FunctionArgs(
            attributes.get("args", ""), **self.exception_kwargs
        )

    def declared_identifiers(self):
        return self.code.declared_identifiers.union(self.body_decl.allargnames)

    def undeclared_identifiers(self):
        return self.code.undeclared_identifiers.difference(
            self.code.declared_identifiers
        )
class InheritTag(Tag):
    """The ``<%inherit>`` tag; requires a ``file`` attribute naming the
    parent template."""
    __keyword__ = "inherit"
    def __init__(self, keyword, attributes, **kwargs):
        super(InheritTag, self).__init__(
            keyword, attributes, ("file",), (), ("file",), **kwargs
        )
class PageTag(Tag):
    """The ``<%page>`` tag: per-template arguments and render options."""

    __keyword__ = "page"

    def __init__(self, keyword, attributes, **kwargs):
        # cache_* attributes are accepted as expression attributes too
        expressions = [
            "cached",
            "args",
            "expression_filter",
            "enable_loop",
        ] + [c for c in attributes if c.startswith("cache_")]
        super(PageTag, self).__init__(
            keyword, attributes, expressions, (), (), **kwargs
        )
        # signature of the template body, from the "args" attribute
        self.body_decl = ast.FunctionArgs(
            attributes.get("args", ""), **self.exception_kwargs
        )
        # default filters applied to every ${...} expression
        self.filter_args = ast.ArgumentList(
            attributes.get("expression_filter", ""), **self.exception_kwargs
        )

    def declared_identifiers(self):
        # the template body sees the <%page args> names as declared
        return self.body_decl.allargnames
|
chromium/chromium
|
third_party/mako/mako/parsetree.py
|
Python
|
bsd-3-clause
| 19,411
|
[
"VisIt"
] |
7a91a2e702ad640f0b729cddac45e5fe38cf18fbee3be223b834806282c0a786
|
"""
Basic image manipulation algorithms such as Gaussian smoothing and free rebinning.
:requires: NumPy
:requires: SciPy
:author: Sami-Matias Niemi
:contact: sniemi@unc.edu
:version: 0.1
"""
import numpy as np
import scipy.signal as s
import scipy
def frebin(image, nsout, nlout=1, total=False):
    """
    Shrink or expand an array by an arbitrary (non-integer) factor using
    flux-conserving interpolation: each input pixel contributes to the
    output in proportion to its overlap with the output pixel's footprint.

    Modelled after the IDL routine frebin.pro.

    :param image: input image, 1-d or 2-d ndarray
    :param nsout: number of samples (x direction) in the output image
    :param nlout: number of lines (y direction) in the output image
    :param total: if True, each output pixel holds the *sum* of the
                  overlapping input pixels (conserves total surface flux);
                  otherwise it holds their average.

    :return: binned array
    :rtype: ndarray
    """
    dims = image.shape
    if nlout != 1:
        nlines = dims[0]
        nsamples = dims[1]
    else:
        nlines = nlout
        nsamples = dims[0]

    # input-to-output size ratio along each axis
    xratio = nsamples / float(nsout)
    yratio = nlines / float(nlout)
    last_s = nsamples - 1
    last_l = nlines - 1

    if nlines == 1:
        # 1D case: walk the output pixels, summing the whole input pixels
        # covered and trimming the fractional overhang at each edge.
        out = np.zeros(nsout)
        for i in range(nsout):
            left = i * xratio
            lo = int(left)
            right = left + xratio
            hi = int(right) if int(right) < last_s else last_s
            lfrac = float(left) - lo
            rfrac = 1.0 - (right - hi)
            out[i] = np.sum(image[lo:hi + 1]) - lfrac * image[lo] - rfrac * image[hi]
        return out if total else out / (float(xratio) * yratio)

    # 2D case: bin along the line (y) axis first, then along the samples.
    linebin = np.zeros((nlout, nsamples))
    out = np.zeros((nsout, nlout))
    for i in range(nlout):
        left = i * yratio
        lo = int(left)
        right = left + yratio
        hi = int(right) if int(right) < last_l else last_l
        lfrac = float(left) - lo
        rfrac = 1.0 - (right - hi)
        if lo == hi:
            # input pixel entirely inside one output line
            linebin[i, :] = (1.0 - lfrac - rfrac) * image[lo, :]
        else:
            linebin[i, :] = np.sum(image[lo:hi + 1, :], axis=0) - \
                lfrac * image[lo, :] - rfrac * image[hi, :]

    # transpose so the sample axis comes first for the second pass
    linebin = np.transpose(linebin)
    for i in range(nsout):
        left = i * xratio
        lo = int(left)
        right = left + xratio
        hi = int(right) if int(right) < last_s else last_s
        lfrac = float(left) - lo
        rfrac = 1.0 - (right - hi)
        if lo == hi:
            out[i, :] = (1. - lfrac - rfrac) * linebin[lo, :]
        else:
            out[i, :] = np.sum(linebin[lo:hi + 1, :], axis=0) - \
                lfrac * linebin[lo, :] - rfrac * linebin[hi, :]

    if total:
        return np.transpose(out)
    return np.transpose(out) / (xratio * yratio)
def congrid(a, newdims, method='linear', centre=False, minusone=False):
    """
    Arbitrary resampling of source array to new dimension sizes.
    Currently only supports maintaining the same number of dimensions.
    To use 1-D arrays, first promote them to shape (x,1).

    Uses the same parameters and creates the same co-ordinate lookup points
    as IDL's congrid routine, which apparently originally came from a VAX/VMS
    routine of the same name.

    :param a: input ndarray (converted to float internally)
    :param newdims: target shape; must have the same rank as ``a.shape``
    :param method:
        neighbour - closest value from original data;
        nearest and linear - n x 1-D interpolations using
        scipy.interpolate.interp1d (see Numerical Recipes for validity of
        use of n 1-D interpolations);
        spline - uses ndimage.map_coordinates
    :param centre:
        True - interpolation points are at the centres of the bins;
        False - points are at the front edge of the bin
    :param minusone:
        For example- inarray.shape = (i,j) & new dimensions = (x,y):
        False - inarray is resampled by factors of (i/x) * (j/y);
        True - inarray is resampled by (i-1)/(x-1) * (j-1)/(y-1).
        This prevents extrapolation one element beyond bounds of input array.

    :return: resampled ndarray, or None on unsupported input

    NOTE(review): this is Python-2 era code; ``print`` statements,
    ``range(...)`` used as a list, ``np.cast``, ``np.float``, ``np.rank``
    and list-based fancy indexing (``a[list(cd)]``) are all removed in
    current Python/NumPy releases. Also ``scipy.interpolate`` /
    ``scipy.ndimage`` are used without being imported explicitly — confirm
    they are registered by the ``scipy.signal`` import on the target
    SciPy version.
    """
    # work in floating point so the interpolators behave consistently
    if not a.dtype in [np.float64, np.float32]:
        a = np.cast[float](a)

    # m1/ofs encode the minusone/centre flags as numbers used in the
    # coordinate mapping below
    m1 = np.cast[int](minusone)
    ofs = np.cast[int](centre) * 0.5
    old = np.array(a.shape)
    ndims = len(a.shape)
    if len(newdims) != ndims:
        print "[congrid] dimensions error. "\
              "This routine currently only support "\
              "rebinning to the same number of dimensions."
        return None
    newdims = np.asarray(newdims, dtype=float)
    dimlist = []

    if method == 'neighbour':
        # build the (rounded) source coordinate for every output point
        for i in range(ndims):
            base = np.indices(newdims)[i]
            dimlist.append((old[i] - m1) / (newdims[i] - m1)\
                           * (base + ofs) - ofs)
        cd = np.array(dimlist).round().astype(int)
        newa = a[list(cd)]
        return newa
    elif method in ['nearest', 'linear']:
        # calculate new dims
        for i in range(ndims):
            base = np.arange(newdims[i])
            dimlist.append((old[i] - m1) / (newdims[i] - m1)\
                           * (base + ofs) - ofs)
        # specify old dims
        olddims = [np.arange(i, dtype=np.float) for i in list(a.shape)]

        # first interpolation - for ndims = any
        mint = scipy.interpolate.interp1d(olddims[-1], a, kind=method)
        newa = mint(dimlist[-1])

        # cycle the axes so each remaining dimension in turn becomes the
        # last one and gets its own 1-D interpolation pass
        trorder = [ndims - 1] + range(ndims - 1)
        for i in range(ndims - 2, -1, -1):
            newa = newa.transpose(trorder)

            mint = scipy.interpolate.interp1d(olddims[i], newa, kind=method)
            newa = mint(dimlist[i])

        if ndims > 1:
            # need one more transpose to return to original dimensions
            newa = newa.transpose(trorder)

        return newa
    elif method in ['spline']:
        oslices = [slice(0, j) for j in old]
        oldcoords = np.ogrid[oslices]
        nslices = [slice(0, j) for j in list(newdims)]
        newcoords = np.mgrid[nslices]

        newcoords_dims = range(np.rank(newcoords))
        #make first index last
        newcoords_dims.append(newcoords_dims.pop(0))
        newcoords_tr = newcoords.transpose(newcoords_dims)
        # makes a view that affects newcoords
        newcoords_tr += ofs

        # scale output grid coordinates into the input grid's frame
        deltas = (np.asarray(old) - m1) / (newdims - m1)
        newcoords_tr *= deltas

        newcoords_tr -= ofs

        newa = scipy.ndimage.map_coordinates(a, newcoords)
        return newa
    else:
        print "Congrid error: Unrecognized interpolation type.\n",\
              "Currently only \'neighbour\', \'nearest\',\'linear\',",\
              "and \'spline\' are supported."
        return None
def rebinFactor(a, newshape):
    """Downsample ndarray *a* to *newshape* by strided sampling.

    Each dimension of *newshape* must divide the corresponding input
    dimension evenly; the output keeps every (old // new)-th element
    along each axis (no averaging or summing is performed).

    :param a: input ndarray
    :param newshape: target shape, same rank as ``a.shape``
    :return: strided view of *a* with shape *newshape*
    """
    assert len(a.shape) == len(newshape)
    # bug fix: 'sometrue' and 'mod' were referenced without the np. prefix,
    # which raised NameError; use np.any / np.mod explicitly.
    assert not np.any(np.mod(a.shape, newshape))

    # index with a tuple of slices (indexing with a list of slices is
    # rejected by modern NumPy); // keeps the step an integer everywhere
    slices = tuple(slice(None, None, old // new)
                   for old, new in zip(a.shape, newshape))
    return a[slices]
def rebin(a, *args):
    """
    Rebin ndarray data into a smaller ndarray of the same rank whose
    dimensions are factors of the original dimensions, averaging the
    collapsed elements. E.g. an array with 6 columns and 4 rows can be
    reduced to have 6, 3, 2 or 1 columns and 4, 2 or 1 rows.

    example usages:
    >>> a=rand(6,4); b=rebin(a,3,2)
    >>> a=rand(6); b=rebin(a,2)

    :param a: input ndarray
    :param args: target size of each dimension (same rank as *a*)
    :return: rebinned (block-averaged) ndarray
    """
    shape = a.shape
    lenShape = len(shape)
    factor = np.asarray(shape) // np.asarray(args)
    # bug fix / hardening: the original built the same computation as a
    # string and ran it through eval(); compute it directly instead.
    # Interleave target and factor sizes, (n0, f0, n1, f1, ...), so each
    # original axis becomes a (kept, collapsed) pair of axes.
    interleaved = []
    for newdim, f in zip(args, factor):
        interleaved.extend([newdim, f])
    result = a.reshape(interleaved)
    # sum each factor axis away, exactly like .sum(1).sum(2)... did
    for i in range(lenShape):
        result = result.sum(i + 1)
    # divide by each factor in turn (same operation order as the original)
    for f in factor:
        result = result / f
    return result
def gaussianKernel(size, sizey=None):
    """
    Return a normalised 2D Gaussian kernel for convolutions.

    The kernel spans ``2*size + 1`` rows and ``2*sizey + 1`` columns
    (``sizey`` defaults to ``size``) and sums to one.
    """
    xhalf = int(size)
    # a falsy sizey (None or 0) falls back to the x half-width
    yhalf = int(sizey) if sizey else xhalf
    xs, ys = np.mgrid[-xhalf:xhalf + 1, -yhalf:yhalf + 1]
    kernel = np.exp(-(xs ** 2 / float(xhalf) + ys ** 2 / float(yhalf)))
    return kernel / kernel.sum()
def blurImage(im, n, ny=None):
    """
    Smooth *im* by convolving it with a Gaussian kernel of half-width *n*
    (and optionally a different half-width *ny* in the y direction).

    Uses 'valid' mode, so the output is smaller than the input by the
    kernel size minus one along each axis.
    """
    kernel = gaussianKernel(n, sizey=ny)
    return s.convolve(im, kernel, mode='valid')
|
sniemi/SamPy
|
image/manipulation.py
|
Python
|
bsd-2-clause
| 9,087
|
[
"Gaussian"
] |
52e1aea6cdcc76b41251ff9bdcaba28e9a8cb96eb6fb790190bc9b3b645b36fa
|
"""
This package implements the No-U-Turn Sampler (NUTS) algorithm 6 from the NUTS
paper (Hoffman & Gelman, 2011).
Content
-------
The package mainly contains:
nuts6 return samples using the NUTS
test_nuts6 example usage of this package
and subroutines of nuts6:
build_tree the main recursion in NUTS
find_reasonable_epsilon Heuristic for choosing an initial value of epsilon
leapfrog Perform a leapfrog jump in the Hamiltonian space
stop_criterion Compute the stop condition in the main loop
A few words about NUTS
----------------------
Hamiltonian Monte Carlo or Hybrid Monte Carlo (HMC) is a Markov chain Monte
Carlo (MCMC) algorithm that avoids the random walk behavior and sensitivity to
correlated parameters, biggest weakness of many MCMC methods. Instead, it takes
a series of steps informed by first-order gradient information.
This feature allows it to converge much more quickly to high-dimensional target
distributions compared to simpler methods such as Metropolis, Gibbs sampling
(and derivatives).
However, HMC's performance is highly sensitive to two user-specified
parameters: a step size, and a desired number of steps. In particular, if the
number of steps is too small then the algorithm will just exhibit random walk
behavior, whereas if it is too large it will waste computations.
Hoffman & Gelman introduced NUTS or the No-U-Turn Sampler, an extension to HMC
that eliminates the need to set a number of steps. NUTS uses a recursive
algorithm to find likely candidate points that automatically stops when it
starts to double back and retrace its steps. Empirically, NUTS performs at
least as efficiently as, and sometimes more efficiently than, a well-tuned
standard HMC method, without requiring user intervention or costly tuning runs.
Moreover, Hoffman & Gelman derived a method for adapting the step size
parameter on the fly based on primal-dual averaging. NUTS can thus be used
with no hand-tuning at all.
In practice, the implementation still requires a number of steps, a burning
period and a stepsize. However, the stepsize will be optimized during the
burning period, and the final values of all the user-defined values will be
revised by the algorithm.
reference: arXiv:1111.4246
"The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte
Carlo", Matthew D. Hoffman & Andrew Gelman
"""
import numpy as np
from numpy import log, exp, sqrt
import sys, time, os, pickle
__all__ = ['nuts6']
class Trajectory(object):
    """Record the samples visited along the two NUTS tree branches.

    NUTS grows a trajectory in both directions from the starting point:
    the 'plus' buffer collects samples added going forward and the
    'minus' buffer samples added going backward. Both buffers grow on
    demand in increments of *bufsize* rows.
    """

    def __init__(self, ndim, bufsize=1000):
        """Allocate empty plus/minus buffers for *ndim*-dimensional samples."""
        self.ndim = ndim
        self.bufadd = bufsize            # growth increment used by increase_buf
        self.bufsize_plus = bufsize      # current capacity of each buffer
        self.bufsize_minus = bufsize
        self.trajlen_plus = 0            # number of valid rows in each buffer
        self.trajlen_minus = 0
        self.trajbuf_plus = np.zeros((self.bufsize_plus, self.ndim))
        self.trajind_plus = np.zeros(self.bufsize_plus)
        self.trajbuf_minus = np.zeros((self.bufsize_minus, self.ndim))
        self.trajind_minus = np.zeros(self.bufsize_minus)

    def increase_buf(self, which='plus'):
        """Grow the buffer on the positive or the negative side by bufadd rows."""
        addbuf = np.zeros((self.bufadd, self.ndim))
        addind = np.zeros(self.bufadd)
        if which == 'plus':
            self.trajbuf_plus = np.append(self.trajbuf_plus, addbuf, axis=0)
            self.trajind_plus = np.append(self.trajind_plus, addind)
            self.bufsize_plus += self.bufadd
        elif which == 'minus':
            self.trajbuf_minus = np.append(self.trajbuf_minus, addbuf, axis=0)
            self.trajind_minus = np.append(self.trajind_minus, addind)
            self.bufsize_minus += self.bufadd

    def reset(self):
        """Forget all stored samples (buffers are kept allocated)."""
        self.trajlen_plus = 0
        self.trajlen_minus = 0

    def add_sample(self, theta, ind, which='plus'):
        """Append sample *theta* with trajectory index *ind* to one branch."""
        if which == 'plus':
            if self.trajlen_plus >= self.bufsize_plus:
                self.increase_buf(which='plus')
            self.trajbuf_plus[self.trajlen_plus, :] = theta
            self.trajind_plus[self.trajlen_plus] = ind
            self.trajlen_plus += 1
        elif which == 'minus':
            if self.trajlen_minus >= self.bufsize_minus:
                self.increase_buf(which='minus')
            self.trajbuf_minus[self.trajlen_minus, :] = theta
            self.trajind_minus[self.trajlen_minus] = ind
            self.trajlen_minus += 1

    def length(self):
        """Return the total number of stored samples on both branches."""
        return self.trajlen_plus + self.trajlen_minus

    def get_trajectory(self, which='both'):
        """Return (samples, indices) for one branch or the full trajectory.

        For which='both' the minus branch is returned in reverse order
        (outermost point first), followed by the plus branch.
        """
        if which == 'both':
            # bug fix: the original sliced buf[:trajlen:-1], which walks the
            # *unused* zero-filled tail of the buffer instead of reversing
            # the first trajlen rows; slice the valid rows first, then
            # reverse (also correct for an empty minus branch).
            return np.append(self.trajbuf_minus[:self.trajlen_minus][::-1],
                             self.trajbuf_plus[:self.trajlen_plus, :], axis=0), \
                   np.append(self.trajind_minus[:self.trajlen_minus][::-1],
                             self.trajind_plus[:self.trajlen_plus])
        elif which == 'plus':
            return self.trajbuf_plus[:self.trajlen_plus], \
                   self.trajind_plus[:self.trajlen_plus]
        elif which == 'minus':
            return self.trajbuf_minus[:self.trajlen_minus], \
                   self.trajind_minus[:self.trajlen_minus]

    def get_used_trajectory(self, ind):
        """For trajectory index *ind*, return the samples that lead to it."""
        tiplus = self.trajind_plus[:self.trajlen_plus]
        timinus = self.trajind_minus[:self.trajlen_minus]
        if ind in tiplus:
            index = np.where(tiplus == ind)[0][0] + 1
            return self.trajbuf_plus[:index, :]
        elif ind in timinus:
            # include the starting sample (first plus entry) before the
            # minus-branch prefix
            index = np.where(timinus == ind)[0][0] + 1
            return np.append(self.trajbuf_plus[:1, :],
                             self.trajbuf_minus[:index, :], axis=0)
        else:
            raise ValueError("Index not found")
def leapfrog(theta, r, grad, epsilon, f):
    """ Perform a single leapfrog jump in the Hamiltonian space.

    INPUTS
    ------
    theta: ndarray[float, ndim=1]
        initial parameter position
    r: ndarray[float, ndim=1]
        initial momentum
    grad: float
        initial gradient value
    epsilon: float
        step size
    f: callable
        returns the log probability and gradient evaluated at theta:
        logp, grad = f(theta)

    OUTPUTS
    -------
    thetaprime: ndarray[float, ndim=1]
        new parameter position
    rprime: ndarray[float, ndim=1]
        new momentum
    gradprime: float
        new gradient
    logpprime: float
        new lnp
    """
    # half-step momentum update using the current gradient
    r_half = r + 0.5 * epsilon * grad
    # full position step with the half-updated momentum
    theta_new = theta + epsilon * r_half
    # log-probability and gradient at the new position
    logp_new, grad_new = f(theta_new)
    # second momentum half-step with the new gradient
    r_new = r_half + 0.5 * epsilon * grad_new
    return theta_new, r_new, grad_new, logp_new
def find_reasonable_epsilon(theta0, grad0, logp0, f):
    """ Heuristic for choosing an initial value of epsilon.

    Starts at epsilon = 1, first halves the step until a trial leapfrog
    step produces finite values, then doubles or halves epsilon until the
    acceptance probability of a single step crosses 0.5.

    :param theta0: starting position, 1-D ndarray
    :param grad0: gradient of the log probability at theta0
    :param logp0: log probability at theta0
    :param f: callable returning (logp, grad) at a position
    :return: the chosen step size epsilon
    """
    epsilon = 1.
    r0 = np.random.normal(0., 1., len(theta0))

    # Figure out what direction we should be moving epsilon.
    _, rprime, gradprime, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)

    # brutal! This trick make sure the step is not huge leading to infinite
    # values of the likelihood. This could also help to make sure theta stays
    # within the prior domain (if any)
    # NOTE(review): gradprime is never refreshed inside this loop (the
    # leapfrog call below discards it), so if the gradient stays infinite
    # while logpprime becomes finite... the condition uses a stale value.
    # Confirm intent and consider re-capturing gradprime each iteration.
    k = 1.
    while np.isinf(logpprime) or np.isinf(gradprime).any():
        k *= 0.5
        _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon * k, f)

    epsilon = 0.5 * k * epsilon

    # acceptance probability of a single leapfrog step from theta0
    acceptprob = np.exp(logpprime - logp0 - 0.5 * (np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))

    # a = +1 means "grow epsilon", a = -1 means "shrink epsilon"
    a = 2. * float((acceptprob > 0.5)) - 1.
    # Keep moving epsilon in that direction until acceptprob crosses 0.5.
    while ( (acceptprob ** a) > (2. ** (-a))):
        epsilon = epsilon * (2. ** a)
        _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)
        acceptprob = np.exp(logpprime - logp0 - 0.5 * ( np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))

    print "find_reasonable_epsilon=", epsilon

    return epsilon
def stop_criterion(thetaminus, thetaplus, rminus, rplus, force_trajlen, index):
    """ Decide whether the NUTS trajectory should keep growing.

    The U-turn test is
    dot(dtheta, rminus) >= 0 & dot(dtheta, rplus) >= 0

    INPUTS
    ------
    thetaminus, thetaplus: ndarray[float, ndim=1]
        under and above position
    rminus, rplus: ndarray[float, ndim=1]
        under and above momentum
    force_trajlen: int or None
        if not None, the U-turn test is ignored and the trajectory simply
        runs until *index* reaches this length
    index: int
        current trajectory index, compared against force_trajlen

    OUTPUTS
    -------
    criterion: bool
        True while the trajectory may continue
    """
    span = thetaplus - thetaminus
    no_u_turn = (np.dot(span, rminus.T) >= 0) & (np.dot(span, rplus.T) >= 0)
    if force_trajlen is None:
        return no_u_turn
    # a forced trajectory length overrides the U-turn criterion entirely
    return index < force_trajlen
def build_tree(theta, r, grad, logu, v, j, epsilon, f, joint0, ind, traj, force_trajlen):
    """The main recursion of NUTS (algorithm 6, Hoffman & Gelman 2011).

    Implicitly builds a balanced binary tree of depth *j* of leapfrog steps
    in direction *v* (+1 forward, -1 backward), returning the outermost
    states of both branches, a proposed sample drawn uniformly from the
    valid points, the slice/accuracy counters and acceptance statistics,
    plus the trajectory-index bookkeeping used by the Trajectory recorder.
    """
    if (j == 0):
        # Base case: Take a single leapfrog step in the direction v.
        thetaprime, rprime, gradprime, logpprime = leapfrog(theta, r, grad, v * epsilon, f)
        joint = logpprime - 0.5 * np.dot(rprime, rprime.T)
        # Is the new point in the slice?
        nprime = int(logu < joint)
        # Is the simulation wildly inaccurate?
        sprime = int((logu - 1000.) < joint)
        # Set the return values---minus=plus for all things here, since the
        # "tree" is of depth 0.
        thetaminus = thetaprime[:]
        thetaplus = thetaprime[:]
        rminus = rprime[:]
        rplus = rprime[:]
        gradminus = gradprime[:]
        gradplus = gradprime[:]
        # Compute the acceptance probability.
        alphaprime = min(1., np.exp(joint - joint0))
        #alphaprime = min(1., np.exp(logpprime - 0.5 * np.dot(rprime, rprime.T) - joint0))
        nalphaprime = 1

        # record the new sample on the branch matching the step direction
        if v == 1:
            ind_plus, ind_minus = ind+1, ind
            traj.add_sample(thetaprime, ind_plus, which='plus')
            ind_prime = ind_plus
        else:
            ind_plus, ind_minus = ind, ind+1
            traj.add_sample(thetaprime, ind_minus, which='minus')
            ind_prime = ind_minus
    else:
        # Recursion: Implicitly build the height j-1 left and right subtrees.
        # NOTE(review): both branches below are byte-identical -- the
        # direction is already carried by v -- so the if/else is redundant.
        if (v == 1):
            thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime, ind_plus, ind_minus, ind_prime = build_tree(theta, r, grad, logu, v, j - 1, epsilon, f, joint0, ind, traj, force_trajlen)
        else:
            thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime, ind_plus, ind_minus, ind_prime = build_tree(theta, r, grad, logu, v, j - 1, epsilon, f, joint0, ind, traj, force_trajlen)
        # No need to keep going if the stopping criteria were met in the first subtree.
        if (sprime == 1):
            # grow the second subtree from the outermost state of the branch
            # we are extending
            if (v == -1):
                thetaminus, rminus, gradminus, _, _, _, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2, ind_plus, ind_minus, ind_prime2 = build_tree(thetaminus, rminus, gradminus, logu, v, j - 1, epsilon, f, joint0, ind_minus, traj, force_trajlen)
            else:
                _, _, _, thetaplus, rplus, gradplus, thetaprime2, gradprime2, logpprime2, nprime2, sprime2, alphaprime2, nalphaprime2, ind_plus, ind_minus, ind_prime2 = build_tree(thetaplus, rplus, gradplus, logu, v, j - 1, epsilon, f, joint0, ind_plus, traj, force_trajlen)
            # Choose which subtree to propagate a sample up from.
            if (np.random.uniform() < (float(nprime2) / max(float(int(nprime) + int(nprime2)), 1.))):
                thetaprime = thetaprime2[:]
                gradprime = gradprime2[:]
                logpprime = logpprime2
                ind_prime = ind_prime2
            # Update the number of valid points.
            nprime = int(nprime) + int(nprime2)
            # Update the stopping criterion.
            sprime = int(sprime and sprime2 and stop_criterion(thetaminus, thetaplus, rminus, rplus, force_trajlen, max(ind_plus, ind_minus)))
            # Update the acceptance probability statistics.
            alphaprime = alphaprime + alphaprime2
            nalphaprime = nalphaprime + nalphaprime2

    return thetaminus, rminus, gradminus, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alphaprime, nalphaprime, ind_plus, ind_minus, ind_prime
def nuts6(f, M, Madapt, theta0, delta=0.6, verbose=True, outFile=None, pickleFile=None,
          trajectoryDir=None, force_epsilon=None, force_trajlen=None, write_burnin=False):
    """
    Implements the No-U-Turn Sampler (NUTS) algorithm 6 from from the NUTS
    paper (Hoffman & Gelman, 2011).

    Runs Madapt steps of burn-in, during which it adapts the step size
    parameter epsilon, then starts generating samples to return.

    Note the initial step size is tricky and not exactly the one from the
    initial paper. In fact the initial step size could be given by the user in
    order to avoid potential problems

    INPUTS
    ------
    f: callable
        it should return the log probability and gradient evaluated at theta
        logp, grad = f(theta)
    M: int
        number of samples to generate.
    Madapt: int
        the number of steps of burn-in/how long to run the dual averaging
        algorithm to fit the step size epsilon.
    theta0: ndarray[float, ndim=1]
        initial guess of the parameters.

    KEYWORDS
    --------
    delta: float
        targeted acceptance fraction
    verbose: bool
        print progress to stdout (throttled to roughly every 0.5 s)
    outFile: str or None
        if given, each stored sample is appended to this text file
    pickleFile: str or None
        checkpoint basename; if '<pickleFile>.pickle' exists the run is
        resumed from it, and a checkpoint is written every 100 iterations
    trajectoryDir: str or None
        if given, the plus/minus/used trajectories of each stored
        iteration are written into this directory
    force_epsilon: float or None
        fixed step size; disables the initial heuristic and the
        dual-averaging adaptation
    force_trajlen: int or None
        fixed trajectory length handed to stop_criterion
    write_burnin: bool
        also write burn-in samples/trajectories to the output files

    OUTPUTS
    -------
    samples: ndarray[float, ndim=2]
        M x D matrix of samples generated by NUTS.
        note: samples[0, :] = theta0
    """

    if len(np.shape(theta0)) > 1:
        raise ValueError('theta0 is expected to be a 1-D array')

    if pickleFile and os.path.isfile(pickleFile + '.pickle'):
        # Resume path: reload the full sampler state from the checkpoint.
        pickledict = pickle.load(open(pickleFile + '.pickle','rb'))
        (logp, grad, M, Madapt, theta0, delta, burnin, D,
         force_epsilon, epsilon, gamma, t0,
         kappa, mu, epsilonbar, Hbar, traj, mstart) = [pickledict[d] for d in ["logp", "grad", "M", "Madapt", "theta0", "delta", "burnin", "D",
                                                                              "force_epsilon", "epsilon", "gamma", "t0",
                                                                              "kappa", "mu", "epsilonbar", "Hbar", "traj",
                                                                              "mstart"]]
        # reloading numpy arrays
        samples = np.load(pickleFile + '-samples.npy')
        lnprob = np.load(pickleFile + '-lnprob.npy')
        # chop the output file to the pickled length so the text chain and
        # the checkpoint stay consistent
        written = mstart if write_burnin else mstart - Madapt
        if mstart > written:
            outfile = open(outFile,'r')
            choppedfile = open(outFile + '-tmp','w')
            for i in range(written):
                choppedfile.write(outfile.readline())
            choppedfile.close()
            outfile.close()
            os.rename(outFile + '-tmp',outFile)
    else:
        # Fresh start: allocate storage and initialize the adaptation state.
        burnin = True
        D = len(theta0)
        samples = np.empty((M + Madapt, D), dtype=float)
        lnprob = np.empty(M + Madapt, dtype=float)
        logp, grad = f(theta0)
        samples[0, :] = theta0
        lnprob[0] = logp

        # Choose a reasonable first epsilon by a simple heuristic.
        if force_epsilon is None:
            epsilon = find_reasonable_epsilon(theta0, grad, logp, f)
        else:
            epsilon = force_epsilon

        # Parameters to the dual averaging algorithm.
        gamma = 0.05
        t0 = 10
        kappa = 0.75
        mu = log(10. * epsilon)

        # Initialize dual averaging algorithm.
        epsilonbar = 1
        Hbar = 0

        # truncate/create the chain file
        if outFile is not None:
            chainfile = open(outFile, 'w')
            chainfile.close()
        if trajectoryDir is not None:
            if os.path.isfile(trajectoryDir):
                raise IOError("Not a directory: {0}".format(trajectoryDir))
            elif not os.path.isdir(trajectoryDir):
                os.mkdir(trajectoryDir)

        # Initialize trajectory memory
        traj = Trajectory(D, bufsize=1000)

        mstart = 0

    # Starting time
    tstart = time.time()
    ptime = tstart

    for m in range(mstart + 1, M + Madapt):
        # Resample momenta (Use mass matrix here for more efficiency).
        r0 = np.random.normal(0, 1, D)

        #joint lnp of theta and momentum r (Again, use mass matrix here).
        joint = logp - 0.5 * np.dot(r0, r0.T)

        # Resample u ~ uniform([0, exp(joint)]).
        # Equivalent to (log(u) - joint) ~ exponential(1).
        logu = float(joint - np.random.exponential(1, size=1))

        # if all fails, the next sample will be the previous one
        samples[m, :] = samples[m - 1, :]
        lnprob[m] = lnprob[m - 1]

        # initialize the tree
        thetaminus = samples[m - 1, :]
        thetaplus = samples[m - 1, :]
        rminus = r0[:]
        rplus = r0[:]
        gradminus = grad[:]
        gradplus = grad[:]

        j = 0  # initial heigth j = 0
        n = 1  # Initially the only valid point is the initial point.
        s = 1  # Main loop: will keep going until s == 0.

        # Reset the trajectory buffer
        traj.reset()
        traj.add_sample(thetaminus, traj.length())
        trajind, trajind_minus, trajind_plus, trajind_prime = 0, 0, 0, 0

        while (s == 1):
            # Choose a direction. -1 = backwards, 1 = forwards.
            v = int(2 * (np.random.uniform() < 0.5) - 1)

            # Double the size of the tree.
            if (v == -1):
                thetaminus, rminus, gradminus, _, _, _, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha, trajind_plus, trajind_minus, trajind_prime = build_tree(thetaminus, rminus, gradminus, logu, v, j, epsilon, f, joint, trajind_minus, traj, force_trajlen)
            else:
                _, _, _, thetaplus, rplus, gradplus, thetaprime, gradprime, logpprime, nprime, sprime, alpha, nalpha, trajind_plus, trajind_minus, trajind_prime = build_tree(thetaplus, rplus, gradplus, logu, v, j, epsilon, f, joint, trajind_plus, traj, force_trajlen)

            # Use Metropolis-Hastings to decide whether or not to move to a
            # point from the half-tree we just generated.
            _tmp = min(1, float(nprime) / float(n))
            if (sprime == 1) and (np.random.uniform() < _tmp):
                samples[m, :] = thetaprime[:]
                lnprob[m] = logpprime
                logp = logpprime
                grad = gradprime[:]
                trajind = trajind_prime
            # Update number of valid points we've seen.
            n += nprime
            # Decide if it's time to stop.
            s = sprime and stop_criterion(thetaminus, thetaplus, rminus, rplus, force_trajlen, max(trajind_plus, trajind_minus))
            # Increment depth.
            j += 1

        # Do adaptation of epsilon if we're still doing burn-in.
        if force_epsilon is None:
            # dual-averaging update of the step size (eqns in Hoffman & Gelman)
            eta = 1. / float(m + t0)
            Hbar = (1. - eta) * Hbar + eta * (delta - alpha / float(nalpha))
            if (m <= Madapt):
                epsilon = exp(mu - sqrt(m) / gamma * Hbar)
                eta = m ** -kappa
                epsilonbar = exp((1. - eta) * log(epsilonbar) + eta * log(epsilon))
            else:
                # after burn-in, freeze epsilon at the averaged value
                epsilon = epsilonbar

        # throttled progress reporting
        if verbose and time.time()-ptime > 0.5:
            ptime = time.time()
            if (m <= Madapt):
                sys.stdout.write('\r')
                sys.stdout.write('Burnin finished %2.2f percent in %f s epsilon = %e'
                                 % (float(m) / Madapt * 100.0, ptime-tstart,
                                    epsilon))
            else:
                if burnin is True:
                    burnin = False
                    sys.stdout.write('\n')
                sys.stdout.write('\r')
                sys.stdout.write('Finished %2.2f percent in %f s epsilon = %e'
                                 % (float(m-Madapt) / M * 100.0, ptime-tstart,
                                    epsilon))
            sys.stdout.flush()

        if outFile is not None and (m > Madapt or write_burnin):
            # Write the new sample to file
            chainfile = open(outFile, 'a+')
            chainfile.write('\t'.join(['%22.22f' % (samples[m, kk])
                                       for kk in range(D)]))
            chainfile.write('\t%f\t %f' % (lnprob[m], epsilon))
            chainfile.write('\n')
            chainfile.close()

        if trajectoryDir is not None:
            # Write the whole trajectory to file
            # for m in range(1, M + Madapt):
            if m <= Madapt and write_burnin:
                trajfile_plus = os.path.join(trajectoryDir,
                                             'burnin-plus-{num:06d}.txt'.format(num=m))
                trajfile_minus = os.path.join(trajectoryDir,
                                              'burnin-minus-{num:06d}.txt'.format(num=m))
                trajfile_used = os.path.join(trajectoryDir,
                                             'burnin-used-{num:06d}.txt'.format(num=m))
                np.savetxt(trajfile_plus, traj.get_trajectory(which='plus')[0])
                np.savetxt(trajfile_minus, traj.get_trajectory(which='minus')[0])
                np.savetxt(trajfile_used, traj.get_used_trajectory(trajind))
            elif m > Madapt:
                trajfile_plus = os.path.join(trajectoryDir,
                                             'plus-{num:06d}.txt'.format(num=m-Madapt))
                trajfile_minus = os.path.join(trajectoryDir,
                                              'minus-{num:06d}.txt'.format(num=m-Madapt))
                trajfile_used = os.path.join(trajectoryDir,
                                             'used-{num:06d}.txt'.format(num=m-Madapt))
                np.savetxt(trajfile_plus, traj.get_trajectory(which='plus')[0])
                np.savetxt(trajfile_minus, traj.get_trajectory(which='minus')[0])
                np.savetxt(trajfile_used, traj.get_used_trajectory(trajind))

        # NOTE(review): this checkpoint block runs even when pickleFile is
        # None, in which case 'pickleFile + ...' raises TypeError -- confirm
        # callers always pass pickleFile, or guard this block.
        if m % 100 == 0:
            pickledict = {"logp": logp, "grad": grad,
                          "M": M, "Madapt": Madapt, "theta0": theta0, "delta": delta,
                          "burnin": burnin, "D": D, "force_epsilon": force_epsilon,
                          "epsilon": epsilon, "gamma": gamma, "t0": t0,
                          "kappa": kappa, "mu": mu, "epsilonbar": epsilonbar,
                          "Hbar": Hbar, "traj": traj, "mstart": m}
            # write to temporaries first, then rename, so a crash mid-write
            # never corrupts the resumable checkpoint
            pickle.dump(pickledict,open(pickleFile + '-tmp.pickle','wb'))
            np.save(pickleFile + '-samples-tmp',samples)
            np.save(pickleFile + '-lnprob-tmp',lnprob)
            os.rename(pickleFile + '-tmp.pickle', pickleFile + '.pickle')
            os.rename(pickleFile + '-samples-tmp.npy',pickleFile + '-samples.npy')
            os.rename(pickleFile + '-lnprob-tmp.npy', pickleFile + '-lnprob.npy')

    # drop the burn-in portion before returning
    samples = samples[Madapt:, :]
    lnprob = lnprob[Madapt:]
    return samples, lnprob, epsilon
def test_nuts6():
    """ Example usage of nuts6: sampling a 2d highly correlated Gaussian distribution """

    def correlated_normal(theta):
        """
        Example of a target distribution that could be sampled from using NUTS.
        (Although of course you could sample from it more efficiently)
        Doesn't include the normalizing constant.
        """
        # Precision matrix with covariance [1, 1.98; 1.98, 4].
        # A = np.linalg.inv( cov )
        A = np.asarray([[50.251256, -24.874372],
                        [-24.874372, 12.562814]])
        # logp = -0.5 theta A theta^T, grad = -theta A
        grad = -np.dot(theta, A)
        logp = 0.5 * np.dot(grad, theta.T)
        return logp, grad

    D = 2
    M = 5000
    Madapt = 5000
    theta0 = np.random.normal(0, 1, D)
    delta = 0.2

    # reference mean/covariance used only for the comparison scatter plot
    mean = np.zeros(2)
    cov = np.asarray([[1, 1.98],
                      [1.98, 4]])

    print('Running HMC with dual averaging and trajectory length %0.2f...' % delta)
    samples, lnprob, epsilon = nuts6(correlated_normal, M, Madapt, theta0, delta)
    print('Done. Final epsilon = %f.' % epsilon)

    # thin the chain by a factor of 10 before summarizing
    samples = samples[1::10, :]
    print('Percentiles')
    print (np.percentile(samples, [16, 50, 84], axis=0))
    print('Mean')
    print (np.mean(samples, axis=0))
    print('Stddev')
    print (np.std(samples, axis=0))

    # overlay NUTS samples (red) on direct draws from the target (dots)
    import pylab as plt
    temp = np.random.multivariate_normal(mean, cov, size=500)
    plt.plot(temp[:, 0], temp[:, 1], '.')
    plt.plot(samples[:, 0], samples[:, 1], 'r+')
    plt.show()
|
vhaasteren/piccard
|
outliers/nutstrajectory.py
|
Python
|
gpl-3.0
| 25,000
|
[
"Gaussian"
] |
24443bff754687fc7c96c2f1310df9efa3435dd52b9ed2c0ac78f2688b17fc6f
|
"""Loading icons."""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _
import gtk
from logging import warn
import math
def load_icon(icon_path, icon_width=None, icon_height=None):
    """Load icon from path. Icon MUST be in PNG format.
    @param icon_path: pathname of icon, or None to load nothing
    @param icon_width: optional maximum width; the icon is scaled down to fit
    @param icon_height: optional maximum height; the icon is scaled down to fit
    @return: a GdkPixbuf, or None on failure"""
    if not icon_path:
        return None

    def size_prepared_cb(loader, width, height):
        # called by the loader once the PNG's natural size is known;
        # chooses the final decode size
        dest_width = icon_width or width
        dest_height = icon_height or height

        if dest_width == width and dest_height == height:
            return

        # scale by the smaller ratio so the icon fits inside both limits
        ratio_width = float(dest_width) / width
        ratio_height = float(dest_height) / height
        ratio = min(ratio_width, ratio_height)

        # preserve original ratio
        if ratio_width != ratio:
            dest_width = int(math.ceil(width * ratio))
        elif ratio_height != ratio:
            dest_height = int(math.ceil(height * ratio))

        loader.set_size(int(dest_width), int(dest_height))

    # Restrict icon formats to avoid attacks
    try:
        loader = gtk.gdk.PixbufLoader('png')
        if icon_width or icon_height:
            loader.connect('size-prepared', size_prepared_cb)
        try:
            loader.write(file(icon_path).read())
        finally:
            # close() finalizes the pixbuf even if write() raised
            loader.close()
        return loader.get_pixbuf()
    except Exception, ex:
        # best-effort: a broken cached icon is logged, not fatal
        warn(_("Failed to load cached PNG icon: %s") % ex)
        return None
|
pombredanne/zero-install
|
zeroinstall/gtkui/icon.py
|
Python
|
lgpl-2.1
| 1,367
|
[
"VisIt"
] |
ae77bf62cc6bc1a89398435e68bb5ebb904e09edaa097fc3c2081b1834ce1398
|
"""Example to solve 3D aqueous foam pipe flow using rheological
Herschel-Bulkley power law for bulk and wall shear stress dependent
slip velocity law for wall layer
"""
from simphony.core.cuba import CUBA
from simphony.api import CUDS, Simulation
from simphony.cuds.meta import api
from simphony.engine import EngineInterface
from foam_controlwrapper import create_block_mesh
from mayavi.scripts import mayavi2
import pipe_mesh
import tempfile
import time
start = time.time()
case_name = 'aqueous_foam'
mesh_name = 'aqueous_foam_mesh'
cuds = CUDS(name=case_name)
# physics model
cfd = api.Cfd(name='default model')
# these are already bt default set in CFD
cfd.thermal_model = api.IsothermalModel(name='isothermal')
cfd.turbulence_model = api.LaminarFlowModel(name='laminar')
cfd.compressibility_model = api.IncompressibleFluidModel(name='incompressible')
# material
foam = api.Material(name='foam')
foam.data[CUBA.DENSITY] = 250.0
foam.data[CUBA.DYNAMIC_VISCOSITY] = 4.37
cuds.add([foam])
# use Herschel Bulkley viscosity model for aqueous foam
hb = api.HerschelBulkleyModel(name='foam_rheology')
hb.initial_viscosity = 0.01748 * foam.data[CUBA.DENSITY]
hb.relaxation_time = 0.0148 * foam.data[CUBA.DENSITY]
hb.linear_constant = 0.00268 * foam.data[CUBA.DENSITY]
hb.power_law_index = 0.5
hb.material = cuds.get_by_name('foam').uid
cfd.rheology_model = hb
cuds.add([cfd])
# time setting
sim_time = api.IntegrationTime(name='simulation_time',
current=0.0,
final=1000,
size=1)
cuds.add([sim_time])
sol_par = api.SolverParameter(name='steady_state')
sol_par.data[CUBA.STEADY_STATE] = True
cuds.add([sol_par])
end = time.time()
print "Time spend in initialization: ", end-start
start = time.time()
# create computational mesh
mesh = create_block_mesh(tempfile.mkdtemp(), mesh_name,
pipe_mesh.blockMeshDict)
end = time.time()
print "Time spend in blockmesh: ", end-start
start = time.time()
cuds.add([mesh])
end = time.time()
print "Time spend in add mesh to cuds: ", end-start
start = time.time()
# boundary conditions
vel_inlet = api.Dirichlet(foam, name='vel_inlet')
vel_inlet.data[CUBA.VARIABLE] = CUBA.VELOCITY
vel_inlet.data[CUBA.VELOCITY] = (0, 0, 0.53)
pres_inlet = api.Neumann(foam, name='pres_inlet')
pres_inlet.data[CUBA.VARIABLE] = CUBA.PRESSURE
vel_outlet = api.Neumann(foam, name='vel_outlet')
vel_outlet.data[CUBA.VARIABLE] = CUBA.VELOCITY
pres_outlet = api.Dirichlet(foam, name='pres_outlet')
pres_outlet.data[CUBA.VARIABLE] = CUBA.PRESSURE
pres_outlet.data[CUBA.PRESSURE] = 0.0
vel_walls = api.ShearStressPowerLawSlipVelocity(foam,
density=250.0,
linear_constant=3.1e-3,
power_law_index=1.16,
name='vel_walls')
vel_walls.data[CUBA.VARIABLE] = CUBA.VELOCITY
pres_walls = api.Neumann(foam, name='pres_walls')
pres_walls.data[CUBA.VARIABLE] = CUBA.PRESSURE
inlet = api.Boundary(name='inlet', condition=[vel_inlet, pres_inlet])
walls = api.Boundary(name='walls', condition=[vel_walls, pres_walls])
outlet = api.Boundary(name='outlet', condition=[vel_outlet, pres_outlet])
cuds.add([inlet, walls, outlet])
end = time.time()
print "Time spend in boundary settings: ", end-start
start = time.time()
sim = Simulation(cuds, 'OpenFOAM', engine_interface=EngineInterface.Internal)
end = time.time()
print "Time spend in Simulation initialization: ", end-start
start = time.time()
sim.run()
end = time.time()
print "Time spend in run: ", end-start
start = time.time()
mesh_in_engine = cuds.get_by_name(mesh_name)
print "Working directory ", mesh_in_engine.path
average_pressure = 0.0
for cell in mesh_in_engine.get_boundary_cells(inlet.name):
average_pressure += cell.data[CUBA.PRESSURE]
average_pressure /= len(mesh_in_engine._boundaries[inlet.name])
end = time.time()
print "Time spend in post processing: ", end-start
print "Average pressure on inlet: ", average_pressure
@mayavi2.standalone
def view():
    """Open a Mayavi scene showing the surface of the engine's mesh."""
    from mayavi.modules.surface import Surface
    from simphony_mayavi.sources.api import CUDSSource

    mayavi.new_scene()  # noqa
    src = CUDSSource(cuds=mesh_in_engine)
    mayavi.add_source(src)  # noqa
    s = Surface()
    mayavi.add_module(s)  # noqa


if __name__ == '__main__':
    view()
|
simphony/simphony-openfoam
|
foam_internalwrapper/examples/aqueous_foam_pipe_flow_steady_state.py
|
Python
|
gpl-2.0
| 4,455
|
[
"Mayavi"
] |
f4f5c42afa6e36ff922ead784f7f1a5f9675329b995bdc0db21851b8af7a9dc3
|
import simtk.openmm.openmm as openmm
import wcadimer
# Build the WCA dimer test system and a matching Langevin integrator, then
# serialize both to XML so a separate driver can deserialize them later.
system, _ = wcadimer.WCADimer()
temperature = wcadimer.temperature
collision_rate = wcadimer.collision_rate
# NOTE(review): twice the "stable" timestep — presumably deliberate for this
# benchmark; confirm against the wcadimer module.
timestep = 2.0 * wcadimer.stable_timestep
integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)

# Serialize openmm objects
with open('system.xml', 'w') as f:
    f.write(openmm.XmlSerializer.serialize(system))
with open('integrator.xml', 'w') as f:
    f.write(openmm.XmlSerializer.serialize(integrator))
|
nrego/westpa
|
lib/examples/wca-dimer_openmm/we_exec/build_system.py
|
Python
|
gpl-3.0
| 495
|
[
"OpenMM"
] |
bf98ba8b60f06f3cc09368ff8f2386bb2319acb8c7c89ce07af41fd86e2f5f8b
|
import string
import sys
from xml.sax import saxexts
from xml.sax import saxlib
from UserStack import UserStack
class XElement:
    """Generic node of the parsed XML tree (Python 2 code).

    Holds the element name, its SAX attributes object, child XElements
    and accumulated character data.  Subclasses override initialize()
    and finalize() to hook element start/end events.
    """

    def __init__(self, name=None, attrs=None):
        self.name = name
        self.attrs = attrs
        self.children = []   # child XElement nodes, in document order
        self.text = ''       # accumulated character data

    def initialize(self):
        # Hook: called right after construction (element start).
        pass

    def finalize(self, parent):
        # Hook: called when the element closes; parent is the enclosing node.
        pass

    def linkTo(self, element):
        # Only XElement instances become children; anything else is ignored.
        if isinstance(element,XElement):
            self.children.append(element)

    def cdata(self, text):
        # Append character data to this node.
        self.text = self.text + text

    def printBFS(self, depth=0):
        # Print this node and, indented, its whole subtree.
        print " " * depth, str(self)
        for node in self.children:
            node.printBFS(depth+1)

    def visit(self, depth):
        print " " * depth, str(self)

    def walkBFS(self, depth=0):
        self.doWalkBFS(depth)

    def doWalkBFS(self, depth=0):
        # Depth-first traversal calling visit() on every node.
        self.visit(depth)
        for node in self.children:
            node.doWalkBFS(depth+1)

    def getName(self):
        return self.name

    def getText(self):
        return self.text

    def getChildren(self, klass=None):
        # klass may be None (copy of all children), a class-name string,
        # or a class object (isinstance filter).
        if not klass:
            return self.children[:]
        children = []
        if type(klass) == type(''):
            for node in self.children:
                if node.__class__.__name__ == klass:
                    children.append(node)
        else:
            for node in self.children:
                if isinstance(node, klass):
                    children.append(node)
        return children

    def __str__(self):
        # NOTE(review): the local name `repr` shadows the builtin repr().
        if not self.name:
            attrRep = ''
            repr = '<>'
        else:
            attrRep = ''
            for n in range(0,len(self.attrs)):
                attrRep = attrRep + " " + self.attrs.getName(n) + "=..."
            repr = '<' + self.name + attrRep + '>'
        if len(self.children) > 0:
            repr = repr + ': ' + `len(self.children)` + ' children ' + '<'
            sep = ''
            for node in self.children:
                repr = repr + sep + node.__class__.__name__
                sep = ', '
            repr = repr + '>'
        repr = repr + ' text <' + self.text + '>'
        return self.__class__.__name__ + ': ' + repr
class XTreeHandler(saxlib.DocumentHandler):
    """SAX DocumentHandler that builds an XElement tree (Python 2 code).

    Keyword options (flag is set when present and in ['true','yes',1,'1']):
      IgnoreWhiteSpace  -- drop ignorable whitespace events
      RemoveWhiteSpace  -- collapse runs of whitespace in character data
      CreateElementMap  -- record element name -> [created elements]
    """

    def __init__(self, **options):
        self.elems=0    # element count
        self.attrs=0    # attribute count
        self.pis=0      # processing-instruction count
        self.contextStack = UserStack([])
        self.contextStack.push("x")   # sentinel below the document root
        self.document = XDocumentRoot()
        self.contextStack.push(self.document)
        self.elementMap = {}
        self.ignoreWhiteSpace = options.has_key('IgnoreWhiteSpace') \
            and options['IgnoreWhiteSpace'] in ['true','yes',1,'1']
        self.removeWhiteSpace = options.has_key('RemoveWhiteSpace') \
            and options['RemoveWhiteSpace'] in ['true','yes',1,'1']
        self.createElementMap = options.has_key('CreateElementMap') \
            and options['CreateElementMap'] in ['true','yes',1,'1']

    def getElementMap(self):
        return self.elementMap

    def startElement(self, name, attrs):
        # Instantiate a class named after the element tag (e.g. Topic);
        # fall back to a plain XElement when no such class exists.
        # NOTE(review): exec on element names is unsafe for untrusted XML.
        stmt = 'element = ' + name + '(name,attrs)'
        try:
            exec stmt
        except:
            element = XElement(name,attrs)
        if self.createElementMap:
            if not self.elementMap.has_key(name):
                self.elementMap[name] = []
            self.elementMap[name].append(element)
        element.initialize()
        # link into the tree and make it the current context
        self.contextStack.top().linkTo(element)
        self.contextStack.push(element)
        self.elems=self.elems+1
        self.attrs=self.attrs+len(attrs)

    def endElement(self, name):
        popElement = self.contextStack.pop()
        popElement.finalize(self.contextStack.top())

    def characters(self, ch, start, length):
        tos = self.contextStack.top()
        if self.removeWhiteSpace:
            # collapse whitespace runs to single spaces
            text = ch[start:start+length]
            splitText = string.split(text)
            if len(tos.text) > 0:
                pad = ' '
            else:
                pad = ''
            for item in splitText:
                tos.cdata(pad + item)
                pad = ' '
        else:
            tos.cdata(ch[start:start+length])

    def ignorableWhitespace(self, ch, start, length):
        print "ignorable ws encountered"
        if not self.ignoreWhiteSpace:
            self.contextStack.top().cdata(ch[start:start+length])

    def getDocument(self):
        return self.document

    def processingInstruction(self,target,data):
        self.pis=self.pis+1
class XDocumentRoot(XElement):
    """Root sentinel of the tree; parent of the top-level document element."""

    def __init__(self):
        XElement.__init__(self)

    def endElement(self):
        print "Document contains %d elements." % (len(self.children))
class Abstract(XElement):
    """Handler for <Abstract> elements: free-text summary of a topic."""
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)


class Outline(XElement):
    """Handler for <Outline> elements: accumulates text of nested <Item>s."""
    def __init__(self,name,attrs):
        self.allText = ''
        XElement.__init__(self,name,attrs)

    def getAllText(self):
        return self.allText

    def addText(self, text):
        # space-separated accumulation of child item text
        if len(self.allText):
            self.allText = self.allText + ' ' + text
        else:
            self.allText = text


class Item(XElement):
    """Handler for <Item> elements: pushes its text up to the parent node."""
    def __init__(self,name,attrs):
        self.allText = ''
        XElement.__init__(self,name,attrs)

    def addText(self, text):
        if len(self.allText):
            self.allText = self.allText + ' ' + text
        else:
            self.allText = text

    def finalize(self, parent):
        # on close, hand our character data to the enclosing Outline/Item
        parent.addText(self.text)


class Content(XElement):
    """Handler for <Content> elements: container of <Topic>s."""
    def __init__(self,name,attrs):
        print "creating Content XElement instance"
        XElement.__init__(self,name,attrs)


class Requires(XElement):
    """Handler for <Requires> elements: whitespace-separated prerequisites."""
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)


class Uses(XElement):
    """Handler for <Uses> elements: words used by a topic."""
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)


class Defines(XElement):
    """Handler for <Defines> elements (not consumed by Topic.finalize)."""
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)


class Keywords(XElement):
    """Handler for <Keywords> elements: words this topic defines."""
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)
class Topic(XElement):
    """Handler for <Topic> elements.

    On finalize, digests child elements into plain attributes:
    id, abstract (str), requires/uses/keywords (word lists) and
    words (all outline text, split into a list).
    """
    def __init__(self,name,attrs):
        XElement.__init__(self,name,attrs)

    def finalize(self, parent):
        self.id = self.attrs.get('id','none specified')
        abstract = self.getChildren(Abstract)
        requires = self.getChildren(Requires)
        outline = self.getChildren(Outline)
        uses = self.getChildren(Uses)
        keywords = self.getChildren(Keywords)
        if len(abstract):
            self.abstract = abstract[0].getText()
        else: self.abstract = ''
        if len(requires):
            self.requires = string.split(requires[0].getText())
        else: self.requires = []
        if len(outline):
            self.words = string.split(outline[0].getAllText())
        # NOTE(review): default is '' (a string) here but a list otherwise
        else: self.words = ''
        if len(uses):
            self.uses = string.split(uses[0].getText())
        else: self.uses = []
        if len(keywords):
            self.keywords = string.split(keywords[0].getText())
        else: self.keywords = []

    def __str__(self):
        return 'Topic %s\nAbstract\n%s\nkeywords\n%s\nreq\n%s\nuses\n%s\nwords used\n%s\n' % (self.id, self.abstract, `self.keywords`, `self.requires`, `self.uses`, `self.words`)
def analyzeTopics(topics):
kwMap = {}
wordMap = {}
usedMap = {}
undefMap = {}
for topic in topics:
print "* Analyzing Topic %s" % topic.id
for word in topic.requires:
print "\t- %s:" % word
if kwMap.has_key(word):
print "\t\t+ defined in <keyword> section for these topics:"
for whereSeen in kwMap[word]:
if whereSeen != topic: print "\t\t\t%s" % whereSeen.id
else:
if wordMap.has_key(word):
print "\t\t= not defined but found in <outline> in earlier topic(s):"
for whereSeen in wordMap[word]:
if whereSeen != topic: print "\t\t\t%s" % whereSeen.id
elif usedMap.has_key(word):
print "\t\t- not defined but found used (section <used>) in earlier topic(s):"
for whereSeen in usedMap[word]:
if whereSeen != topic: print "\t\t\t%s" % whereSeen.id
elif undefMap.has_key(word):
print "\t\t- already reported undefined for earlier topic(s):"
for whereSeen in undefMap[word]:
if whereSeen != topic: print "\t\t\t%s" % whereSeen.id
else:
print "\t\t- not found in any preceding topic (in any section)"
if not undefMap.has_key(word):
entry = undefMap[word] = []
else:
entry = undefMap[word]
entry.append(topic)
print
for word in topic.keywords:
if not kwMap.has_key(word):
entry = kwMap[word] = []
else:
entry = kwMap[word]
entry.append(topic)
for word in topic.words:
if not wordMap.has_key(word):
entry = wordMap[word] = []
else:
entry = wordMap[word]
entry.append(topic)
for word in topic.uses:
if not entry:
entry = usedMap[word] = []
else:
entry = usedMap[word]
entry.append(topic)
print "* All Defined Topics"
for kw in kwMap.keys():
print "\t",kw
print "* All Undefined Topics"
for kw in undefMap.keys():
print "\t",kw
print "* All Used Topics"
for kw in usedMap.keys():
print "\t",kw
print "* Checking for undefined Topics that got defined later:"
print " each line output is (missing word, where identified, where defined)"
for word in kwMap.keys():
if undefMap.has_key(word):
for topic_i in undefMap[word]:
for topic_j in kwMap[word]:
print "\t(%s, %s, %s)" % (word, topic_i.id, topic_j.id)
del(undefMap[word])
print "* New List of Undefined Topics"
for kw in undefMap.keys():
print "\t",kw
def go():
    """Command-line entry point (Python 2 code).

    Parses the XML document named on argv[1] into an XElement tree,
    prints parse statistics, then runs the topic cross-reference analysis.
    """
    if len(sys.argv) < 2:
        print "Usage: python saxtree.py <document>"
        print
        print " <document>: file name of the document to parse"
        sys.exit(1)
    p = saxexts.make_parser()
    xth = XTreeHandler(IgnoreWhiteSpace='yes',RemoveWhiteSpace='yes',CreateElementMap='yes')
    p.setDocumentHandler(xth)
    Ok=None   # NOTE(review): unused
    try:
        p.parse(sys.argv[1])
        print "Parse complete:"
        print " Elements: %d" % xth.elems
        print " Attributes: %d" % xth.attrs
        print " Proc instrs: %d" % xth.pis
        print " elements: %s" % `xth.getElementMap().keys()`
        document = xth.getDocument()
        print " Document has %d children " % len(document.getChildren())
        content = document.getChildren('Content')
        topics = content[0].getChildren('Topic')
        print "%d content objects" % len(content)
        print "%d topic objects" % len(topics)
        # same lookup again, by class object instead of class-name string
        topics = content[0].getChildren(Topic)
        print "%d topic objects" % len(topics)
        analyzeTopics(topics)
    except IOError,e:
        print "\nERROR: "+sys.argv[1]+": "+str(e)
    except saxlib.SAXException,e:
        print "\nERROR: "+str(e)


# Main Program
if __name__ == '__main__':
    go()
|
aarestad/gradschool-stuff
|
xml-class/python-xml/OutlineTool/outlinetool.py
|
Python
|
gpl-2.0
| 11,442
|
[
"VisIt"
] |
d44684a4cf1c20483e5ea78e87fbc8c2424b4c3263de5c2bc3a13934351df62c
|
#============================================================================
#
# Copyright (c) Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#============================================================================
import imp, sys, os, unittest
from __main__ import vtk, qt, ctk, slicer
import Widgets
import json
class Workflow:
    """Slicer scripted-module descriptor for the Workflow module.

    Fills in the module metadata (title, categories, contributors, help
    and acknowledgement text, icon) on the supplied module parent object.
    """
    def __init__(self, parent):
        parent.title = "Workflow"
        parent.categories = ["", "TubeTK"]
        parent.dependencies = []
        parent.contributors = ["Julien Finet (Kitware), Johan Andruejol (Kitware)"]
        parent.helpText = """
Step by step workflow to show the vessels of an image. See the <a href=\"http://public.kitware.com/Wiki/TubeTK\">TubeTK wiki</a> for more information.
"""
        parent.acknowledgementText = """
This work is supported by the National Institute of Health
"""
        self.parent = parent
        # module icon shipped with the Widgets package
        parent.icon = qt.QIcon("%s/VesselViewLogo.svg" % Widgets.ICON_DIR)
#
# Workflow widget
class WorkflowWidget:
    """GUI widget driving the VesselView workflow (Python 2 / Slicer code).

    Builds a linear ctkWorkflow from the steps defined in ``Widgets``
    (initial, load, resample, register, segmentation, vessel enhancement,
    vessel extraction), plus a settings panel (opacity ratio, workflow
    level, reload button, CLI progress bar) and per-input slice layouts.
    """

    def __init__(self, parent = None):
        self.moduleName = 'Workflow'
        self._layouts = []
        self.maximumNumberOfInput = 3
        self._CurrentViewID = 1
        # per-input record of the volume node IDs currently displayed
        self._CurrentViewNodes = {}
        for i in range(1, self.maximumNumberOfInput + 1):
            subDictionnary = {
                'Background' : '',
                'Foreground' : '',
                'Label' : '',
                }
            self._CurrentViewNodes['Input%i' %i] = subDictionnary
        if not parent:
            # standalone mode: create our own MRML-aware container
            self.parent = slicer.qMRMLWidget()
            self.parent.setLayout(qt.QVBoxLayout())
            self.parent.setMRMLScene(slicer.mrmlScene)
        else:
            self.parent = parent
        self.layout = self.parent.layout()
        if not parent:
            self.setup()
            self.parent.show()
        self._setupLayouts()

    def setup(self):
        """Create the workflow, its steps and the settings panel."""
        self.level = 0
        self.workflow = ctk.ctkWorkflow(self.parent)
        #workflowWidget = ctk.ctkWorkflowStackedWidget()
        workflowWidget = ctk.ctkWorkflowWidget()
        workflowWidget.setWorkflow( self.workflow )
        self.workflowWidget = workflowWidget
        self.Settings = None
        workflowWidget.buttonBoxWidget().hideInvalidButtons = True
        workflowWidget.buttonBoxWidget().hideGoToButtons = True
        workflowWidget.buttonBoxWidget().backButtonFormat = '[<-]{back:#}"/"{!#}") "{back:name}(back:description)'
        workflowWidget.buttonBoxWidget().nextButtonFormat = '{next:#}"/"{!#}") "{next:name}(next:description)[->]'
        workflowWidget.workflowGroupBox().titleFormat = '[current:icon]{#}"/"{!#}") "{current:name}'
        workflowWidget.workflowGroupBox().hideWidgetsOfNonCurrentSteps = True
        #Creating each step of the workflow
        self.steps = [Widgets.InitialStep(),
                      Widgets.LoadDataStep(),
                      Widgets.ResampleStep(),
                      Widgets.RegisterStep(),
                      Widgets.SegmentationStep(),
                      Widgets.VesselEnhancementStep(),
                      Widgets.VesselExtractionStep(),
                      ]
        i = 0
        for step in self.steps:
            # \todo: b) steps should be able to access the workflow widget automatically
            step.Workflow = self
            # \todo: f) have an option to setup all the gui at startup
            step.createUserInterface()
            #Connecting the created steps of the workflow (linear chain)
            if i != 0:
                self.workflow.addTransition(self.steps[i-1], self.steps[i])
            i += 1
        self.layout.addWidget(workflowWidget)
        # Settings
        self.Settings = self.loadUi('WorkflowSettingsPanel.ui')
        # Display settings
        opacitySlider = self.findWidget(self.Settings, 'OpacityRatioDoubleSlider')
        opacitySlider.connect('valueChanged(double)', self.setOpacityRatio)
        self.setOpacityRatio(opacitySlider.value)
        # Hide and disable until step 1 is validated
        self.setDisplaySettingsVisible(False)
        self.setDisplaySettingsEnabled(False)
        # Advanced settings
        levelComboBox = self.findWidget(self.Settings, 'WorkflowLevelComboBox')
        levelComboBox.connect('currentIndexChanged(int)', self.setWorkflowLevel)
        self.setWorkflowLevel(levelComboBox.currentIndex)
        self.reloadButton = self.findWidget(self.Settings, 'ReloadPushButton')
        self.reloadButton.connect('clicked()', self.reloadModule)
        # Add CLI progress bar
        self.CLIProgressBar = slicer.qSlicerCLIProgressBar()
        self.CLIProgressBar.setStatusVisibility(self.CLIProgressBar.VisibleAfterCompletion)
        self.CLIProgressBar.setProgressVisibility(self.CLIProgressBar.HiddenWhenIdle)
        self.Settings.layout().insertWidget(1, self.CLIProgressBar) # insert after spacer
        # Insert settings before workflow's buttons
        collapsibleGroupBox = self.findWidget(self.workflowWidget.workflowGroupBox(), 'CollapsibleButton')
        collapsibleGroupBox.layout().addWidget(self.Settings)
        # Init naming and jsons.
        self.step('Initial').onPresetSelected()
        # Starting and showing the module in layout
        self.workflow.start()

    def step(self, stepid):
        # Return the step with the given id (None if no match).
        for s in self.steps:
            if s.stepid == stepid:
                return s

    def reloadModule(self,moduleName=None):
        """Generic reload method for any scripted module.
        ModuleWizard will subsitute correct default moduleName.
        """
        import imp, sys, os, slicer, qt
        if moduleName == None:
            moduleName = self.moduleName
        widgetName = moduleName + "Widget"
        # reload the source code
        # - set source file path
        # - load the module to the global space
        filePath = eval('slicer.modules.%s.path' % moduleName.lower())
        p = os.path.dirname(filePath)
        if not sys.path.__contains__(p):
            sys.path.insert(0,p)
        fp = open(filePath, "r")
        globals()[moduleName] = imp.load_module(
            moduleName, fp, filePath, ('.py', 'r', imp.PY_SOURCE))
        fp.close()
        # rebuild the widget
        # - find and hide the existing widget
        # - create a new widget in the existing parent
        parent = slicer.util.findChildren(name='%s Reload' % moduleName)[0].parent()
        for child in parent.children():
            try:
                child.hide()
            except AttributeError:
                pass
        self.layout.removeWidget(self.workflowWidget)
        self.workflowWidget.deleteLater()
        self.workflowWidget = None
        # Remove spacer items
        item = parent.layout().itemAt(0)
        while item:
            parent.layout().removeItem(item)
            item = parent.layout().itemAt(0)
        # create new widget inside existing parent
        globals()[widgetName.lower()] = eval(
            'globals()["%s"].%s(parent)' % (moduleName, widgetName))
        globals()[widgetName.lower()].setup()

    def loadUi(self, uiFileName):
        # Load a .ui file from the module's Widgets/Resources/UI directory.
        loader = qt.QUiLoader()
        moduleName = 'Workflow'
        scriptedModulesPath = eval('slicer.modules.%s.path' % moduleName.lower())
        scriptedModulesPath = os.path.dirname(scriptedModulesPath)
        path = os.path.join(scriptedModulesPath, 'Widgets', 'Resources', 'UI', uiFileName)
        qfile = qt.QFile(path)
        qfile.open(qt.QFile.ReadOnly)
        widget = loader.load(qfile)
        widget.setAutoFillBackground(False)
        widget.setPalette(slicer.app.palette())
        return widget

    def findWidget(self, widget, objectName):
        # Depth-first search of the Qt widget tree by objectName.
        if widget.objectName == objectName:
            return widget
        else:
            children = []   # NOTE(review): unused
            for w in widget.children():
                resulting_widget = self.findWidget(w, objectName)
                if resulting_widget:
                    return resulting_widget
            return None

    def setWorkflowLevel(self, level):
        # Propagate the basic/advanced level to every step.
        self.level = level
        for step in self.steps:
            step.setWorkflowLevel(level)

    def setOpacityRatio(self, ratio):
        # 0 == all background <-> 1 == all foreground
        sliceCompositeNodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLSliceCompositeNode")
        # drop the reference added by GetNodesByClass so the collection
        # does not leak
        sliceCompositeNodes.SetReferenceCount(sliceCompositeNodes.GetReferenceCount()-1)
        for i in range(0, sliceCompositeNodes.GetNumberOfItems()):
            sliceCompositeNode = sliceCompositeNodes.GetItemAsObject(i)
            sliceCompositeNode.SetForegroundOpacity(ratio)
            sliceCompositeNode.SetLabelOpacity(ratio)

    def getProgressBar( self ):
        return self.CLIProgressBar

    def enter(self):
        # Called by Slicer when the module becomes the active one.
        currentStep = self.step(self.workflow.currentStep().id())
        currentStep.updateHelp()
        self.updateLayout(self._CurrentViewID)
        for s in self.steps:
            s.updateFromCLIParameters()

    def getJsonParameters( self, module ):
        # Collect default CLI parameters for `module` from its preset JSON
        # file; returns {} when no preset or the file is unreadable.
        presets = self.step('Initial').getPresets()
        parameters = {}
        try:
            jsonFilePath = presets[module.name]
        except KeyError:
            return parameters
        jsonData = open(jsonFilePath)
        try:
            data = json.load(jsonData)
        except ValueError:
            print 'Could not read JSON file %s. Make sure the file is valid' % jsonFilePath
            return parameters
        # For all the parameters not already there, add the json parameters
        # Try to be as robust as possible
        i = 1
        while True:
            try:
                jsonParametersList = data['ParameterGroups'][i]['Parameters']
            except IndexError:
                break
            for p in jsonParametersList:
                try:
                    parameters[p['Name']] = p['Value']
                except KeyError:
                    print 'Could not find value for %s. Passing.' % p['Name']
                    continue
            i = i + 1
        return parameters

    def updateConfiguration(self):
        config = self.step('Initial').getConfigurationData()
        if not config:
            return
        for step in self.steps:
            step.updateConfiguration(config)

    def setDisplaySettingsVisible( self, visible ):
        if not self.Settings:
            return
        displayGroupbox = self.findWidget(self.Settings, 'DisplaySettingsCollapsibleGroupBox')
        displayGroupbox.setVisible(visible)

    def setDisplaySettingsEnabled( self, enabled ):
        if not self.Settings:
            return
        opacitySlider = self.findWidget(self.Settings, 'OpacityRatioDoubleSlider')
        opacitySlider.setEnabled(enabled)

    def setViews( self, nodes ):
        # nodes: e.g. {'Input1': {'Background': id, 'Foreground': id, 'Label': id}}
        if not nodes:
            return
        showDisplaySettings = False
        for i in range(1, self.maximumNumberOfInput + 1):
            input = 'Input%i' %i
            id = 'vtkMRMLSliceCompositeNode%s' %input
            sliceCompositeNode = slicer.mrmlScene.GetNodeByID(id)
            if not sliceCompositeNode:
                continue
            sliceCompositeNode.SetDoPropagateVolumeSelection(True)
            numberOfVolumeTypeVisible = 0
            for volumeType in ['Background', 'Foreground', 'Label']:
                try:
                    self._CurrentViewNodes[input][volumeType] = nodes[input][volumeType]
                except:
                    # keep the previously shown node when `nodes` has no entry
                    pass
                id = self._CurrentViewNodes[input][volumeType]
                getattr(sliceCompositeNode, 'Set%sVolumeID' % volumeType)(id)
                if id:
                    numberOfVolumeTypeVisible = numberOfVolumeTypeVisible + 1
            # show opacity controls as soon as any view blends >1 volume
            showDisplaySettings = showDisplaySettings or numberOfVolumeTypeVisible > 1
            sliceCompositeNode.SetDoPropagateVolumeSelection(False)
        slicer.app.applicationLogic().FitSliceToAll()
        self.setDisplaySettingsVisible(showDisplaySettings)
        self.setDisplaySettingsEnabled(showDisplaySettings)

    def onNumberOfInputsChanged( self, numberOfInputs ):
        '''This function calls the 'onNumberOfInputsChanged' on all the steps. This
        should only be called on the workflow by the LoadData module.'''
        for step in self.steps:
            # Make sure that the steps have widgets already
            if hasattr(step, 'widget'):
                step.onNumberOfInputsChanged(numberOfInputs)

    def updateLayout( self, numberOfViews ):
        # Switch to the user layout with `numberOfViews` slice views.
        if numberOfViews not in range(1, len(self._layouts) + 1):
            print 'This should not happen, the number of inputs should be in [1, %i[' %(len(self._layouts) + 1)
            return
        layoutNode = slicer.mrmlScene.GetNthNodeByClass(0, "vtkMRMLLayoutNode")
        if layoutNode is None:
            return
        newLayout = slicer.vtkMRMLLayoutNode().SlicerLayoutUserView + numberOfViews
        self._CurrentViewID = numberOfViews
        layoutNode.SetViewArrangement(newLayout)
        slicer.app.applicationLogic().FitSliceToAll()

    def _setupLayouts( self ):
        # Register the custom per-input layouts with the layout node
        # (first call only).
        layoutNode = slicer.mrmlScene.GetNthNodeByClass(0, "vtkMRMLLayoutNode")
        if layoutNode is None:
            return
        if not self._layouts:
            tag = 'Input'
            for i in range(1, self.maximumNumberOfInput + 1):
                self._layouts.append(self._inputLayout(tag, i))
            # Add the special 1 slice-1 3D view layout
            self._layouts.append(self._inputLayout(tag, 1, True))
            oldView = layoutNode.GetViewArrangement()   # NOTE(review): unused
            # The slice composite node are created when the layout is used.
            # To be able to manipulate them correctly, we'll select the layouts
            # after their creation
            for i, layout in enumerate(self._layouts, start=1):
                view = slicer.vtkMRMLLayoutNode().SlicerLayoutUserView + i
                layoutNode.AddLayoutDescription(view, layout)
                layoutNode.SetViewArrangement(view)
                # Prevent the layout slice composite node to update when a node is added
                sliceCompositeNode = slicer.mrmlScene.GetNodeByID('vtkMRMLSliceCompositeNode' + tag + str(i))
                if sliceCompositeNode:
                    sliceCompositeNode.SetDoPropagateVolumeSelection(False)

    def _inputLayout( self, tag, numberOfInputs, with3D = False ):
        # Build an XML layout description: one axial slice view per input,
        # optionally followed by a 3D view.
        sliceItems = ''
        for i in range(1, numberOfInputs + 1):
            sliceItems = sliceItems + self._sliceItemLayout(tag + str(i), 'Axial', '#a9a9a9')
        if (with3D):
            sliceItems = sliceItems + self._3DViewItemLayout(tag)
        return (
            "<layout type=\"vertical\" split=\"true\" >"
            "<item>"
            "<layout type=\"horizontal\">"
            "%s"
            "</layout>"
            " </item>"
            "</layout>"
            ) % sliceItems

    def _3DViewItemLayout( self, tag ):
        return (
            "<item>"
            "<view class=\"vtkMRMLViewNode\" singletontag=\"%s3D\">"
            "<property name=\"viewlabel\" action=\"default\">%s</property>"
            "</view>"
            "</item>"
            ) % (tag, tag)

    def _sliceItemLayout( self, tag, axe, color ):
        return (
            "<item>"
            "<view class=\"vtkMRMLSliceNode\" singletontag=\"%s\">"
            "<property name=\"orientation\" action=\"default\">%s</property>"
            "<property name=\"viewlabel\" action=\"default\">%s</property>"
            "<property name=\"viewcolor\" action=\"default\">%s</property>"
            "</view>"
            "</item>"
            ) % (tag, axe, tag, color)
|
matthieuheitz/VesselView
|
Modules/Scripted/Workflow/Workflow.py
|
Python
|
apache-2.0
| 14,629
|
[
"VTK"
] |
5ee5be125a272362b45539eb0089d2902278d63f7e15166594b34408ad17f683
|
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
"""
This example shows how to integrate MDAnalysis in ESPResSo
"""
import espressomd
from espressomd import MDA_ESP
import numpy as np
import MDAnalysis as mda
# set up a minimal sample system
system = espressomd.System(box_l=[10.0, 10.0, 10.0])
system.set_random_state_PRNG()
#system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
np.random.seed(seed=system.seed)
system.time_step = 0.001
system.cell_system.skin = 0.1
# ten particles at random positions with random velocities
for i in range(10):
    system.part.add(id=i, pos=np.random.random(3) * system.box_l,
                    v=np.random.random(3))
# charge the second half of the particles and give them a distinct type
for i in range(5, 10):
    system.part[i].q = 1.0
    system.part[i].type = 1
#
# ========================================================="
#  Example #1: prepare the stream and access various      "
#              quantities from MDAnalysis                  "
# ========================================================="
#
eos = MDA_ESP.Stream(system)
u = mda.Universe(eos.topology, eos.trajectory)
# let's have a look at the universe
print(u)
# Inspect atoms
print(u.atoms)
print("Positions:")
print(u.atoms.positions)
print("Velocities:")
print(u.atoms.velocities)
print("Forces:")
print(u.atoms.forces)
print("Names:")
print(u.atoms.names)
print("IDs:")
print(u.atoms.ids)
print("Types:")
print(u.atoms.types)
print("Charges:")
print(u.atoms.charges)
#
# ========================================================="
#  Example #2: Write the configuration on a PDB file      "
# ========================================================="
#
u.atoms.write("system.pdb")
print("===> The initial configuration has been written on system.pdb ")
#
# ========================================================="
#  Example #3: Calculate a radial distribution function   "
# ========================================================="
#
from MDAnalysis.analysis.rdf import InterRDF

charged = u.select_atoms("prop charge > 0")
rdf = InterRDF(charged, charged, nbins=7, range=(0, 10))
# This runs so far only over the single frame we have loaded.
# Multiframe averaging must be done by hand
rdf.run()
#
# ========================================================="
#  Example #4: Saving frames to a GROMACS's TRR trajectory
# ========================================================="
#
from MDAnalysis.coordinates.TRR import TRRWriter

W = TRRWriter("traj.trr", n_atoms=len(system.part))
# NOTE(review): the writer is never closed explicitly; consider W.close()
# after the loop instead of relying on interpreter shutdown.
for i in range(100):
    # integrate
    system.integrator.run(1)
    # replace last frame
    # TODO loading new frames will be automated in future versions
    u.load_new(eos.trajectory)
    # append it to the .trr trajectory
    W.write_next_timestep(u.trajectory.ts)
print("===> The trajectory has been saved in the traj.trr file")
|
mkuron/espresso
|
samples/MDAnalysisIntegration.py
|
Python
|
gpl-3.0
| 3,536
|
[
"ESPResSo",
"Gromacs",
"MDAnalysis"
] |
dd189e72ce147148dbbdfcb7f455e157c2845bf25bd3e7a740f1ffc796e8e8d4
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import moldesign as mdt
from moldesign import units as u
class ForcefieldParams(object):
    """ Stores the forcefield parameters for a specific molecule.

    This is a shim around a parmed object.

    Args:
        mol (mdt.Molecule): Molecule this force field is for
        ffobj (parmed.Structure OR mdt.AmberParms OR Forcefield): forcefield specification

    Attributes:
        parmed_obj (parmed.Structure): parmed object containing the forcefield parameters
    """
    def __init__(self, mol, ffobj):
        import parmed

        self.mol = mol
        if isinstance(ffobj, parmed.Structure):
            # shallow-copy so later edits don't mutate the caller's object
            self.parmed_obj = copy.copy(ffobj)
            self.sourcedata = ffobj
        elif hasattr(ffobj, 'to_parmed'):
            self.sourcedata = ffobj
            self.parmed_obj = ffobj.to_parmed()
        else:
            raise ValueError('Unrecognized force field class "%s"' % ffobj.__class__.__name__)

    def copy_to(self, mol):
        """Attach a copy of these parameters to another molecule; returns mol.ff."""
        mol.ff = self.__class__(mol, copy.copy(self.parmed_obj))
        return mol.ff

    def __setstate__(self, state):
        """ Workaround for https://github.com/ParmEd/ParmEd/issues/874

        This function can be removed once parmed 0.7.4 is released - AMV 5.19.17
        """
        self.__dict__.update(state)
        self.parmed_obj.initialize_topology()

    def get_atom_terms(self, atom):
        """Return the per-atom (charge / Lennard-Jones) terms for ``atom``."""
        return AtomTerms(atom, self.parmed_obj.atoms[atom.index])

    def get_bond_term(self, bond_or_atom1, atom2=None):
        """Return the BondTerm for a Bond, or for a pair of bonded atoms.

        Raises:
            ValueError: if no matching parmed bond is found
        """
        if atom2 is None:
            bond = bond_or_atom1
        else:
            bond = mdt.Bond(bond_or_atom1, atom2)
        pmdatom = self.parmed_obj.atoms[bond.a1.index]
        # NOTE(review): `indices` is not sorted while `pmdindices` below is,
        # so the comparison (and the asserts) assume
        # bond.a1.index < bond.a2.index — confirm mdt.Bond normalizes order.
        indices = [bond.a1.index, bond.a2.index]
        for pmdbond in pmdatom.bonds:
            pmdindices = sorted((pmdbond.atom1.idx, pmdbond.atom2.idx))
            if pmdindices == indices:
                # sanity check: elements must agree with the MDT atoms
                assert self.parmed_obj.atoms[pmdindices[0]].element == bond.a1.atnum
                assert self.parmed_obj.atoms[pmdindices[1]].element == bond.a2.atnum
                return BondTerm(bond, pmdbond)
        else:
            # for-else: loop finished without finding a match
            raise ValueError("No ForceField term found for bond: %s" % bond)

    def get_term(self, *atoms):
        """Generic lookup: 2 atoms -> BondTerm, 3 -> AngleTerm, 4 -> DihedralTerm.

        NOTE(review): calling this with a single atom would use
        ``AtomTerms.pmdlist``, which is not defined — use get_atom_terms
        for per-atom parameters instead.
        """
        pmdterm = self._get_pmd_term(*atoms)
        return LEN_TO_TERM[len(atoms)](atoms, pmdterm)

    def _get_pmd_term(self, *atoms):
        # Search the first atom's term list (bonds/angles/dihedrals) for a
        # term over exactly these atoms.
        termlist = getattr(self.parmed_obj.atoms[atoms[0].index],
                           LEN_TO_TERM[len(atoms)].pmdlist)
        searchatoms = [self.parmed_obj.atoms[atom.index] for atom in atoms]
        for term in termlist:
            if term.same_atoms(searchatoms):
                return term
        else:
            # for-else: loop finished without finding a match
            raise ValueError("No ForceField term found with atoms: %s" % atoms)
class ParmedAtomAttribute(object):
    """Descriptor exposing a unit-wrapped view of a ParmEd atom attribute.

    Reading multiplies the raw ParmEd value by ``units``; writing stores
    the value converted back into ``units`` via its ``value_in`` method.
    """

    def __init__(self, attrname, units):
        self.attrname = attrname
        self.units = units

    @staticmethod
    def _get_pmd_obj(instance):
        # The wrapped ParmEd object lives on the owning proxy instance.
        return instance.pmdobj

    def __get__(self, instance, owner):
        raw = getattr(self._get_pmd_obj(instance), self.attrname)
        return raw * self.units

    def __set__(self, instance, value):
        converted = value.value_in(self.units)
        setattr(self._get_pmd_obj(instance), self.attrname, converted)
class ParmedTermAttribute(ParmedAtomAttribute):
    """Read-only descriptor for parameters stored on a ParmEd *type* record."""

    @staticmethod
    def _get_pmd_obj(instance):
        # Term parameters hang off the shared ``type`` record, not the term.
        return instance.pmdobj.type

    def __set__(self, *args):
        # These parameters are defined per *TYPE* and shared between terms;
        # writing would require creating singleton type objects first.
        raise NotImplementedError()
class ForceFieldTerm(object):
    """Lightweight proxy pairing an MDT object with its ParmEd counterpart."""

    def __init__(self, mdtobj, pmdobj):
        self.mdtobj = mdtobj
        self.pmdobj = pmdobj

    def __str__(self):
        # str() of a parmed type is bracketed; strip the surrounding brackets.
        params = str(self.pmdobj.type)[1:-1]
        return "%s for %s, ParmEd params %s" % (self.__class__.__name__,
                                                self.mdtobj,
                                                params)

    def __repr__(self):
        return '<%s>' % str(self)
class AtomTerms(ForceFieldTerm):
    """Per-atom nonbonded parameters (partial charge + Lennard-Jones)."""
    # NOTE: no `pmdlist` here — AtomTerms is reached through
    # ForcefieldParams.get_atom_terms, not the list-based term search.
    partial_charge = ParmedAtomAttribute('charge', u.q_e)
    ljsigma = ParmedAtomAttribute('sigma', u.angstrom)
    ljepsilon = ParmedAtomAttribute('epsilon', u.kcalpermol)


class BondTerm(ForceFieldTerm):
    """Bond-stretch term parameters (from the shared parmed bond type)."""
    pmdlist = 'bonds'   # attribute on a parmed atom listing its bond terms
    force_constant = ParmedTermAttribute('k', u.kcalpermol / u.angstrom)
    equilibrium_length = ParmedTermAttribute('req', u.angstrom)


class AngleTerm(ForceFieldTerm):
    """Angle-bend term parameters."""
    pmdlist = 'angles'
    force_constant = ParmedTermAttribute('k', u.kcalpermol / u.degrees)
    # NOTE(review): 'req'/angstrom looks copied from BondTerm; parmed angle
    # types store the equilibrium *angle* ('theteq', degrees) — confirm.
    equilibrium_length = ParmedTermAttribute('req', u.angstrom)


class DihedralTerm(ForceFieldTerm):
    """Torsion term parameters, including the 1-4 scaling factors."""
    pmdlist = 'dihedrals'
    force_constant = ParmedTermAttribute('phi_k', u.kcalpermol / u.degrees)
    periodicity = ParmedTermAttribute('per', 1)
    phase = ParmedTermAttribute('phase', u.degrees)
    coulomb_14_factor = ParmedTermAttribute('scee', 1.0)
    lj_14_factor = ParmedTermAttribute('scnb', 1.0)


# Maps the number of atoms in a term to the proxy class that wraps it.
LEN_TO_TERM = {1: AtomTerms, 2: BondTerm, 3: AngleTerm, 4: DihedralTerm}
class FFParameters(object):
    """ DEPRACATED: will be removed in favor of ParmEd interface

    Container for the force field parameters assigned to a specific system,
    modeled on AMBER/CHARMM-style force fields: bonded term lists plus
    per-atom partial charges and Lennard-Jones parameters, with lookup
    tables keyed by bond object or by atom tuple (in either direction).
    """
    # TODO: this needs to describe attenuation for things close together
    # TODO: deal with nonbonded exceptions

    def __init__(self, bonds, angles, dihedrals, partial_charges, lennard_jones):
        self.bonds = bonds
        self.angles = angles
        self.dihedrals = dihedrals
        self.partial_charges = partial_charges  # maps atoms to their partial charges
        self.lennard_jones = lennard_jones  # maps atoms to LJ terms

        # lookup tables
        self.bond_term = {}
        for bterm in self.bonds:
            self.bond_term[bterm.bond] = bterm

        # angles are searchable by their atom tuple in both directions
        self.angle_term = {}
        for aterm in self.angles:
            self.angle_term[tuple(aterm.atoms)] = aterm
        for aterm in self.angles:
            self.angle_term[tuple(reversed(aterm.atoms))] = aterm

        # several dihedral terms can share one atom tuple, so store lists
        self.dihedral_term = {}
        for dterm in self.dihedrals:
            forward = tuple(dterm.atoms)
            backward = tuple(reversed(dterm.atoms))
            self.dihedral_term.setdefault(forward, []).append(dterm)
            self.dihedral_term.setdefault(backward, []).append(dterm)
|
Autodesk/molecular-design-toolkit
|
moldesign/forcefields/ffparams.py
|
Python
|
apache-2.0
| 7,010
|
[
"Amber",
"CHARMM"
] |
a4193fbde0dff1789ea1fc3d8dd2328eafc0e63cd7595abcbfd5d2e4d336d683
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RLumi(RPackage):
    """BeadArray Specific Methods for Illumina Methylation and Expression
    Microarrays
    The lumi package provides an integrated solution for the Illumina
    microarray data analysis. It includes functions of Illumina BeadStudio
    (GenomeStudio) data input, quality control, BeadArray-specific variance
    stabilization, normalization and gene annotation at the probe level. It
    also includes the functions of processing Illumina methylation microarrays,
    especially Illumina Infinium methylation microarrays."""

    homepage = "https://bioconductor.org/packages/release/bioc/html/lumi.html"
    git = "https://git.bioconductor.org/packages/lumi"

    # Each version is pinned to the corresponding Bioconductor release commit.
    version('2.42.0', commit='a643b3ba46fee951b8566ddd8216af7e6c92f6f6')
    version('2.38.0', commit='321d480d44ce9a0c02ce5af1bddc1f549abdea59')

    # R dependencies with minimum versions taken from the package DESCRIPTION.
    depends_on('r@2.10:', type=('build', 'run'))
    depends_on('r-biobase@2.5.5:', type=('build', 'run'))
    depends_on('r-affy@1.23.4:', type=('build', 'run'))
    depends_on('r-methylumi@2.3.2:', type=('build', 'run'))
    depends_on('r-genomicfeatures', type=('build', 'run'))
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-annotate', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-mgcv@1.4-0:', type=('build', 'run'))
    depends_on('r-nleqslv', type=('build', 'run'))
    depends_on('r-kernsmooth', type=('build', 'run'))
    depends_on('r-preprocesscore', type=('build', 'run'))
    depends_on('r-rsqlite', type=('build', 'run'))
    depends_on('r-dbi', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-mass', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-lumi/package.py
|
Python
|
lgpl-2.1
| 1,923
|
[
"Bioconductor"
] |
facdf3e4f2822887d0463317f855c58a083277e747f71af31b74f0ee07d792bc
|
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Resolve this script's directory and work from there so that all the
# relative paths used below (assets/, build/, LICENSE, ...) are stable.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)

# Manifest keys that must be present. NOTE: 'copyright' appears twice;
# harmless, but one entry is redundant.
required_module_keys = ['name','version','moduleid','description','copyright','license','copyright','platform','minsdk']
# Template placeholder values; shipping a manifest with these unchanged
# triggers a warning in validate_manifest().
module_defaults = {
    'description':'My module',
    'author': 'Your Name',
    'license' : 'Specify your license',
    'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Marker text identifying an unmodified LICENSE file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
    """Return the Titanium SDK path from *config*, with ~ and env vars expanded."""
    raw_path = config['TITANIUM_SDK']
    home_expanded = os.path.expanduser(raw_path)
    return os.path.expandvars(home_expanded)
def replace_vars(config, token):
    """Expand $(KEY) references in *token* using values from *config*.

    Expansion repeats until no resolvable reference remains. An unknown
    key or an unterminated '$(' stops the scan, leaving the remainder of
    the token unchanged.
    """
    idx = token.find('$(')
    while idx != -1:
        end = token.find(')', idx + 2)
        if end == -1:
            break  # unterminated reference: leave as-is
        key = token[idx + 2:end]
        # 'in' replaces the deprecated dict.has_key() (removed in Python 3)
        if key not in config:
            break  # unknown key: stop expanding
        token = token.replace('$(%s)' % key, config[key])
        idx = token.find('$(')
    return token
def read_ti_xcconfig():
    """Parse titanium.xcconfig (next to this script) into a dict.

    Lines starting with '//' are comments. Everything before the first '='
    is the key; the remainder is the value, with $(VAR) references expanded
    against keys parsed so far.
    """
    # 'with' guarantees the file handle is closed (the original leaked it)
    with open(os.path.join(cwd, 'titanium.xcconfig')) as f:
        contents = f.read()
    config = {}
    for line in contents.splitlines(False):
        line = line.strip()
        if line[0:2] == '//':
            continue
        idx = line.find('=')
        if idx > 0:
            key = line[0:idx].strip()
            value = line[idx + 1:].strip()
            config[key] = replace_vars(config, value)
    return config
def generate_doc(config):
    """Render the markdown files under documentation/ to HTML.

    Returns a list of one-entry {filename: html} dicts, or None when the
    documentation directory does not exist.
    """
    docdir = os.path.join(cwd,'documentation')
    if not os.path.exists(docdir):
        print "Couldn't find documentation file at: %s" % docdir
        return None
    # prefer markdown2 when installed, fall back to the markdown package
    try:
        import markdown2 as markdown
    except ImportError:
        import markdown
    documentation = []
    for file in docdir and os.listdir(docdir):
        if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
            continue
        md = open(os.path.join(docdir,file)).read()
        html = markdown.markdown(md)
        documentation.append({file:html});
    return documentation
def compile_js(manifest,config):
    """Compile the module's CommonJS JavaScript asset (if present).

    Splices the compiled asset data into the generated ObjC asset router
    and writes the crawled export symbols to metadata.json. A no-op when
    the module ships no JS asset.
    """
    js_file = os.path.join(cwd,'assets','net.uchidak.ticustombgservice.js')
    if not os.path.exists(js_file): return
    from compiler import Compiler
    try:
        import json
    except:
        import simplejson as json
    compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
    root_asset, module_assets = compiler.compile_module()
    # ObjC snippets interpolated into the module assets router below
    root_asset_content = """
%s
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
    module_asset_content = """
%s
NSNumber *index = [map objectForKey:path];
if (index == nil) {
return nil;
}
return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
    from tools import splice_code
    assets_router = os.path.join(cwd,'Classes','NetUchidakTicustombgserviceModuleAssets.m')
    splice_code(assets_router, 'asset', root_asset_content)
    splice_code(assets_router, 'resolve_asset', module_asset_content)
    # Generate the exports after crawling all of the available JS source
    exports = open('metadata.json','w')
    json.dump({'exports':compiler.exports }, exports)
    exports.close()
def die(msg):
    """Print *msg* and abort the build with exit status 1."""
    print msg
    sys.exit(1)
def warn(msg):
    """Print a non-fatal warning prefixed with [WARN]."""
    print "[WARN] %s" % msg
def validate_license():
    """Warn if the LICENSE file still contains the placeholder text."""
    c = open(os.path.join(cwd,'LICENSE')).read()
    if c.find(module_license_default)!=-1:
        warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
    """Read and validate the module manifest; return (manifest_dict, path).

    Dies (exit 1) when the manifest is missing or lacks a required key, and
    warns when a key still carries its template default value.
    """
    path = os.path.join(cwd, 'manifest')
    # Check existence BEFORE opening: the original opened first, so a
    # missing manifest raised IOError instead of the intended die() message.
    if not os.path.exists(path):
        die("missing %s" % path)
    f = open(path)
    manifest = {}
    for line in f.readlines():
        line = line.strip()
        if line[0:1] == '#':
            continue
        if line.find(':') < 0:
            continue
        # split on the first ':' only so values may themselves contain
        # colons (e.g. URLs); the original unbounded split crashed on them
        key, value = line.split(':', 1)
        manifest[key.strip()] = value.strip()
    f.close()
    for key in required_module_keys:
        # 'in' replaces the deprecated dict.has_key()
        if key not in manifest:
            die("missing required manifest key '%s'" % key)
        if key in module_defaults:
            defvalue = module_defaults[key]
            curvalue = manifest[key]
            if curvalue == defvalue:
                warn("please update the manifest key: '%s' to a non-default value" % key)
    return manifest, path
# Files and directories never copied into the packaged module zip.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf, dir, basepath, ignoreExt=[]):
    """Recursively add *dir* to the zip *zf*, rooted under <basepath>/<dir>.

    Entries listed in the module-level ignoreFiles/ignoreDirs, and files
    whose extension appears in *ignoreExt*, are skipped.
    """
    if not os.path.exists(dir):
        return
    for root, dirs, files in os.walk(dir):
        # prune ignored directories in place so os.walk never descends into them
        for skipped in ignoreDirs:
            if skipped in dirs:
                dirs.remove(skipped)
        for fname in files:
            if fname in ignoreFiles:
                continue
            ext = os.path.splitext(fname)
            if len(ext) == 2 and ext[1] in ignoreExt:
                continue
            src = os.path.join(root, fname)
            # rewrite only the first occurrence of dir in the source path
            dest = src.replace(dir, '%s/%s' % (basepath, dir), 1)
            zf.write(src, dest)
def glob_libfiles():
    """Return the Release-configuration static libraries under build/.

    Matches build/<config-dir>/<name>.a and keeps only paths whose
    configuration directory is a Release build.
    """
    # 'in' replaces the clunky .find(...) != -1 idiom
    return [libfile for libfile in glob.glob('build/**/*.a')
            if 'Release-' in libfile]
def build_module(manifest,config):
    """Build device and simulator libraries, then merge them with lipo.

    Runs xcodebuild for the iphoneos and iphonesimulator SDKs (dying on
    failure) and combines every Release .a into build/lib<moduleid>.a.
    """
    from tools import ensure_dev_path
    ensure_dev_path()
    rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
    if rc != 0:
        die("xcodebuild failed")
    # build the merged library using lipo
    moduleid = manifest['moduleid']
    libpaths = ''
    for libfile in glob_libfiles():
        libpaths+='%s ' % libfile
    os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
    """Assemble the distributable <moduleid>-iphone-<version>.zip.

    The archive contains the manifest, merged static library, rendered
    documentation, assets/example/platform trees, LICENSE, module.xcconfig
    and (when present) metadata.json, all under modules/iphone/<id>/<ver>/.
    """
    name = manifest['name'].lower()  # unused in this function
    moduleid = manifest['moduleid'].lower()
    version = manifest['version']
    modulezip = '%s-iphone-%s.zip' % (moduleid,version)
    # start from a clean archive
    if os.path.exists(modulezip): os.remove(modulezip)
    zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
    modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
    zf.write(mf,'%s/manifest' % modulepath)
    libname = 'lib%s.a' % moduleid
    zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
    docs = generate_doc(config)
    if docs!=None:
        # each entry maps one markdown filename to its rendered HTML
        for doc in docs:
            for file, html in doc.iteritems():
                filename = string.replace(file,'.md','.html')
                zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
    zip_dir(zf,'assets',modulepath,['.pyc','.js'])
    zip_dir(zf,'example',modulepath,['.pyc'])
    zip_dir(zf,'platform',modulepath,['.pyc','.js'])
    zf.write('LICENSE','%s/LICENSE' % modulepath)
    zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
    exports_file = 'metadata.json'
    if os.path.exists(exports_file):
        zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
    zf.close()
if __name__ == '__main__':
    # Validate the module metadata, locate the Titanium SDK, then
    # compile the JS assets, build the libraries and package the zip.
    manifest,mf = validate_manifest()
    validate_license()
    config = read_ti_xcconfig()
    sdk = find_sdk(config)
    # make the SDK's build tooling importable (compiler, tools modules)
    sys.path.insert(0,os.path.join(sdk,'iphone'))
    sys.path.append(os.path.join(sdk, "common"))
    compile_js(manifest,config)
    build_module(manifest,config)
    package_module(manifest,mf,config)
    sys.exit(0)
|
uchidaknet/TiCustomBGService
|
build.py
|
Python
|
mit
| 6,822
|
[
"VisIt"
] |
b3af31daa8f9371f60724e05b0f33d039d3bee141c5e01f0840d99f2ef96e233
|
#!/usr/bin/env python
# coding: utf-8
# # Basic ideas of the Principal Component Analysis (PCA)
#
# The principal component analysis deals with the problem of fitting a
# low-dimensional affine subspace $S$ of dimension $d$ much smaller than
# the total dimension $D$ of the problem at hand (our data
# set). Mathematically it can be formulated as a statistical problem or
# a geometric problem. In our discussion of the theorem for the
# classical PCA, we will stay with a statistical approach.
# Historically, the PCA was first formulated in a statistical setting in order to estimate the principal component of a multivariate random variable.
#
# We have a data set defined by a design/feature matrix $\boldsymbol{X}$ (see below for its definition)
# * Each data point is determined by $p$ extrinsic (measurement) variables
#
# * We may want to ask the following question: Are there fewer intrinsic variables (say $d << p$) that still approximately describe the data?
#
# * If so, these intrinsic variables may tell us something important and finding these intrinsic variables is what dimension reduction methods do.
#
# A good read is for example [Vidal, Ma and Sastry](https://www.springer.com/gp/book/9780387878102).
#
#
#
# ## Introducing the Covariance and Correlation functions
#
# Before we discuss the PCA theorem, we need to remind ourselves about
# the definition of the covariance and the correlation function. These are quantities
#
# Suppose we have defined two vectors
# $\hat{x}$ and $\hat{y}$ with $n$ elements each. The covariance matrix $\boldsymbol{C}$ is defined as
# $$
# \boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{cov}[\boldsymbol{x},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
# \mathrm{cov}[\boldsymbol{y},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{y},\boldsymbol{y}] \\
# \end{bmatrix},
# $$
# where for example
# $$
# \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
# $$
# With this definition and recalling that the variance is defined as
# $$
# \mathrm{var}[\boldsymbol{x}]=\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})^2,
# $$
# we can rewrite the covariance matrix as
# $$
# \boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{var}[\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
# \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] & \mathrm{var}[\boldsymbol{y}] \\
# \end{bmatrix}.
# $$
# The covariance can take values of arbitrarily large magnitude (positive or negative) and may thus
# lead to problems with loss of numerical precision for particularly
# large values. It is common to scale the covariance matrix by
# introducing instead the correlation matrix defined via the so-called
# correlation function
# $$
# \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]=\frac{\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}]}{\sqrt{\mathrm{var}[\boldsymbol{x}] \mathrm{var}[\boldsymbol{y}]}}.
# $$
# The correlation function is then given by values $\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]
# \in [-1,1]$. This avoids eventual problems with too large values. We
# can then define the correlation matrix for the two vectors $\boldsymbol{x}$
# and $\boldsymbol{y}$ as
# $$
# \boldsymbol{K}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} 1 & \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}] \\
# \mathrm{corr}[\boldsymbol{y},\boldsymbol{x}] & 1 \\
# \end{bmatrix},
# $$
# In the above example this is the function we constructed using **pandas**.
#
#
# In our derivation of the various regression algorithms like **Ordinary Least Squares** or **Ridge regression**
# we defined the design/feature matrix $\boldsymbol{X}$ as
# $$
# \boldsymbol{X}=\begin{bmatrix}
# x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\
# x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\
# x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\
# \dots & \dots & \dots & \dots \dots & \dots \\
# x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\
# x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\
# \end{bmatrix},
# $$
# with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the
# entries $n$ being the row elements.
# We can rewrite the design/feature matrix in terms of its column vectors as
# $$
# \boldsymbol{X}=\begin{bmatrix} \boldsymbol{x}_0 & \boldsymbol{x}_1 & \boldsymbol{x}_2 & \dots & \dots & \boldsymbol{x}_{p-1}\end{bmatrix},
# $$
# with a given vector
# $$
# \boldsymbol{x}_i^T = \begin{bmatrix}x_{0,i} & x_{1,i} & x_{2,i}& \dots & \dots x_{n-1,i}\end{bmatrix}.
# $$
# With these definitions, we can now rewrite our $2\times 2$
# correlation/covariance matrix in terms of a more general design/feature
# matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. This leads to a $p\times p$
# covariance matrix for the vectors $\boldsymbol{x}_i$ with $i=0,1,\dots,p-1$
# $$
# \boldsymbol{C}[\boldsymbol{x}] = \begin{bmatrix}
# \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
# \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
# \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_1] & \mathrm{var}[\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & \mathrm{var}[\boldsymbol{x}_{p-1}]\\
# \end{bmatrix},
# $$
# and the correlation matrix
# $$
# \boldsymbol{K}[\boldsymbol{x}] = \begin{bmatrix}
# 1 & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
# \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_0] & 1 & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
# \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_1] & 1 & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \dots & \dots & \dots & \dots & \dots & \dots \\
# \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & 1\\
# \end{bmatrix},
# $$
# The Numpy function **np.cov** calculates the covariance elements using
# the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have
# the exact mean values. The following simple function uses the
# **np.vstack** function which takes each vector of dimension $1\times n$
# and produces a $2\times n$ matrix $\boldsymbol{W}$
# $$
# \boldsymbol{W} = \begin{bmatrix} x_0 & y_0 \\
# x_1 & y_1 \\
# x_2 & y_2\\
# \dots & \dots \\
# x_{n-2} & y_{n-2}\\
# x_{n-1} & y_{n-1} &
# \end{bmatrix},
# $$
# which in turn is converted into the $2\times 2$ covariance matrix
# $\boldsymbol{C}$ via the Numpy function **np.cov()**. We note that we can also calculate
# the mean value of each set of samples $\boldsymbol{x}$ etc using the Numpy
# function **np.mean(x)**. We can also extract the eigenvalues of the
# covariance matrix through the **np.linalg.eig()** function.
# In[1]:
# Importing various packages
import numpy as np
n = 100  # number of samples
# x ~ N(0, 1); its sample mean should be close to 0
x = np.random.normal(size=n)
print(np.mean(x))
# y is a noisy linear function of x, so x and y are strongly correlated
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
# stack the samples as rows and form the 2x2 covariance matrix (1/(n-1) norm)
W = np.vstack((x, y))
C = np.cov(W)
print(C)
# ## Correlation Matrix
#
# The previous example can be converted into the correlation matrix by
# simply scaling the matrix elements with the variances. We should also
# subtract the mean values for each column. This leads to the following
# code which sets up the correlations matrix for the previous example in
# a more brute force way. Here we scale the mean values for each column of the design matrix, calculate the relevant mean values and variances and then finally set up the $2\times 2$ correlation matrix (since we have only two vectors).
# In[2]:
import numpy as np
n = 100
# define two vectors
x = np.random.random(size=n)
y = 4+3*x+np.random.normal(size=n)
#scaling the x and y vectors
x = x - np.mean(x)
y = y - np.mean(y)
# biased (1/n) variances of the centered data
variance_x = np.sum(x@x)/n
variance_y = np.sum(y@y)/n
print(variance_x)
print(variance_y)
cov_xy = np.sum(x@y)/n
cov_xx = np.sum(x@x)/n
cov_yy = np.sum(y@y)/n
# scaling the covariances by the variances yields the correlation matrix;
# the diagonal entries are identically 1 by construction
C = np.zeros((2,2))
C[0,0]= cov_xx/variance_x
C[1,1]= cov_yy/variance_y
C[0,1]= cov_xy/np.sqrt(variance_y*variance_x)
C[1,0]= C[0,1]
print(C)
# We see that the matrix elements along the diagonal are one as they
# should be and that the matrix is symmetric. Furthermore, diagonalizing
# this matrix we easily see that it is a positive definite matrix.
#
# The above procedure with **numpy** can be made more compact if we use **pandas**.
#
#
# We show here how we can set up the correlation matrix using **pandas**, as done in this simple code
# In[3]:
import numpy as np
import pandas as pd
n = 10
x = np.random.normal(size=n)
x = x - np.mean(x)
y = 4+3*x+np.random.normal(size=n)
y = y - np.mean(y)
# columns of X are the two centered samples
X = (np.vstack((x, y))).T
print(X)
Xpd = pd.DataFrame(X)
print(Xpd)
# pandas computes the full correlation matrix in one call
correlation_matrix = Xpd.corr()
print(correlation_matrix)
# We expand this model to the Franke function discussed above.
# In[4]:
# Common imports
import numpy as np
import pandas as pd
def FrankeFunction(x, y):
    """Evaluate the Franke test function on (x, y).

    A standard surface-fitting benchmark: the sum of three Gaussian bumps
    and one Gaussian dip over the unit square. Accepts scalars or numpy
    arrays (broadcasting applies).
    """
    bumps = [
        0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2)),
        0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1)),
        0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2)),
        -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2),
    ]
    # sum in the same order as the original term1 + term2 + term3 + term4
    return bumps[0] + bumps[1] + bumps[2] + bumps[3]
def create_X(x, y, n):
    """Build the design matrix of all 2D monomials up to total degree *n*.

    Column 0 is the intercept (all ones); for each degree i = 1..n the
    monomials x^(i-k) * y^k, k = 0..i, occupy the next i+1 columns.
    Multi-dimensional x/y inputs are flattened first.
    """
    if len(x.shape) > 1:
        x = np.ravel(x)
        y = np.ravel(y)

    num_rows = len(x)
    num_cols = int((n+1)*(n+2)/2)  # number of monomials of total degree <= n
    X = np.ones((num_rows, num_cols))

    col = 1  # column 0 already holds the intercept
    for degree in range(1, n+1):
        for k in range(degree+1):
            X[:, col] = (x**(degree-k))*(y**k)
            col += 1
    return X
# Making meshgrid of datapoints and compute Franke's function
n = 4  # polynomial degree of the design matrix
N = 100  # number of data points
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
Xpd = pd.DataFrame(X)
# subtract the mean values and set up the covariance matrix
Xpd = Xpd - Xpd.mean()
covariance_matrix = Xpd.cov()
print(covariance_matrix)
# We note here that the covariance is zero for the first rows and
# columns since all matrix elements in the design matrix were set to one
# (we are fitting the function in terms of a polynomial of degree $n$). We would however not include the intercept
# and we can simply
# drop these elements and construct a correlation
# matrix without them.
#
#
#
# We can rewrite the covariance matrix in a more compact form in terms of the design/feature matrix $\boldsymbol{X}$ as
# $$
# \boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}^T\boldsymbol{X}= \mathbb{E}[\boldsymbol{X}^T\boldsymbol{X}].
# $$
# To see this let us simply look at a design matrix $\boldsymbol{X}\in {\mathbb{R}}^{2\times 2}$
# $$
# \boldsymbol{X}=\begin{bmatrix}
# x_{00} & x_{01}\\
# x_{10} & x_{11}\\
# \end{bmatrix}=\begin{bmatrix}
# \boldsymbol{x}_{0} & \boldsymbol{x}_{1}\\
# \end{bmatrix}.
# $$
# If we then compute the expectation value
# $$
# \mathbb{E}[\boldsymbol{X}^T\boldsymbol{X}] = \frac{1}{n}\boldsymbol{X}^T\boldsymbol{X}=\begin{bmatrix}
# x_{00}^2+x_{01}^2 & x_{00}x_{10}+x_{01}x_{11}\\
# x_{10}x_{00}+x_{11}x_{01} & x_{10}^2+x_{11}^2\\
# \end{bmatrix},
# $$
# which is just
# $$
# \boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]=\begin{bmatrix} \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] \\
# \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] \\
# \end{bmatrix},
# $$
# where we wrote $$\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]$$ to indicate that this is the covariance of the vectors $\boldsymbol{x}$ of the design/feature matrix $\boldsymbol{X}$.
#
# It is easy to generalize this to a matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$.
#
#
#
# ## Towards the PCA theorem
#
# We have that the covariance matrix (the correlation matrix involves a simple rescaling) is given as
# $$
# \boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}^T\boldsymbol{X}= \mathbb{E}[\boldsymbol{X}^T\boldsymbol{X}].
# $$
# Let us now assume that we can perform a series of orthogonal transformations where we employ some orthogonal matrices $\boldsymbol{S}$.
# These matrices are defined as $\boldsymbol{S}\in {\mathbb{R}}^{p\times p}$ and obey the orthogonality requirements $\boldsymbol{S}\boldsymbol{S}^T=\boldsymbol{S}^T\boldsymbol{S}=\boldsymbol{I}$. The matrix can be written out in terms of the column vectors $\boldsymbol{s}_i$ as $\boldsymbol{S}=[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$ and $\boldsymbol{s}_i \in {\mathbb{R}}^{p}$.
#
# Assume also that there is a transformation $\boldsymbol{S}^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}=\boldsymbol{C}[\boldsymbol{y}]$ such that the new matrix $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal with elements $[\lambda_0,\lambda_1,\lambda_2,\dots,\lambda_{p-1}]$.
#
# That is we have
# $$
# \boldsymbol{C}[\boldsymbol{y}] = \mathbb{E}[\boldsymbol{S}^T\boldsymbol{X}^T\boldsymbol{X}\boldsymbol{S}]=\boldsymbol{S}^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S},
# $$
# since the matrix $\boldsymbol{S}$ is not a data dependent matrix. Multiplying with $\boldsymbol{S}$ from the left we have
# $$
# \boldsymbol{S}\boldsymbol{C}[\boldsymbol{y}] = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S},
# $$
# and since $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal we have for a given eigenvalue $i$ of the covariance matrix that
# $$
# \boldsymbol{S}_i\lambda_i = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}_i.
# $$
# In the derivation of the PCA theorem we will assume that the eigenvalues are ordered in descending order, that is
# $\lambda_0 > \lambda_1 > \dots > \lambda_{p-1}$.
#
#
# The eigenvalues tell us then how much we need to stretch the
# corresponding eigenvectors. Dimensions with large eigenvalues have
# thus large variations (large variance) and define therefore useful
# dimensions. The data points are more spread out in the direction of
# these eigenvectors. Smaller eigenvalues mean on the other hand that
# the corresponding eigenvectors are shrunk accordingly and the data
# points are tightly bunched together and there is not much variation in
# these specific directions. Hopefully then we could leave out
# dimensions where the eigenvalues are very small. If $p$ is very large,
# we could then aim at reducing $p$ to $l << p$ and handle only $l$
# features/predictors.
#
# ### The Algorithm before theorem
#
# Here's how we would proceed in setting up the algorithm for the PCA, see also discussion below here.
# * Set up the datapoints for the design/feature matrix $\boldsymbol{X}$ with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the entries $n$ being the row elements.
# $$
# \boldsymbol{X}=\begin{bmatrix}
# x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\
# x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\
# x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\
# \dots & \dots & \dots & \dots \dots & \dots \\
# x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\
# x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\
# \end{bmatrix},
# $$
# * Center the data by subtracting the mean value for each column. This leads to a new matrix $\boldsymbol{X}\rightarrow \overline{\boldsymbol{X}}$.
#
# * Compute then the covariance/correlation matrix $\mathbb{E}[\overline{\boldsymbol{X}}^T\overline{\boldsymbol{X}}]$.
#
# * Find the eigenpairs of $\boldsymbol{C}$ with eigenvalues $[\lambda_0,\lambda_1,\dots,\lambda_{p-1}]$ and eigenvectors $[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$.
#
# * Order the eigenvalue (and the eigenvectors accordingly) in order of decreasing eigenvalues.
#
# * Keep only those $l$ eigenvalues larger than a selected threshold value, discarding thus $p-l$ features since we expect small variations in the data here.
#
# ### Writing our own PCA code
#
# We will use a simple example first with two-dimensional data
# drawn from a multivariate normal distribution with the following mean and covariance matrix (we have fixed these quantities but will play around with them below):
# $$
# \mu = (-1,2) \qquad \Sigma = \begin{bmatrix} 4 & 2 \\
# 2 & 2
# \end{bmatrix}
# $$
# Note that the mean refers to each column of data.
# We will generate $n = 10000$ points $X = \{ x_1, \ldots, x_N \}$ from
# this distribution, and store them in the $10000 \times 2$ matrix $\boldsymbol{X}$. This is our design matrix where we have forced the covariance and mean values to take specific values.
#
# The following Python code aids in setting up the data and writing out the design matrix.
# Note that the function **multivariate** returns also the covariance discussed above and that it is defined by dividing by $n-1$ instead of $n$.
# In[5]:
# notebook magic: render matplotlib figures inline
get_ipython().run_line_magic('matplotlib', 'inline')

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
n = 10000  # number of sample points
mean = (-1, 2)  # imposed column-wise mean
cov = [[4, 2], [2, 2]]  # imposed covariance matrix
X = np.random.multivariate_normal(mean, cov, n)
# Now we are going to implement the PCA algorithm. We will break it down into various substeps.
#
#
# The first step of PCA is to compute the sample mean of the data and use it to center the data. Recall that the sample mean is
# $$
# \mu_n = \frac{1}{n} \sum_{i=1}^n x_i
# $$
# and the mean-centered data $\bar{X} = \{ \bar{x}_1, \ldots, \bar{x}_n \}$ takes the form
# $$
# \bar{x}_i = x_i - \mu_n.
# $$
# When you are done with these steps, print out $\mu_n$ to verify it is
# close to $\mu$ and plot your mean centered data to verify it is
# centered at the origin!
# The following code elements perform these operations using **pandas** or using our own functionality for doing so. The latter, using **numpy** is rather simple through the **mean()** function.
# In[6]:
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
# we center it ourselves
X_centered = X - X.mean(axis=0)
# Alternatively, we could use the functions we discussed
# earlier for scaling the data set. That is, we could have used the
# **StandardScaler** function in **Scikit-Learn**, a function which ensures
# that for each feature/predictor we study the mean value is zero and
# the variance is one (every column in the design/feature matrix). You
# would then not get the same results, since we divide by the
# variance. The diagonal covariance matrix elements will then be one,
# while the non-diagonal ones need to be divided by $2\sqrt{2}$ for our
# specific case.
#
#
# Now we are going to use the mean centered data to compute the sample covariance of the data by using the following equation
# $$
# \Sigma_n = \frac{1}{n-1} \sum_{i=1}^n \bar{x}_i^T \bar{x}_i = \frac{1}{n-1} \sum_{i=1}^n (x_i - \mu_n)^T (x_i - \mu_n)
# $$
# where the data points $x_i \in \mathbb{R}^p$ (here in this example $p = 2$) are column vectors and $x^T$ is the transpose of $x$.
# We can write our own code or simply use either the functionaly of **numpy** or that of **pandas**, as follows
# In[7]:
# both estimates should reproduce the imposed [[4, 2], [2, 2]] up to sampling noise
print(df.cov())
print(np.cov(X_centered.T))
# Note that the way we define the covariance matrix here has a factor $n-1$ instead of $n$. This is included in the **cov()** function by **numpy** and **pandas**.
# Our own code here is not very elegant and asks for obvious improvements. It is tailored to this specific $2\times 2$ covariance matrix.
# In[8]:
# extract the relevant columns from the centered design matrix of dim n x 2
x = X_centered[:,0]
y = X_centered[:,1]
# sample covariance with the unbiased 1/(n-1) normalization, matching np.cov
Cov = np.zeros((2,2))
Cov[0,1] = np.sum(x.T@y)/(n-1.0)
Cov[0,0] = np.sum(x.T@x)/(n-1.0)
Cov[1,1] = np.sum(y.T@y)/(n-1.0)
Cov[1,0]= Cov[0,1]
print("Centered covariance using own code")
print(Cov)
plt.plot(x, y, 'x')
plt.axis('equal')
plt.show()
# Depending on the number of points $n$, we will get results that are close to the covariance values defined above.
# The plot shows how the data are clustered around a line with slope close to one. Is this expected? Try to change the covariance and the mean values. For example, try to make the variance of the first element much larger than that of the second diagonal element. Try also to shrink the covariance (the non-diagonal elements) and see how the data points are distributed.
#
# ### Diagonalize the sample covariance matrix to obtain the principal components
#
# Now we are ready to solve for the principal components! To do so we
# diagonalize the sample covariance matrix $\Sigma$. We can use the
# function **np.linalg.eig** to do so. It will return the eigenvalues and
# eigenvectors of $\Sigma$. Once we have these we can perform the
# following tasks:
#
# * We compute the percentage of the total variance captured by the first principal component
#
# * We plot the mean centered data and lines along the first and second principal components
#
# * Then we project the mean centered data onto the first and second principal components, and plot the projected data.
#
# * Finally, we approximate the data as
# $$
# x_i \approx \tilde{x}_i = \mu_n + \langle x_i, v_0 \rangle v_0
# $$
# where $v_0$ is the first principal component.
#
# Collecting all these steps we can write our own PCA function and
# compare this with the functionality included in **Scikit-Learn**.
#
# The code here outlines some of the elements we could include in the
# analysis. Feel free to extend upon this in order to address the above
# questions.
# In[9]:
# diagonalize and obtain eigenvalues, not necessarily sorted
EigValues, EigVectors = np.linalg.eig(Cov)
# sort eigenvectors and eigenvalues
#permute = EigValues.argsort()
#EigValues = EigValues[permute]
#EigVectors = EigVectors[:,permute]
print("Eigenvalues of Covariance matrix")
for i in range(2):
    print(EigValues[i])
# np.linalg.eig returns eigenvectors as COLUMNS of EigVectors
FirstEigvector = EigVectors[:,0]
SecondEigvector = EigVectors[:,1]
print("First eigenvector")
print(FirstEigvector)
print("Second eigenvector")
print(SecondEigvector)
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2Dsl = pca.fit_transform(X)
# pca.components_ stores the principal axes as rows; transpose so that
# column 0 is the eigenvector of the largest eigenvalue
print("Eigenvector of largest eigenvalue")
print(pca.components_.T[:, 0])
# This code does not contain all the above elements, but it shows how we can use **Scikit-Learn** to extract the eigenvector which corresponds to the largest eigenvalue. Try to address the questions we pose before the above code. Try also to change the values of the covariance matrix by making one of the diagonal elements much larger than the other. What do you observe then?
#
#
# ## Classical PCA Theorem
#
# We assume now that we have a design matrix $\boldsymbol{X}$ which has been
# centered as discussed above. For the sake of simplicity we skip the
# overline symbol. The matrix is defined in terms of the various column
# vectors $[\boldsymbol{x}_0,\boldsymbol{x}_1,\dots, \boldsymbol{x}_{p-1}]$ each with dimension
# $\boldsymbol{x}\in {\mathbb{R}}^{n}$.
#
#
#
# The PCA theorem states that minimizing the above reconstruction error
# corresponds to setting $\boldsymbol{W}=\boldsymbol{S}$, the orthogonal matrix which
# diagonalizes the empirical covariance(correlation) matrix. The optimal
# low-dimensional encoding of the data is then given by a set of vectors
# $\boldsymbol{z}_i$ with at most $l$ vectors, with $l << p$, defined by the
# orthogonal projection of the data onto the columns spanned by the
# eigenvectors of the covariance (correlation) matrix.
#
#
#
#
# To show the PCA theorem let us start with the assumption that there is one vector $\boldsymbol{w}_0$ which corresponds to a solution which minimizes the reconstruction error $J$. This is an orthogonal unit vector. It means that we now approximate the reconstruction error in terms of $\boldsymbol{w}_0$ and $\boldsymbol{z}_0$ as
#
#
#
# We are almost there, we have obtained a relation between minimizing
# the reconstruction error and the variance and the covariance
# matrix. Minimizing the error is equivalent to maximizing the variance
# of the projected data.
#
#
# We could trivially maximize the variance of the projection (and
# thereby minimize the error in the reconstruction function) by letting
# the norm-2 of $\boldsymbol{w}_0$ go to infinity. However, this norm since we
# want the matrix $\boldsymbol{W}$ to be an orthogonal matrix, is constrained by
# $\vert\vert \boldsymbol{w}_0 \vert\vert_2^2=1$. Imposing this condition via a
# Lagrange multiplier we can then in turn maximize
# $$
# J(\boldsymbol{w}_0)= \boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0+\lambda_0(1-\boldsymbol{w}_0^T\boldsymbol{w}_0).
# $$
# Taking the derivative with respect to $\boldsymbol{w}_0$ we obtain
# $$
# \frac{\partial J(\boldsymbol{w}_0)}{\partial \boldsymbol{w}_0}= 2\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0-2\lambda_0\boldsymbol{w}_0=0,
# $$
# meaning that
# $$
# \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0\boldsymbol{w}_0.
# $$
# **The direction that maximizes the variance (or minimizes the reconstruction error) is an eigenvector of the covariance matrix**! If we left multiply with $\boldsymbol{w}_0^T$ we find that the variance of the projected data is
# $$
# \boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0.
# $$
# If we want to maximize the variance (minimize the reconstruction error)
# we simply pick the eigenvector of the covariance matrix with the
# largest eigenvalue. This establishes the link between the minimization
# of the reconstruction function $J$ in terms of an orthogonal matrix
# and the maximization of the variance and thereby the covariance of our
# observations encoded in the design/feature matrix $\boldsymbol{X}$.
#
# The proof
# for the other eigenvectors $\boldsymbol{w}_1,\boldsymbol{w}_2,\dots$ can be
# established by applying the above arguments and using the fact that
# our basis of eigenvectors is orthogonal, see [Murphy chapter
# 12.2](https://mitpress.mit.edu/books/machine-learning-1). The
# discussion in chapter 12.2 of Murphy's text has also a nice link with
# the Singular Value Decomposition theorem. For categorical data, see
# chapter 12.4 and discussion therein.
#
# For more details, see for example [Vidal, Ma and Sastry, chapter 2](https://www.springer.com/gp/book/9780387878102).
#
#
# ## Geometric Interpretation and link with Singular Value Decomposition
#
# For a detailed demonstration of the geometric interpretation, see [Vidal, Ma and Sastry, section 2.1.2](https://www.springer.com/gp/book/9780387878102).
#
#
# Principal Component Analysis (PCA) is by far the most popular dimensionality reduction algorithm.
# First it identifies the hyperplane that lies closest to the data, and then it projects the data onto it.
#
# The following Python code uses NumPy’s **svd()** function to obtain all the principal components of the
# training set, then extracts the first two principal components. First we center the data using either **pandas** or our own code
# In[10]:
import numpy as np
import pandas as pd
# display() only renders nicely inside IPython/Jupyter.
from IPython.display import display
# Fix the seed so the random matrix (and all printed output) is reproducible.
np.random.seed(100)
# setting up a 10 x 5 vanilla matrix
rows = 10
cols = 5
X = np.random.randn(rows,cols)
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
display(df)
# we center it ourselves (subtract the column means)
X_centered = X - X.mean(axis=0)
# Then check the difference between pandas and our own set up
# (should be a matrix of zeros)
print(X_centered-df)
#Now we do an SVD
# NOTE: np.linalg.svd returns the right singular vectors as the ROWS of the
# third return value (often called Vh), so the principal directions are the
# columns of V.T below.
U, s, V = np.linalg.svd(X_centered)
c1 = V.T[:, 0]
c2 = V.T[:, 1]
# Project the centered data onto the first two principal components.
W2 = V.T[:, :2]
X2D = X_centered.dot(W2)
print(X2D)
# PCA assumes that the dataset is centered around the origin. Scikit-Learn’s PCA classes take care of centering
# the data for you. However, if you implement PCA yourself (as in the preceding example), or if you use other libraries, don’t
# forget to center the data first.
#
# Once you have identified all the principal components, you can reduce the dimensionality of the dataset
# down to $d$ dimensions by projecting it onto the hyperplane defined by the first $d$ principal components.
# Selecting this hyperplane ensures that the projection will preserve as much variance as possible.
# In[11]:
# Build the projection matrix from the two leading principal directions
# (the first two columns of V.T) and map the centered data onto that plane.
W2 = V.T[:, :2]
X2D = X_centered @ W2
# ## PCA and scikit-learn
#
# Scikit-Learn’s PCA class implements PCA using SVD decomposition just like we did before. The
# following code applies PCA to reduce the dimensionality of the dataset down to two dimensions (note
# that it automatically takes care of centering the data):
# In[12]:
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
# fit_transform centers X internally before projecting to 2 dimensions.
X2D = pca.fit_transform(X)
print(X2D)
# After fitting the PCA transformer to the dataset, you can access the principal components using the
# components variable (note that it contains the PCs as horizontal vectors, so, for example, the first
# principal component is equal to
# NOTE: this is a bare expression -- it only displays a value when run in a
# notebook cell; as a plain script line it computes and discards the result.
pca.components_.T[:, 0]
# Another very useful piece of information is the explained variance ratio of each principal component,
# available via the $explained\_variance\_ratio\_$ attribute. It indicates the proportion of the dataset’s
# variance that lies along the axis of each principal component.
#
# ## Back to the Cancer Data
# We can now repeat the above but applied to real data, in this case our breast cancer data.
# Here we compute performance scores on the training data using logistic regression.
# In[14]:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
# Baseline: logistic regression on the raw (unscaled) features.
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Train set accuracy from Logistic Regression: {:.2f}".format(logreg.score(X_train,y_train)))
# We scale the data
# (the scaler is fitted on the training split only, then applied to both
# splits -- this avoids leaking test-set statistics into training)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Then perform again a log reg fit
logreg.fit(X_train_scaled, y_train)
print("Train set accuracy scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train)))
#thereafter we do a PCA with Scikit-learn
# (reduce the scaled training data to its first two principal components)
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D_train = pca.fit_transform(X_train_scaled)
# and finally compute the log reg fit and the score on the training data
logreg.fit(X2D_train,y_train)
print("Train set accuracy scaled and PCA data: {:.2f}".format(logreg.score(X2D_train,y_train)))
# We see that our training data after the PCA decomposition has a performance similar to the non-scaled data.
#
#
# Instead of arbitrarily choosing the number of dimensions to reduce down to, it is generally preferable to
# choose the number of dimensions that add up to a sufficiently large portion of the variance (e.g., 95%).
# Unless, of course, you are reducing dimensionality for data visualization — in that case you will
# generally want to reduce the dimensionality down to 2 or 3.
# The following code computes PCA without reducing dimensionality, then computes the minimum number
# of dimensions required to preserve 95% of the training set’s variance:
# In[15]:
# Fit PCA with all components kept, then find the smallest number of
# components whose cumulative explained-variance ratio reaches 95%.
pca = PCA()
pca.fit(X)
cumsum = np.cumsum(pca.explained_variance_ratio_)
# np.argmax on a boolean array returns the index of the FIRST True;
# adding 1 converts that zero-based index into a component count.
d = np.argmax(cumsum >= 0.95) + 1
# You could then set $n\_components=d$ and run PCA again. However, there is a much better option: instead
# of specifying the number of principal components you want to preserve, you can set $n\_components$ to be
# a float between 0.0 and 1.0, indicating the ratio of variance you wish to preserve:
# In[16]:
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X)
# ### Incremental PCA
#
# One problem with the preceding implementation of PCA is that it requires the whole training set to fit in
# memory in order for the SVD algorithm to run. Fortunately, Incremental PCA (IPCA) algorithms have
# been developed: you can split the training set into mini-batches and feed an IPCA algorithm one minibatch
# at a time. This is useful for large training sets, and also to apply PCA online (i.e., on the fly, as new
# instances arrive).
#
#
# ### Randomized PCA
#
# Scikit-Learn offers yet another option to perform PCA, called Randomized PCA. This is a stochastic
# algorithm that quickly finds an approximation of the first d principal components. Its computational
# complexity is $O(m \times d^2)+O(d^3)$, instead of $O(m \times n^2) + O(n^3)$, so it is dramatically faster than the
# previous algorithms when $d$ is much smaller than $n$.
#
#
# ### Kernel PCA
#
# The kernel trick is a mathematical technique that implicitly maps instances into a
# very high-dimensional space (called the feature space), enabling nonlinear classification and regression
# with Support Vector Machines. Recall that a linear decision boundary in the high-dimensional feature
# space corresponds to a complex nonlinear decision boundary in the original space.
# It turns out that the same trick can be applied to PCA, making it possible to perform complex nonlinear
# projections for dimensionality reduction. This is called Kernel PCA (kPCA). It is often good at
# preserving clusters of instances after projection, or sometimes even unrolling datasets that lie close to a
# twisted manifold.
# For example, the following code uses Scikit-Learn’s KernelPCA class to perform kPCA with an RBF kernel:
# In[17]:
from sklearn.decomposition import KernelPCA
# Kernel PCA with an RBF kernel: gamma controls the kernel width.
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
# ## Other techniques
#
#
# There are many other dimensionality reduction techniques, several of which are available in Scikit-Learn.
#
# Here are some of the most popular:
# * **Multidimensional Scaling (MDS)** reduces dimensionality while trying to preserve the distances between the instances.
#
# * **Isomap** creates a graph by connecting each instance to its nearest neighbors, then reduces dimensionality while trying to preserve the geodesic distances between the instances.
#
# * **t-Distributed Stochastic Neighbor Embedding** (t-SNE) reduces dimensionality while trying to keep similar instances close and dissimilar instances apart. It is mostly used for visualization, in particular to visualize clusters of instances in high-dimensional space (e.g., to visualize the MNIST images in 2D).
#
# * Linear Discriminant Analysis (LDA) is actually a classification algorithm, but during training it learns the most discriminative axes between the classes, and these axes can then be used to define a hyperplane onto which to project the data. The benefit is that the projection will keep classes as far apart as possible, so LDA is a good technique to reduce dimensionality before running another classification algorithm such as a Support Vector Machine (SVM) classifier discussed in the SVM lectures.
|
CompPhysics/MachineLearning
|
doc/LectureNotes/_build/jupyter_execute/chapter8.py
|
Python
|
cc0-1.0
| 36,331
|
[
"MOE"
] |
867d4994f87875adc17a279e3ce7cd4bad8733c6cba0353c8cc2bdb9da875cf7
|
# NOTE(review): this is a Python 2 script (print statements below).
from ase import *
# Geometry parameters, presumably in Angstrom -- TODO confirm against ASE docs.
a = 2.70
c = 1.59 * a
h = 1.85
d = 1.10
# Two-atom hexagonal slab; tag 1 marks the lower-layer atom (fixed below).
slab = Atoms('2Cu', [(0., 0., 0.), (1/3., 1/3., -0.5*c)],
             tags=(0, 1),
             pbc=(1, 1, 0))
# Hexagonal in-plane cell; the z vector is a unit placeholder (non-periodic).
slab.set_cell([(a, 0, 0),
               (a / 2, 3**0.5 * a / 2, 0),
               (0, 0, 1)])
slab = slab.repeat((4, 4, 1))
slab.set_calculator(EMT())
# NOTE(review): the loop variable shadows the lattice constant 'a' defined
# above; 'a' (the lattice constant) is not used again, but 'c' is -- beware.
mask = [a.tag == 1 for a in slab]
slab.set_constraint(FixAtoms(mask=mask))
# Relax the bare slab and record its energy.
dyn = QuasiNewton(slab)
dyn.run(fmax=0.05)
e_slab = slab.get_potential_energy()
# Top-layer relaxation as a percentage of half the interlayer spacing.
x = slab.positions[0, 2] / (c / 2) * 100
print 'Relaxation of the top layer: %f %%' % x
# Isolated N2 molecule at height h above the surface plane.
molecule = Atoms('2N', positions=[(0., 0., h),
                                  (0., 0., h + d)])
molecule.set_calculator(EMT())
e_N2 = molecule.get_potential_energy()
# Add the molecule to the slab and relax the combined system; the
# adsorption energy is E(slab) + E(N2) - E(slab+N2).
slab.extend(molecule)
dyn = QuasiNewton(slab)
dyn.run(fmax=0.05)
print 'Adsorption energy:', e_slab + e_N2 - slab.get_potential_energy()
|
freephys/python_ase
|
tutorials/N2Ru-relax.py
|
Python
|
gpl-3.0
| 879
|
[
"ASE"
] |
1818380e89eda8efd30fa7d7aaaaf069f4cf989bb37e7eb577a5933fb5dc3a7d
|
from __future__ import print_function
import six
import bz2
from json import dumps, loads
from operator import itemgetter
from Bio.Blast import NCBIXML
from Bio.File import as_handle
from dark.hsp import HSP, LSP
from dark.score import HigherIsBetterScore
from dark.alignments import Alignment, ReadAlignments
from dark.blast.hsp import normalizeHSP
class XMLRecordsReader(object):
    """
    Read BLAST XML records from a file and make them available one at a
    time, optionally re-serializing them as JSON. The global BLAST
    parameters found in the first record are stored and made accessible.

    @ivar params: A C{dict} of global BLAST parameters (C{None} until the
        first record has been read).
    @param filename: A C{str} filename or an open file pointer, containing
        XML BLAST records.
    """

    # Names of the global BLAST parameter attributes copied out of the
    # first record. Some of these are useless (not filled in), but all are
    # recorded in case they one day become needed, start to be used, or
    # disappear -- any such change might alert us that something has
    # changed in BLAST XML output or in BioPython. Note that
    # 'hsps_prelim_gapped_attemped' is spelled that way in BioPython.
    _PARAM_NAMES = (
        # From Bio.Blast.Record.Header
        'application', 'version', 'date', 'reference', 'query',
        'query_letters', 'database', 'database_sequences',
        'database_letters',
        # From Bio.Blast.Record.DatabaseReport
        'database_name', 'posted_date', 'num_letters_in_database',
        'num_sequences_in_database', 'ka_params', 'gapped',
        'ka_params_gap',
        # From Bio.Blast.Record.Parameters
        'matrix', 'gap_penalties', 'sc_match', 'sc_mismatch', 'num_hits',
        'num_sequences', 'num_good_extends', 'num_seqs_better_e',
        'hsps_no_gap', 'hsps_prelim_gapped', 'hsps_prelim_gapped_attemped',
        'hsps_gapped', 'query_id', 'query_length', 'database_length',
        'effective_hsp_length', 'effective_query_length',
        'effective_database_length', 'effective_search_space',
        'effective_search_space_used', 'frameshift', 'threshold',
        'window_size', 'dropoff_1st_pass', 'gap_x_dropoff',
        'gap_x_dropoff_final', 'gap_trigger', 'blast_cutoff')

    def __init__(self, filename):
        self.params = None  # Filled in by self.records.
        self._filename = filename

    def _convertBlastRecordToDict(self, record):
        """
        Pull (only) the fields we use out of the record and return them as
        a dict. Although we take the title from each alignment description,
        we save space in the JSON output by storing it in the alignment
        dict (not in a separate 'description' dict). When this conversion
        is undone, the title is pulled out of the alignment dict and put
        back into the right place in the BLAST record.

        @param record: An instance of C{Bio.Blast.Record.Blast}. The
            attributes on this don't seem to be documented. You'll need to
            look at the BioPython source to see everything it contains.
        @return: A C{dict} with 'alignments' and 'query' keys.
        """
        def hspDict(hsp):
            # Keep only the HSP attributes the rest of the code uses.
            return {
                'bits': hsp.bits,
                'expect': hsp.expect,
                'frame': hsp.frame,
                'identicalCount': hsp.identities,
                'positiveCount': hsp.positives,
                'query': hsp.query,
                'query_start': hsp.query_start,
                'query_end': hsp.query_end,
                'sbjct': hsp.sbjct,
                'sbjct_start': hsp.sbjct_start,
                'sbjct_end': hsp.sbjct_end,
            }

        return {
            'alignments': [
                {
                    'hsps': [hspDict(hsp) for hsp in alignment.hsps],
                    'length': alignment.length,
                    'title': alignment.title,
                }
                for alignment in record.alignments
            ],
            'query': record.query,
        }

    def _convertBlastParamsToDict(self, record):
        """
        Pull the global BLAST parameters out of a BLAST record and return
        them as a C{dict} keyed by attribute name.

        @param record: An instance of C{Bio.Blast.Record.Blast}.
        @return: A C{dict}, as described above.
        """
        return dict(
            (name, getattr(record, name)) for name in self._PARAM_NAMES)

    def records(self):
        """
        Yield BLAST records, as read by the BioPython NCBIXML.parse
        method. Set self.params from data in the first record.
        """
        with as_handle(self._filename) as fp:
            for count, record in enumerate(NCBIXML.parse(fp)):
                if count == 0:
                    self.params = self._convertBlastParamsToDict(record)
                yield record

    def saveAsJSON(self, fp):
        """
        Write the records out as JSON, one object per line. The first JSON
        object saved contains the BLAST parameters.

        @param fp: A file pointer to write to.
        """
        for count, record in enumerate(self.records()):
            if count == 0:
                print(dumps(self.params, separators=(',', ':')), file=fp)
            print(dumps(self._convertBlastRecordToDict(record),
                        separators=(',', ':')), file=fp)
class JSONRecordsReader(object):
    """
    Provide a method that yields JSON records from a file. Store, check, and
    make accessible the global BLAST parameters.
    @param filename: A C{str} filename containing JSON BLAST records.
    @param scoreClass: A class to hold and compare scores (see scores.py).
        Default is C{HigherIsBetterScore}, for comparing bit scores. If you
        are using e-values, pass LowerIsBetterScore instead.
    """
    # Note that self._fp is opened in self.__init__ (via self._open),
    # re-opened if needed and read in self.readAlignments, and closed in
    # the 'finally' clause of self.readAlignments.
    def __init__(self, filename, scoreClass=HigherIsBetterScore):
        self._filename = filename
        self._scoreClass = scoreClass
        # HSP holds bit scores (higher is better); LSP holds e-values
        # (lower is better).
        if scoreClass is HigherIsBetterScore:
            self._hspClass = HSP
        else:
            self._hspClass = LSP
        self._open(filename)
        self.application = self.params['application'].lower()
    def _open(self, filename):
        """
        Open the input file. Set self._fp to point to it. Read the first
        line of parameters.
        @param filename: A C{str} filename containing JSON BLAST records.
        @raise ValueError: if the first line of the file isn't valid JSON,
            if the input file is empty, or if the JSON does not contain an
            'application' key.
        """
        if filename.endswith('.bz2'):
            if six.PY3:
                self._fp = bz2.open(filename, mode='rt', encoding='UTF-8')
            else:
                # Python 2's bz2 module has no open(); BZ2File is used
                # instead.
                self._fp = bz2.BZ2File(filename)
        else:
            self._fp = open(filename)
        # The first line of the file holds the global BLAST parameters.
        line = self._fp.readline()
        if not line:
            raise ValueError('JSON file %r was empty.' % self._filename)
        try:
            # line[:-1] strips the trailing newline before JSON parsing.
            self.params = loads(line[:-1])
        except ValueError as e:
            raise ValueError(
                'Could not convert first line of %r to JSON (%s). '
                'Line is %r.' % (self._filename, e, line[:-1]))
        else:
            if 'application' not in self.params:
                raise ValueError(
                    '%r appears to be an old JSON file with no BLAST global '
                    'parameters. Please re-run convert-blast-xml-to-json.py '
                    'to convert it to the newest format.' % self._filename)
    def _dictToAlignments(self, blastDict, read):
        """
        Take a dict (made by XMLRecordsReader._convertBlastRecordToDict)
        and convert it to a list of alignments.
        @param blastDict: A C{dict}, from convertBlastRecordToDict.
        @param read: A C{Read} instance, containing the read that BLAST used
            to create this record.
        @raise ValueError: If the query id in the BLAST dictionary does not
            match the id of the read.
        @return: A C{list} of L{dark.alignment.Alignment} instances.
        """
        # Accept either an exact id match or a match against the first
        # whitespace-separated field of the BLAST query id.
        if (blastDict['query'] != read.id and
                blastDict['query'].split()[0] != read.id):
            raise ValueError(
                'The reads you have provided do not match the BLAST output: '
                'BLAST record query id (%s) does not match the id of the '
                'supposedly corresponding read (%s).' %
                (blastDict['query'], read.id))
        alignments = []
        # HSPs carry bit scores, LSPs carry e-values; pick the right key.
        getScore = itemgetter('bits' if self._hspClass is HSP else 'expect')
        for blastAlignment in blastDict['alignments']:
            alignment = Alignment(blastAlignment['length'],
                                  blastAlignment['title'])
            alignments.append(alignment)
            for blastHsp in blastAlignment['hsps']:
                score = getScore(blastHsp)
                normalized = normalizeHSP(blastHsp, len(read),
                                          self.application)
                hsp = self._hspClass(
                    score,
                    readStart=normalized['readStart'],
                    readEnd=normalized['readEnd'],
                    readStartInSubject=normalized['readStartInSubject'],
                    readEndInSubject=normalized['readEndInSubject'],
                    readFrame=blastHsp['frame'][0],
                    subjectStart=normalized['subjectStart'],
                    subjectEnd=normalized['subjectEnd'],
                    subjectFrame=blastHsp['frame'][1],
                    readMatchedSequence=blastHsp['query'],
                    subjectMatchedSequence=blastHsp['sbjct'],
                    # Use blastHsp.get on identicalCount and positiveCount
                    # because they were added in version 2.0.3 and will not
                    # be present in any of our JSON output generated before
                    # that. Those values will be None for those JSON files,
                    # but that's much better than no longer being able to
                    # read all that data.
                    identicalCount=blastHsp.get('identicalCount'),
                    positiveCount=blastHsp.get('positiveCount'))
                alignment.addHsp(hsp)
        return alignments
    def readAlignments(self, reads):
        """
        Read lines of JSON from self._filename, convert them to read alignments
        and yield them.
        @param reads: An iterable of L{Read} instances, corresponding to the
            reads that were given to BLAST.
        @raise ValueError: If any of the lines in the file cannot be converted
            to JSON.
        @return: A generator that yields C{dark.alignments.ReadAlignments}
            instances.
        """
        # The file may have been closed by a previous full iteration of
        # this generator; re-open it in that case.
        if self._fp is None:
            self._open(self._filename)
        reads = iter(reads)
        try:
            # start=2 because the parameters line (line 1) was already
            # consumed by self._open.
            for lineNumber, line in enumerate(self._fp, start=2):
                try:
                    record = loads(line[:-1])
                except ValueError as e:
                    raise ValueError(
                        'Could not convert line %d of %r to JSON (%s). '
                        'Line is %r.' %
                        (lineNumber, self._filename, e, line[:-1]))
                else:
                    # Each JSON record line must be matched by the next
                    # read from the caller-supplied iterable.
                    try:
                        read = next(reads)
                    except StopIteration:
                        raise ValueError(
                            'Read generator failed to yield read number %d '
                            'during parsing of BLAST file %r.' %
                            (lineNumber - 1, self._filename))
                    else:
                        alignments = self._dictToAlignments(record, read)
                        yield ReadAlignments(read, alignments)
        finally:
            self._fp.close()
            self._fp = None
|
bamueh/dark-matter
|
dark/blast/conversion.py
|
Python
|
mit
| 12,742
|
[
"BLAST",
"Biopython"
] |
e844ea9ac8c36c68133c9e22a035f8f2025e4a30dacd75078b8aebe4ab1d57a6
|
## @package memonger
# Module caffe2.python.memonger
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import networkx as nx
import collections
import time
import heapq
import copy
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
import enum
import logging
import numpy as np
from future.utils import viewitems, viewvalues
import caffe2.python._import_c_extension as C
log = logging.getLogger("memonger")
log.setLevel(logging.INFO)
LiveRange = collections.namedtuple('LiveRange', ["defined", "used", "size"])
def share_grad_blobs(
    net,
    losses,
    param_grads,
    namescope,
    dont_share_blobs=None,
    share_activations=False,
    blob_shapes=None,
):
    '''
    Implements similar optimization as Torch's shareGradInput():
    for the gradients that are passed between layers, share blobs between
    operators when possible. This yields significant memory savings with
    deep networks.

    @param net: the net whose protobuf is to be optimized.
    @param losses: blobs from which the traversal of gradient ops starts.
    @param param_grads: gradient blob names that must never be recycled.
    @param namescope: scope prefix used to recognize gradient blobs.
    @param dont_share_blobs: optional collection of blob names to exclude
        from sharing.
    @param share_activations: if True, also recycle activation blobs.
    @param blob_shapes: optional shape information enabling size-aware
        blob recycling.

    Returns an optimized protobuf (assign to net._net)
    '''
    def is_grad_blob(b):
        name = str(b)
        # Note: need to look at _{namescope} pattern as it matches
        # to handle the auto-split gradients
        return "_grad" in name and (name.startswith(namescope) or
            name.startswith("_" + namescope)) and name not in param_grads

    def is_grad_op(op):
        # TODO: something smarter
        return any(is_grad_blob(b) for b in list(op.input) + list(op.output))

    # log.warn is a deprecated alias of log.warning; use the real name.
    log.warning("NOTE: Executing memonger to optimize gradient memory")

    # Collect ops that have something to do with gradients
    if not namescope.endswith("/"):
        namescope += "/"

    netproto = copy.deepcopy(net.Proto())
    activations = []
    external_output = set(net.Proto().external_output)

    # Hacky way to get activations, think of a better way
    # (an output blob whose name + "_w" appears among the op's inputs is
    # taken to be a layer activation).
    for op in net.Proto().op:
        for b in op.output:
            if b + "_w" in op.input and b not in external_output:
                activations.append(b)

    # Remove last activations, as they are usually accessed externally
    activations = set(activations[:-2])

    # Gradient ops
    grad_ops = [op for op in netproto.op if is_grad_op(op)]
    return _compute_blob_recycling_for_dag(
        netproto,
        losses,
        grad_ops,
        lambda b: is_grad_blob(b) or (share_activations and b in activations),
        namescope,
        {} if dont_share_blobs is None else dont_share_blobs,
        blob_shapes
    )
def optimize_inference_for_dag(net, input_blobs, namescope=""):
    """
    Compute a blob-recycling optimization for an inference-only (forward)
    net and return the optimized protobuf. The net must contain no
    gradient ops, and every internal (activation) blob an op reads must
    have been produced by an earlier op.
    """
    netproto = copy.deepcopy(net.Proto())
    ext_inputs = set(net.Proto().external_input)
    ext_outputs = set(net.Proto().external_output)

    def is_activation_blob(b):
        # Activation blobs are internal: neither fed in nor read out.
        return b not in ext_inputs and b not in ext_outputs

    ops = list(net.Proto().op)

    # Sanity check: all external inputs must be properly accounted for,
    # and no gradient ops may be included in 'net'.
    produced = set()
    for op in ops:
        for blob in op.input:
            if is_activation_blob(blob):
                assert blob in produced, \
                    "{} not in external input".format(blob)
        produced.update(op.output)
        assert not op.is_gradient_op, \
            "You can only pass inference-only nets to optimize_inference_for_dag"

    return _compute_blob_recycling_for_dag(
        netproto, input_blobs, ops, is_activation_blob,
        namescope, set(), None,
    )
def _compute_blob_recycling_for_dag(
netproto, heads, ops, is_shareable,
namescope, dont_share_blobs, blob_shapes=None,
):
'''
Computes a blob recycling by traversing the computation DAG. The resulting
model can be executed safely on a DAGNet.
'''
start_time = time.time()
if dont_share_blobs is not None:
dont_share_blobs = set([str(b) for b in dont_share_blobs])
# Create mapping from blobs to ops
origproto = copy.deepcopy(netproto)
blobs_to_ops = collections.defaultdict(lambda: [])
blob_input_count = collections.defaultdict(lambda: 0)
op_inputs = collections.defaultdict(lambda: 0)
op_visit_count = collections.defaultdict(lambda: 0)
share_counts = collections.defaultdict(lambda: 0)
req_tokens = collections.defaultdict(lambda: set())
op_token_deposit = [set() for _ in ops]
blob_sizes = {} if blob_shapes is not None else None
# First figure out which of the shareable blobs
# are 'internal' to the optimization. For example, if optimizing
# only gradient ops, then activation blobs will be 'external' as they
# are not output by these ops.
optim_op_outputs = set()
for op in ops:
optim_op_outputs.update(set(op.output))
for i, op in enumerate(ops):
for inp in op.input:
if is_shareable(inp) or inp in heads:
if inp in optim_op_outputs:
blobs_to_ops[inp].append(i)
op_inputs[i] += 1
else:
# For external blobs, we don't increase the op_inputs
# count.
blobs_to_ops[inp].append(i)
share_counts[inp] = 1
output_blobs = set()
mapping = {}
unknown_shapes = set()
# Helper function to return blob size based on shape inference.
# If we don't have shape inference available, return 0.
def infer_blob_size(b):
if b in blob_shapes:
return np.prod(blob_shapes[b])
else:
unknown_shapes.add(b)
return 0
global token_seq
token_seq = 0
# Creates a next "token". Tokens are used to to keep track of
# dependendencies: a blob can be replaced by another only if that
# blob "holds" all tokens currently in scope.
def next_token():
global token_seq
token_seq += 1
return token_seq
saved_count = 0
# Main recursive function. We start recursion from the "heads" and
# only descend on an operator when all its inputs have been 'satisfied'.
# That is, all parent operators have been visited.
def descend(op_idx, free_blobs, tokens):
# Check if there are tokens left at this operator from a
# parent operator.
tokens = tokens.union(op_token_deposit[op_idx])
op_token_deposit[op_idx] = None
cur_op = ops[op_idx]
# new_free_blobs contains the blobs that we will release after
# visiting this op
new_free_blobs = set()
saved = 0
# Update the tokens assigned to blobs to be union of the
# tokens we are currently holding and the tokens already held
# by that blob.
for b in list(cur_op.input) + list(cur_op.output):
actual_blob = b if b not in mapping else mapping[b]
req_tokens[b] = req_tokens[b].union(tokens)
if actual_blob != b:
# This blob has been assigned to another (recycled) blob,
# so update the token holdings of the recycled blob.
req_tokens[actual_blob] = req_tokens[actual_blob].union(tokens)
# Check each input and increment the counters for each of the input
# blobs.
for inp in cur_op.input:
if is_shareable(inp):
blob_input_count[inp] += 1
if blob_input_count[inp] == len(blobs_to_ops[inp]):
# This input blob has been now consumed, so we
# can release it to be recycled. If it was replaced
# by another recycled blob, release the recycled blob
# instead.
actual_blob = inp if inp not in mapping else mapping[inp]
if actual_blob not in dont_share_blobs:
new_free_blobs.add(
(-share_counts[actual_blob], actual_blob),
)
def can_be_used(blob, cur_tokens):
# Do we have all required tokens, and this one
# was not released in this op?
for (_cnt, b) in new_free_blobs:
if b == blob:
return False
return len(req_tokens[blob] - cur_tokens) == 0
# Check each output to see if we see the output the first time (i.e
# it is created by this op). if it is then, we can replace it with
# a recycled blob, if available.
for outp in cur_op.output:
if is_shareable(outp):
if outp not in output_blobs:
# First seen this blob as output, can assign to a free blob
freeb = None
# We have two algorithms for choosing the blob to replace
# this one. One that uses size information and another
# that uses a priority queue that prefers blobs that are
# have been shared before.
if blob_sizes is None:
put_back = []
while len(free_blobs) > 0:
(negcnt, cand_freeb) = heapq.heappop(free_blobs)
if can_be_used(cand_freeb, tokens):
freeb = cand_freeb
break
else:
put_back.append((negcnt, cand_freeb))
for cnt, b in put_back:
heapq.heappush(free_blobs, (cnt, b))
else:
bsize = infer_blob_size(outp)
best_blob = None
best_size = -1
# Heuristic to choose the most suitably sized blob
for b in free_blobs:
if can_be_used(b, tokens):
sz = blob_sizes[b]
if sz >= best_size:
if best_size < bsize or best_size >= sz:
best_size = sz
best_blob = b
freeb = best_blob
if freeb is not None:
free_blobs.remove(freeb)
saved += bsize
# "freeb" is the blob output to be replaced with. We
# update its tokens to include the tokens being held
# now.
if freeb is not None:
req_tokens[freeb] = req_tokens[freeb].union(tokens)
mapping[outp] = freeb
share_counts[freeb] += 1
output_blobs.add(outp)
# Process blobs released during this op visit. Depending
# on whether we have blob sizes or not, we store the list
# of free blobs differently (NOTE: this should be unified).
for (cnt, nf) in new_free_blobs:
already_inserted = False
# Note: we prevent double insertion, but it can
# happen because of parallel branches. Token management
# ensures free blobs are handled correctly.
if blob_sizes is None:
for _c, b in free_blobs:
if b == nf:
already_inserted = True
if not already_inserted:
heapq.heappush(free_blobs, (cnt, nf))
else:
if nf not in blob_sizes:
blob_sizes[nf] = infer_blob_size(outp)
if nf in free_blobs:
already_inserted = True
if not already_inserted:
free_blobs.append(nf)
num_branches = 0
# Count branches
for outp in cur_op.output:
for _ in blobs_to_ops[outp]:
num_branches += 1
# Here we process each output again and see if we can descend
# down the operator graph.
for outp in cur_op.output:
for inp_op_idx in blobs_to_ops[outp]:
op_visit_count[inp_op_idx] += 1
# Descend only if we have satisfied all inputs
if op_visit_count[inp_op_idx] == op_inputs[inp_op_idx]:
assert inp_op_idx != op_idx
new_tokens = tokens
if num_branches > 1:
# Optimization
new_tokens = tokens.union(set([next_token()]))
saved_desc = descend(
inp_op_idx,
free_blobs,
new_tokens,
)
saved += saved_desc
else:
# Leave my tokens here so that they can be grabbed
# when we visit the operator (after all inputs have been
# satisfied).
if op_token_deposit[inp_op_idx] is not None:
op_token_deposit[inp_op_idx] = \
op_token_deposit[inp_op_idx].union(tokens)
return saved
# Start DFS from the heads' (losses or inputs)
for head_blob in heads:
for op_idx in blobs_to_ops[head_blob]:
if op_token_deposit[op_idx] is not None:
saved = descend(op_idx, [], set([next_token()]))
saved_count += saved
# Rename the shared blobs
shared_blobs = set(viewvalues(mapping))
renamed = {}
for j, b in enumerate(shared_blobs):
if b in optim_op_outputs:
renamed[b] = namescope + "__m{}_shared".format(j)
else:
renamed[b] = b
# Update the mapping recursively
mapping.update(renamed)
had_changes = True
while had_changes:
had_changes = False
for k, v in mapping.items():
if v in renamed and renamed[v] != v:
renamed[k] = renamed[v]
mapping[k] = renamed[k]
had_changes = True
shared_blobs = set(mapping.values())
if saved_count > 0:
log.info("Remapping {} blobs, using {} shared; saved apprx {} MB".format(
len(mapping), len(shared_blobs), int(saved_count * 4 / 1024 / 1024),
))
log.info("Could not infer sizes for: {}".format(unknown_shapes))
else:
log.info("Remapping {} blobs, using {} shared".format(
len(mapping), len(shared_blobs),
))
apply_assignments(netproto, mapping)
log.info("Memonger memory optimization took {} secs".format(
time.time() - start_time),
)
assert verify_graph_equality(origproto, netproto), \
"Memonger graph is not equal to original."
assert verify_inplace_blobs(origproto, netproto), \
"Inplace assignments differ in memonger net."
return netproto
def _find_source_nodes(g):
''' Return nodes without predecessors '''
ret = []
for cn in g:
cur_pred = g.predecessors(cn)
if not cur_pred:
ret.append(cn)
return ret
def _find_target_nodes(g):
''' Return nodes without successors '''
ret = []
for cn in g:
cur_succ = g.successors(cn)
if not cur_succ:
ret.append(cn)
return ret
def _add_single_target_ifneeded(g):
    """Return 'g' unchanged if it has one target node; otherwise return a
    deep copy with a synthetic target node that all targets feed into."""
    targets = _find_target_nodes(g)
    assert len(targets) >= 1
    if len(targets) == 1:
        return g
    augmented = copy.deepcopy(g)
    # Node ids are ints; pick one past the current maximum (0 for an
    # all-negative/empty node set, matching the original -1 + 1 seed).
    synthetic_target = max(list(g) + [-1]) + 1
    augmented.add_node(synthetic_target)
    for t in targets:
        augmented.add_edge(t, synthetic_target)
    return augmented
def _get_path(pred_list, dist_list):
''' Get the path from nx.bellman_ford()'s output '''
# distances are negative
assert all(dist_list[x] <= 0 for x in dist_list)
# node with longest distance to source is the target
target = min(dist_list, key=lambda x: dist_list[x])
ret = []
cur = target
while cur is not None:
ret.append(cur)
cur = pred_list[cur]
return list(reversed(ret))
def _get_longest_paths(g, source_nodes):
    ''' Get the longest path for nodes in 'source_nodes'
        Find with bellman_ford() by setting weight = -1
    '''
    weighted = copy.deepcopy(g)
    # Negating every edge weight turns shortest-path search into
    # longest-path search (valid here because the graph is a DAG).
    for src, dst in weighted.edges():
        weighted[src][dst]["weight"] = -1
    longest = {}
    for node in source_nodes:
        pred, dist = nx.bellman_ford(weighted, node, weight="weight")
        path = _get_path(pred, dist)
        assert path[0] == node
        # Path length in edges must equal the negated distance to its end.
        assert len(path) - 1 == -dist[path[-1]]
        longest[node] = path
    return longest
def _build_tree(paths):
    ''' Build a tree for given paths based on common elements.
        Last elements of all paths are the same, which is the root of the tree.
    '''
    root = paths[0][-1]
    assert all(p[-1] == root for p in paths)
    tree = nx.DiGraph()
    tree.add_nodes_from({node for path in paths for node in path})
    # Edges point from each later path element back toward the head, so
    # the shared tail (root) ends up with all paths hanging below it.
    for path in paths:
        for child, parent in zip(path[0:-1], path[1:]):
            tree.add_edge(parent, child)
    _compute_tree_height(tree, root)
    return (tree, root)
def _compute_tree_height(g, root):
''' Compute the heights of the tree for all nodes
Height of leaves are 0
'''
def _get_height(root):
children = g.successors(root)
height = 0
if children:
child_heights = [_get_height(x) for x in children]
height = max(child_heights) + 1
g.node[root]["height"] = height
return height
_get_height(root)
def _sort_tree_leaves(g, root):
''' For each node, sort its child nodes based on the height of the nodes.
Return the leaf nodes of the tree after sorting.
'''
def _get_height(root):
return g.node[root]["height"]
def _get_sorted_leaves(root):
children = g.successors(root)
if not children:
return [root]
child_heights = [_get_height(x) for x in children]
order = sorted(range(len(children)), key=lambda x: child_heights[x])
ret = []
for co in order:
cr = children[co]
ret += _get_sorted_leaves(cr)
return ret
return _get_sorted_leaves(root)
def topological_sort_traversal_longest_path(g):
    ''' The graph 'g' may contain several source nodes (nodes without incoming
        edge), which could be in any order and still be a valid
        topological sorting result. We would like to arrange these source nodes
        so that the average live spans of the computed blobs are shorter.
        The idea is to sort the source nodes based on the length of their path to
        the target node so that the one with longer path is used first.
        This is done by:
        - Add a single target node if there are multiple target nodes in 'g'.
        - Find the longest path between each source and the target node.
        - Convert the longest paths to a tree with the target node being the root
          and source nodes being the leaves.
        - Sort the nodes of the tree based on the height of the tree.
    '''
    gt = _add_single_target_ifneeded(g)
    source_nodes = _find_source_nodes(gt)
    lpaths = _get_longest_paths(gt, source_nodes)
    tree, root = _build_tree(list(viewvalues(lpaths)))
    sorted_sources = _sort_tree_leaves(tree, root)
    # Sorting must only reorder the sources, never add or drop any.
    assert(sorted(sorted_sources) == sorted(source_nodes))

    # NOTE(review): relies on the networkx 1.x API where topological_sort
    # accepts an 'nbunch' ordering hint and returns a list — confirm the
    # pinned networkx version before upgrading.
    ret = nx.topological_sort(g, sorted_sources)
    assert(len(ret) == len(g.node))
    return ret
def topological_sort_traversal(g):
    # Plain topological order with no source-node ordering heuristic.
    return nx.topological_sort(g)
def compute_ranges(linearized_ops, blob_sizes=None):
    """Compute the live range of every blob in 'linearized_ops'.

    Returns a defaultdict mapping blob name -> LiveRange(defined, used,
    size): the earliest op index that writes the blob, the latest op
    index that reads it, and its size (None when 'blob_sizes' is absent).
    """
    if not blob_sizes:
        log.warning('Provide blob sizes to get more accurate assignments.')

    blobs = collections.defaultdict(
        lambda: LiveRange(defined=None, used=None, size=None))
    for pos, op in enumerate(linearized_ops):
        for blob in op.input:
            rng = blobs[blob]
            # Last use is the latest op index that reads the blob.
            last_use = pos if rng.used is None else max(rng.used, pos)
            size = blob_sizes[blob] if blob_sizes else None
            assert not blob_sizes or size is not None
            blobs[blob] = rng._replace(used=last_use, size=size)
        for blob in op.output:
            rng = blobs[blob]
            # Definition point is the earliest op index that writes the blob.
            first_def = pos if rng.defined is None else min(rng.defined, pos)
            size = blob_sizes[blob] if blob_sizes else None
            assert not blob_sizes or size is not None
            blobs[blob] = rng._replace(defined=first_def, size=size)
    return blobs
def is_compatible(candidate_range, assignment, static_blobs):
    """Return True when 'candidate_range' can share memory with the group
    'assignment': its definition starts strictly after the group's last
    range is used, and the group's last blob is not static."""
    last_name, last_range = assignment[-1]
    if last_name in static_blobs:
        return False
    bounds_known = (candidate_range.defined is not None
                    and last_range.defined is not None
                    and last_range.used is not None)
    return bounds_known and candidate_range.defined > last_range.used
def compute_blob_assignments(assignments):
    """Flatten assignment groups into a {blob: canonical blob} mapping.

    Every blob in a multi-blob group maps to the group's last blob
    (including the last blob mapping to itself); singleton groups
    contribute nothing.
    """
    blob_assignments = {}
    for group in assignments:
        if len(group) == 1:
            continue
        canonical, _ = group[-1]
        blob_assignments.update((blob, canonical) for blob, _ in group)
    return blob_assignments
def _get_max_size(assignment):
if not assignment:
return 0
ret = max([x[1].size for x in assignment])
ret = 0 if ret is None else ret
return ret
def get_memory_usage(assignments):
    """Total memory footprint: each group costs its largest member's size."""
    return sum(_get_max_size(group) for group in assignments)
def compute_assignments_greedy(ranges_sorted, init_assignments=None):
    """Greedily pack blob live ranges into sharing groups.

    Each unvisited blob is appended to the compatible group whose current
    maximum size is closest to the blob's own size; when no group is
    compatible, a new group is opened. Returns the list of groups.
    """
    assignments = init_assignments or []
    visited = {name for group in assignments for (name, _) in group}
    for name, range_ in ranges_sorted:
        if name in visited:
            continue
        target_size = range_.size or 0
        best_idx = None
        best_dist = float("inf")
        # Pick the compatible group with the closest existing max size to
        # minimize wasted space in the shared buffer.
        for idx, group in enumerate(assignments):
            if not is_compatible(range_, group, []):
                continue
            dist = abs(_get_max_size(group) - target_size)
            if dist < best_dist:
                best_dist = dist
                best_idx = idx
        if best_idx is not None:
            assignments[best_idx].append((name, range_))
        else:
            assignments.append([(name, range_)])
    return assignments
def _get_count(assignments):
''' Return number of blobs in assignments '''
if assignments:
return sum([len(x) for x in assignments])
return 0
def compute_assignments_dp(ranges_sorted, init_assignment, counter=None):
    ''' Compute assignment for blobs in 'ranges_sorted' on top of 'init_assignment'
        using dynamic programming + recursion.

        ranges_sorted: blobs sorted by 'used'
        init_assignment: assignment to start with, blobs in 'ranges_sorted' should
        not be used in 'init_assignment'

        Using f(b, k, init) to represent the best assignment for blobs b[0:k]
        given initial assignment 'init', we have
            f(b, k, init) = f(b, j, init) +
                            find_best(b[j:k], f(b, j, init))
        where j is the index of the last best assignment that is independent of
        blob b[k - 1] (b[k - 1] is compatible with all assignments in
        f(b, j, init)), and find_best(b1, init1) gives the best assignment
        for blobs in 'b1' based on the initial assignment 'init1', and blobs
        b1[0:-1] should be incompatible with b1[-1]. f(b, len(b), []) gives
        the best assignment for blobs 'b'.

        For find_best(b, init), since b[0:-1] are not compatible with b[-1], we
        could reduce it to a smaller problem to find best assignment for b[0:-1]
        as
            find_best(b, init) = min {
                f(b[0:-1], len(b) - 1, init - x) + [x, b[-1]] for x in init, or
                f(b[0:-1], len(b) - 1, init) + [b[-1]]
            }
        where min{} gives the assignment with minimum memory usage.

        'counter' is a single-element list used as a mutable call counter
        shared across the recursion, purely for progress logging.
    '''

    def _get_compatible_prev(candidate_range, best_assignments, cur_idx):
        ''' Find closest position k of best_assignments that is independent of
            candidate_range that candiate_range is compatible with all assignments
            in best_assignments[k].
            Return -1 if not found.
        '''
        def is_compatible_all(candidate_range, assignments):
            ''' return true if compatiable for all assignments in assignments '''
            return all([is_compatible(candidate_range[1], x, []) for x in assignments])

        # Scan backwards from the current position for the nearest
        # fully-compatible prior solution.
        ii = cur_idx - 1
        while ii >= 0:
            cba = best_assignments[ii]
            if is_compatible_all(candidate_range, cba):
                return ii
            ii -= 1
        return -1

    def _find_best(ranges, init_assignment, prev_best_assignment, counter):
        ''' Find the best assignment for blobs 'ranges' given an initialized
            assignment 'init_assignment'.

            Blobs in ranges[0:-1] should be incompatible with blob range[-1].
            'prev_best_assignment': best assignment for blobs in ranges[:-1]

            By assigning ranges[-1] to each assignment k in 'init_assignment' or
            in a new assignment, the problem becomes a smaller problem to find
            the best assignment for ranges[0:-1] given the initial assignment
            init_assigment[0:k, (k+1):-1].
        '''
        # Blob to check
        find_range = ranges[-1]
        # Blobs in ranges[0:-1] are incompatible with ranges[-1] so that we can
        # reduce it to a smaller problem.
        assert all(not is_compatible(x[1], [find_range], []) for x in ranges[0:-1])

        sz = len(init_assignment)
        best_candidates = []
        # Try to assign 'find_range' to each assignment in init_assignment
        for ii in range(sz):
            if not is_compatible(find_range[1], init_assignment[ii], []):
                continue
            # Deep-copy so each candidate solution is independent.
            cur_best = copy.deepcopy(init_assignment)
            cur_best[ii].append(find_range)
            if len(ranges) > 1:
                cur_best_tmp = [x for i, x in enumerate(cur_best) if i != ii]
                # reduce to a smaller dp problem
                cur_best_tmp = compute_assignments_dp(
                    ranges[:-1], cur_best_tmp, counter)
                cur_best = cur_best_tmp + [cur_best[ii]]
            best_candidates.append(cur_best)
        # Try to put 'find_range' in a new assignment
        best_candidates.append(prev_best_assignment + [[find_range]])

        # Keep the candidate with the smallest total memory footprint.
        ret = min(best_candidates, key=lambda x: get_memory_usage(x))
        return ret

    if not counter:
        counter = [0]
    counter[0] += 1

    # Periodic progress logging for very large nets.
    if counter and counter[0] % 5000 == 0:
        rs = [ranges_sorted[0][1].defined, ranges_sorted[-1][1].used]
        log.info('Finding assignments {} ({} -> {})...'.format(
            counter[0], rs[0], rs[1]))

    init_assignment = init_assignment or []
    # best_assignments[k]: best assignments for first k blobs ranges_sorted[0:(k+1)]
    best_assignments = []
    # Find best assignment for blobs ranges_sorted[0:ii]
    for ii, cur_range in enumerate(ranges_sorted):
        # closest best_assignment that is independent of ranges_sorted[ii]
        prev_idx = _get_compatible_prev(cur_range, best_assignments, ii)
        prev_best = copy.deepcopy(init_assignment) if prev_idx < 0 else \
            copy.deepcopy(best_assignments[prev_idx])
        # Need to find best assignment for blobs in 'ranges_part'
        ranges_part = ranges_sorted[(prev_idx + 1):(ii + 1)]
        cur_best = _find_best(
            ranges_part, prev_best,
            best_assignments[-1] if best_assignments else init_assignment,
            counter)
        # Sanity check: no blob may be lost or duplicated by the merge.
        assert _get_count(cur_best) == _get_count(prev_best) + len(ranges_part)
        best_assignments.append(copy.deepcopy(cur_best))

    assert len(best_assignments) == len(ranges_sorted)
    best = best_assignments[-1]
    return best
def get_updated_ranges(ranges, max_live=None):
    ''' Set LiveRange.defined = -1 if it is None
        Set LiveRange.used = max_live if it is None
        Set LiveRange.size = 1 if it is None

        'ranges' is a list of (name, LiveRange) pairs; a new list is
        returned, the input is not mutated.
    '''
    def _get_max_live(ranges):
        # Use an explicit None check: the previous truthiness test
        # ('if x[1].used') silently dropped blobs last used by op 0
        # (used == 0) and raised ValueError on max() when no blob had a
        # recorded use at all.
        used_positions = [x[1].used for x in ranges if x[1].used is not None]
        return (max(used_positions) + 1) if used_positions else 0

    def _update_range(x, max_live, size):
        # Replace only the fields that are None, preserving the rest.
        cx = x
        if x[1].defined is None:
            cx = (cx[0], cx[1]._replace(defined=-1))
        if x[1].used is None:
            cx = (cx[0], cx[1]._replace(used=max_live))
        if x[1].size is None:
            cx = (cx[0], cx[1]._replace(size=size))
        return cx

    if max_live is None:
        max_live = _get_max_live(ranges)
    ranges = [_update_range(x, max_live, 1) for x in ranges]
    return ranges
def compute_assignments(ranges, static_blobs, algo):
    '''
    Compute sharing groups for the blob live ranges in 'ranges'
    (a {name: LiveRange} mapping). Blobs in 'static_blobs' are never
    shared and end up in singleton groups.

    algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
          AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
          AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
          cost of more computation.
          AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes'
          is not provided.

    Raises ValueError for an unrecognized 'algo'.
    '''
    # Sort the ranges based on when they are last used.
    # If LiveRange.used is None, then the blob is never used and could
    # be consumed externally. Sort these to the end of the list as opposed
    # to the beginning so that they can be shared as well.
    ranges = sorted(
        viewitems(ranges),
        key=lambda p: (p[1].used is None, p[1].used),
    )
    # Update None values
    ranges = get_updated_ranges(ranges)

    # Sharable blobs
    ranges_sharable = [x for x in ranges if x[0] not in static_blobs]
    # Static blobs, not sharable
    ranges_static = [x for x in ranges if x[0] in static_blobs]

    log.info("Total sharable blobs {}".format(len(ranges_sharable)))

    if algo == AssignmentAlgorithm.DYNAMIC_PROGRAMMING:
        best_assignment = compute_assignments_dp(ranges_sharable, [])
    elif algo == AssignmentAlgorithm.GREEDY:
        best_assignment = compute_assignments_greedy(ranges_sharable, [])
    else:
        # The previous code used `assert "Invalid algo name ..."`, which is
        # an assert on a non-empty string and therefore always passes —
        # an invalid algo silently produced an empty assignment. Fail loudly.
        raise ValueError("Invalid algo name {}".format(algo))
    best_assignment += [[x] for x in ranges_static]

    # verify_assignments(best_assignment)

    return best_assignment
def verify_assignments(assignments):
    """Assert that, within each group, consecutive live ranges do not
    overlap (each range ends before the next one is defined)."""
    for group in assignments:
        for prev, nxt in zip(group, group[1:]):
            assert prev[1].used < nxt[1].defined
def compute_interference_graph(ops):
    """Build a DiGraph over op indices: edge i -> j whenever op j consumes
    one of op i's outputs (for j after i). The edge attribute 'deps'
    holds the set of shared blob names. O(n^2) over the op list."""
    g = nx.DiGraph()
    for i, op in enumerate(ops):
        g.add_node(i, op=op)
    for i, parent_op in enumerate(ops):
        for j, child_op in enumerate(ops):
            # Only consider ops that appear after the producer.
            if i >= j:
                continue
            if any(output in child_op.input for output in parent_op.output):
                deps = set(child_op.input).intersection(parent_op.output)
                g.add_edge(i, j, deps=deps)
                # Edges always point forward in op order, so a cycle here
                # indicates a malformed net.
                assert nx.is_directed_acyclic_graph(g), child_op
    return g
# Result bundle of an optimization pass: the rewritten net, the sharing
# groups ('assignments'), and the blob -> canonical blob mapping.
Optimization = collections.namedtuple(
    'Optimization', ['net', 'assignments', 'blob_assignments'])
def apply_assignments(net, blob_assignments):
    """Rewrite every op's input/output blob names in 'net' (in place)
    through the {blob: canonical blob} mapping 'blob_assignments'."""
    def canonical_name(blob):
        # Blobs without an assignment keep their original name.
        return blob_assignments.get(blob, blob)

    for op in net.op:
        # Descend into subnets of the recurrent network
        if op.type.startswith('RecurrentNetwork'):
            apply_recurrent_blob_assignments(op, blob_assignments, canonical_name)

        for idx, blob in enumerate(op.input):
            op.input[idx] = canonical_name(blob)
        for idx, blob in enumerate(op.output):
            op.output[idx] = canonical_name(blob)
def apply_recurrent_blob_assignments(op, blob_assignments, canonical_name):
    """Apply blob renamings inside a RecurrentNetwork op.

    The step nets are stored serialized (text format) in the op's
    '*step_net' args, so each is parsed into a NetDef, rewritten
    recursively, and re-serialized in place. The renamings applied to
    this op's own blobs are additionally recorded as '<blob>.rename'
    args on the op.
    """
    log.debug("Applying assignments to recurrent op: {}".format(op.type))
    import google.protobuf.text_format as protobuftx
    step_args = [a for a in op.arg if a.name.endswith("step_net")]
    for step_arg in step_args:
        step_proto = caffe2_pb2.NetDef()
        protobuftx.Merge(step_arg.s.decode("ascii"), step_proto)
        apply_assignments(step_proto, blob_assignments)
        # External inputs are not rewritten by apply_assignments, so map
        # them through the same canonicalization here.
        for i, einp in enumerate(step_proto.external_input):
            if einp in blob_assignments:
                step_proto.external_input[i] = canonical_name(einp)
        # Re-serialize the rewritten step net back into the arg.
        step_arg.s = str(step_proto).encode("ascii")
    # Store renamings
    for blob, renamed in viewitems(blob_assignments):
        if blob in list(op.input) + list(op.output):
            a = caffe2_pb2.Argument()
            a.name = blob + ".rename"
            a.s = str(renamed).encode("ascii")
            op.arg.extend([a])
class AssignmentAlgorithm(enum.Enum):
    # Fast heuristic packing (see compute_assignments_greedy).
    GREEDY = 0
    # Optimal but much slower packing (see compute_assignments_dp).
    DYNAMIC_PROGRAMMING = 1
def optimize_inference_fast(net, static_blobs):
    """Memory-optimize an inference net via the C++ memonger implementation.

    'static_blobs' are never renamed or shared. Returns a new NetDef
    parsed from the optimized serialized proto; the input net is not
    modified.
    """
    optim = caffe2_pb2.NetDef()
    optim_str = C.memonger_optimize_inference_net(
        net.SerializeToString(), [str(s).encode('utf-8') for s in static_blobs]
    )
    optim.ParseFromString(optim_str)
    return optim
def optimize_interference(net, static_blobs,
                          ordering_function=topological_sort_traversal,
                          blob_sizes=None,
                          algo=AssignmentAlgorithm.GREEDY):
    """
    ordering_function: topological_sort_traversal or
                       topological_sort_traversal_longest_path.
                       topological_sort_traversal_longest_path gives better
                       results but needs a bit more computation.
    algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
          AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
          AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
          cost of more computation.
          AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes' is
          not provided.

    Overall flow:
    1) Use a BFS traversal of the execution graph to generate an
       ordering of the node executions.
    2) Generate use-def ranges for each `blob` in the BFS traversal
       order.
    3) Assign blobs to `canonical blobs`
    4) Rename blobs to canonical blobs

    Returns an Optimization(net, assignments, blob_assignments); the
    input net is not modified (a deep copy is rewritten).
    """
    net = copy.deepcopy(net)
    g = compute_interference_graph(net.op)
    ordering = ordering_function(g)
    linearized_ops = [net.op[i] for i in ordering]

    # Reorder ops in net based on the computed linearlized order.
    # If the graph has multiple topological orderings and if the NetDef's
    # ordering differs from the order used to compute ranges, then the
    # runtime might end up overwriting blobs before they are used.
    del net.op[:]
    net.op.extend(linearized_ops)

    ranges = compute_ranges(linearized_ops, blob_sizes)
    assignments = compute_assignments(ranges, static_blobs, algo)
    blob_assignments = compute_blob_assignments(assignments)
    apply_assignments(net, blob_assignments)
    return Optimization(
        net=net,
        blob_assignments=blob_assignments,
        assignments=assignments)
def verify_inplace_blobs(net_a, net_b):
    """
    Verifies that net_a and net_b have the same in-place blob assignments.
    Particularly, that memonger did not add an in-place assignment when that
    did not exist before.
    """
    def get_inplaces(op):
        # Pairs [input index, output index] for every input that is also
        # an output of the same op (i.e. computed in place).
        outputs = list(op.output)
        return [
            [in_idx, outputs.index(blob)]
            for in_idx, blob in enumerate(op.input)
            if blob in outputs
        ]

    for op_a, op_b in zip(net_a.op, net_b.op):
        if op_a.type != op_b.type:
            return False
        if get_inplaces(op_a) != get_inplaces(op_b):
            return False
    return True
def verify_graph_equality(net_a, net_b):
    """
    Determines if the execution of two graphs are identical.
    That is, all inputs blobs are mapped to the same output blobs
    for each operator in their respective positions.

    This is meant to check the output of memonger with the original graph.
    It assumes that the nets have same external input and output.

    O(E) runtime + O(1) amortized cost to hash for python dict
    """
    def parent_list(ops):
        # parents[i] lists the indices of ops whose output feeds op i,
        # using the most recent writer of each blob at that point.
        parents = [[] for _ in ops]
        edge_owner = {}
        for idx, op in enumerate(ops):
            for blob in op.input:
                owner = edge_owner.get(blob)
                if owner is not None:
                    parents[idx].append(owner)
            for blob in op.output:
                edge_owner[blob] = idx
        return parents

    # Operator wise equality checks
    if len(net_a.op) != len(net_b.op):
        return False
    for op_a, op_b in zip(net_a.op, net_b.op):
        same_kind = (op_a.type == op_b.type
                     and op_a.device_option == op_b.device_option
                     and op_a.engine == op_b.engine)
        if not same_kind:
            return False

    parent_list_a = parent_list(net_a.op)
    parent_list_b = parent_list(net_b.op)

    # Print debug info
    if parent_list_a != parent_list_b:
        for idx, (pa, pb) in enumerate(zip(parent_list_a, parent_list_b)):
            if pa != pb:
                print("Difference {} vs {} \n {}".format(
                    idx, net_a.op[idx], net_b.op[idx]))
                print("Parents: {} vs {}".format(pa, pb))

    # Net wise equality check
    return parent_list_a == parent_list_b
# Memory footprint (in bytes) before and after blob sharing.
Statistics = collections.namedtuple(
    'Statistics', ['baseline_nbytes', 'optimized_nbytes'])
def blob_nbytes(blob):
    """Return the size in bytes of 'blob' in the current workspace.

    Best-effort: blobs that cannot be fetched (not yet created, or not a
    tensor) are logged and counted as size 0.
    """
    sz = 0
    try:
        sz = workspace.FetchBlob(blob).nbytes
    except Exception:
        log.warning('Error when fetching blob {}'.format(blob))
    return sz
def compute_statistics(assignments):
    """Compare memory footprint before and after blob sharing.

    Baseline counts every blob separately; optimized counts one buffer
    per group, sized by the group's largest member.
    """
    blob_bytes = {}
    for group in assignments:
        for blob, _ in group:
            blob_bytes[blob] = blob_nbytes(blob)
    baseline_nbytes = sum(viewvalues(blob_bytes))
    optimized_nbytes = sum(
        max(blob_bytes[blob] for blob, _ in group)
        for group in assignments)
    return Statistics(
        baseline_nbytes=baseline_nbytes,
        optimized_nbytes=optimized_nbytes)
def collect_blob_sizes(net):
    """Map every blob appearing in 'net' (as input or output of any op)
    to its current size in bytes in the workspace."""
    sizes = {}
    for op in net.op:
        for blob in list(op.input) + list(op.output):
            sizes[blob] = blob_nbytes(blob)
    return sizes
|
bwasti/caffe2
|
caffe2/python/memonger.py
|
Python
|
apache-2.0
| 38,744
|
[
"VisIt"
] |
e3eb7c5fc516a419d89e28fa657611876c42838ebdf80aee4c5f7b8d8f1cad8b
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# generate_ufuncs.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
#
# Note : After editing this file and committing changes, please run
# generate_ufuncs.py and commit the changes as a separate commit with a comment
# such as : GEN: special: run generate_ufuncs.py
from __future__ import division, print_function, absolute_import
docdict = {}
def get(name):
    """Return the docstring registered under 'name' (None if missing)."""
    return docdict.get(name)
def add_newdoc(place, name, doc):
    """Register 'doc' under the fully qualified key 'place.name'."""
    docdict['.'.join((place, name))] = doc
add_newdoc("scipy.special", "_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("scipy.special", "sph_harm",
r"""
sph_harm(m, n, theta, phi)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
Returns
-------
y_mn : complex float
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
http://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("scipy.special", "_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("scipy.special", "_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("scipy.special", "_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("scipy.special", "wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
Returns
-------
omega : ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
""")
add_newdoc("scipy.special", "agm",
"""
agm(a, b)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
Returns
-------
float
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("scipy.special", "airy",
r"""
airy(z)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
Ai, Aip, Bi, Bip : ndarrays
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("scipy.special", "airye",
"""
airye(z)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
Returns
-------
eAi, eAip, eBi, eBip : array_like
Airy functions Ai and Bi, and their derivatives Aip and Bip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "bdtr",
r"""
bdtr(k, n, p)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `k` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^k {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
Returns
-------
y : ndarray
Probability of `k` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - k, k + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrc",
r"""
bdtrc(k, n, p)
Binomial distribution survival function.
Sum of the terms `k + 1` through `n` of the binomial probability density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=k+1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (int).
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
Returns
-------
y : ndarray
Probability of `k + 1` or more successes in `n` independent events
with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(k + 1, n - k).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtri",
"""
bdtri(k, n, y)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
Returns
-------
p : ndarray
The event probability such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "bdtrik",
"""
bdtrik(y, n, p)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
Returns
-------
k : ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "bdtrin",
"""
bdtrin(k, y, p)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
Returns
-------
n : ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("scipy.special", "binom",
"""
binom(n, k)
Binomial coefficient
See Also
--------
comb : The number of combinations of N things taken k at a time.
""")
add_newdoc("scipy.special", "btdtria",
r"""
btdtria(p, b, x)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
Returns
-------
a : ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "btdtrib",
r"""
btdtrib(a, p, x)
Inverse of `btdtr` with respect to `b`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `b`, returning the value of `b` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
The quantile, in [0, 1].
Returns
-------
b : ndarray
The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative density function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtria : Inverse with respect to `a`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
add_newdoc("scipy.special", "bei",
"""
bei(x)
Kelvin function bei
""")
add_newdoc("scipy.special", "beip",
"""
beip(x)
Derivative of the Kelvin function `bei`
""")
add_newdoc("scipy.special", "ber",
"""
ber(x)
Kelvin function ber.
""")
add_newdoc("scipy.special", "berp",
"""
berp(x)
Derivative of the Kelvin function `ber`
""")
add_newdoc("scipy.special", "besselpoly",
r"""
besselpoly(a, lmb, nu)
Weighted integral of a Bessel function.
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
""")
add_newdoc("scipy.special", "beta",
"""
beta(a, b)
Beta function.
::
beta(a, b) = gamma(a) * gamma(b) / gamma(a+b)
""")
add_newdoc("scipy.special", "betainc",
"""
betainc(a, b, x)
Incomplete beta integral.
Compute the incomplete beta integral of the arguments, evaluated
from zero to `x`::
gamma(a+b) / (gamma(a)*gamma(b)) * integral(t**(a-1) (1-t)**(b-1), t=0..x).
Notes
-----
The incomplete beta is also sometimes defined without the terms
in gamma, in which case the above definition is the so-called regularized
incomplete beta. Under this definition, you can get the incomplete beta by
multiplying the result of the scipy function by beta(a, b).
""")
add_newdoc("scipy.special", "betaincinv",
"""
betaincinv(a, b, y)
Inverse function to beta integral.
Compute `x` such that betainc(a, b, x) = y.
""")
add_newdoc("scipy.special", "betaln",
"""
betaln(a, b)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
""")
add_newdoc("scipy.special", "boxcox",
"""
boxcox(x, lmbda)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("scipy.special", "boxcox1p",
"""
boxcox1p(x, lmbda)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
y : array
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("scipy.special", "inv_boxcox",
"""
inv_boxcox(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
Returns
-------
x : array
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("scipy.special", "btdtr",
r"""
btdtr(a, b, x)
Cumulative density function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
Returns
-------
I : ndarray
Cumulative density function of the beta distribution with parameters
`a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "btdtri",
r"""
btdtri(a, b, p)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "cbrt",
"""
cbrt(x)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("scipy.special", "chdtr",
"""
chdtr(v, x)
Chi square cumulative distribution function
Returns the area under the left hand tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=0..x)
""")
add_newdoc("scipy.special", "chdtrc",
"""
chdtrc(v, x)
Chi square survival function
Returns the area under the right hand tail (from `x` to
infinity) of the Chi square probability density function with `v`
degrees of freedom::
1/(2**(v/2) * gamma(v/2)) * integral(t**(v/2-1) * exp(-t/2), t=x..inf)
""")
add_newdoc("scipy.special", "chdtri",
"""
chdtri(v, p)
Inverse to `chdtrc`
Returns the argument x such that ``chdtrc(v, x) == p``.
""")
add_newdoc("scipy.special", "chdtriv",
"""
chdtriv(p, x)
Inverse to `chdtr` vs `v`
Returns the argument v such that ``chdtr(v, x) == p``.
""")
add_newdoc("scipy.special", "chndtr",
"""
chndtr(x, df, nc)
Non-central chi square cumulative distribution function
""")
add_newdoc("scipy.special", "chndtrix",
"""
chndtrix(p, df, nc)
Inverse to `chndtr` vs `x`
""")
add_newdoc("scipy.special", "chndtridf",
"""
chndtridf(x, p, nc)
Inverse to `chndtr` vs `df`
""")
add_newdoc("scipy.special", "chndtrinc",
"""
chndtrinc(x, df, p)
Inverse to `chndtr` vs `nc`
""")
add_newdoc("scipy.special", "cosdg",
"""
cosdg(x)
Cosine of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "cosm1",
"""
cosm1(x)
cos(x) - 1 for use when `x` is near zero.
""")
add_newdoc("scipy.special", "cotdg",
"""
cotdg(x)
Cotangent of the angle `x` given in degrees.
""")
add_newdoc("scipy.special", "dawsn",
"""
dawsn(x)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "ellipe",
r"""
ellipe(m)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
.. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ellipeinc",
r"""
ellipeinc(phi, m)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
Returns
-------
E : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ellipj",
"""
ellipj(u, m)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
Returns
-------
sn, cn, dn, ph : ndarrays
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi, m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
See also
--------
ellipk : Complete elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkm1",
"""
ellipkm1(p)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
Returns
-------
K : ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
.. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "ellipkinc",
r"""
ellipkinc(phi, m)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called `F(phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
Returns
-------
K : ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "entr",
r"""
entr(x)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr
Notes
-----
This function is concave.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "erf",
"""
erf(z)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
Parameters
----------
x : ndarray
Input array.
Returns
-------
res : ndarray
The values of the error function at the given points `x`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] http://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfc",
"""
erfc(x)
Complementary error function, ``1 - erf(x)``.
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfi",
"""
erfi(z)
Imaginary error function, ``-i erf(i z)``.
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "erfcx",
"""
erfcx(x)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc("scipy.special", "eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
Returns
-------
P : ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi polynomial.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
q : float
Parameter
Returns
-------
G : ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
""")
add_newdoc("scipy.special", "eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
{}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
Returns
-------
C : ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
T : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
""")
add_newdoc("scipy.special", "eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
U : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
""")
add_newdoc("scipy.special", "eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
S : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
Returns
-------
C : ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
eval_chebyt : evaluate Chebyshev polynomials of the first kind
""")
add_newdoc("scipy.special", "eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
T : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
""")
# Docstring for scipy.special.eval_sh_chebyu (registered via add_newdoc).
# Fixes: U_n is the Chebyshev polynomial of the SECOND kind (the docstring
# said "first kind"), and the "Chebychev" typo in the See Also section.
add_newdoc("scipy.special", "eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
where :math:`U_n` is a Chebyshev polynomial of the second kind.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
Returns
-------
U : ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
""")
add_newdoc("scipy.special", "eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
Returns
-------
P : ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
Returns
-------
P : ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
""")
add_newdoc("scipy.special", "eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. The Laguerre polynomials are the special case where
:math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
Returns
-------
L : ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
Returns
-------
L : ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
""")
add_newdoc("scipy.special", "eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
H : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
""")
add_newdoc("scipy.special", "eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
Returns
-------
He : ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
""")
# Docstring for scipy.special.exp1 (registered via add_newdoc).
add_newdoc("scipy.special", "exp1",
"""
exp1(z)
Exponential integral E_1 of complex argument z
::
integral(exp(-z*t)/t, t=1..inf).
""")
# Docstring for scipy.special.exp10 (registered via add_newdoc).
add_newdoc("scipy.special", "exp10",
"""
exp10(x)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``10**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
# Docstring for scipy.special.exp2 (registered via add_newdoc).
add_newdoc("scipy.special", "exp2",
"""
exp2(x)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``2**x``, computed element-wise.
Examples
--------
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
# Docstring for scipy.special.expi (registered via add_newdoc).
add_newdoc("scipy.special", "expi",
"""
expi(x)
Exponential integral Ei
Defined as::
integral(exp(t)/t, t=-inf..x)
See `expn` for a different exponential integral.
""")
add_newdoc('scipy.special', 'expit',
"""
expit(x)
Expit ufunc for ndarrays.
The expit function, also known as the logistic function, is defined as
expit(x) = 1/(1+exp(-x)). It is the inverse of the logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are expit of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("scipy.special", "expm1",
"""
expm1(x)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
float
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
# Docstring for scipy.special.expn (registered via add_newdoc).
# Fixes the garbled sentence "for integer `n` and non-negative `x` and
# `n`" — `n` was mentioned twice; both arguments' constraints are now
# stated once each.
add_newdoc("scipy.special", "expn",
"""
expn(n, x)
Exponential integral E_n
Returns the exponential integral for non-negative integer `n` and
non-negative `x`::
integral(exp(-x*t) / t**n, t=1..inf).
""")
add_newdoc("scipy.special", "exprel",
r"""
exprel(x)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
Returns
-------
float
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("scipy.special", "fdtr",
r"""
fdtr(dfn, dfd, x)
F cumulative distribution function.
Returns the value of the cumulative density function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtrc",
r"""
fdtrc(dfn, dfd, x)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
Returns
-------
y : ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See also
--------
fdtr
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "fdtri",
r"""
fdtri(dfn, dfd, p)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
Returns
-------
x : ndarray
The quantile corresponding to `p`.
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
# Docstring for scipy.special.fdtridfd (registered via add_newdoc).
add_newdoc("scipy.special", "fdtridfd",
"""
fdtridfd(dfn, p, x)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
""")
# Docstring for scipy.special.fdtridfn (registered via add_newdoc).
# Fix: capitalize "Finds" to match the parallel wording of the sibling
# entry `fdtridfd`.
add_newdoc("scipy.special", "fdtridfn",
"""
fdtridfn(p, dfd, x)
Inverse to `fdtr` vs dfn
Finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
""")
# Docstring for scipy.special.fresnel (registered via add_newdoc).
add_newdoc("scipy.special", "fresnel",
"""
fresnel(z)
Fresnel sin and cos integrals
Defined as::
ssa = integral(sin(pi/2 * t**2), t=0..z)
csa = integral(cos(pi/2 * t**2), t=0..z)
Parameters
----------
z : float or complex array_like
Argument
Returns
-------
ssa, csa
Fresnel sin and cos integral values
""")
add_newdoc("scipy.special", "gamma",
r"""
gamma(z)
Gamma function.
.. math::
\Gamma(z) = \int_0^\infty x^{z-1} e^{-x} dx = (z - 1)!
The gamma function is often referred to as the generalized
factorial since ``z*gamma(z) = gamma(z+1)`` and ``gamma(n+1) =
n!`` for natural number *n*.
Parameters
----------
z : float or complex array_like
Returns
-------
float or complex
The value(s) of gamma(z)
Examples
--------
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("scipy.special", "gammainc",
r"""
gammainc(a, x)
Regularized lower incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where
`gammaincc` is the regularized upper incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
add_newdoc("scipy.special", "gammaincc",
r"""
gammaincc(a, x)
Regularized upper incomplete gamma function.
Defined as
.. math::
\frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. The function satisfies the
relation ``gammainc(a, x) + gammaincc(a, x) = 1`` where `gammainc`
is the regularized lower incomplete gamma function.
Notes
-----
The implementation largely follows that of [1]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse to ``gammainc`` versus ``x``
gammainccinv : inverse to ``gammaincc`` versus ``x``
References
----------
.. [1] Maddock et. al., "Incomplete Gamma Functions",
http://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
""")
# Docstring for scipy.special.gammainccinv (registered via add_newdoc).
add_newdoc("scipy.special", "gammainccinv",
"""
gammainccinv(a, y)
Inverse to `gammaincc`
Returns `x` such that ``gammaincc(a, x) == y``.
""")
# Docstring for scipy.special.gammaincinv (registered via add_newdoc).
# Fix: use ``==`` in the defining relation, matching the sibling entry
# `gammainccinv` (the single ``=`` read as assignment, not equality).
add_newdoc("scipy.special", "gammaincinv",
"""
gammaincinv(a, y)
Inverse to `gammainc`
Returns `x` such that ``gammainc(a, x) == y``.
""")
# Docstring for scipy.special.gammaln (registered via add_newdoc).
# Fix: add the leading signature line ``gammaln(x)`` — every other
# docstring in this file starts with one, and this entry was missing it.
add_newdoc("scipy.special", "gammaln",
"""
gammaln(x)
Logarithm of the absolute value of the Gamma function.
Parameters
----------
x : array-like
Values on the real line at which to compute ``gammaln``
Returns
-------
gammaln : ndarray
Values of ``gammaln`` at x.
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal with
complex numbers, via the relation ``exp(gammaln(x)) = gammasgn(x)*gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
""")
# Docstring for scipy.special.gammasgn (registered via add_newdoc).
add_newdoc("scipy.special", "gammasgn",
"""
gammasgn(x)
Sign of the gamma function.
See Also
--------
gammaln
loggamma
""")
add_newdoc("scipy.special", "gdtr",
r"""
gdtr(a, b, x)
Gamma distribution cumulative density function.
Returns the integral from zero to `x` of the gamma probability density
function,
.. math::
F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (upper limit of integration; float).
See also
--------
gdtrc : 1 - CDF of the gamma distribution.
Returns
-------
F : ndarray
The CDF of the gamma distribution with parameters `a` and `b`
evaluated at `x`.
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtrc",
r"""
gdtrc(a, b, x)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
Returns
-------
F : ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr, gdtri
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("scipy.special", "gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("scipy.special", "gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the p'th quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
add_newdoc("scipy.special", "hankel1",
r"""
hankel1(v, z)
Hankel function of the first kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the first kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
See also
--------
hankel1e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel1e",
r"""
hankel1e(v, z)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "hankel2",
r"""
hankel2(v, z)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
# Docstring for scipy.special.hankel2e (registered via add_newdoc).
# Fix: the second ``exp`` in the math directive lacked its backslash
# (plain "exp" renders as juxtaposed variables e*x*p in LaTeX); the
# formula is normalized to match the sibling entry `hankel2` exactly.
add_newdoc("scipy.special", "hankel2e",
r"""
hankel2e(v, z)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
out : Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "huber",
r"""
huber(delta, r)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Huber loss function values.
Notes
-----
This function is convex in r.
.. versionadded:: 0.15.0
""")
# Docstring for scipy.special.hyp0f1 (registered via add_newdoc).
# Fixes: the signature line said ``hyp0f1(v, x)`` while the Parameters
# and Notes sections use ``z`` — unified on ``z``. Also, the stated ODE
# was missing the leading z factor: term-by-term differentiation of the
# series above shows 0F1(v, z) satisfies z*f''(z) + v*f'(z) = f(z).
add_newdoc("scipy.special", "hyp0f1",
r"""
hyp0f1(v, z)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v, z : array_like
Input values.
Returns
-------
hyp0f1 : ndarray
The confluent hypergeometric limit function.
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`zf''(z) + vf'(z) = f(z)`.
""")
# Docstring for scipy.special.hyp1f1 (registered via add_newdoc).
add_newdoc("scipy.special", "hyp1f1",
"""
hyp1f1(a, b, x)
Confluent hypergeometric function 1F1(a, b; x)
""")
# Docstring for scipy.special.hyp1f2 (registered via add_newdoc).
add_newdoc("scipy.special", "hyp1f2",
"""
hyp1f2(a, b, c, x)
Hypergeometric function 1F2 and error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
# Docstring for scipy.special.hyp2f0 (registered via add_newdoc).
add_newdoc("scipy.special", "hyp2f0",
"""
hyp2f0(a, b, x, type)
Hypergeometric function 2F0 in y and an error estimate
The parameter `type` determines a convergence factor and can be
either 1 or 2.
Returns
-------
y
Value of the function
err
Error estimate
""")
# Docstring for scipy.special.hyp2f1 (registered via add_newdoc).
# Fixes: "gaussian" capitalized (proper noun, matches the summary line);
# dropped the copy-pasted sentence "When n is an integer the result is a
# polynomial of degree n" — here n is the summation index of the series,
# so the sentence (lifted from the eval_* docstrings) was meaningless;
# corrected the garbled reference authors ("Z. S. Jjie") to S. Zhang and
# J.M. Jin, "Computation of Special Functions", Wiley, 1996.
add_newdoc("scipy.special", "hyp2f1",
r"""
hyp2f1(a, b, c, z)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
Returns
-------
hyp2f1 : scalar or ndarray
The values of the Gauss hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic continuation.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`.
The implementation for complex values of ``z`` is described in [1]_.
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley, 1996.
.. [2] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [3] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/
""")
add_newdoc("scipy.special", "hyp3f0",
"""
hyp3f0(a, b, c, x)
Hypergeometric function 3F0 in y and an error estimate
Returns
-------
y
Value of the function
err
Error estimate
""")
add_newdoc("scipy.special", "hyperu",
"""
hyperu(a, b, x)
Confluent hypergeometric function U(a, b, x) of the second kind
""")
add_newdoc("scipy.special", "i0",
r"""
i0(x)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv
i0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i0e",
"""
i0e(x)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`.
See also
--------
iv
i0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1",
r"""
i1(x)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv
i1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "i1e",
"""
i1e(x)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
I : ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`.
See also
--------
iv
i1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "it2i0k0",
"""
it2i0k0(x)
Integrals related to modified Bessel functions of order 0
Returns
-------
ii0
``integral((i0(t)-1)/t, t=0..x)``
ik0
``int(k0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2j0y0",
"""
it2j0y0(x)
Integrals related to Bessel functions of order 0
Returns
-------
ij0
``integral((1-j0(t))/t, t=0..x)``
iy0
``integral(y0(t)/t, t=x..inf)``
""")
add_newdoc("scipy.special", "it2struve0",
r"""
it2struve0(x)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
Returns
-------
I : ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "itairy",
"""
itairy(x)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x: array_like
Upper limit of integration (float).
Returns
-------
Apt
Integral of Ai(t) from 0 to x.
Bpt
Integral of Bi(t) from 0 to x.
Ant
Integral of Ai(-t) from 0 to x.
Bnt
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "iti0k0",
"""
iti0k0(x)
Integrals of modified Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order modified
Bessel functions `i0` and `k0`.
Returns
-------
ii0, ik0
""")
add_newdoc("scipy.special", "itj0y0",
"""
itj0y0(x)
Integrals of Bessel functions of order 0
Returns simple integrals from 0 to `x` of the zeroth order Bessel
functions `j0` and `y0`.
Returns
-------
ij0, iy0
""")
add_newdoc("scipy.special", "itmodstruve0",
r"""
itmodstruve0(x)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "itstruve0",
r"""
itstruve0(x)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
Returns
-------
I : ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "iv",
r"""
iv(v, z)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
kve : This function with leading exponential behavior stripped off.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "ive",
r"""
ive(v, z)
Exponentially scaled modified Bessel function of the first kind
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
Returns
-------
out : ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "j0",
r"""
j0(x)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not to be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "j1",
"""
j1(x)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
J : ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not to be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "jn",
"""
jn(n, x)
Bessel function of the first kind of integer order and real argument.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jv
spherical_jn : spherical Bessel functions.
""")
add_newdoc("scipy.special", "jv",
r"""
jv(v, z)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the Bessel function, :math:`J_v(z)`.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "jve",
r"""
jve(v, z)
Exponentially scaled Bessel function of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
J : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "k0",
r"""
k0(x)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
K : ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv
k0e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k0e",
"""
k0e(x)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x).
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`.
See also
--------
kv
k0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1",
"""
k1(x)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv
k1e
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "k1e",
"""
k1e(x)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
Returns
-------
K : ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv
k1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "kei",
"""
kei(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "keip",
"""
keip(x)
Derivative of the Kelvin function kei
""")
add_newdoc("scipy.special", "kelvin",
"""
kelvin(x)
Kelvin functions as complex numbers
Returns
-------
Be, Ke, Bep, Kep
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("scipy.special", "ker",
"""
ker(x)
Kelvin function ker
""")
add_newdoc("scipy.special", "kerp",
"""
kerp(x)
Derivative of the Kelvin function ker
""")
add_newdoc("scipy.special", "kl_div",
r"""
kl_div(x, y)
Elementwise function for computing Kullback-Leibler divergence.
.. math:: \mathrm{kl\_div}(x, y) = \begin{cases} x \log(x / y) - x + y & x > 0, y > 0 \\ y & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, rel_entr
Notes
-----
This function is non-negative and is jointly convex in `x` and `y`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "kn",
r"""
kn(n, x)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
z : array_like of float
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("scipy.special", "kolmogi",
"""
kolmogi(p)
Inverse function to kolmogorov
Returns y such that ``kolmogorov(y) == p``.
""")
add_newdoc("scipy.special", "kolmogorov",
"""
kolmogorov(y)
Complementary cumulative distribution function of Kolmogorov distribution
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (Kn* for large n) of a
two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that sqrt(n) * max absolute deviation > y.
""")
add_newdoc("scipy.special", "kv",
r"""
kv(v, z)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. http://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("scipy.special", "kve",
r"""
kve(v, z)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
Returns
-------
out : ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
""")
add_newdoc("scipy.special", "_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "log1p",
"""
log1p(x)
Calculates log(1+x) for use when `x` is near zero
""")
add_newdoc("scipy.special", "_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('scipy.special', 'logit',
"""
logit(x)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
Returns
-------
out : ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("scipy.special", "lpmv",
r"""
lpmv(m, v, x)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
Returns
-------
pmv : ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
""")
add_newdoc("scipy.special", "mathieu_a",
"""
mathieu_a(m, q)
Characteristic value of even Mathieu functions
Returns the characteristic value for the even solution,
``ce_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_b",
"""
mathieu_b(m, q)
Characteristic value of odd Mathieu functions
Returns the characteristic value for the odd solution,
``se_m(z, q)``, of Mathieu's equation.
""")
add_newdoc("scipy.special", "mathieu_cem",
"""
mathieu_cem(m, q, x)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem1",
"""
mathieu_modcem1(m, q, x)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modcem2",
"""
mathieu_modcem2(m, q, x)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem1",
"""
mathieu_modsem1(m, q, x)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_modsem2",
"""
mathieu_modsem2(m, q, x)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "mathieu_sem",
"""
mathieu_sem(m, q, x)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m
Order of the function
q
Parameter of the function
x
Argument of the function, *given in degrees, not radians*.
Returns
-------
y
Value of the function
yp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "modfresnelm",
"""
modfresnelm(x)
Modified Fresnel negative integrals
Returns
-------
fm
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modfresnelp",
"""
modfresnelp(x)
Modified Fresnel positive integrals
Returns
-------
fp
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
""")
add_newdoc("scipy.special", "modstruve",
r"""
modstruve(v, x)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
L : ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "nbdtr",
r"""
nbdtr(k, n, p)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrc",
r"""
nbdtrc(k, n, p)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
F : ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtri",
"""
nbdtri(k, n, y)
Inverse of `nbdtr` vs `p`.
Returns the inverse with respect to the parameter `p` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
Returns
-------
p : ndarray
Probability of success in a single event (float) such that
`nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `nbdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "nbdtrik",
r"""
nbdtrik(y, n, p)
Inverse of `nbdtr` vs `k`.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
Returns
-------
k : ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "nbdtrin",
r"""
nbdtrin(k, y, p)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
Returns
-------
n : ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("scipy.special", "ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
Returns
-------
f : float
Quantiles, i.e. the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("scipy.special", "ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfd : float
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("scipy.special", "ncfdtridfn",
"""
ncfdtridfn(p, dfd, nc, f)
Calculate degrees of freedom (numerator) for the noncentral F-distribution.
This is the inverse with respect to `dfn` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : float
Quantiles, i.e. the upper limit of integration.
Returns
-------
dfn : float
Degrees of freedom of the numerator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfn
Compute the CDF for several values of `dfn`:
>>> dfn = [1, 2, 3]
>>> p = ncfdtr(dfn, 2, 0.25, 15)
>>> p
array([ 0.92562363, 0.93020416, 0.93188394])
Compute the inverse. We recover the values of `dfn`, as expected:
>>> ncfdtridfn(p, 2, 0.25, 15)
array([ 1., 2., 3.])
""")
add_newdoc("scipy.special", "ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
nc : float
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("scipy.special", "nctdtr",
"""
nctdtr(df, nc, t)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
Returns
-------
cdf : float or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
add_newdoc("scipy.special", "nctdtridf",
"""
nctdtridf(p, nc, t)
Calculate degrees of freedom for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrinc",
"""
nctdtrinc(df, p, t)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e. the upper limit of integration.
""")
add_newdoc("scipy.special", "nctdtrit",
"""
nctdtrit(df, nc, p)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
""")
add_newdoc("scipy.special", "ndtr",
r"""
ndtr(x)
Gaussian cumulative distribution function.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
log_ndtr
""")
add_newdoc("scipy.special", "nrdtrimn",
"""
nrdtrimn(p, x, std)
Calculate mean of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
std : array_like
Standard deviation.
Returns
-------
mn : float or ndarray
The mean of the normal distribution.
See Also
--------
nrdtrisd, ndtr
""")
add_newdoc("scipy.special", "nrdtrisd",
"""
nrdtrisd(p, x, mn)
Calculate standard deviation of normal distribution given other params.
Parameters
----------
p : array_like
CDF values, in range (0, 1].
x : array_like
Quantiles, i.e. the upper limit of integration.
mn : float or ndarray
The mean of the normal distribution.
Returns
-------
std : array_like
Standard deviation.
See Also
--------
nrdtrimn, ndtr
""")
add_newdoc("scipy.special", "log_ndtr",
"""
log_ndtr(x)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
Returns
-------
ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
""")
add_newdoc("scipy.special", "ndtri",
"""
ndtri(y)
Inverse of `ndtr` vs x
Returns the argument x for which the area under the Gaussian
probability density function (integrated from minus infinity to `x`)
is equal to y.
""")
add_newdoc("scipy.special", "obl_ang1",
"""
obl_ang1(m, n, c, x)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_cv",
"""
obl_cv(m, n, c)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "obl_rad1",
"""
obl_rad1(m, n, c, x)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2",
"""
obl_rad2(m, n, c, x)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbdv",
"""
pbdv(v, x)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Returns
-------
d
Value of the function
dp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbvv",
"""
pbvv(v, x)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Returns
-------
v
Value of the function
vp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pbwa",
r"""
pbwa(a, x)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 12.14.
http://dlmf.nist.gov/12.14
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("scipy.special", "pdtr",
"""
pdtr(k, m)
Poisson cumulative distribution function
Returns the sum of the first `k` terms of the Poisson distribution:
sum(exp(-m) * m**j / j!, j=0..k) = gammaincc( k+1, m). Arguments
must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtrc",
"""
pdtrc(k, m)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be positive and `k` an integer.
""")
add_newdoc("scipy.special", "pdtri",
"""
pdtri(k, y)
Inverse to `pdtr` vs m
Returns the Poisson variable `m` such that the sum from 0 to `k` of
the Poisson density is equal to the given probability `y`:
calculated by gammaincinv(k+1, y). `k` must be a nonnegative
integer and `y` between 0 and 1.
""")
add_newdoc("scipy.special", "pdtrik",
"""
pdtrik(p, m)
Inverse to `pdtr` vs k
Returns the quantile k such that ``pdtr(k, m) = p``
""")
add_newdoc("scipy.special", "poch",
r"""
poch(z, m)
Rising factorial (z)_m
The Pochhammer symbol (rising factorial), is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
Parameters
----------
z : array_like
(int or float)
m : array_like
(int or float)
Returns
-------
poch : ndarray
The value of the function.
""")
add_newdoc("scipy.special", "pro_ang1",
"""
pro_ang1(m, n, c, x)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_cv",
"""
pro_cv(m, n, c)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
""")
add_newdoc("scipy.special", "pro_rad1",
"""
pro_rad1(m, n, c, x)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2",
"""
pro_rad2(m, n, c, x)
Prolate spheroidal radial function of the second kind and its derivative
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Returns
-------
s
Value of the function
sp
Value of the derivative vs x
""")
add_newdoc("scipy.special", "pseudo_huber",
r"""
pseudo_huber(delta, r)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : ndarray
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
Returns
-------
res : ndarray
The computed Pseudo-Huber loss function values.
Notes
-----
This function is convex in :math:`r`.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "radian",
"""
radian(d, m, s)
Convert from degrees to radians
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
""")
add_newdoc("scipy.special", "rel_entr",
r"""
rel_entr(x, y)
Elementwise function for computing relative entropy.
.. math:: \mathrm{rel\_entr}(x, y) = \begin{cases} x \log(x / y) & x > 0, y > 0 \\ 0 & x = 0, y \ge 0 \\ \infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
First input array.
y : ndarray
Second input array.
Returns
-------
res : ndarray
Output array.
See Also
--------
entr, kl_div
Notes
-----
This function is jointly convex in x and y.
.. versionadded:: 0.15.0
""")
add_newdoc("scipy.special", "rgamma",
"""
rgamma(z)
Gamma function inverted
Returns ``1/gamma(z)``
""")
add_newdoc("scipy.special", "round",
"""
round(x)
Round to nearest integer
Returns the nearest integer to `x` as a double precision floating
point result. If `x` ends in 0.5 exactly, the nearest even integer
is chosen.
""")
add_newdoc("scipy.special", "shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
Returns
-------
si : ndarray
Hyperbolic sine integral at ``x``
ci : ndarray
Hyperbolic cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *shi* and *chi* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
Returns
-------
si : ndarray
Sine integral at ``x``
ci : ndarray
Cosine integral at ``x``
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[1]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [2]_ *si* and *ci* routines.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
""")
add_newdoc("scipy.special", "sindg",
"""
sindg(x)
Sine of angle given in degrees
""")
add_newdoc("scipy.special", "smirnov",
"""
smirnov(n, e)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
distribution function (Dn+ or Dn-) for a one-sided test of
equality between an empirical and a theoretical distribution. It
is equal to the probability that the maximum difference between a
theoretical distribution and an empirical one based on `n` samples
is greater than e.
""")
add_newdoc("scipy.special", "smirnovi",
"""
smirnovi(n, y)
Inverse to `smirnov`
Returns ``e`` such that ``smirnov(n, e) = y``.
""")
add_newdoc("scipy.special", "spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_0^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
Returns
-------
s : ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
""")
add_newdoc("scipy.special", "stdtr",
"""
stdtr(df, t)
Student t distribution cumulative distribution function
Returns the integral from minus infinity to t of the Student t
distribution with df > 0 degrees of freedom::
gamma((df+1)/2)/(sqrt(df*pi)*gamma(df/2)) *
integral((1+x**2/df)**(-df/2-1/2), x=-inf..t)
""")
add_newdoc("scipy.special", "stdtridf",
"""
stdtridf(p, t)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "stdtrit",
"""
stdtrit(df, p)
Inverse of `stdtr` vs `t`
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
""")
add_newdoc("scipy.special", "struve",
r"""
struve(v, x)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
Returns
-------
H : ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
modstruve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/11
""")
add_newdoc("scipy.special", "tandg",
"""
tandg(x)
Tangent of angle x given in degrees.
""")
add_newdoc("scipy.special", "tklmbda",
"""
tklmbda(x, lmbda)
Tukey-Lambda cumulative distribution function
""")
add_newdoc("scipy.special", "wofz",
"""
wofz(z)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
""")
add_newdoc("scipy.special", "xlogy",
"""
xlogy(x, y)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "xlog1py",
"""
xlog1py(x, y)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
Returns
-------
z : array_like
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
""")
add_newdoc("scipy.special", "y0",
r"""
y0(x)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
See also
--------
j0
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "y1",
"""
y1(x)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
See also
--------
j1
yn
yv
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yn",
r"""
yn(n, x)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
z : array_like
Argument (float).
Returns
-------
Y : ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
See also
--------
yv : For real order and real or complex argument.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/index.html
""")
add_newdoc("scipy.special", "yv",
r"""
yv(v, z)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
See also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "yve",
r"""
yve(v, z)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
Returns
-------
Y : ndarray
Value of the exponentially scaled Bessel function.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("scipy.special", "_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("scipy.special", "zetac",
"""
zetac(x)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``.
See Also
--------
zeta
""")
add_newdoc("scipy.special", "_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("scipy.special", "_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("scipy.special", "_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("scipy.special", "_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("scipy.special", "loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the Gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array-like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining ``loggamma`` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas ``loggamma`` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make ``loggamma`` useful for working in complex logspace. However,
``loggamma`` necessarily returns complex outputs for real inputs,
so if you want to work only with real numbers use `gammaln`. On
the real line the two functions are related by ``exp(loggamma(x))
= gammasgn(x)*exp(gammaln(x))``, though in practice rounding
errors will introduce small spurious imaginary components in
``exp(loggamma(x))``.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the Gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("scipy.special", "_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("scipy.special", "_cospi",
"""
Internal function, do not use.
""")
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/special/add_newdocs.py
|
Python
|
mit
| 175,349
|
[
"Gaussian"
] |
fbce9e529ef258b59a986926491af8e1f11171c7c5fd8c670738d794b9a8b73b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Localization Compiler
#
import os, sys, codecs, shutil
from xml.dom.minidom import parse
template_dir = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
sys.path.append(os.path.join(template_dir,'../'))
from tiapp import *
ignoreFiles = ['.gitignore', '.cvsignore']
ignoreDirs = ['.git','.svn', 'CVS']
class LocaleCompiler(object):
def __init__(self,name,dir,platform,mode='simulator',outdir=None):
self.dir = os.path.join(dir,'i18n')
self.platform = platform
self.name = name
self.mode = mode
self.outdir = outdir
self.iphone_dir = os.path.join(dir,'build','iphone','build')
self.android_dir = os.path.join(dir,'build','android','res')
if self.outdir!=None:
self.android_dir = self.outdir
def get_locale(self,file):
return os.path.basename(os.path.dirname(file))
def get_ios_dir(self):
if self.outdir!=None: return self.outdir
if self.mode == 'development': # simulator
return os.path.join(self.iphone_dir,'Debug-iphonesimulator','%s.app' % self.name)
elif self.mode == 'test': # adhoc install
return os.path.join(self.iphone_dir,'Debug-iphoneos','%s.app' % self.name)
else: # distribution
return os.path.join(self.iphone_dir,'Release-iphoneos','%s.app' % self.name)
def getText(self,nodelist):
rc = u""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
def isApp(self,file):
return (os.path.basename(file) == "app.xml")
def isStrings(self,file):
return (os.path.basename(file) == "strings.xml")
def localization_file_name_ios(self,file):
if self.isApp(file):
return "InfoPlist.strings"
return "Localizable.strings"
def compile_for_ios(self,file):
locale = self.get_locale(file)
build_dir = self.get_ios_dir()
lproj_dir = os.path.join(build_dir,'%s.lproj' % locale)
if not os.path.exists(lproj_dir): os.makedirs(lproj_dir)
locale_file = os.path.join(lproj_dir,self.localization_file_name_ios(file))
f = codecs.open(locale_file,'w','utf-16')
f.write(u'/**\n * Appcelerator Titanium\n * this is a generated file - DO NOT EDIT\n */\n\n')
dom = parse(file)
appkeys = { 'appname' : 'CFBundleDisplayName' }
for node in dom.documentElement.childNodes:
if node.nodeType != 1: continue
name = node.attributes['name'].nodeValue
if self.isApp(file):
name = appkeys[name]
if name is None:
pass
value = self.getText(node.childNodes)
# TODO: translate any more symbols?
value = value.replace("%s",'%@')
f.write(u'"%s" = "%s";\n' % (name,value))
f.close()
if self.mode!='development': #only compile if not simulator
os.system("/usr/bin/plutil -convert binary1 \"%s\"" % locale_file)
print "[DEBUG] compiled ios file: %s" % locale_file
def compile_for_android(self,file):
#TODO: Add android support for app.xml
if not self.isStrings(file):
return
locale = self.get_locale(file)
# for andoird, we can simply copy into the right directory
if locale == 'en' or locale.lower() == 'en-us':
dir = os.path.join(self.android_dir,'values')
else:
if len(locale) == 5 and locale[2] == '-':
# Android en-US -> en-rUS (need the r)
locale = locale[0:3] + 'r' + locale[-2:]
dir = os.path.join(self.android_dir,'values-%s' % locale)
to_ = os.path.join(dir,'strings.xml')
if not os.path.exists(dir):
os.makedirs(dir)
shutil.copy(file, to_)
#
# Merge strings.xml from /i18n/ and build/android/res/values/
# (TIMOB-12663)
#
elif os.path.isfile(to_):
sfile = open(file, 'r')
dfile = open(to_, 'r')
scontent = sfile.read()
dcontent = dfile.read()
sfile.close()
dfile.close()
sindex = scontent.find('</resources>')
dindex = dcontent.find('<resources>') + 11
content_to_write = scontent[:sindex] + dcontent[dindex:]
wfile = open(to_, 'w')
wfile.write(content_to_write)
wfile.close()
else:
shutil.copy(file, to_)
print "[DEBUG] compiled android file: %s" % file
def compile(self):
if not os.path.exists(self.dir): return
print "[INFO] Compiling localization files"
sys.stdout.flush()
for dirname,dirs,files in os.walk(self.dir):
for name in ignoreDirs:
if name in dirs:
dirs.remove(name) # don't visit ignored directories
for f in files:
if f in ignoreFiles: continue
if not f.endswith('.xml'): continue
file = os.path.join(dirname,f)
if self.platform == 'ios' or self.platform == 'iphone' or self.platform == 'ipad' or self.platform == 'universal':
self.compile_for_ios(file)
elif self.platform == 'android':
self.compile_for_android(file)
elif self.platform == 'blackberry':
# TODO
pass
if __name__ == "__main__":
    # CLI entry point: localecompiler.py <project_dir> <platform> [mode] [outdir]
    if len(sys.argv)==1 or len(sys.argv) < 3:  # NOTE(review): first test is redundant (implied by < 3)
        print "Appcelerator Locale Compiler"
        print "Usage: %s <project_dir> <platform> [mode] [outdir]" % os.path.basename(sys.argv[0])
        sys.exit(1)
    path = os.path.expanduser(sys.argv[1])
    if not os.path.exists(path):
        print "Project directory not found: %s" % path
        sys.exit(1)
    # Sanity checks: a valid Titanium project has tiapp.xml and a Resources dir.
    tiapp_xml_path = os.path.join(path,'tiapp.xml')
    if not os.path.exists(tiapp_xml_path):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)
    resources_dir = os.path.join(path,'Resources')
    if not os.path.exists(resources_dir):
        print "Project directory doesn't look like a valid Titanium project: %s" % path
        sys.exit(1)
    platform = sys.argv[2]
    # Read the app name from the project descriptor.
    tiapp = TiAppXML(tiapp_xml_path)
    app_name = tiapp.properties['name']
    # Optional trailing args: build mode (default 'simulator') and output dir.
    mode = 'simulator'
    outdir = None
    if len(sys.argv) > 3:
        mode = sys.argv[3]
    if len(sys.argv) > 4:
        outdir = os.path.expanduser(sys.argv[4])
    c = LocaleCompiler(app_name,path,platform,mode,outdir)
    c.compile()
|
hieupham007/Titanium_Mobile
|
support/common/localecompiler.py
|
Python
|
apache-2.0
| 5,732
|
[
"VisIt"
] |
3f957950583b5cbfa86eb5711e623a88ce7ac8dda720474844134e7bbd720066
|
#!/usr/bin/env ipython
"""
Analisis of sheath-from-icme for Auger Low Energy data
IMPORTANT:
- Note that 'structure' argument refers to MC, sheath, ICME,
sheath-of-icme, taking the following possible values:
'i' : ICME
'mc' : MC
'sh.i' : sheath of ICME
'sh.mc' : sheath of MC,
and 'events_mgr.filter_events()' uses this flag to know which
average values it will use to filter events.
"""
from pylab import *
from numpy import *
from scipy.io.netcdf import netcdf_file
from datetime import datetime, time, timedelta
#------------ shared libraries:
"""
--- antes de modificar cosas, tener en cuenta los bugs en:
'../../shared_lib/COMENTARIOS.txt'
"""
import sys
sys.path.append('../../shared_lib')
from shared_funcs import * #c_funcs import *
#------------------------------
#from read_NewTable import tshck, tini_icme, tend_icme, tini_mc, tend_mc, n_icmes, MCsig
from ShiftTimes import *
import numpy as np
from z_expansion_gulisano import z as z_exp
import console_colors as ccl
import read_NewTable as tb
from os.path import isfile, isdir
class boundaries:
    """Bare attribute container for structure time boundaries.

    Callers attach the actual boundaries after construction
    (e.g. ``bounds.tini = ...``, ``bounds.tend = ...``).
    """
    def __init__(self):
        # BUGFIX: the original assigned a dead local (`name = 'name'`);
        # expose it as an instance attribute instead.
        self.name = 'name'
# --- environment-dependent input locations (fails with KeyError if unset)
HOME = os.environ['HOME']
PAO = os.environ['PAO']
PAO_PROCESS = os.environ['PAO_PROCESS']
gral = general()
day = 86400.  # seconds per day

#---- input settings
gral.fnames = fnames = {}
# DATASETS
fnames['ACE'] = '%s/data_ace/64sec_mag-swepam/ace.1998-2015.nc' % HOME
fnames['McMurdo'] = '%s/actividad_solar/neutron_monitors/mcmurdo/mcmurdo_utc_correg.dat' % HOME
fnames['Auger_scals'] = '%s/data_auger/estudios_AoP/data/unir_con_presion/data_final_2006-2013.h5' % PAO
fnames['Auger_BandMuons'] = '%s/data_auger/data_histogramas/all.array.avrs/temp.corrected/shape.ok_and_3pmt.ok/15min/test_temp.corrected.nc' % PAO
fnames['Auger_BandMuons_avrs'] = '%s/long_trends/code_figs/avr_histos_press_shape.ok_and_3pmt.ok.txt' % PAO_PROCESS # average histogram
fnames['Auger_BandScals'] = fnames['Auger_BandMuons']
fnames['Auger_BandScals_avrs'] = fnames['Auger_BandMuons_avrs']
fnames['table_richardson'] = '%s/ASOC_ICME-FD/icmes_richardson/data/rich_events_ace.nc' % HOME
# fail early if any input file is missing
for name in fnames.keys():
    assert isfile(fnames[name]),\
    " --> NO EXISTE: " + fnames[name]

#---- output directories
gral.dirs = dirs = {}
dirs['dir_plots'] = '../plots'
dirs['dir_ascii'] = '../ascii'
dirs['suffix'] = '_auger_'  # suffix for the directory where these figures are stored
#-------------------------------------------------------------
#------- select MCs by their catalogue label (lepping=2, etc.)
MCwant = {'flags': ('0', '1', '2', '2H'),
          'alias': '0.1.2.2H'}  # used to tag the figure name/path
#MCwant = {'flags': ('1', '2', '2H'),
#          'alias': '1.2.2H'}  # used to tag the figure name/path
#MCwant = {'flags': ('2', '2H'),
#          'alias': '2.2H'}  # used to tag the figure name/path
#MCwant = {'flags': ('2',),
#          'alias': '2'}  # used to tag the figure name/path

# Event-filter switches (consumed by events_mgr.filter_events()).
FILTER = {}
FILTER['Mcmultiple'] = False  # True to include multi-MC events
FILTER['CorrShift'] = False #True
FILTER['wang'] = False #False #True
FILTER['vsw_filter'] = False #True
FILTER['z_filter_on'] = False
FILTER['MCwant'] = MCwant
FILTER['B_filter'] = False
FILTER['filter_dR.icme'] = False #True
FILTER['choose_1998-2006'] = False  # False: do NOT exclude the 1998-2006 period

# Numeric thresholds for the filters above.
CUTS = {}
CUTS['ThetaThres'] = 90.0  # all events with theta>ThetaThres
CUTS['dTday'] = 0.0
CUTS['v_lo'] = 550.0
CUTS['v_hi'] = 3000.0
CUTS['z_lo'] = -50.0
CUTS['z_hi'] = 0.65

# Superposed-epoch binning layout.
nBin = {}
nBin['before'] = 2
nBin['after'] = 4
nBin['bins_per_utime'] = 50  # bins per unit of time
nBin['total'] = (1+nBin['before']+nBin['after'])*nBin['bins_per_utime']
fgap = 0.2  # maximum fraction of data gaps tolerated per bin — TODO confirm

#--- structure boundaries
# Sheath-from-ICME analysis for Auger: from shock arrival to ICME start.
bounds = boundaries()
bounds.tini = tb.tshck #tb.tini_mc #tb.tini_mc #tb.tshck
bounds.tend = tb.tini_icme #tb.tend_mc #tb.tend_mc #tb.tini_mc
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'Auger_scals'#scals' #'McMurdo' #'ACE'
FILTER['vsw_filter'] = False
emgr = events_mgr(gral, FILTER, CUTS, bounds, nBin, fgap, tb, z_exp, structure='i')
emgr.run_all()
emgr.lock_IDs()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'Auger_BandMuons' #'McMurdo' #'ACE'
emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
gral.data_name = 'Auger_BandScals' #'McMurdo' #'ACE'
emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# NOTE(review): the next two runs set emgr.data_name, while the earlier
# ones set gral.data_name — verify which attribute events_mgr actually
# reads; this inconsistency looks unintentional.
emgr.data_name = 'ACE' #'Auger' #'McMurdo'
emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
emgr.data_name = 'McMurdo' #'Auger' #'McMurdo'
emgr.run_all()
#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
##
|
jimsrc/seatos
|
sheaths.icmes/src/sea_global.py
|
Python
|
mit
| 5,255
|
[
"NetCDF"
] |
0db9f055fa00ab77edb434ceab2d9b115fc706e99e2562b947be65ae608e04a3
|
import numpy as np
import theano
import theano.tensor as T
from smartlearner.tasks import stopping_criteria
from smartlearner.utils import sharedX
from smartlearner import Trainer
from smartlearner.optimizers import SGD, AdaGrad
from smartlearner.tasks import tasks
from numpy.testing import assert_array_almost_equal
from smartlearner.testing import DummyLoss, DummyBatchScheduler
floatX = theano.config.floatX
class DummyLossWithGradient(DummyLoss):
    """Dummy loss that exposes the symbolic gradient of `cost` w.r.t. `param`."""

    def __init__(self, cost, param):
        super().__init__()
        self.cost = cost
        self.param = param

    def _get_gradients(self):
        # Single-entry mapping: parameter -> its Theano gradient expression.
        return {self.param: T.grad(cost=self.cost, wrt=self.param)}
def test_sgd():
    """SGD should minimize simple quadratic (Gaussian-shaped) costs."""
    # Create simple Nd gaussian functions to optimize. These functions are
    # (perfectly) well-conditioned so it should take only one gradient step
    # to converge using 1/L, where L is the largest eigenvalue of the hessian.
    max_epoch = 2
    for N in range(1, 5):
        center = np.arange(1, N+1)[None, :].astype(floatX)
        param = sharedX(np.zeros((1, N)))
        cost = T.sum(0.5*T.dot(T.dot((param-center), T.eye(N)), (param-center).T))
        loss = DummyLossWithGradient(cost, param)
        trainer = Trainer(SGD(loss), DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        # Monitor the gradient of `loss` w.r.t. to `param`.
        gparam = tasks.MonitorVariable(loss.gradients[param])
        trainer.append_task(gparam)
        trainer.train()
        # Since the problem is well-conditioned and we use an optimal gradient step 1/L,
        # two epochs should be enough for `param` to be around `center` and the gradients near 0.
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(gparam.value, 0.)

    # Create an Nd gaussian function to optimize. This function is not
    # well-conditioned and there exists no perfect gradient step to converge in
    # only one iteration.
    #cost = T.sum(N*0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), ((param-center).T)))
    max_epoch = 80
    N = 4
    center = 5*np.ones((1, N)).astype(floatX)
    param = sharedX(np.zeros((1, N)))
    cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), (param-center).T))
    loss = DummyLossWithGradient(cost, param)
    trainer = Trainer(SGD(loss), DummyBatchScheduler())
    trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
    #trainer.append_task(tasks.PrintVariable("Loss param : {}", param))
    #trainer.append_task(tasks.PrintVariable("Loss gradient: {}", loss.gradients[param]))
    # Monitor the gradient of `loss` w.r.t. to `param`.
    gparam = tasks.MonitorVariable(loss.gradients[param])
    trainer.append_task(gparam)
    trainer.train()
    # This problem is ill-conditioned, so many epochs (max_epoch=80) are needed
    # for `param` to reach `center` and the gradients to vanish.
    # (The original comment here was a stale copy-paste from the
    # well-conditioned case above.)
    assert_array_almost_equal(param.get_value(), center, decimal=6)
    assert_array_almost_equal(gparam.value, 0.)
def test_adagrad():
    """AdaGrad should converge on ill-conditioned quadratics even with a huge base lr."""
    max_epoch = 15
    # Create an Nd gaussian functions to optimize. These functions are not
    # well-conditioned and there exists no perfect gradient step to converge in
    # only one iteration.
    for N in range(1, 5):
        center = 5*np.ones((1, N)).astype(floatX)
        param = sharedX(np.zeros((1, N)))
        cost = T.sum(0.5*T.dot(T.dot((param-center), np.diag(1./np.arange(1, N+1))), ((param-center).T)))
        loss = DummyLossWithGradient(cost, param)
        # Even with a really high gradient step, AdaGrad can still converge.
        # Actually, it is faster than using the optimal gradient step with SGD.
        optimizer = AdaGrad(loss, lr=100, eps=1e-1)
        trainer = Trainer(optimizer, DummyBatchScheduler())
        trainer.append_task(stopping_criteria.MaxEpochStopping(max_epoch))
        #trainer.append_task(tasks.PrintVariable("Loss param : {}", param))
        #trainer.append_task(tasks.PrintVariable("Loss gradient: {}", loss.gradients[param]))
        # Monitor the gradient of `loss` w.r.t. to `param`.
        gparam = tasks.MonitorVariable(loss.gradients[param])
        trainer.append_task(gparam)
        trainer.train()
        # After max_epoch (15) epochs, param should be around the center and
        # gradients near 0. (The original comment said 30 epochs — stale.)
        assert_array_almost_equal(param.get_value(), center)
        assert_array_almost_equal(gparam.value, 0.)
|
havaeimo/smartlearner
|
smartlearner/optimizers/tests/tests_optimizers.py
|
Python
|
bsd-3-clause
| 4,539
|
[
"Gaussian"
] |
2d6cee43446292e3a36a025d1b398b438bc92683123bc1fbc12aaaa97a8f1dbe
|
# -*- coding: utf-8 -*-
"""
NACO AGPM/Saturated PSF real-time statistics module.
The main program here is run_and_process. Everything else is defined so the
code is easier to follow. It is intended to be run on the offline machine at
Paranal, and monitors a folder for incoming data.
You should exit and relaunch the program when you change stars, so that the plots
are all reset.
Unfortunately it will idle until it finds a sky frame, since it can't find the
peak or estimate the background level without a sky. For non-AGPM frames, it will
wait until it has at least 2 frames.
It will ignore flux frames if their exposure times are NOT between 0.1-0.5s
Known bugs:
- If you try to exit while matplotlib is thinking, it won't exit properly and you may
have to close the terminal window. The only way to fix this is to change the plots
to an interactive GUI, which I might do later. For now, I've intentionally added
a 2s pause to reduce the chances of this happening.
An example of how to run it from the directory you put the code:
import monitor
monitor.run_and_process(folder='/path/to/the/data/')
"""
#Ideas:
# - Just plot last ~10 cubes? No, it is more useful to show everything. Can always
# rerun the program for every new target.
# - Organise data by target and then plot all data for a certain target?
# - Plot the standard deviation of the flux in the donut (for the agpm)?
# - Plot the agpm centering?
#
#Problems:
# - Peak stellar flux + background doesnt work with dithered data.
# - Infinite loops don't play nicely with matplotlib. Sometimes ctrl+c doesn't work.
# Very hard to reproduce, but might be related to exception generated during plt.pause
#
#Solved Problems:
# - SOLVED: Need to do sky subtraction to check peak flux, since the centre of the agpm
# can be saturated while the actual light that we care about is not.
# - SOLVED: Infinite loops don't work with the default MacOSX backend for matplotlib.
# Have to use (e.g.) ipython --matplotlib='tk'
# Before we start, change the backend to Qt4Agg, since the MacOSX default doesnt work.
# The combination of the infinite loop and the plt.tight_layout() call (as well as the
# plt.show() and plt.pause() calls) causes problems with the macosx backend
import matplotlib as mpl
# mpl.use('TkAgg') # if this doesn't work, try the next line instead
#mpl.use('QT4Agg')
import numpy as np
import matplotlib.pyplot as plt
import glob,time,datetime
import astropy.io.fits as pyfits
from astropy.time import Time
from matplotlib.dates import DateFormatter
import scipy.signal as signal
#from Tkinter import *
#import tkMessageBox
#import pdb
plt.interactive(True)

# Detector-limit constants (counts), gathered here so they are easy to retune.
# Superseded historical values are kept commented for reference:
#nonlinear_limit=18000.
#saturate_limit=22000.
#minimum_flux=-7000. # The actual "zero" point
#nonlinear_limit=11000. # Dec 2015
nonlinear_limit = 9500. # Dec 2016
saturate_limit=15000.
minimum_flux=0. # This should reduce some confusion
nexpo_limit=2 # If nexpo > 2, this indicates that it is a target observation. otherwise, sky
obstime_limits=[0.1,0.5] # all target/sky observations have exp times in this range. Anything outside is a flux frame.
smooth_dist=4 # FWHM of gaussian used to smooth the images before measuring the
# background and finding the centre
def detect_filetype(hdr,get_folder_string=False):
    ''' Works out what kind of file it is based on the header.

    Parameters
    ----------
    hdr : FITS header (dict-like)
        Header of the frame to classify.
    get_folder_string : bool
        If True, also return the destination subfolder name.

    Returns
    -------
    obstype : str
        One of 'AstCal', 'Target_AGPM', 'Target_saturated', 'Sky', 'Flux',
        'Flat', 'Std', 'Dark', 'Acq', 'Unknown'.
    folder_string : str, optional
        Only returned when get_folder_string is True.

    This function is necessarily complicated and hard to read, since there
    are so many cases it has to cover.'''
    type_flag=hdr['HIERARCH ESO DPR TYPE']
    # type_cat = hdr['HIERARCH ESO DPR CATG']
    expt=hdr['EXPTIME'] # exposure time.
    agpm=hdr['HIERARCH ESO INS OPTI1 ID'] # this is AGPM if it is used
    date = Time(hdr['DATE-OBS']) # date of exposure
    try:
        targ_name=hdr['HIERARCH ESO OBS NAME']
    except KeyError:  # narrowed from a bare except: only a missing keyword is expected
        targ_name='NoName'
    naxis=hdr['NAXIS']
    naxis1=hdr['NAXIS1']
    try:
        nexpo=hdr['HIERARCH ESO SEQ NEXPO']
    except KeyError:  # narrowed from a bare except: only a missing keyword is expected
        nexpo=0
    # Now format all of these strings
    if 'astcal' in targ_name.lower():
        # Astrometric calibrators are an annoying case that we have to deal with first
        # For now, assume they have "AstCal" in their target names
        obstype='AstCal'
        folder_string='AstCal'
    elif type_flag=='OBJECT':
        # We need to work out which of the "OBJECT" frames are skies, flux
        # frames and actual target observations.
        #
        # For the AGPM skies, we can use the number of exposures. Skies are a single cube for the AGPM.
        # There are no separate skies for non-AGPM observations, so label them all as Targ.
        #
        # For the flux, the only way to guess is the exposure time (or possibly the ND?)
        # Handle Ks data first since the rules are different
        if hdr['HIERARCH ESO INS OPTI6 ID'] == 'Ks':
            if hdr['HIERARCH ESO INS OPTI3 ID'] == 'Full':
                obstype='Target_saturated'
                folder_string = 'Targ'
            else:
                obstype='Flux'
                folder_string = 'Flux'
        # Handle the AGPM and non-AGPM cases differently
        elif agpm=='AGPM':
            # In old data, sky frames had TYPE = "OBJECT" and NEXP = 1
            # Until October 2017, sky frames had TYPE = "SKY" and NEXP >1
            # Since October 2017, sky frames have TYPE = "SKY" and some targ frames have NEXP=1
            # So we need to put date-dependent logic in here since this function
            # is also used in the data handling pipeline.
            if (nexpo > nexpo_limit) or ((date >Time('2017-10-01')) and (naxis1 > 300)):
                obstype='Target_AGPM'
                folder_string='Targ'
            elif (expt < obstime_limits[1]) and (expt > obstime_limits[0]) and (naxis1 >512):
                obstype='Sky'
                folder_string='Sky'
            else:
                obstype='Flux'
                folder_string='Flux'
        else:
            if ((expt < obstime_limits[1]) and (expt > obstime_limits[0])):
                obstype='Target_saturated'
                folder_string='Targ'
            else:
                # This is a special case for M band observations, which need to have very short exposure times for target and flux
                if hdr['ESO INS OPTI6 ID'] == 'M_prime':
                    obstype='Target_saturated'
                    folder_string='Targ'
                else:
                    obstype='Flux'
                    folder_string='Flux'
    elif type_flag=='SKY':
        obstype='Sky'
        folder_string='Sky'
    elif 'FLAT' in type_flag:
        obstype='Flat'
        folder_string='Flats'
    elif type_flag.lower()=='psf-calibrator':
        # BUGFIX: the original wrote `type_flag.lower=='psf-calibrator'`,
        # comparing the *bound method object* to a string — always False,
        # so this branch was unreachable.
        obstype='Flux'
        folder_string='Flux'
    # We don't actually use any of the following types, but I thought we might as well
    # put them somewhere
    elif type_flag=='STD':
        obstype='Std'
        folder_string='STD'
    elif 'DARK' in type_flag:
        obstype='Dark'
        folder_string='Dark'
    else:
        # Put all of the unknown file types into a single folder to make it easy
        print('Unrecognised DPR type:'+type_flag)
        obstype='Unknown'
        folder_string='Uncategorized'
    # But if it has NAXIS3=0, it is really an acquisition!
    if naxis==2 and (obstype != 'Flat') and (obstype !='Dark'):
        folder_string='Acq_'+folder_string
        obstype='Acq'
    if get_folder_string:
        return obstype,folder_string
    else:
        return obstype
###################
###################
def diagnostic_plots(axes,capture_time,peakcounts,bgflux,parangs,clean_im):
    ''' Wrapper function for the diagnostic plots for the real-time monitor.

    Parameters
    ----------
    axes : 2x2 array of matplotlib Axes (as returned by plt.subplots(2, 2)).
    capture_time : array of datetime objects, one per frame.
    peakcounts : array of peak stellar flux values, one per frame.
    bgflux : array of median background flux values, one per frame.
    parangs : array of parallactic angles (degrees), one per frame.
    clean_im : 2D array holding the latest sky-subtracted image
               (may be the scalar 0 before the first frame arrives).

    Relies on the module-level constants ``nonlinear_limit`` and
    ``saturate_limit`` and on ``np``/``DateFormatter`` being imported.
    '''
    ax1, ax2, ax3, ax4 = axes.flatten()
    # Clear every panel once before re-drawing (the old code called cla()
    # twice on each axis, which was harmless but redundant).
    for ax in (ax1, ax2, ax3, ax4):
        ax.cla()
    # The frames may not arrive in chronological order, so sort by time.
    order = np.argsort(capture_time)
    t_lims = [np.min(capture_time), np.max(capture_time)]
    # Plot 1: The peak flux
    ax1.plot_date(capture_time[order], peakcounts[order], 'x', label='Peak flux')
    ax1.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    ax1.set_title('Peak Stellar Flux (or peak around agpm donut)')
    ax1.set_xlabel('Time')
    # Force the plot to start at zero so it is easier to read.
    # NOTE(review): the limits are computed from bgflux, not peakcounts —
    # this reproduces the original behaviour, but looks like it may have
    # been intended to use peakcounts; confirm before changing.
    ax1.set_ylim(np.min([0, np.min(bgflux)]),
                 1.2 * np.max([np.max(bgflux), saturate_limit]))
    # Mark the nonlinear (red) and saturation (black) regimes.
    ax1.plot(t_lims, [nonlinear_limit, nonlinear_limit], 'r')
    ax1.plot(t_lims, [saturate_limit, saturate_limit], 'k')
    for tick in ax1.get_xticklabels():
        tick.set_rotation(45)
    # Plot 2: Background flux
    ax2.plot_date(capture_time[order], bgflux[order], 'x', label='Background flux')
    ax2.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    ax2.set_title('Background Flux')
    ax2.set_xlabel('Time')
    ax2.ticklabel_format(axis='y', useOffset=False)
    for tick in ax2.get_xticklabels():
        tick.set_rotation(45)
    # Plot 3: Parallactic angle
    ax3.plot_date(capture_time[order], parangs[order], label='Parallactic angle')
    ax3.xaxis.set_major_formatter(DateFormatter('%H:%M:%S'))
    ax3.set_title('Parallactic Angle')
    ax3.set_xlabel('Time')
    for tick in ax3.get_xticklabels():
        tick.set_rotation(45)
    # Plot 4: the latest clean image (should be x/y position or FWHM in the future).
    try:
        # Bug fix: 'lowerleft' is not a valid value for origin and makes
        # imshow raise in modern matplotlib; the error was swallowed by the
        # old bare except, so the panel stayed blank. 'lower' is the intended
        # orientation. The old ax4.colorbar() call is dropped rather than
        # fixed: Axes has no colorbar method, so it always raised and was
        # swallowed too (a colorbar belongs to the Figure, not the Axes).
        ax4.imshow(clean_im, origin='lower')
    except Exception:
        # clean_im may still be the scalar 0 before the first target frame.
        pass
    ax4.set_title('Clean image')
###################
###################
def quick_clean(im, sky, crop_size):
    ''' Does some quick data cosmetics so the frame can be used in the
    real-time analysis plots.

    Parameters
    ----------
    im : 2D array, the raw detector frame.
    sky : 2D array (or 0), a sky frame to subtract.
    crop_size : int, maximum width of the central crop in pixels.

    Returns
    -------
    2D array: the centre-cropped, zero-point-shifted, sky-subtracted image.

    Relies on the module-level constant ``minimum_flux`` for the detector
    zero point.
    '''
    image_size = np.min([im.shape[0], crop_size])
    # Bug fix: use floor division so the slice indices stay integers under
    # Python 3 (plain / would produce floats and break the slicing).
    half = image_size // 2
    cy = im.shape[0] // 2
    cx = im.shape[1] // 2
    # Crop the image so we don't have to deal with the region outside the agpm.
    im = im[cy - half:cy + half, cx - half:cx + half]
    # Shift so that the zero point is actually zero. Done out-of-place so the
    # caller's array is not silently mutated (the old in-place -= modified the
    # original frame through the crop view).
    im = im - minimum_flux
    # Sky subtract and return.
    return im - sky
###################
###################
def check_data(head, window=None):
    ''' Central place for warnings about bad data or observing strategy.

    Currently the only check is for the Full_Uszd mask: we never want to
    observe with it, but some datasets end up with it inserted even though
    the OB did not ask for it.

    Parameters
    ----------
    head : FITS header (or mapping) with key 'ESO INS OPTI3 NAME'.
    window : unused for now; kept so a Tk popup can be re-enabled later.

    Returns
    -------
    bool: True if a warning was issued, False otherwise.
    '''
    if head['ESO INS OPTI3 NAME'] != 'Full_Uszd':
        return False
    # A Tk message box (centred via window.geometry + tkMessageBox.showwarning)
    # could be popped up here instead; for now we just warn on the console.
    print('WARNING: Full_Uszd mask is inserted. Ask the night astronomer to send a new preset.')
    return True
###################
###################
def run_and_process(folder='./',prefix='NACO',suffix='.fits',
                    pause_interval=2.,crop_size=500,new_only=True):
    '''
    Run this program on a directory of NACO data to display some important
    information in real-time as data is added. Currently this program plots the
    peak flux, background flux and parallactic angle of each frame as a function
    of time.
    Options:
      - folder: the path to the folder to monitor
      - prefix: the name of the naco data (e.g. NACO_CORO_SCI)
      - suffix: the file type (e.g. .fits)
      - pause_interval: the delay between updates for the plots
      - crop_size: the number of pixels to consider. This is used to crop
          the image and speed up the processing. This is taken from the _centre_
          so be careful not to crop the star out!
      - new_only: if True, it will ignore files that already exist in a folder
          and only display the statistics of new files.
    '''
    print('Monitoring folder:'+folder)
    print('Press ctrl+c to exit')
    # Make sure the / is included in the filename
    if folder[-1]!='/':
        folder=folder+'/'
    # Set up all of the arrays that will hold the information
    known_files=np.array([])
    capture_time=np.array([])
    peakcounts=np.array([])
    bgflux=np.array([])
    parangs=np.array([])
    target_names=np.array([])
    # Set up the plots
    fig,axes=plt.subplots(2,2,num=0)
    fig.canvas.set_window_title('Summary of data')
    # Set up some arrays:
    skies={} # a dictionary to contain all of the skies, keyed by exposure time
    clean_im=0
    # Set up error window (Tk popup currently disabled; see check_data)
    window = None
    # Begin the main loop
    repeats=0
    first_plot=True
    while True:
        try:
            # Find the data in the folder
            files=glob.glob(folder+prefix+'*'+suffix)
            # Remove any acquisition files
            acq_files = glob.glob(folder+'*ACQ*'+suffix)
            files = set(files)
            acq_files = set(acq_files)
            files = list(files-acq_files)
            nfiles=len(files)
            # On the first pass with new_only, pre-seed known_files so that
            # pre-existing frames are not re-processed.
            if new_only and repeats==0:
                known_files=files
            # Now find which files are new
            new_files=list(set(files)-set(known_files))
            n_new=len(new_files)
            if nfiles ==0 and repeats ==0:
                print('No files found')
                time.sleep(pause_interval)
            elif n_new >0:
                pass
                # Sort them so they are in filename order
                # (which should also correspond to the time they were made)
                new_files=sorted(new_files)
                # Go through them and see what to do with them
                for f in new_files:
                    head=pyfits.getheader(f)
                    exptime=np.round(head['EXPTIME'],decimals=3) # to the nearest ms to avoid mismatches
                    # Classify the file (we only care about sky and target for now)
                    obstype=detect_filetype(head)
                    # Print any warnings about the data or observing strategy
                    warnings = check_data(head,window)
                    # If it is a saturated psf, we can make a dodgy sky by combining all of the data
                    if obstype=='Target_saturated':
                        # Work out if we already have a sky for this target
                        # NOTE: dict.has_key is Python-2 only; this module is py2 code.
                        if skies.has_key(str(exptime)):
                            sky=skies[str(exptime)]
                            nsky=skies['n_'+str(exptime)]
                        else:
                            sky=0
                            nsky=0
                            skies['last4_'+str(exptime)]=[]
                        im=pyfits.getdata(f)[0]
                        this_sky=quick_clean(im,0,crop_size)
                        # Update the existing sky estimate
                        # sky=(nsky*sky+this_sky)/(nsky+1) # the old way that has self-subtraction
                        skies['last4_'+str(exptime)].append(this_sky) # track the last 4
                        skies['n_'+str(exptime)]=nsky+1
                        # if we have more than 4, pop the first one and continue
                        if (nsky+1) >4:
                            skies['last4_'+str(exptime)].pop(0)
                            skies['n_'+str(exptime)]=nsky
                        skies[str(exptime)]=np.median(skies['last4_'+str(exptime)],axis=0)
                    if obstype=='Sky':
                        # If it is a sky, update the master sky frame (for that exposure time)
                        im=pyfits.getdata(f)[0]
                        this_sky=quick_clean(im,0,crop_size)
                        skies[str(exptime)]=this_sky
                    if obstype=='Target_AGPM' or obstype=='Target_saturated':
                        # sky subtract
                        if skies.has_key(str(exptime)):
                            sky=skies[str(exptime)]
                        else:
                            # if the sky doesnt exist yet, skip this file for now
                            # and come back to it
                            files.remove(f)
                            continue
                        # We don't want to sky subtract the first frame with itself...
                        if obstype=='Target_saturated' and skies['n_'+str(exptime)]==1:
                            files.remove(f)
                            # To avoid problems with the case of only 1 file (where it
                            # make a sky from 2 copies of itself), reset the sky until
                            # we have another file
                            if len(files)==1:
                                # print 'deleting sky'
                                skies.pop(str(exptime))
                                skies.pop('n_'+str(exptime))
                            continue
                        im=pyfits.getdata(f)[0]
                        im=quick_clean(im,0,crop_size)
                        clean_im=im-sky
                        # measure the background level
                        bg=np.median(sky)
                        bgflux=np.append(bgflux,bg)
                        # Save the observing time
                        t=Time(head['MJD-OBS'],format='mjd')
                        capture_time=np.append(capture_time,t.datetime)
                        # Measure the peak flux
                        # Pixel distance map
                        npix=im.shape[1]
                        xarr=np.arange(0,npix)-npix/2
                        xx,yy=np.meshgrid(xarr,xarr)
                        pix_dist_map=np.sqrt(xx**2+yy**2)
                        # Smooth the image for centering
                        circ_ap=np.zeros((npix,npix))
                        circ_ap[pix_dist_map<(smooth_dist/2)]=1
                        # NOTE(review): np.int was removed in NumPy >= 1.24 — this
                        # line needs int() under modern NumPy; left as-is here.
                        convol_sz=np.int(np.ceil(smooth_dist)+3)
                        circ_ap=circ_ap[npix/2-convol_sz/2:npix/2+convol_sz/2,
                                        npix/2-convol_sz/2:npix/2+convol_sz/2]
                        smooth_image=signal.fftconvolve(clean_im,circ_ap,mode='same')
                        mx=np.where(smooth_image ==np.max(smooth_image))
                        peak_flux=im[mx[0][0],mx[1][0]]
                        # pdb.set_trace()
                        peakcounts=np.append(peakcounts,peak_flux)
                        # the parang (just use the rough value in the header...)
                        parang=head['HIERARCH ESO ADA POSANG']
                        parang = ((parang + 360) % 360)
                        parangs=np.append(parangs,parang)
                        # Find the target name
                        target_name=head['HIERARCH ESO OBS NAME']
                        target_names=np.append(target_names,target_name)
                        last_target_name=target_name
                        # Find the order that the data was taken in, by sorting the observation times
                        if len(capture_time) >0:
                            display_sz=80
                            cropped_im=clean_im[mx[0][0]-display_sz/2:mx[0][0]+display_sz/2,
                                                mx[1][0]-display_sz/2:mx[1][0]+display_sz/2]
                            # Remove all data from previous targets and only plot the current one
                            target_ix=target_names==last_target_name
                            # pdb.set_trace()
                            diagnostic_plots(axes,capture_time[target_ix],peakcounts[target_ix],
                                             bgflux[target_ix],parangs[target_ix],cropped_im)
                            if first_plot==True:
                                plt.tight_layout()
                                first_plot=False
                known_files=files
                plt.pause(0.05) # this gives python some time to make the plot
            time.sleep(pause_interval) # we cant use plt.pause because it catches
            # the KeyboardInterrupt and makes it hard to exit
        except KeyboardInterrupt:
            break
        repeats+=1
###################
###################
# This code should run if you run it directly, e.g. python monitor.py
# It should get the correct date and monitor the correct folder on the offline
# machine at Paranal.
# This code should run if you run it directly, e.g. python monitor.py
# It should get the correct date and monitor the correct folder on the offline
# machine at Paranal.
if __name__ == "__main__":
    current_time = datetime.datetime.today()
    # What was the date at the beginning of the night?
    datestr = '{0:4d}-{1:02d}-{2:02d}'  # yyyy-mm-dd
    if current_time.hour < 12:  # So midday in Chile is where the date swaps.
        # it is after midnight but before midday so take away a day
        current_time -= datetime.timedelta(1)
    # (Both branches used to format the date identically; the duplicated
    # assignment is hoisted out of the if/else.)
    date = datestr.format(current_time.year, current_time.month, current_time.day)
    # Where is the data?
    folder = '/data-ut1/raw/' + date + '/'
    # Run the monitor
    run_and_process(folder=folder)
|
AnthonyCheetham/naco_ispy
|
monitor.py
|
Python
|
gpl-3.0
| 22,177
|
[
"Gaussian"
] |
78e799fa484f9e4dd7641b1e521ad25ef012d81932ef81f5d691d09aab7e0159
|
#
# Copied from VTK/Common/Testing/Python/PythonSmoke.py
#
import qt
# Smoke test: can the vtk module be imported at all?
try:
    import vtk
except:
    print "Cannot import vtk"
    qt.QApplication.exit(1)
# Smoke test: does introspection of the module work?
try:
    print dir(vtk)
except:
    print "Cannot print dir(vtk)"
    qt.QApplication.exit(1)
# Instantiate an object from the most feature-rich VTK kit available,
# falling back from Hybrid (vtkLineWidget) to Rendering (vtkActor) to
# Common (vtkObject). Each inner failure is absorbed by the next level.
try:
    try:
        try:
            o = vtk.vtkLineWidget()
            print "Using Hybrid"
        except:
            o = vtk.vtkActor()
            print "Using Rendering"
    except:
        o = vtk.vtkObject()
        print "Using Common"
except:
    print "Cannot create vtkObject"
    qt.QApplication.exit(1)
# Exercise some basic wrapped methods on the created object.
try:
    print o
    print "Reference count: %d" % o.GetReferenceCount()
    print "Class name: %s" % o.GetClassName()
except:
    print "Cannot print object"
    qt.QApplication.exit(1)
# Exercise SafeDownCast between two wrapped objects.
try:
    b = vtk.vtkObject()
    d = b.SafeDownCast(o)
    print b, d
except:
    print "Cannot downcast"
    qt.QApplication.exit(1)
# All checks passed: exit with success status.
qt.QApplication.exit(0)
|
finetjul/CTK
|
Applications/ctkSimplePythonShell/Testing/Python/vtkPythonSmoke.py
|
Python
|
apache-2.0
| 843
|
[
"VTK"
] |
119ab5293b2df52c94ae8beac89d2430fd8f144c9cc41c3bf30426a717202ce0
|
import sys
import os
import glob
import distutils.util
import doctest
# Make the freshly-built extension modules importable ahead of any installed
# copy by prepending the distutils build directory to sys.path.
build_dir = "build/lib.%s-%s" % (distutils.util.get_platform(), sys.version[0:3])
sys.path.insert(0, os.path.join(os.getcwd(), build_dir))
import HTSeq
# Documentation lives under python2/doc or python3/doc depending on the interpreter.
py_fdn = 'python'+str(sys.version_info[0])
def test_rst_file(filename):
    """Run the doctests embedded in one reST documentation file.

    The working directory is temporarily switched to example_data so that
    the file paths used inside the doctests resolve. Returns True if all
    doctests passed, False otherwise, printing a summary either way.
    """
    print "Doctest of %s:" % os.path.basename(filename)
    os.chdir( "example_data" )
    (failure_count, test_count) = doctest.testfile(
        os.path.join("..", py_fdn, "doc", filename),
        module_relative=False)
    os.chdir("..")
    if failure_count == 0:
        print "All %d tests passed." % test_count
        return True
    else:
        print "%d of %d tests failed." % (failure_count, test_count)
        return False
ok = True
if len(sys.argv) == 1:
    # No arguments: run the doctests for every .rst file in the doc directory.
    pathname = os.path.abspath(os.path.dirname(sys.argv[0]))
    rst_glob = os.path.join(pathname, '..', 'doc', '*.rst')
    print('RST files found in glob ', rst_glob+':', glob.glob(rst_glob))
    for fn in glob.glob(rst_glob):
        # Accumulate the overall result with &= so one failure taints the run.
        ok &= test_rst_file(os.path.basename(fn))
        print
    if not ok:
        print "Not all tests passed."
        exit(1)
elif len(sys.argv) == 2:
    # One argument: run the doctests for just that file.
    test_rst_file(sys.argv[1])
else:
    print "Wrong usage"
    print "Call without arguments to run all doctest, or with the (base) name"
    print "of one rst file from the doc directory to run doctest on it."
|
simon-anders/htseq
|
python2/test/test.py
|
Python
|
gpl-3.0
| 1,384
|
[
"HTSeq"
] |
e41d1eabc5c073141ae8b784cfc28b1568a1c8a30342d40b6537123da3cbc458
|
#!/usr/bin/python
"""
A simple routine to load in a LIGGGHTS hybrid dump file containing
contact and contact force data and convert into a .vtk unstructured
grid which can be used to visualise the force network. This routine
also writes the length of the connection between particles, in order
to be able to filter out incorrect connections (produced by the
"deform" fix)
This routine is based on Mark Bentley's dump2force (Space Research Institute,
Austrian Academy of Sciences, mark.bentley@oeaw.ac.at)
contributing author: Stefan Radl, TU Graz (radl@tugraz.at)
evtk is used to write binary VTK files:
https://bitbucket.org/pauloh/pyevtk
The pizza.py bdump command is used to handle LIGGGHTS dump files and
therefore PYTHONPATH must include the pizza/src location.
NOTE: bdump is NOT included in granular pizza, and should be taken
from the standard LAMMPS pizza package!
NOTE: it is impossible to tell from the bdump header which values
have been requested in the compute, so check that your compute
and dump match the format here - this will be checked in future!
"""
from evtk.vtk import VtkFile, VtkGroup, VtkUnstructuredGrid
from bdump import bdump
import numpy as np
import sys, os
# TODO: use a try/except here to check for missing modules, and fallback to ASCII VTK if evtk not found
# TODO: ask for timestep or timestep range as input (code is NOT efficient and large files = long runtimes!)
# TODO: write celldata for contact area and heat flux (if present)
# Check for command line arguments: exactly one filename is expected.
if len(sys.argv) != 2:
    sys.exit('Usage: dump2force.py <filename>, where filename is a SINGLE filename; typically dump.<runname>')
elif len(sys.argv) == 2: # we have one input param, that should be parsed as a filename
    filename = str(sys.argv[1])
    if not os.path.isfile(filename):
        sys.exit('File ' + filename + ' does not exist!')
# Derive an output prefix: for "dump.<runname>" use <runname>, otherwise the
# part of the name before the first dot.
splitname = filename.split('.')
if len(splitname) == 2 and splitname[0].lower() == 'dump':
    fileprefix = splitname[1]
else:
    fileprefix = splitname[0]
inputpath = os.path.abspath(filename)
inputdir = os.path.split(inputpath)[0]
# create a sub-directory for the output .vtu files
outputdir = os.path.join(inputdir,fileprefix)
try:
    os.mkdir(outputdir)
except:
    # Directory already exists (or could not be created) — carry on either way.
    # NOTE(review): this also hides genuine permission errors; confirm intent.
    pass
# Read in the dump file - since we can have many contacts (i.e. >> nparticles)
# and many timesteps I will deal with one timestep at a time in memory,
# write to the appropriate .vtu file for a single timestep, then move on.
forcedata = bdump(filename,0)
# Build the ParaView .pvd group file path next to the input dump.
groupfile = fileprefix
groupfile = os.path.join(inputdir,groupfile)
groupfile = VtkGroup(groupfile)
fileindex = 0
# Advance to the first snapshot (returns -1 when the dump is exhausted).
timestep = forcedata.next()
# check that we have the right number of colums (>11)
#
# NOTE: the first timesteps are often blank, and then natoms returns 0, so this doesn't really work...
#
if forcedata.snaps[fileindex].natoms !=0 and len(forcedata.snaps[0].atoms[0]) < 12:
    print "Error - dump file requires at least all parameters from a compute pair/gran/local id pos force (12 in total)"
    sys.exit()
# loop through available timesteps
#
while timestep >= 0:
    # default data are stored as pos1 (3) pos2 (3) id1 id2 periodic_flag force (3) -> 12 columns
    #
    # if contactArea is enabled, that's one more (13) and heatflux (14)
    #
    # assign names to atom columns (1-N)
    forcedata.map(1,"x1",2,"y1",3,"z1",4,"x2",5,"y2",6,"z2",7,"id1",8,"id2",9,"periodic",10,"fx",11,"fy",12,"fz")
    # forcedata.map(1,"x1",2,"y1",3,"z1",4,"x2",5,"y2",6,"z2",7,"id1",8,"id2",9,"periodic",10,"fx",11,"fy",12,"fz",13,"area",14,"heatflux")
    # check for contact data (some timesteps may have no particles in contact)
    #
    # NB. if one loads two datasets into ParaView with defined timesteps, but in which
    # one datasets has some missing, data for the previous timestep are still displayed -
    # this means that it is better here to generate "empty" files for these timesteps.
    if forcedata.snaps[fileindex].natoms == 0:
        # Empty snapshot: write a minimal hand-rolled .vtu with zero points/cells.
        vtufile = fileprefix+'_'+str(timestep)+'.vtu'
        vtufile = os.path.join(outputdir,vtufile)
        # NOTE(review): file() is Python-2 only, and the handle is never
        # closed explicitly — it is released when vtuwrite goes out of scope.
        vtuwrite = file(vtufile,'w')
        vtuwrite.write("""<?xml version="1.0"?>
<VTKFile byte_order="LittleEndian" version="0.1" type="UnstructuredGrid">
<UnstructuredGrid>
<Piece NumberOfCells="0" NumberOfPoints="0">
<Cells>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="connectivity"/>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="offsets"/>
<DataArray NumberOfComponents="1" offset="0" type="Int64" Name="types"/>
</Cells>
</Piece>
</UnstructuredGrid>
</VTKFile>""")
    else:
        # ******************************************
        # Cell and connection lists
        # ******************************************
        # number of cells = number of interactions (i.e. entries in the dump file, includes periodic connections!)
        ncells = len(forcedata.snaps[fileindex].atoms)
        # number of periodic interactions
        periodic = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["periodic"]],dtype=bool)
        nperiodic = sum(periodic)
        # number of non-periodic interactions (which will be written out)
        nconnex = ncells - nperiodic
        # extract the IDs as an array of integers
        id1 = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["id1"]],dtype=long)
        id2 = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["id2"]],dtype=long)
        # and convert to lists
        id1 = id1.tolist()
        id2 = id2.tolist()
        # concatenate into a single list
        ids = []
        ids = id1[:]
        ids.extend(id2)
        # convert to a set and back to remove duplicates, then sort
        ids = list(set(ids))
        ids.sort()
        # number of points = number of unique IDs (particles)
        npoints = len(ids)
        print 'Timestep:',str(timestep),'npoints=',str(npoints),'ncells=',str(ncells),'nperiodic=',nperiodic, 'nconnex=',str(nconnex)
        # ******************************************
        # Cell Data
        # ******************************************
        # extract the length of each connection (for all cells, including periodic)
        connectionLength = \
            (np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["x1"]],dtype=np.float64) \
            -np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["x2"]],dtype=np.float64))**2 \
            + \
            (np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["y1"]],dtype=np.float64) \
            -np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["y2"]],dtype=np.float64))**2 \
            + \
            (np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["z1"]],dtype=np.float64) \
            -np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["z2"]],dtype=np.float64))**2
        connectionLength = np.sqrt(connectionLength)
        # extract the length of the force
        force = np.sqrt( np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fx"]],dtype=np.float64)**2 + \
                         np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fy"]],dtype=np.float64)**2 + \
                         np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["fz"]],dtype=np.float64)**2 )
        #create cleaned list of valid data for each connection
        # (i.e. with the periodic interactions filtered out)
        forceClean = np.zeros( nconnex, dtype=np.float64 )
        connectionLengthClean = np.zeros( nconnex, dtype=np.float64 )
        iConnex=0
        for pair in range(ncells):
            if np.invert(periodic[pair]):
                connectionLengthClean[iConnex] = connectionLength[pair]
                forceClean[iConnex] = force[pair]
                iConnex+=1
        # And, optionally, contact area and heat flux (using the same connectivity)
        # area = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["area"]],dtype=np.float64)
        # heatflux = np.array(forcedata.snaps[fileindex].atoms[:,forcedata.names["heatflux"]],dtype=np.float64)
        # Now we have enough data to create the file:
        # Points - (x,y,z) (npoints)
        # Cells
        #   Connectivity - connections (nconnex,2)
        #   Offset - offset (nconnex)
        #   type - celltype (nconnex)
        # Celldata
        #   force (nconnex)
        #   area (nconnex)
        #   heatflux (nconnex)
        # ******************************************
        # Point data = location of each unique particle & Connectivity List
        # ******************************************
        # The order of this data is important since we use the position of each particle
        # in this list to reference particle connectivity! We will use the order of the
        # sorted ids array to determine this.
        # create empty arrays to hold x,y,z data
        x = np.zeros( npoints, dtype=np.float64)
        y = np.zeros( npoints, dtype=np.float64)
        z = np.zeros( npoints, dtype=np.float64)
        counter = 0
        for id in ids:
            # Each particle appears in at least one pair; take its coordinates
            # from whichever side of the pair it occurs on first.
            if id in id1:
                index = id1.index(id)
                xtemp,ytemp,ztemp = forcedata.snaps[fileindex].atoms[index,forcedata.names["x1"]], \
                                    forcedata.snaps[fileindex].atoms[index,forcedata.names["y1"]], \
                                    forcedata.snaps[fileindex].atoms[index,forcedata.names["z1"]]
            else:
                index = id2.index(id)
                xtemp,ytemp,ztemp = forcedata.snaps[fileindex].atoms[index,forcedata.names["x2"]], \
                                    forcedata.snaps[fileindex].atoms[index,forcedata.names["y2"]], \
                                    forcedata.snaps[fileindex].atoms[index,forcedata.names["z2"]]
            x[counter]=xtemp
            y[counter]=ytemp
            z[counter]=ztemp
            counter += 1
        # Now create the connectivity list - this corresponds to pairs of IDs, but referencing
        # the order of the ids array, so now we loop through 0..ncells and have to connect
        # id1 and id2, so I need to see where in ids these correspond to
        # If the periodic flag is set for a given interactions, DO NOT connect the points
        # (to avoid lines that cross the simulation domain)
        # Mask out periodic interactions from the cell (connectivity) array
        # newList = [word for (word, mask) in zip(s,b) if mask]
        id1_masked = [ident for (ident,mask) in zip(id1,np.invert(periodic)) if mask]
        id2_masked = [ident for (ident,mask) in zip(id2,np.invert(periodic)) if mask]
        # create an empty array to hold particle pairs and particle connectionLength
        connections = np.zeros( 2*nconnex, dtype=int )
        for pair in range(nconnex):
            connections[2*pair],connections[2*pair+1] = ids.index(id1_masked[pair]),ids.index(id2_masked[pair])
        # The offset array is simply generated from 2*(1..ncells)
        offset=(np.arange(nconnex,dtype=int)+1)*2
        # The type array is simply ncells x 3 (i.e. a VTKLine type)
        celltype = np.ones(nconnex,dtype=int)*3
        # ******************************************
        # Write DATA to FILE (binary)
        # ******************************************
        # create a VTK unstructured grid (.vtu) file
        vtufile = fileprefix+'_'+str(timestep)
        vtufile = os.path.join(outputdir,vtufile)
        w = VtkFile(vtufile, VtkUnstructuredGrid)
        vtufile += '.vtu'
        w.openGrid()
        w.openPiece(npoints=npoints, ncells=nconnex)
        # Set up Points (x,y,z) data XML
        w.openElement("Points")
        w.addData("points", (x,y,z) )
        w.closeElement("Points")
        # Set up Cell data
        w.openElement("Cells")
        w.addData("connectivity", connections )
        w.addData("offsets", offset)
        w.addData("types", celltype)
        w.closeElement("Cells")
        # Set up force data
        w.openData("Cell")
        w.addData("force", forceClean)
        # w.addData("area", area)
        # w.addData("heatflux", heatflux)
        # w.closeData("Cell")
        # Set up connectionLength data
        # w.openData("Cell")
        w.addData("connectionLength", connectionLengthClean)
        w.closeData("Cell")
        # and contact area
        # w.openData("Cell")
        # w.addData("area", area)
        # w.closeData("Cell")
        # and heat flux
        # w.openData("Cell")
        # w.addData("heatflux", heatflux)
        # w.closeData("Cell")
        # Wrap up
        w.closePiece()
        w.closeGrid()
        # Append binary data (must mirror the declaration order above)
        w.appendData( (x,y,z) )
        w.appendData(connections).appendData(offset).appendData(celltype)
        w.appendData(forceClean).appendData(connectionLengthClean)
        w.save()
    # Add this file to the group of all timesteps
    groupfile.addFile(filepath = os.path.relpath(vtufile,inputdir), sim_time = timestep)
    fileindex += 1
    timestep = forcedata.next()
# end of main loop - close group file
groupfile.save()
|
CFDEMproject/LPP
|
src/dump2force.py
|
Python
|
gpl-2.0
| 13,376
|
[
"LAMMPS",
"ParaView",
"VTK"
] |
f71a993bbafd9feca52d68e353f1b5d076566577712f8183fda5b39588df2947
|
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import enchant
from enchant.checker import SpellChecker
from pypln.backend.celery_task import PyPLNTask
class SpellingChecker(PyPLNTask):
    """
    Worker that spellchecks the plain text of a document using pyenchant.
    """
    def __init__(self):
        # This method is only called once per process, but that is no problem
        # since the enchant language list should not change. Don't use this
        # method for anything that should run every time the task is called.
        # See http://docs.celeryproject.org/en/latest/userguide/tasks.html#instantiation
        # for more information.
        self.checkers = {}
        for language in enchant.list_languages():
            self.checkers[language] = SpellChecker(language)

    def process(self, document):
        # TODO: this worker may be enhanced by also checking the errors
        # against a specific vocabulary supplied with the document.
        try:
            # KeyError here covers both an unsupported language and a
            # document missing the 'language'/'text' keys.
            active_checker = self.checkers[document['language']]
            active_checker.set_text(document['text'])
            found = [[err.word, err.wordpos, err.suggest()]
                     for err in active_checker]
        except KeyError:
            found = None
        return {'spelling_errors': found}
|
NAMD/pypln.backend
|
pypln/backend/workers/spellchecker.py
|
Python
|
gpl-3.0
| 1,891
|
[
"NAMD"
] |
3869b400dfccb0bbb8ca3ca2d8294e239f151e82f009fd15add58bdc30af2103
|
#!/usr/bin/env python3
import itertools
from collections import defaultdict
import logging
from operator import mul
import networkx as nx
import numpy as np
import pandas as pd
from pgmpy.base import DirectedGraph
from pgmpy.factors.discrete import TabularCPD, JointProbabilityDistribution, DiscreteFactor
from pgmpy.independencies import Independencies
from pgmpy.extern import six
from pgmpy.extern.six.moves import range, reduce
from pgmpy.models.MarkovModel import MarkovModel
class BayesianModel(DirectedGraph):
"""
Base class for bayesian model.
A models stores nodes and edges with conditional probability
distribution (cpd) and other attributes.
models hold directed edges. Self loops are not allowed neither
multiple (parallel) edges.
Nodes should be strings.
Edges are represented as links between nodes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object.
Examples
--------
Create an empty bayesian model with no nodes and no edges.
>>> from pgmpy.models import BayesianModel
>>> G = BayesianModel()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node('a')
Add the nodes from any container (a list, set or tuple or the nodes
from another graph).
>>> G.add_nodes_from(['a', 'b'])
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge('a', 'b')
a list of edges,
>>> G.add_edges_from([('a', 'b'), ('b', 'c')])
If some edges connect nodes not yet in the model, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Shortcuts:**
Many common graph features allow python syntax for speed reporting.
>>> 'a' in G # check if node in graph
True
>>> len(G) # number of nodes in graph
3
"""
    def __init__(self, ebunch=None):
        """Initialise the model, optionally seeding it with edges.

        Parameters
        ----------
        ebunch : container of edges, optional
            Edges to populate the graph with; forwarded to
            ``add_edges_from`` (nodes are created implicitly).
        """
        super(BayesianModel, self).__init__()
        if ebunch:
            self.add_edges_from(ebunch)
        # CPDs attached via add_cpds(); per-variable cardinalities.
        self.cpds = []
        self.cardinalities = defaultdict(int)
    def add_edge(self, u, v, **kwargs):
        """
        Add an edge between u and v.

        The nodes u and v will be automatically added if they are
        not already in the graph.

        Parameters
        ----------
        u,v : nodes
              Nodes can be any hashable python object.

        Examples
        --------
        >>> from pgmpy.models import BayesianModel
        >>> G = BayesianModel()
        >>> G.add_nodes_from(['grade', 'intel'])
        >>> G.add_edge('grade', 'intel')
        """
        # Self loops would break the DAG property outright.
        if u == v:
            raise ValueError('Self loops are not allowed.')
        # Adding u->v when a directed path v->...->u already exists would
        # create a cycle, which a Bayesian network cannot contain.
        if u in self.nodes() and v in self.nodes() and nx.has_path(self, v, u):
            raise ValueError(
                'Loops are not allowed. Adding the edge from (%s->%s) forms a loop.' % (u, v))
        else:
            super(BayesianModel, self).add_edge(u, v, **kwargs)
def remove_node(self, node):
"""
Remove node from the model.
Removing a node also removes all the associated edges, removes the CPD
of the node and marginalizes the CPDs of it's children.
Parameters
----------
node : node
Node which is to be removed from the model.
Returns
-------
None
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.models import BayesianModel
>>> model = BayesianModel([('A', 'B'), ('B', 'C'),
... ('A', 'D'), ('D', 'C')])
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
... columns=['A', 'B', 'C', 'D'])
>>> model.fit(values)
>>> model.get_cpds()
[<TabularCPD representing P(A:2) at 0x7f28248e2438>,
<TabularCPD representing P(B:2 | A:2) at 0x7f28248e23c8>,
<TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>,
<TabularCPD representing P(D:2 | A:2) at 0x7f28248e26a0>]
>>> model.remove_node('A')
>>> model.get_cpds()
[<TabularCPD representing P(B:2) at 0x7f28248e23c8>,
<TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>,
<TabularCPD representing P(D:2) at 0x7f28248e26a0>]
"""
affected_nodes = [v for u, v in self.edges() if u == node]
for affected_node in affected_nodes:
node_cpd = self.get_cpds(node=affected_node)
if node_cpd:
node_cpd.marginalize([node], inplace=True)
if self.get_cpds(node=node):
self.remove_cpds(node)
super(BayesianModel, self).remove_node(node)
def remove_nodes_from(self, nodes):
"""
Remove multiple nodes from the model.
Removing a node also removes all the associated edges, removes the CPD
of the node and marginalizes the CPDs of it's children.
Parameters
----------
nodes : list, set (iterable)
Nodes which are to be removed from the model.
Returns
-------
None
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.models import BayesianModel
>>> model = BayesianModel([('A', 'B'), ('B', 'C'),
... ('A', 'D'), ('D', 'C')])
>>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(1000, 4)),
... columns=['A', 'B', 'C', 'D'])
>>> model.fit(values)
>>> model.get_cpds()
[<TabularCPD representing P(A:2) at 0x7f28248e2438>,
<TabularCPD representing P(B:2 | A:2) at 0x7f28248e23c8>,
<TabularCPD representing P(C:2 | B:2, D:2) at 0x7f28248e2748>,
<TabularCPD representing P(D:2 | A:2) at 0x7f28248e26a0>]
>>> model.remove_nodes_from(['A', 'B'])
>>> model.get_cpds()
[<TabularCPD representing P(C:2 | D:2) at 0x7f28248e2a58>,
<TabularCPD representing P(D:2) at 0x7f28248e26d8>]
"""
for node in nodes:
self.remove_node(node)
def add_cpds(self, *cpds):
    """
    Associate CPDs (Conditional Probability Distributions) with the model.

    Each CPD must be a ``TabularCPD`` defined only on variables that are
    already nodes of the model.  If a CPD for the same variable already
    exists it is replaced (with a warning), otherwise the CPD is appended.

    Parameters
    ----------
    cpds : list, set, tuple (array-like)
        List of CPDs which will be associated with the model
    """
    for new_cpd in cpds:
        if not isinstance(new_cpd, TabularCPD):
            raise ValueError('Only TabularCPD can be added.')

        # Every variable the CPD mentions must already be a model node.
        if set(new_cpd.variables) - set(new_cpd.variables).intersection(
                set(self.nodes())):
            raise ValueError('CPD defined on variable not in the model', new_cpd)

        replaced = False
        for position, existing_cpd in enumerate(self.cpds):
            if existing_cpd.variable == new_cpd.variable:
                logging.warning("Replacing existing CPD for {var}".format(var=new_cpd.variable))
                self.cpds[position] = new_cpd
                replaced = True
                break
        if not replaced:
            self.cpds.append(new_cpd)
def get_cpds(self, node=None):
    """
    Return the CPD of ``node``, or the list of all CPDs added so far when
    no node is given.

    Parameters
    ----------
    node: any hashable python object (optional)
        The node whose CPD we want. If node not specified returns all the
        CPDs added to the model.

    Returns
    -------
    A single TabularCPD, or a list of TabularCPDs when node is None.
    """
    if node:
        if node not in self.nodes():
            raise ValueError('Node not present in the Directed Graph')
        # CPDs are keyed by their .variable attribute; scan for a match.
        match = next((candidate for candidate in self.cpds
                      if candidate.variable == node), None)
        if match is None:
            raise ValueError("CPD not added for the node: {node}".format(node=node))
        return match
    return self.cpds
def remove_cpds(self, *cpds):
    """
    Detach the given CPDs from the model.

    Parameters
    ----------
    *cpds: TabularCPD object or variable name (string)
        CPD objects (or names of the variables whose CPDs should be
        removed) to drop from the model.
    """
    for entry in cpds:
        # A string argument names a variable; resolve it to its CPD first.
        target = self.get_cpds(entry) if isinstance(entry, six.string_types) else entry
        self.cpds.remove(target)
def get_cardinality(self, node):
    """
    Return the cardinality of ``node``.

    Raises an error (via ``get_cpds``) if no CPD has been added for the
    queried node.

    Parameters
    ----------
    node: Any hashable python object.

    Returns
    -------
    int: The cardinality of the node.
    """
    node_cpd = self.get_cpds(node)
    # The first cardinality entry belongs to the CPD's own variable.
    return node_cpd.cardinality[0]
def check_model(self):
    """
    Check the model for various errors. This method checks for the following
    errors.

    * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
    * Checks if the CPDs associated with nodes are consistent with their parents.

    Returns
    -------
    check: boolean
        True if all the checks are passed

    Raises
    ------
    ValueError
        If a CPD's evidence variables disagree with the node's parents,
        or its conditional distributions do not sum to 1.
    """
    for node in self.nodes():
        cpd = self.get_cpds(node=node)
        if isinstance(cpd, TabularCPD):
            # cpd.variables is [variable, evidence...]; reversing and
            # dropping the last entry leaves the evidence variables.
            evidence = cpd.variables[:0:-1]
            parents = self.get_parents(node)
            if set(evidence if evidence else []) != set(parents if parents else []):
                raise ValueError("CPD associated with %s doesn't have "
                                 "proper parents associated with it." % node)
            # np.product was deprecated and removed in NumPy 2.0;
            # np.prod is the exact equivalent.
            if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
                               np.ones(np.prod(cpd.cardinality[:0:-1])),
                               atol=0.01):
                raise ValueError('Sum of probabilites of states for node %s'
                                 ' is not equal to 1.' % node)
    return True
def _get_ancestors_of(self, obs_nodes_list):
    """
    Return the set of all ancestors of the observed nodes, including the
    observed nodes themselves.

    Parameters
    ----------
    obs_nodes_list: string, list-type
        name of all the observed nodes

    Returns
    -------
    set: the observed nodes plus every (transitive) predecessor.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> model = BayesianModel([('D', 'G'), ('I', 'G'), ('G', 'L'),
    ...                        ('I', 'L')])
    >>> model._get_ancestors_of('G')
    {'D', 'G', 'I'}
    >>> model._get_ancestors_of(['G', 'I'])
    {'D', 'G', 'I'}
    """
    if not isinstance(obs_nodes_list, (list, tuple)):
        obs_nodes_list = [obs_nodes_list]

    for node in obs_nodes_list:
        if node not in self.nodes():
            # Fixed the doubled "not in not in" wording of the original message.
            raise ValueError('Node {s} not in graph'.format(s=node))

    ancestors_list = set()
    nodes_list = set(obs_nodes_list)
    # Worklist walk over predecessors; a node is expanded at most once.
    while nodes_list:
        node = nodes_list.pop()
        if node not in ancestors_list:
            nodes_list.update(self.predecessors(node))
            ancestors_list.add(node)
    return ancestors_list
def active_trail_nodes(self, variables, observed=None):
    """
    Returns a dictionary with the given variables as keys and all the nodes reachable
    from that respective variable as values.

    Parameters
    ----------
    variables: str or array like
        variables whose active trails are to be found.
    observed : List of nodes (optional)
        If given the active trails would be computed assuming these nodes to be observed.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_nodes_from(['diff', 'intel', 'grades'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')])
    >>> student.active_trail_nodes('diff')
    {'diff': {'diff', 'grades'}}
    >>> student.active_trail_nodes(['diff', 'intel'], observed='grades')
    {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}}

    References
    ----------
    Details of the algorithm can be found in 'Probabilistic Graphical Model
    Principles and Techniques' - Koller and Friedman
    Page 75 Algorithm 3.1
    """
    # Normalize the observed argument to a list (possibly empty).
    if observed:
        observed_list = observed if isinstance(observed, (list, tuple)) else [observed]
    else:
        observed_list = []
    # Ancestors of observed nodes; used below to decide whether a
    # v-structure (collider) is activated by the evidence.
    ancestors_list = self._get_ancestors_of(observed_list)
    # Direction of flow of information
    # up -> from parent to child
    # down -> from child to parent
    active_trails = {}
    for start in variables if isinstance(variables, (list, tuple)) else [variables]:
        # Worklist of (node, direction) pairs; the same node may be
        # processed once per direction.
        visit_list = set()
        visit_list.add((start, 'up'))
        traversed_list = set()
        active_nodes = set()
        while visit_list:
            node, direction = visit_list.pop()
            if (node, direction) not in traversed_list:
                # Observed nodes block the trail and are never reported active.
                if node not in observed_list:
                    active_nodes.add(node)
                traversed_list.add((node, direction))
                if direction == 'up' and node not in observed_list:
                    # Trail may continue to both parents and children.
                    for parent in self.predecessors(node):
                        visit_list.add((parent, 'up'))
                    for child in self.successors(node):
                        visit_list.add((child, 'down'))
                elif direction == 'down':
                    # Children always continue the trail when node is unobserved.
                    if node not in observed_list:
                        for child in self.successors(node):
                            visit_list.add((child, 'down'))
                    # A collider passes information to its parents only when
                    # it (or one of its descendants) is observed, i.e. when
                    # node is an ancestor of an observed node.
                    if node in ancestors_list:
                        for parent in self.predecessors(node):
                            visit_list.add((parent, 'up'))
        active_trails[start] = active_nodes
    return active_trails
def local_independencies(self, variables):
    """
    Return an ``Independencies`` object with the local independencies of
    each of the given variables: every variable is independent of its
    non-descendents given its parents.

    Parameters
    ----------
    variables: str or array like
        variables whose local independencies are to be found.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    >>>                         ('grade', 'letter'), ('intel', 'SAT')])
    >>> ind = student.local_independencies('grade')
    >>> ind
    (grade _|_ SAT | diff, intel)
    """
    def _descendents_of(source):
        # Stack-based walk over successors.  The graph is acyclic, so the
        # walk terminates without a visited set; duplicates are harmless
        # because the caller builds a set from the result.
        found = []
        stack = [source]
        while stack:
            current = stack.pop()
            children = self.neighbors(current)
            stack.extend(children)
            found.extend(children)
        return found

    independencies = Independencies()
    query_vars = variables if isinstance(variables, (list, tuple)) else [variables]
    for variable in query_vars:
        non_descendents = set(self.nodes()) - {variable} - set(_descendents_of(variable))
        parents = set(self.get_parents(variable))
        remainder = non_descendents - parents
        if remainder:
            independencies.add_assertions([variable, remainder, parents])
    return independencies
def is_active_trail(self, start, end, observed=None):
    """
    Return True if there is any active trail between the start and end node.

    Parameters
    ----------
    start : Graph Node
    end : Graph Node
    observed : List of nodes (optional)
        If given the active trail would be computed assuming these nodes to be observed.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_nodes_from(['diff', 'intel', 'grades', 'letter', 'sat'])
    >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades'), ('grades', 'letter'),
    ...                         ('intel', 'sat')])
    >>> student.is_active_trail('diff', 'intel')
    False
    >>> student.is_active_trail('grades', 'sat')
    True
    """
    # The trail is active iff `end` is reachable from `start` given `observed`.
    reachable = self.active_trail_nodes(start, observed)[start]
    return end in reachable
def get_independencies(self, latex=False):
    """
    Compute all independencies in the Bayesian Network by checking
    d-separation of every node against every possible observed subset.

    Parameters
    ----------
    latex: boolean
        If latex=True then latex string of the independence assertion
        would be created.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> chain = BayesianModel([('X', 'Y'), ('Y', 'Z')])
    >>> chain.get_independencies()
    (X _|_ Z | Y)
    (Z _|_ X | Y)
    """
    independencies = Independencies()
    for start in (self.nodes()):
        rest = set(self.nodes()) - {start}
        # Enumerate every strict subset of the remaining nodes as evidence.
        for subset_size in range(len(rest)):
            for observed in itertools.combinations(rest, subset_size):
                reachable = set(self.active_trail_nodes(start, observed=observed)[start])
                d_seperated_variables = rest - set(observed) - reachable
                if d_seperated_variables:
                    independencies.add_assertions([start, d_seperated_variables, observed])
    independencies.reduce()
    return independencies.latex_string() if latex else independencies
def to_markov_model(self):
    """
    Convert the bayesian model to a markov model. The markov model created
    is the moral graph of the bayesian model, carrying the CPDs as factors.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
    ...                    ('intel', 'SAT'), ('grade', 'letter')])
    >>> mm = G.to_markov_model()
    >>> mm.nodes()
    ['diff', 'grade', 'intel', 'SAT', 'letter']
    >>> mm.edges()
    [('diff', 'intel'), ('diff', 'grade'), ('intel', 'grade'),
    ('intel', 'SAT'), ('grade', 'letter')]
    """
    # Moralize first: marry co-parents and drop edge directions.
    mm = MarkovModel(self.moralize().edges())
    factors = [cpd.to_factor() for cpd in self.cpds]
    mm.add_factors(*factors)
    return mm
def to_junction_tree(self):
    """
    Create a junction tree (or clique tree) for this bayesian model.

    The model is first converted into a Markov model; the junction tree
    of that Markov model (H) is a graph (G) where

    1. each node in G corresponds to a maximal clique in H
    2. each sepset in G separates the variables strictly on one side of the
       edge to other.

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> from pgmpy.factors.discrete import TabularCPD
    >>> G = BayesianModel([('diff', 'grade'), ('intel', 'grade'),
    ...                    ('intel', 'SAT'), ('grade', 'letter')])
    >>> # ... add CPDs for every node ...
    >>> jt = G.to_junction_tree()
    """
    return self.to_markov_model().to_junction_tree()
def fit(self, data, estimator_type=None, state_names=None, complete_samples_only=True, **kwargs):
    """
    Estimate the CPD for each variable based on a given data set.

    Parameters
    ----------
    data: pandas DataFrame object
        DataFrame object with column names identical to the variable names of the network.
        (If some values in the data are missing the data cells should be set to `numpy.NaN`.
        Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
    estimator_type: Estimator class
        One of:
        - MaximumLikelihoodEstimator (default)
        - BayesianEstimator: In this case, pass 'prior_type' and either 'pseudo_counts'
          or 'equivalent_sample_size' as additional keyword arguments.
          See `BayesianEstimator.get_parameters()` for usage.
    state_names: dict (optional)
        A dict indicating, for each variable, the discrete set of states
        that the variable can take. If unspecified, the observed values
        in the data set are taken to be the only possible states.
        (Defaults to None, treated as an empty dict; the previous
        mutable default `[]` is preserved behaviorally.)
    complete_samples_only: bool (default `True`)
        Specifies how to deal with missing data, if present. If set to `True` all rows
        that contain `np.Nan` somewhere are ignored. If `False` then, for each variable,
        every row where neither the variable nor its parents are `np.NaN` is used.

    Examples
    --------
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianModel
    >>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
    >>> model = BayesianModel([('A', 'C'), ('B', 'C')])
    >>> model.fit(data)
    """
    from pgmpy.estimators import MaximumLikelihoodEstimator, BayesianEstimator, BaseEstimator

    # Bug fix: the original used a shared mutable default (`state_names=[]`).
    # Substituting [] here keeps the value the estimator receives identical.
    if state_names is None:
        state_names = []

    if estimator_type is None:
        estimator_type = MaximumLikelihoodEstimator
    elif not issubclass(estimator_type, BaseEstimator):
        raise TypeError("Estimator object should be a valid pgmpy estimator.")

    estimator = estimator_type(self, data, state_names=state_names,
                               complete_samples_only=complete_samples_only)
    cpds_list = estimator.get_parameters(**kwargs)
    self.add_cpds(*cpds_list)
def predict(self, data):
    """
    Predict states of all the missing variables.

    For every row of ``data``, runs a MAP query over the variables of the
    model that are absent from the data columns and collects the most
    probable state of each.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame object with column names same as the variables in the model.

    Returns
    -------
    pandas DataFrame with one column per missing variable, indexed like ``data``.
    """
    from pgmpy.inference import VariableElimination

    data_columns = set(data.columns)
    model_nodes = set(self.nodes())
    if data_columns == model_nodes:
        raise ValueError("No variable missing in data. Nothing to predict")
    elif data_columns - model_nodes:
        raise ValueError("Data has variables which are not in the model")

    missing_variables = model_nodes - data_columns
    pred_values = defaultdict(list)

    # Send state_names dict from one of the estimated CPDs to the inference class.
    model_inference = VariableElimination(self, state_names=self.get_cpds()[0].state_names)
    for _, data_point in data.iterrows():
        states_dict = model_inference.map_query(variables=missing_variables,
                                                evidence=data_point.to_dict())
        for variable, state in states_dict.items():
            pred_values[variable].append(state)
    return pd.DataFrame(pred_values, index=data.index)
def predict_probability(self, data):
    """
    Predicts probabilities of all states of the missing variables.

    Parameters
    ----------
    data : pandas DataFrame object
        A DataFrame object with column names same as the variables in the model.

    Returns
    -------
    pandas DataFrame with one column per (missing variable, state) pair,
    named '<variable>_<state>', indexed like ``data``.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> from pgmpy.models import BayesianModel
    >>> values = pd.DataFrame(np.random.randint(low=0, high=2, size=(100, 5)),
    ...                       columns=['A', 'B', 'C', 'D', 'E'])
    >>> train_data = values[:80]
    >>> predict_data = values[80:]
    >>> model = BayesianModel([('A', 'B'), ('C', 'B'), ('C', 'D'), ('B', 'E')])
    >>> model.fit(values)
    >>> predict_data = predict_data.copy()
    >>> predict_data.drop('B', axis=1, inplace=True)
    >>> y_prob = model.predict_probability(predict_data)
    """
    from pgmpy.inference import VariableElimination
    # Validate the data columns against the model's nodes.
    if set(data.columns) == set(self.nodes()):
        raise ValueError("No variable missing in data. Nothing to predict")
    elif set(data.columns) - set(self.nodes()):
        raise ValueError("Data has variables which are not in the model")
    # Variables absent from the data are the ones to be predicted.
    missing_variables = set(self.nodes()) - set(data.columns)
    pred_values = defaultdict(list)
    model_inference = VariableElimination(self)
    # Run one posterior query per data row, using the row as evidence.
    for index, data_point in data.iterrows():
        states_dict = model_inference.query(variables=missing_variables, evidence=data_point.to_dict())
        for k, v in states_dict.items():
            # One output column per state of the predicted variable,
            # labelled with the state name recorded on the fitted CPD.
            for l in range(len(v.values)):
                state = self.get_cpds(k).state_names[k][l]
                pred_values[k + '_' + str(state)].append(v.values[l])
    return pd.DataFrame(pred_values, index=data.index)
def get_factorized_product(self, latex=False):
    """Intentionally unimplemented placeholder.

    Parameters
    ----------
    latex : boolean
        Unused; kept for signature compatibility.
    """
    # TODO: refer to IMap class for explanation why this is not implemented.
    pass
def get_immoralities(self):
    """
    Find all the immoralities in the model.

    A v-structure X -> Z <- Y is an immorality if there is no direct edge
    between X and Y.

    Returns
    -------
    set: A set of all the immoralities in the model, as sorted parent pairs.

    Examples
    ---------
    >>> from pgmpy.models import BayesianModel
    >>> student = BayesianModel()
    >>> student.add_edges_from([('diff', 'grade'), ('intel', 'grade'),
    ...                         ('intel', 'SAT'), ('grade', 'letter')])
    >>> student.get_immoralities()
    {('diff','intel')}
    """
    immoralities = set()
    for child in self.nodes():
        # Every unordered pair of parents of `child` is a candidate collider.
        for left, right in itertools.combinations(self.predecessors(child), 2):
            linked = self.has_edge(left, right) or self.has_edge(right, left)
            if not linked:
                immoralities.add(tuple(sorted((left, right))))
    return immoralities
def is_iequivalent(self, model):
    """
    Check whether the given model is I-equivalent to this one.

    Two graphs G1 and G2 are I-equivalent if they have the same skeleton
    and the same set of immoralities.

    Note: For same skeleton different names of nodes can work but for
    immoralities names of nodes must be same.

    Parameters
    ----------
    model : A Bayesian model object, for which you want to check I-equivalence

    Returns
    --------
    boolean : True if both are I-equivalent, False otherwise

    Examples
    --------
    >>> from pgmpy.models import BayesianModel
    >>> G = BayesianModel()
    >>> G.add_edges_from([('V', 'W'), ('W', 'X'),
    ...                   ('X', 'Y'), ('Z', 'Y')])
    >>> G1 = BayesianModel()
    >>> G1.add_edges_from([('W', 'V'), ('X', 'W'),
    ...                    ('X', 'Y'), ('Z', 'Y')])
    >>> G.is_iequivalent(G1)
    True
    """
    if not isinstance(model, BayesianModel):
        raise TypeError('model must be an instance of Bayesian Model')
    # Same skeleton <=> the undirected versions are isomorphic.
    matcher = nx.algorithms.isomorphism.GraphMatcher(self.to_undirected(),
                                                     model.to_undirected())
    return matcher.is_isomorphic() and self.get_immoralities() == model.get_immoralities()
def is_imap(self, JPD):
    """
    Check whether this bayesian model is an I-map of the given
    JointProbabilityDistribution, i.e. whether the product of its CPD
    factors equals the joint distribution.

    Parameters
    -----------
    JPD : An instance of JointProbabilityDistribution Class, for which you want to
        check the Imap

    Returns
    --------
    boolean : True if bayesian model is Imap for given Joint Probability Distribution
        False otherwise
    """
    if not isinstance(JPD, JointProbabilityDistribution):
        raise TypeError("JPD must be an instance of JointProbabilityDistribution")
    # Product of all CPD factors gives the distribution encoded by the model.
    cpd_factors = [cpd.to_factor() for cpd in self.get_cpds()]
    factor_prod = reduce(mul, cpd_factors)
    JPD_fact = DiscreteFactor(JPD.variables, JPD.cardinality, JPD.values)
    if JPD_fact == factor_prod:
        return True
    return False
def copy(self):
    """
    Return a copy of the model.

    Nodes, edges and (deep-copied) CPDs are duplicated into a fresh
    BayesianModel instance.

    Returns
    -------
    BayesianModel: Copy of the model on which the method was called.
    """
    duplicate = BayesianModel()
    duplicate.add_nodes_from(self.nodes())
    duplicate.add_edges_from(self.edges())
    if self.cpds:
        # Copy each CPD so the two models never share mutable state.
        copied_cpds = [cpd.copy() for cpd in self.cpds]
        duplicate.add_cpds(*copied_cpds)
    return duplicate
|
sandeepkrjha/pgmpy
|
pgmpy/models/BayesianModel.py
|
Python
|
mit
| 36,803
|
[
"VisIt"
] |
2f2750e2cf865c64b1d29ece39eb8a3edca24d3b003996ee02a22465a32f5c6f
|
# --- standard library ---
import os
import pdb
import re
import time
import ConfigParser  # Python 2 stdlib name (configparser on Python 3)
#import ipdb
# --- third-party ---
import h5py
import numpy as npexit
import numpy as np  # added: module body uses the `np` alias throughout; the
                    # `npexit` alias above looks like extraction garbling
import numpy.linalg as la
from numpy.linalg import svd
from numpy.fft import *
# --- project ---
from pylayers.antprop.channel import *
from pylayers.util.project import *
import pylayers.util.pyutil as pyu
"""
Module to handle scanner data stored and exploited
in hdf5 format.
Author : Mamadou Dialounke BALDE
.. currentmodule:: pylayers.measures.exploith5
.. autosummary::
"""
class Mesh5(PyLayers):
""" Class handling hdf5 measurement files
"""
def __init__(self, _filename='', **kwargs):
    """Initialize the hdf5 measurement-file handler.

    Parameters
    ----------
    _filename : string
        short name of the hdf5 measurement file; '.h5' is appended when
        missing.  When empty no file is opened.
    Nt : int
        number of transmitting antennas (default 8)
    Nr : int
        number of receiving antennas (default 4)
    imes : int
        measurement index (default 4)
    """
    if _filename != '':
        if '.h5' not in _filename:
            _filename = _filename + '.h5'
        filename = pyu.getlong(_filename, pstruc['DIRMES'])
        if os.path.isfile(filename):
            self._filename = _filename
            self.filename = filename
            fd = h5py.File(self.filename, 'r')
            try:
                lkeys = fd.keys()
                lcal = [c for c in lkeys if 'cal' in c]
                lmes = [m for m in lkeys if 'mes' in m]
                self.gcal = {}
                for ke in lcal:
                    num = int(ke.replace('cal', ''))
                    self.gcal[ke] = self.get_dcal(gcal=num)
                self.dmes = {}
                for ke in lmes:
                    self.dmes[ke] = ''
            finally:
                # Bug fix: the original wrote ``fd.close`` without calling it,
                # so the file handle was never released.
                fd.close()
    defaults = {
        'Nt': 8,
        'Nr': 4,
        'imes': 4
    }
    for k in defaults:
        if k not in kwargs:
            kwargs[k] = defaults[k]
    self.Nr = kwargs.pop('Nr')
    self.Nt = kwargs.pop('Nt')
    self.imes = kwargs.pop('imes')
def dir(self):
    """Print the names of all '.h5' files in the measurement directory."""
    path = pyu.getlong('', pstruc['DIRMES'])
    entries = os.listdir(path)
    regular_files = [f for f in entries if os.path.isfile(os.path.join(path, f))]
    for name in regular_files:
        if '.h5' in name:
            print(name)
def __repr__(self):
    """Build a human-readable summary: file parameters, the currently
    loaded measurement (index, position, calibration state) when present,
    then a dump of the hdf5 group/attribute tree.
    """
    if "filename" in self.__dict__:
        st = ''
        st = st + '-------------------------------------\n'
        st = st + '        Parameters               \n'
        st = st + '-------------------------------------\n'
        st = st + 'Directory : '+ str(self.filename) +'\n'
        st = st + '-------------------------------------\n'
        if 'mes' not in self.__dict__:
            st = st +'\n No measurement loaded \n'
        else:
            # A measurement has been loaded via read(): show its context.
            st = st + 'Index  : '+ str(self.ix)+'\n'
            st = st + 'Position  : '+str(self.pt)+'\n'
            if self.mes.calibrated:
                st = st+'\n Measurement calibrated : Yes\n'
            else:
                st = st+'\n Measurement calibrated : No\n'
        f = h5py.File(self.filename,'r')
        ke = f.keys()
        # Walk the hdf5 tree: top-level groups, their attributes, then
        # (for non-'mes' groups) sub-groups up to two levels deep.
        for k in ke:
            st = st + k + '\n'
            for ja in f[k].attrs.keys():
                st = st + '    ' + ja + ' : ' + str(f[k].attrs[ja])+ '\n'
            st = st+'\n'
            if 'mes' not in k:
                # try/except: some entries are datasets, not groups, and
                # cannot be iterated — fall through with a newline.
                try:
                    for jb in f[k]:
                        st = st +'\t\t' + jb + '\t'
                        for jba in f[k][jb].attrs.keys():
                            st = st + str(f[k][jb].attrs[jba])+ '\t'
                        st = st+'\n'
                        try:
                            for jc in f[k][jb]:
                                st = st +'\t\t' + jb + '\t' + jc + '\t'
                                for jca in f[k][jb][jc].attrs.keys():
                                    st = st + str(f[k][jb][jc].attrs[jca])+ '\t'
                                st = st+'\n'
                        except:
                            st = st+'\n'
                except:
                    st = st+'\n'
        f.close()
        #st = st + 'File  : '+ str(self.f) +'\n'
    else:
        st = 'file not set'
    return(st)
def info(self,num=0):
    """Print a summary of measurement groups in the hdf5 file.

    Parameters
    ----------
    num : int
        measurement group index; with the default 0, time/comment of every
        'mes*' group is listed, otherwise details of group 'mes<num>'.

    Notes
    -----
    Uses Python 2 ``print`` statements.
    NOTE(review): the file handle ``f`` is never closed here — confirm
    whether that is intentional.
    """
    f = h5py.File(self.filename,'r')
    ke = f.keys()
    if num ==0:
        # Overview mode: one line per measurement group.
        for k in ke:
            if 'mes' in k:
                print k
                print "    ",f[k].attrs['time']
                print "    ",f[k].attrs['comment']
    else:
        # Detail mode for a single measurement group.
        k = 'mes'+str(num)
        print k
        print "Date of Measurement  : ",f[k].attrs['time']
        print "Author : ",f[k].attrs['author']
        print "Comment : ",f[k].attrs['comment']
        print "Moving axe(s) : ",f[k].attrs['axes']
        print "Number of positions : ",f[k].attrs['axesn']
        print "Transmitting antenna position : ",f[k].attrs['pant']
        print "Transmitting antenna orientation : ",f[k].attrs['vant']
        print "Group of calibration ",f[k].attrs['gcal']
        print "Calibration Index in calibration group ",f[k].attrs['ical']
def get_dcal(self, gcal=1, ical=None):
    """ get calibration dictionnary

    Parameters
    ----------
    gcal : int
        calibration group index
    ical : int or None
        calibration index in group;
        if None, all calibrations of the group are provided in the dict

    Returns
    -------
    dcal : dict
        maps calibration-index strings to dicts of their hdf5 attributes
    """
    self.f = h5py.File(self.filename, 'r')
    k = "cal" + str(gcal)
    dcal = {}
    # Idiom fix: `is None` instead of `== None` for the sentinel check.
    if ical is None:
        # Collect the attribute dict of every calibration in the group.
        for key in self.f[k].keys():
            attrs = self.f[k][key].attrs
            dcal[key] = dict(zip(attrs.keys(), attrs.values()))
    else:
        key = str(ical)
        attrs = self.f[k][key].attrs
        dcal[key] = dict(zip(attrs.keys(), attrs.values()))
    self.f.close()
    return dcal
def readcal(self,gcal=1,ical=1):
    """ read calibration files

    Loads calibration ``ical`` of group ``gcal`` into ``self.cal`` (an
    uncalibrated Mchannel) and sets the frequency axis ``self.fGHz``.

    Parameters
    ----------
    gcal : int
        calibration group index
    ical : int
        calibration index
    """
    # calibration dictionnary
    self.dcal = self.get_dcal(gcal,ical)
    # fmin,fmax,Nf
    fminGHz = self.dcal[str(ical)]['fminghz']
    fmaxGHz = self.dcal[str(ical)]['fmaxghz']
    Nf = self.dcal[str(ical)]['nf']
    # get the mimo calibration file
    if '_filecalh5' in self.dcal[str(ical)]:
        # An external MIMO calibration file is referenced: combine it with
        # the SISO calibration stored in this file.
        _filecalh5 = self.dcal[str(ical)]['_filecalh5']
        # group of calibration mimo
        gcalm = self.dcal[str(ical)]['gcalm']
        filecalh5 = pyu.getlong(_filecalh5, pstruc['DIRMES'])+'.h5'
        self.fmimo = h5py.File(filecalh5,'r')
        self.fsiso = h5py.File(self.filename,'r')
        km = 'cal'+str(gcalm)
        ks = "cal"+str(gcal)
        Co = np.array(self.fmimo[km][str(ical)])
        # Normalize the MIMO calibration by its (0,0) channel, then rescale
        # by the SISO calibration of this file.
        # assumes Co is (Nm, Nr, Nt, Nf) with channel (0,0) as reference
        # — TODO confirm against the acquisition code.
        Co11 = Co[:,0,0,:]
        Cto = Co/Co11[:,None,None,:]
        Cn11 = np.array(self.fsiso[ks][str(ical)])
        Cn = Cn11*Cto
        self.fsiso.close()
        # NOTE(review): self.fmimo is not closed on this branch — confirm.
    else:
        self.fmimo = h5py.File(self.filename,'r')
        km = 'cal'+str(gcal)
        Cn = np.array(self.fmimo[km][str(ical)])
        self.fmimo.close()
    self.fGHz = np.linspace(fminGHz,fmaxGHz,Nf)
    self.cal = Mchannel(x=self.fGHz,y=Cn,filename=self.filename,mes=km,calibrated=False)
def read(self,k,ix=[0,0,0,0],calibrate=True):
    """ read measure

    Parameters
    ----------
    k : int
        index of measure group to read
    ix : [ix,iy,iz,ia] list
        scanner position/angle indices (NOTE: mutable default — not
        mutated here, but callers should not rely on sharing it)
    calibrate : boolean
        apply the calibration to the loaded measurement via caluncal()

    Notes
    -----
    For the selected measurement
        + retrieve the corresponding calibration gcal/ical
        + load the corresponding calibration .cal member as a Mchannel
    NOTE(review): the second h5py.File handle opened below is left open
    after this method returns — confirm whether that is intentional.
    """
    k = 'mes'+str(k)
    self.f = h5py.File(self.filename,'r')
    # gcal/ical are stored as strings in the hdf5 attributes; eval turns
    # them back into integers (e.g. 'cal1' -> 1).
    gcal = eval(self.f[k].attrs['gcal'].replace('cal',''))
    ical = eval(self.f[k].attrs['ical'])
    self.author = self.f[k].attrs['author']
    self.time = self.f[k].attrs['time']
    self.pAnt = self.f[k].attrs['pant']
    self.vAnt = self.f[k].attrs['vant']
    self.Nr = self.f[k].attrs['nr']
    self.Nt = self.f[k].attrs['nt']
    # self.power = self.f[k].attrs['power']
    self.dmes = dict(zip(self.f[k].attrs.keys(),self.f[k].attrs.values()))
    self.f.close()
    # Load the matching calibration (sets self.fGHz and self.cal).
    self.readcal(gcal=gcal,ical=ical)
    # Reopen the file to read the raw channel at scanner index ix.
    self.f = h5py.File(self.filename,'r')
    self.mes = Mchannel(x=self.fGHz,y=np.array(self.f[k][str(ix[0])][str(ix[1])][str(ix[2])][str(ix[3])]),filename=self.filename,mes=k,calibrated=False)
    self.ix = ix
    # pt/pa/pg: per-position attributes of the selected dataset.
    self.pt = self.f[k][str(ix[0])][str(ix[1])][str(ix[2])][str(ix[3])].attrs['pt']
    self.pa = self.f[k][str(ix[0])][str(ix[1])][str(ix[2])][str(ix[3])].attrs['pa']
    self.pg = self.f[k][str(ix[0])][str(ix[1])][str(ix[2])][str(ix[3])].attrs['pg']
    if calibrate:
        self.caluncal()
def extract(self,lx='[:]',ly='[:]'):
    """ extract channel matrix from h5file

    Parameters
    ----------
    lx : string
        frequency string extraction, e.g. '[0:100]'
    ly : string
        nm,nr,nt string extraction, e.g. '[0,:,:]'

    Returns
    -------
    Mchannel : the current measurement itself when nothing is sliced,
        otherwise a new Mchannel built from the requested sub-arrays.
    """
    # paste ix and iy : merge the two bracketed slice strings into one,
    # e.g. '[0,:,:]' + '[5:9]' -> '[0,:,:,5:9]'
    lystart = re.sub(']$','',ly)
    lxend = re.sub('^\[','',lx)
    ly = lystart +','+ lxend
    if ly == '[:,:]':
        # Full extraction: return the measurement unchanged.
        return(self.mes)
    else:
        # eval applies the textual slice to the frequency axis; lx comes
        # from the caller, so only trusted slice strings should be passed.
        x = eval('self.mes.x'+lx)
        if x.ndim==0:
            # A scalar slice is promoted back to a 1-element array.
            x = np.array([x])
        y = pyu.npextract(self.mes.y,ly)
        M = Mchannel(x=x,y=y,calibrated=self.mes.calibrated)
        return(M)
def caluncal(self):
    """ calibrate or uncalibrate the current loaded measurement

    Toggles ``self.mes`` between the calibrated and raw state by
    dividing / multiplying by the mean calibration response
    (mean of ``self.cal.y`` over axis 0).
    """
    if 'mes' in self.__dict__:
        if self.mes.calibrated==False:
            # Apply calibration: divide by the average calibration response.
            self.mes.y = self.mes.y/np.mean(self.cal.y,axis=0)
            self.mes.calibrated=True
        else:
            # Undo calibration: multiply the response back in.
            self.mes.y = self.mes.y*np.mean(self.cal.y,axis=0)
            self.mes.calibrated=False
    else:
        print "read data first"
def open(self,mode='r'):
    """Open the underlying hdf5 file and keep its handle in ``self.f``.

    Parameters
    ----------
    mode : string
        h5py file mode (default 'r')
    """
    fullpath = pyu.getlong(self._filename, pstruc['DIRMES'])
    self.f = h5py.File(fullpath, mode)
def close(self):
    """Close the hdf5 file handle previously opened with ``open``."""
    self.f.close()
def saveini(self,ical=1,_fileini='vna_config.ini'):
    """ save calibration parameters in .ini file

    Writes the 'stimulus' and 'response' sections of a VNA configuration
    file from the attributes of calibration ``ical`` (``self.dcal`` must
    have been populated, e.g. by readcal()).

    Parameters
    ----------
    _fileini : string
        calibration ini file
    ical : int
        calibration number
    """
    dcal = self.dcal[str(ical)]
    # Python 2 module name; configparser on Python 3.
    config = ConfigParser.ConfigParser()
    # stimulus section : sweep definition
    config.add_section("stimulus")
    config.set("stimulus",'fminghz',dcal['fminghz'])
    config.set("stimulus",'fmaxghz',dcal['fmaxghz'])
    config.set("stimulus",'nf',dcal['nf'])
    config.set("stimulus",'power',dcal['power'])
    # response section : acquisition settings
    config.add_section("response")
    config.set("response",'param',dcal['param'])
    # averaging is forced on; navrg/ifbhz come from the calibration.
    config.set("response",'average','on')
    config.set("response",'navrg',dcal['navrg'])
    config.set("response",'ifbhz',dcal['ifbhz'])
    config.set("response",'win',1)
    fileini = pyu.getlong(_fileini,pstruc['DIRMES'])
    fd = open(fileini,"w")
    config.write(fd)
    fd.close()
def aggregate(self, lmes=None, lix=None, lx='[:]', ly='[:]', axis=2, calibrate=False):
    """
    Aggregate several measurements into a single Mchannel.

    Parameters
    ----------
    lmes : list of int
        list of of group of measure to aggregate
    lix : list of list
        for each measure the list of selected scanner axes (x,y,z,a)
    lx : string
        frequency extraction string
    ly : string
        MIMO channel extraction string
    axis : int
        axis of concatenation
    calibrate : boolean
        calibrate each measurement before extraction

    Returns
    -------
    MS : Mchannel
    """
    # Guard against the shared-mutable-default pitfall of the original
    # signature (lmes=[], lix=[]); passing [] explicitly behaves the same.
    lmes = [] if lmes is None else lmes
    lix = [] if lix is None else lix
    assert (len(lmes) == len(lix))

    extracted_y = []
    x = None
    for k, m in enumerate(lmes):
        self.read(m, ix=lix[k], calibrate=calibrate)
        M = self.extract(lx=lx, ly=ly)  # Nm x Nt x Nr x Nf
        x = M.x
        extracted_y.append(M.y)

    # One explicit concatenation replaces the original bare try/except
    # accumulation, which silently swallowed any shape error.
    if len(extracted_y) == 1:
        y = extracted_y[0]
    else:
        y = np.concatenate(extracted_y, axis=axis)

    MS = Mchannel(x=x, y=y, calibrated=calibrate)
    return(MS)
    def magre(self, ipos = 0, iNr = 0, npos = 5, loop = True):
        """ enable to aggregate the channel matrices

        Measurements 1..12 hold the channel of virtual antenna group h1..h4
        toward users u1..u3; this stacks them into virtual large arrays.

        Parameters
        ----------
        ipos : int
            number of the ith position (used only when loop is False)
        iNr : int
            number of the receiver antenna. Note that each of the number of the antenna
            can be see as a user
        npos : int
            number of spatial position of the scanner
        loop : boolean
            if True : iterate over the npos scanner positions and return the
            per-user aggregated tensors (U1,U2,U3), each (npos, Nr, Ntagr, Nf)
            if False : return Hagr (3, Nt*imes, Nf) for the single position ipos
        """
        if loop:
            for pos in range(npos):
                # Mes 1 : h1 u1
                # NOTE(review): measurements 1 and 2 are read with a 4-element
                # ix while the others use 5 elements -- confirm intended
                self.read(1,ix=[pos,0,0,0])
                H11=self.mes.y
                # Mes 2 : h2 u1
                self.read(2,ix=[pos,0,0,0])
                H21=self.mes.y
                # Mes 3 : h3 u1
                self.read(3,ix=[pos,0,0,0,0])
                H31=self.mes.y
                # Mes 4 : h4 u1
                self.read(4,ix=[pos,0,0,0,0])
                H41=self.mes.y
                # Mes 5 : h1 u2
                self.read(5,ix=[pos,0,0,0,0])
                H12=self.mes.y
                # Mes 6 : h2 u2
                self.read(6,ix=[pos,0,0,0,0])
                H22=self.mes.y
                # Mes 7 : h3 u2
                self.read(7,ix=[pos,0,0,0,0])
                H32=self.mes.y
                # Mes 8 : h4 u2
                self.read(8,ix=[pos,0,0,0,0])
                H42=self.mes.y
                # Mes 9 : h1 u3
                self.read(9,ix=[pos,0,0,0,0])
                H13=self.mes.y
                # Mes 10 : h2 u3
                self.read(10,ix=[pos,0,0,0,0])
                H23=self.mes.y
                # Mes 11 : h3 u3
                self.read(11,ix=[pos,0,0,0,0])
                H33=self.mes.y
                # Mes 12 : h4 u3
                self.read(12,ix=[pos,0,0,0,0])
                H43=self.mes.y # (1, Nr, Nt, Nf)
                # fGHz = self.mes.x
                # stack the 4 virtual groups along the transmit axis, per user
                u1 = np.concatenate((H11[:,:,:,:],H21[:,:,:,:],H31[:,:,:,:],H41[:,:,:,:]),axis=2) # (1, Nr, Ntagr, Nf)
                u2 = np.concatenate((H12[:,:,:,:],H22[:,:,:,:],H32[:,:,:,:],H42[:,:,:,:]),axis=2)
                u3 = np.concatenate((H13[:,:,:,:],H23[:,:,:,:],H33[:,:,:,:],H43[:,:,:,:]),axis=2)
                # first pass : U1/U2/U3 do not exist yet, except branch seeds them
                try:
                    U1 = np.concatenate((U1,u1),axis=0) # (npos,Nr,Ntagr,Nf)
                    U2 = np.concatenate((U2,u2),axis=0)
                    U3 = np.concatenate((U3,u3),axis=0)
                except:
                    U1 = u1
                    U2 = u2
                    U3 = u3
            return(U1,U2,U3)
        # first virtualization of Tx
        for i in range(self.imes):
            i1 = 1 + i
            self.read(i1,ix=[ipos,0,0,0,0])
            h = self.mes.y[0,iNr,:,:] # (8, 801)
            # ipdb.set_trace()
            try:
                h1 = np.concatenate((h1,h),axis=0) # Nt*imes x Nf
            except:
                h1 = h
        # return h1
        # second virtualization of Tx
        for i in range(self.imes):
            i2 = 5 + i
            self.read(i2,ix=[ipos,0,0,0,0])
            h = self.mes.y[0,iNr,:,:] # (8, 801)
            try:
                h2 = np.concatenate((h2,h),axis=0) # Nt*imes x Nf
            except:
                h2 = h
        # return h2
        # third virtualization of Tx
        for i in range(self.imes):
            i3 = 9 + i
            self.read(i3,ix=[ipos,0,0,0,0])
            h = self.mes.y[0,iNr,:,:] # (8, 801)
            try:
                h3 = np.concatenate((h3,h),axis=0) # Nt*imes x Nf
            except:
                h3 = h
        # return h3
        Hagr = np.concatenate((h1[None,:,:],h2[None,:,:],h3[None,:,:]),axis=0) # (3,Nt*imes,Nf)
        return(Hagr)
def eigenvalues(self,h = np.empty(((5,4,32,801)),dtype=complex)):
"""
Examples
--------
>>> U1,U2,U3 = M.magre(loop=True) # 5 x 4 x 32 x Nf
>>> S1 = M.eigenvalues(h=U1) # 5 x 801 x 4
>>> S2 = M.eigenvalues(h=U2)
>>> S3 = M.eigenvalues(h=U3)
"""
u1 = np.swapaxes(h,0,2) # Ntagr x Nr x npos x Nf
h = u1 # Ntagr x Nr x npos x Nf
hT = np.conj(np.swapaxes(h,0,1)) # Nr x Ntagr x npos x Nf
hhT = np.einsum('ijlm,kilm->jklm',h,hT) # Nr x Nr x npos x Nf
hhT1 = hhT.swapaxes(0,2) # npos x Nr x Nr x Nf
hhTb = hhT1.swapaxes(1,3) # npos x Nf x Nr x Nr
U,S,V = svd(hhTb) # npos x Nf x Nr
return(S)
def singvalues(self,h = np.empty(((5,4,32,801)),dtype=complex)):
"""
Examples
--------
>>> U1,U2,U3 = M.magre(loop=True) # 5 x 4 x 32 x Nf
>>> S1 = M.eigenvalues(h=U1) # 5 x 801 x 4
>>> S2 = M.eigenvalues(h=U2)
>>> S3 = M.eigenvalues(h=U3)
"""
h13 = np.swapaxes(h,1,3) # npos x Nf x Ntagr x Nr
h23 = np.swapaxes(h13,2,3) # npos x Nf x Nr x Ntagr
U,S,V= svd(h23) # npos x Nf x Nr
return(S)
    def chancor(self,h = np.empty((2,32),dtype=complex)):
        """ calculate the channel correlation versus number of aggregated antennas

        Parameters
        ----------
        h : np.array (2 x Mt) complex
            two user channels stacked on axis 0

        Returns
        -------
        y : np.array (Mt,) complex -- normalized channel correlation
        delta2 : np.array (Mt,) complex -- squared cross term of the Gram matrix

        Examples
        --------
        >>> M = Mesh5('mesMIMO')
        >>> Hmes = M.magre(loop=False)
        >>> hiid = M.chaniid()
        >>> yiid,delta2iid = M.chancor(Hf=hiid)
        >>> hmeas = np.sum(Hmes[0:2,:,:],axis=2) # sum over frequency and u1 and u2 are chosen
        >>> ymeas,delta2 = M.chancor(Hf=hmeas)
        """
        # choose u1 and u2 : Hmes[0:2,:,:] (2, Ntagr, Nf)
        # choose u1 and u3 : Hmes[::2,:,:] (2, Ntagr, Nf)
        # choose u2 and u3 : Hmes[1:,:,:] (2, Ntagr, Nf)
        Mt = self.Nt*self.imes
        y = np.empty((Mt),dtype=complex)
        delta2 = np.empty((Mt),dtype=complex)
        for im in range(Mt):
            # NOTE(review): h[:,0:0] is empty when im == 0, making trace(G)=0
            # and producing nan -- presumably the slice should be h[:,0:im+1];
            # confirm against the intended antenna count
            H = h[:,0:im]
            Hh = np.conj(H.T)
            G = np.dot(H,Hh)
            # normalize the 2x2 Gram matrix to unit average diagonal
            Gn = G/(np.trace(G)/2.)
            # channel corellation
            d_2 = np.abs(Gn[0][1]**2)
            denum = np.prod(np.diag(Gn))
            yk = d_2/denum
            y[im]=yk
            delta2[im]=y[im]*denum
        return(y,delta2) # (Mt,)
    def chaniid(self):
        """ generate an i.i.d. complex Gaussian reference channel

        Returns
        -------
        Hiid : np.array (K x Mt) complex
            one realization with K=2 users and Mt antennas.
            NOTE(review): Hiid is rebuilt on every pass of the im loop, so
            only the final (full-width) realization survives -- confirm
            whether intermediate widths were meant to be kept.

        Examples
        --------
        >>> M = Mesh5('mesMIMO')
        >>> hiid = M.chaniid()
        """
        K = 2
        Mt = self.Nt * self.imes
        for im in range(Mt):
            im = im + 1
            Hiid = np.empty((K*1,im),dtype=complex)
            for k in range(K):
                Re = np.random.randn(1,im) # K x Mt
                Im = np.random.randn(1,im) # K x Mt
                Hk = Re +1j*Im # K x Mt
                Hiid[k,:] = Hk
        return(Hiid)
    def irplot(self,y=np.empty((5,4,32,801),dtype=complex),npts = 100000, Nz = 0, beta = 8.6):
        """ plot the impulse response of the channel U (contained the users).

        Parameters
        ----------
        y : np.array (npos x K x Mt x Nf) complex
            aggregated channel tensor (see magre); U : 3 x Ntagr x Nf
        npts : int
            unused here -- kept for interface compatibility
        Nz : int
            number of zero-padding samples appended before the FFT
        beta : kaiser window shape
            0 : rectangular
            5 : similar to a Hamming
            6 : similar to a Hanning
            8.6 : similar to a blackman

        Examples
        --------
        >>> M = Mesh5('mesMIMO')
        >>> U1,U2,U3 = M.magre(loop=True)
        >>> M.irplot(y=U1)
        """
        U = y
        K = U.shape[1]
        fGHz=np.linspace(1.8,2.2,801)
        fmin = fGHz[0]
        fmax = fGHz[-1]
        Nf = len(fGHz)
        df = (fmax-fmin)/(Nf-1)
        Mt = self.Nt*self.imes
        win = np.kaiser(Nf,beta)
        # zero padding extends the band, refining the delay resolution
        fmaxbis = fmax+Nz*df
        dfbis = (fmaxbis-fmin)/(Nf+Nz-1)
        Tobs = 1/dfbis
        tau = np.linspace(-Tobs/2,Tobs/2,Nf+Nz)
        for i in range(5):
            for r in range(K):
                for t in range(Mt):
                    # windowed, zero-padded FFT of the conjugated transfer function
                    irU = np.fft.fft(np.conj(np.hstack((U[i,r,t,:]*win,np.zeros(Nz)))),axis=0)
                    IRU =fftshift(irU)
                    plt.plot(tau,10*np.log10(np.abs(IRU)),linewidth=1)
        plt.title('Impulse Response while axis X moving along 70cm')
        plt.xlabel(r'Delay $\tau$(ns)')
        plt.ylabel('Amplitude (dB)')
        plt.xlim(0,500)
        plt.grid()
def su_lund(self,g,rho):
""" handles the performance in terms of SU
if g --> 1 means decorellation between channels
"""
Mt = self.Nt*self.imes
C_su = np.empty(((Mt)),dtype=complex)
for im in range(Mt):
C_su[im] = np.log(1 + rho*(1 + g))/np.log(2) # shape : Nt
return(C_su)
    def zf_lund(self,g,rho,ipos,iNr,delta2,Ntrial=500):
        """ handles the performance in terms of ZF
        if g --> 1 means decorellation between channels

        Parameters
        ----------
        g : float -- channel correlation parameter
        rho : float -- SNR (linear scale)
        ipos, iNr : int -- unused here, kept for interface symmetry
        delta2 : np.array (Mt,) -- squared correlation term (see chancor)
        Ntrial : int -- number of averaging trials

        Examples
        --------
        >>> M = Mesh5('mesMIMO')
        >>> Hmes = M.magre(loop=False)
        >>> hmeas = Hmes[0:2,:,:] # sum over frequency and u1 and u2 are chosen
        >>> y,delta2 = M.chancor(h=np.sum(Hmes[0:2,:,:],axis=2))
        >>> czf = M.zf_lund(g=0.3,rho=10.0,ipos = 0,iNr = 0,delta2=delta2,h=hmeas)
        """
        K = 2
        Mt = self.Nt*self.imes
        rho2 = rho**2
        g2 = g**2
        # NOTE(review): this denum is overwritten inside the loop and never
        # read beforehand -- appears to be dead
        denum = 1 - g2
        treshold = 1 - g2 - (2*g/rho)
        C_zf = np.empty((Mt),dtype=complex)
        for im in range(Mt):
            t = 0
            # NOTE(review): d2 is deterministic, so every one of the Ntrial
            # iterations adds the same value and the average equals a single
            # evaluation -- confirm whether a random draw was intended here
            for i in range(Ntrial):
                d2 = delta2
                if d2[im] <= treshold:
                    num = (2 + rho * (1 - g2 - d2[im]))**2
                    denum = 4 * (1 - g2)
                    c_zf = np.log(num / denum) / np.log(2)
                else:
                    numerat = rho*(1 - g2 - d2[im])
                    denumerat = 1 - g
                    c_zf = np.log(1 + (numerat/denumerat))/np.log(2)
                t = t + c_zf
            C_zf[im] = t / Ntrial # shape : Nt
        return(C_zf)
def dpc_lund(self,g,rho,ipos,iNr,delta2):
""" handles the performance in terms of DPC
if g --> 1 means decorellation between channels
"""
K = 2
Mt = self.Nt*self.imes
rho2 = rho**2
g2 = g**2
denum = 1 - g2
treshold = 1 - g2 - (2*g/rho)
C_dpc = np.empty((Mt),dtype=complex)
for im in range(Mt):
d2 = delta2
if d2[im] <= treshold:
num = rho2 * ((1 - g2 - d2[im])**2) + (4 * g2)
denum = 4 * (1 - g2 - d2[im])
c_dpc = np.log(1 + rho + (num / denum)) / np.log(2)
else:
numerat = rho*(1 - g2 - d2[im])
denumerat = 1 - g
c_dpc = np.log(1 + rho*(1 + g))/np.log(2)
C_dpc[im] = c_dpc # shape : Nt
return(C_dpc)
    def mmse_lund(self,g,rho,eps=1e-16):
        """ handles the performance in terms of ZF, DPC, MMSE and MF

        Parameters
        ----------
        eps : is the tolerance level (float)
        if g --> 1 means decorellation between channels

        NOTE(review): this method is unfinished and will raise at runtime:
        P1 and P2 are used before being defined (while condition and nter1),
        chancor is called with an undefined Hf, and C_mmse is returned
        without ever being filled -- confirm the intended power-iteration
        before using it.
        """
        K = 2
        Mt = 32
        rho2 = rho**2
        g2 = g**2
        denum = 1 - g2
        treshold = 1 - g2 - (2*g/rho)
        C_mmse = np.empty((Mt))
        delta2 = np.empty((Mt))
        Hiid = self.iidchan()
        Hmes = self.magre()[:,:,10] # we choose two users : (2, 32, 801)
        y,delta2 = self.chancor(Hf)
        while(P1 - P2 > eps) :
            for im in range(Mt):
                # Hf = Hmes[:,:,10] # for one specific frequency
                Hf = Hmes
                d2 = delta2
                alpha = K / rho
                alpha2 = alpha**2
                nter1 = 2 * P1 * g * (alpha2 - 1 + g2 + d2[im])
                dter1 = (((1 + g + alpha) * (1 - g - alpha)) - d2[im])**2
                nter2 = ((1 + g + alpha)**2) * (1 - g) - (1 + g + 2*alpha) * d2[im]
                dter2 = (((1 + g + alpha) * (1 - g - alpha)) - d2[im])**2
                ter1 = nter1 / dter2
                ter2 = nter2 / dter2
                normfac = ter1 + ter2
                num1 = rho * P1 * ( ((1 + g)*(1 - g + alpha) - d2[im])**2 )
                denum1 = (rho * P2 * alpha2 * d2[im]) + \
                         (normfac * ((((1 + g + alpha) * (1 - g + alpha)) - d2[im])**2))
                num2 = rho * P2 * ( ((1 - g)*(1 + g + alpha) - d2[im])**2 )
                denum2 = (rho * P1 * alpha2 * d2[im]) + \
                         (normfac * ((((1 + g + alpha) * (1 - g + alpha)) - d2[im])**2))
                sinr1 = num1 / denum1
                sinr2 = num2 / denum2
                # C_mmse[im] =
        return(C_mmse)
# def montecarlo(self,Ntrial=100, ym = np.empty((32),dtype=complex)):
# """
# """
# ty = 0
# for k in range(Ntrial): # Monte carlo
# ty = ty + ym
# ymont = ty / Ntrial
# return(ymont)
# def plot_perf(self, Ntrial = 100, g = 0.2, rho = 5.0, y = np.empty(((32)),dtype=complex)):
# """ plot performances of the capacity
# """
# Mt = self.Nt * self.imes
# ty = np.zeros((Ntrial,Mt))
# # c = np.zeros((Mt))
# H3 = self.magre(loop=False)
# for k in range(Ntrial):
# y11,delta11 = self.chancormeas(yc = np.mean(H3[0:2,:,:],axis=2))
# ty11 = ty11 + y11
# c = ty11 / Ntrial
# plt.plot(np.arange(32),c)
# plt.title('performances of the sum rates over the corellation : g' +
# ' = ' + str(g) + ' , ' + r'$\rho$' + ' = ' + str(rho))
# plt.xlabel('Number of antennas')
# plt.ylabel('sum rate [bits/s/Hz]')
# plt.legend(loc='best')
# plt.legend()
# plt.grid()
# def plot_chor(self):
# """ plot the channel correlation
# """
# Mt = self.Nt * self.imes
# tymes = np.zeros((801,Mt))
# Hiid = self.iidchan()
# Hmes = self.magre()[0:-1,:,:] # we choose two users : (2, 32, 801)
# for k in range(801): # not good do the monte carlo an another way
# Hf = Hmes[:,:,k]
# y,delta2 = self.chancor(Hf)
# tymes[k,:]=y
# ty_mean = np.mean(Hiid,axis=0)
# tymes_mean = np.mean(tymes,axis=0)
# plt.plot(np.arange(Mt),ty_mean,'b',label='IID Gaussian Channel',linewidth=3)
# plt.plot(np.arange(Mt),tymes_mean,'r',label='Measured Channel',linewidth=3)
# plt.xlabel('Number of base station antennas')
# plt.ylabel('Channel correlation')
# plt.title('Behavior of the channel correlation over base station antennas')
# plt.legend()
# plt.grid()
    def plot(self,cmd='mes'):
        """show calibration from vna

        Parameters
        ----------
        cmd : string
            mes | cal | ir
            'mes' : plot the loaded measurement (title reflects calibration state)
            'cal' : plot the calibration transfer function
            'ir'  : plot the impulse response (inverse Fourier transform of mes)
        """
        #self.read(mes,lg)
        plt.ion()
        if cmd == 'mes':
            if self.mes.calibrated:
                self.mes.plot()
                plt.suptitle("Measurement Calibrated Channel")
            else:
                # NOTE(review): the uncalibrated branch only sets a title and
                # never plots -- confirm whether self.mes.plot() was intended
                plt.suptitle("Measurement Channel Not Calibrated")
        if cmd == 'cal':
            self.cal.plot()
            plt.suptitle("Calibration transfer function")
        if cmd == 'ir':
            cir = self.mes.ift(ffts=1)
            cir.plot()
            plt.suptitle("Impulse Response")
        plt.tight_layout()
        plt.show()
    def eig_val(self):
        """ calculate the eigen value of the matrix HdH.

        Returns
        -------
        HdH : U^{\dagger} L U : Hermitian transfer matrix : npos x nf x nt x nt
        Ud : Unitary tensor : npos x nf x nt x nt
        L : Lambda eigen values : npos x nf x nt
        U : Unitary tensor : npos x nf x nt x nt
        """
        H = self.matrix() #H : npos x nr x nt x nf
        Hd = np.conj(H.swapaxes(1,2)) #Hd : npos x nt x nr x nf
        HdH = np.einsum('ijkm,iklm->ijlm',Hd,H) #HdH : npos x nt x nt x nf
        # frequency axis moved forward so la.svd batches over (npos,nf)
        HdH = HdH.swapaxes(1,3) #HdH : npos x nf x nt x nt
        #Ud : npos x nf x nt x nt
        #L : npos x nf x nt
        #U : npos x nf x nt x nt
        # svd of the Hermitian PSD matrix HdH yields its eigenvalues in L
        Ud,L,U = la.svd(HdH)
        return (HdH,Ud,L,U)
    def sing_val(self):
        """ calculate the singular values of the matrix H.

        Returns
        -------
        H : U D U^{\dagger} Hermitian transfer matrix : npos x nr x nt x nf
        U : Unitary tensor : npos x nf x nt x nt
        D : Singular values of the matrix H : npos x nf x nr
        Vd : Unitary tensor : npos x nf x nr x nr
        """
        H = self.matrix() # H.shape : npos x nr x nt x nf
        # move frequency forward so la.svd batches over (npos,nf)
        Hswp = H.swapaxes(1,3)
        #U : npos x nf x nt x nt
        #D : npos x nf x nr
        #Vd : npos x nf x nr x nr
        U,D,Vd = la.svd(Hswp)
        return (H,U,D,Vd)
def normalize(self):
""" Normalization of channel matrix H
"""
H = self.matrix() #Hnorm.shape : npos x nr x nt x nf
HdH,Ud,L,U = self.eig_val() #HdH : npos x nt x nt x nf
HdHsw = HdH.swapaxes(1,3) #HdHsw : npos x nf x nt x nt
rg = np.real(np.sqrt(np.sum(L,axis=2)/(self.Nt*self.Nr))) #rg2.shape : npos x nf
Hnorm = H / rg[:,None,None,:] # Hnorm2.shape : npos x nr x nt x nf
return(H,Hnorm,rg)
def CBcapacity(self,Pt=np.array([1e-3]),Tp=273,h = np.empty(((5,4,32,801)),dtype=complex)):
""" BLAST algorithm
calculates the deterministic single user MIMO channel capacity
Parameters
----------
Pt : np.array (,NPt)
the total power is assumed uniformaly distributed over the whole bandwidth
Tp : Receiver Temperature (K)
Returns
-------
C : spectral efficiency (bit/s/Hz)
np.array (Nf,NPt)
rho : SNR
np.array (Nf,Nt,NPt)
log_2(det(I+(Et/(N0Nt))HH^{H})
N0 : spectral noise density
"""
fGHz=np.linspace(1.8,2.2,801)
fmin = fGHz[0]
fmax = fGHz[-1]
Nf = len(fGHz)
BGHz = fGHz[-1]-fGHz[0]
dfGHz = fGHz[1]-fGHz[0]
U1 = h
S = self.eigenvalues(h=U1) # npos x Nf x Nr
if type(Pt)==float:
Pt=np.array([Pt])
# White Noise definition
kB = 1.03806488e-23
# N0 ~ J ~ W/Hz ~ W.s
N0 = kB*Tp
#Pt.shape = (power,)
Ps = Pt/(self.Nt) #Ps.shape = (power,)
Pb = N0*BGHz*1e9 # Watts ; Pb.shape = (power,)
rho = (Ps[None,None,None,:]/Pb)*S[:,:,:,None] # rho.shape : npos x Nf x Nr x power
#We sum along the Nr
CB = dfGHz*np.sum(np.log(1+rho)/np.log(2),axis=2) # CB.shape : npos x nf x power
return(CB)
def WFcapacity(self,Pt=np.array([1e-3]),Tp=273,h = np.empty(((5,4,32,801)),dtype=complex)):
""" waterfilling algorithm
calculates the deterministic single user MIMO channel capacity
waterfilling algorithm :
$P_i$ = ($\mu$ - (1/$\sigma$^2))^+ ; i = 1,2,...,r and r = min(Nr,Nt)
$a$^+ is defined as max(a,0) and $\mu$ is chosen such as sum(Pi) = Pt
The water level ($\mu$) that touch the worst channel is the hihest one.
Pt is the Available power.
Ps is the powerin each channel.
step 1 : calculates the water level that touch the worst channel and then transmits
zero power in this worst channel.
step 2 : calculates the power (Ps) in each channel for this channel :
if the sum of all these power in Ps < Pt:
Divide the remaining power equally among all the channels (in order to
increase $\mu$).
if the sum of all of these powers in Ps > Pt:
Remove the worst channel and repeat the process.
NB : at low SNR, the waterfilling algorithm allocates all power to the strongest of the r higest $\lambda$
at high SNR, the waterfilling algorithm allocates approximately equal power over the eigenvalues of HHh.
Parameters
----------
Pt : the total power to be distributed over the different spatial
channels using water filling
Tp : Receiver Noise Temperature (K)
Returns
-------
C : capacity (bit/s) : npos x nf x power
rho : SNR (in linear scale) : npos x nf x nt x power
formulas log_2(det(It + HH^{H})
"""
npos = h.shape[0]
Nr = h.shape[1]
fGHz=np.linspace(1.8,2.2,801)
Nf = len(fGHz)
BGHz = fGHz[-1]-fGHz[0]
dfGHz = fGHz[1]-fGHz[0]
U1 = h
S = self.eigenvalues(h=U1) # npos x Nf x Nr
# White Noise definition
kB = 1.03806488e-23 #Boltzman constant
N0 = kB*Tp #N0 ~ J ~ W/Hz ~ W.s
#
#Iterative implementation of Water Filling algorithm
#
pb = N0*dfGHz*1e9*np.ones((npos,Nf,Nr)) # npos x nf x nr
pt = Pt[None,None,None,:]/((Nf-1)*Nr) # power x power x power x power
mu = pt # power x power x power x power
Q0 = np.maximum(0,mu-pb[:,:,:,None] / S[:,:,:,None]) # npos x Nf x Nr x power
u = np.where(Q0>0)[0]
Peff = np.sum(np.sum(Q0,axis=1),axis=1) #Peff.shape : npos x power
deltamu = pt
#while (np.abs(Peff-Pt)>1e-16).any():
while (np.abs(Peff-Pt)>1e-8).any():
mu = mu + deltamu
x = pb[:,:,:,None]/S[:,:,:,None] # npos x nf x nt x power
Q0 = np.maximum(0,mu-x) # npos x nf x nr x power
Peff = np.sum(np.sum(Q0,axis=1),axis=1) # npos x power
usup = np.where(Peff>Pt)[0]
mu[:,:,:,usup] = mu[:,:,:,usup] - deltamu[:,:,:,usup]
deltamu[:,:,:,usup] = deltamu[:,:,:,usup]/2.
# ipdb.set_trace()
Qn = Q0/pb[:,:,:,None]
rho = Qn*S[:,:,:,None] # npos x nf x nr x power
Cwf = dfGHz*np.sum(np.log(1+rho)/np.log(2),axis=2) # npos x nf x power
return(Cwf)
    def zf_precoding(self,kusers=85):
        """ zero forcing channel inversions

        Parameters
        ----------
        kusers : int
            number of users sharing the normalized power

        NOTE(review): WFcapacity (above) returns a single array, so the
        3-value unpack on the first line will raise -- confirm the intended
        WFcapacity signature. self.freq() is assumed to return
        (fGHz, Nf, BGHz, dfGHz).
        """
        Qn,rho,Cwf = self.WFcapacity()
        fGHz,Nf,BGHz,dfGHz = self.freq()
        Hs = self.matrix()
        H = Hs.swapaxes(1,3) # H : npos x nf x nt x nr
        Hh = np.conj(H.swapaxes(2,3)) # Hh : npos x nf x nr x nt
        HHh = np.einsum('ijkm,ijml->ijkl',H,Hh) # HHh : npos x nf x nt x nt
        iHHh = np.linalg.inv(HHh) # iHHh : npos x nf x nt x nt
        # Design of the precoding matrix W (pseudo-inverse of the channel)
        W = np.einsum('ijkl,ijlm->ijkl',Hh,iHHh) # W : npos x nf x nr x nt
        W1 = W.swapaxes(2,3) # W1 : npos x nf x nt x nr
        # print "H.shape :",H.shape
        # print "Hh.shape :",Hh.shape
        # print "HHh.shape :",HHh.shape
        # print "iHHh.shape :",iHHh.shape
        # print "W.shape :",W.shape
        hk = np.sum(H,axis=0) # hk : nf x nt x nr
        wk = np.sum(W1,axis=0) # hk : nf x nt x nr
        HW = np.abs(hk*wk)**2
        # power equally shared among the kusers users
        rho = Qn/kusers
        Czf = dfGHz*np.sum(np.log(1+(rho*HW)/np.log(2)),axis=0)
        #print "W1.shape :",W1.shape
        #print "rho.shape :",rho.shape
        #print "Czf.shape :",Czf.shape
        return(W1,rho,Czf)
if __name__=='__main__':
    # run the embedded doctests, then load a sample 4x8 MIMO measurement
    doctest.testmod()
    M = Mesh5('mes_4_8')
    H = M.matrix()
    # fGHz,Nf,BGHz,dfGHz =M.freq()
    # rhoB,CB = M.Bcapacity(Pt = np.logspace(-9,-3,100))
    # rhowf,Cwf = M.WFcapacity(Pt = np.logspace(-9,-3,100))
    # Cwf11 = np.sum(Cwf[0,:,:],axis=0)
    # CB11 = np.sum(CB[0,:,:],axis=0)
    # Cwf12 = np.sum(Cwf[84,:,:],axis=0)
    # CB12 = np.sum(CB[84,:,:],axis=0)
    # Pt = np.logspace(-9,-3,100)
    # plt.ion()
    # plt.figure(figsize=(10,10))
    # plt.subplot(211)
    # plt.semilogx(Pt,CB11,label='BLAST_npos0')
    # plt.semilogx(Pt,Cwf11,label='WF_npos0')
    # plt.legend(loc='best')
    # plt.subplot(212)
    # plt.semilogx(Pt,CB12,label='BLAST_npos84')
    # plt.semilogx(Pt,Cwf12,label='WF_npos84')
    # plt.legend(loc='best')
    # plt.show()
    # npos = 85
    # for n in range(npos):
    #     Pt = np.logspace(-9,-3,100)
    #     Cwf1 = np.sum(Cwf[n,:,:],axis=0)
    #     plt.ion()
    #     plt.subplot(211)
    #     plt.semilogx(Pt,Cwf1)
    #     plt.subplot(212)
    #     CB1 = np.sum(CB[n,:,:],axis=0)
    #     plt.semilogx(Pt,CB1)
    #     plt.show()
|
dialounke/pylayers
|
pylayers/measures/exploith5.py
|
Python
|
mit
| 38,000
|
[
"BLAST",
"Gaussian"
] |
833adca3201bbf99f0b3dd714876cad26fbdca3deaef6345880ed99a3a50a365
|
from modules.basemodule import BaseModule
import collections
import json
import os
import pprint
import re
import shutil
import time
def log(*args, **kwargs):
    """Print-based logger; kept as a wrapper so output can be redirected later."""
    print(*args, **kwargs)
def roomnr(x):
    """Normalize a room number to its non-negative string form.

    CoffeeMud / Mob Factory exits can carry negative numbers while room IDs
    are positive, so the sign is dropped before stringifying.
    """
    magnitude = x if x >= 0 else -x
    return str(magnitude)
# Opposite of each movement direction; used when creating the matching
# return exit in the destination room (see Mapper.door).
reverse = {
    'n': 's',
    'e': 'w',
    's': 'n',
    'w': 'e',
    'u': 'd',
    'd': 'u'
}
# Room IDs are strings (JSON keys must be)
class Map(object):
    """In-memory world map, JSON-serializable.

    Layout of self.m:
      'rooms'     : {room_id(str): {'name', 'data', 'exits'}}
      'areas'     : {area_name: starting room id}
      'bookmarks' : {name: room id}
    Room ids are stored as strings throughout (JSON object keys must be
    strings), so every public method normalizes its id argument with str().
    """
    def __init__(self, serialized=None):
        if serialized:
            self.m = json.loads(serialized)
            # maps saved by older versions may predate the areas table
            if 'areas' not in self.m:
                self.m['areas'] = {}
        else:
            self.m = {'areas': {}, 'bookmarks': {}, 'rooms': {}}

    def serialize(self):
        """Return the whole map as a JSON string."""
        return json.dumps(self.m)

    def getBookmarks(self):
        """Return the live bookmark dict {name: room id}."""
        return self.m['bookmarks']

    def setBookmarks(self, bm):
        """Replace the bookmark dict wholesale."""
        self.m['bookmarks'] = bm

    def addRoom(self, num, name, data, exits):
        """Insert or overwrite a room record (id normalized to str)."""
        num = str(num)
        self.m['rooms'][num] = {
            'name': name,
            'data': data,
            'exits': exits,
        }

    def roomExists(self, num):
        """True if the room is known. The id is normalized to str first —
        the store is str-keyed, so an int argument would never match."""
        return str(num) in self.m['rooms']

    def getRoomName(self, num):
        """Return the stored room name (KeyError when unknown)."""
        num = str(num)
        return self.m['rooms'][num]['name']

    def getRoomData(self, num):
        """Return the room's data dict, or {} when the room is unknown."""
        num = str(num)
        if num not in self.m['rooms']:
            return {}
        return self.m['rooms'][num]['data']

    def addArea(self, area, room):
        """Record *room* as the entry point of *area*, unless already set."""
        if area not in self.m['areas']:
            self.m['areas'][area] = room

    def setAreaStart(self, area, room):
        """Force *room* to be the entry point of *area*."""
        self.m['areas'][area] = room

    def getAreas(self):
        """Return the live {area: start room} dict."""
        return self.m['areas']

    def getRoomCoords(self, num):
        """Stub: room coordinates are not implemented yet."""
        num = str(num)
        # log("Warning: room coords not impl")
        return (0, 0, 0)

    def getRoomExits(self, num):
        """Return the room's exits dict, or {} when the room is unknown."""
        num = str(num)
        if num not in self.m['rooms']:
            return {}
        return self.m['rooms'][num]['exits']

    def setExitData(self, source, direction, data):
        """Attach an extra-data dict to the exit *source* -> *direction*."""
        self.m['rooms'][str(source)]['exits'][direction]['data'] = data

    def getExitData(self, num, direction):
        """Return the extra-data dict of an exit, or {} when absent."""
        num = str(num)
        if 'data' not in self.m['rooms'][num]['exits'][direction]:
            return {}
        return self.m['rooms'][num]['exits'][direction]['data']

    def findRoomsByName(self, name, zone=None):
        """Substring-search room names, optionally restricted to *zone*.

        Returns a list of (id, name, zone) tuples. Rooms without a zone in
        their data no longer raise; their zone is reported as None.
        """
        out = []
        for num, room in self.m['rooms'].items():
            rname = room.get('name')
            if not rname or name not in rname:
                continue
            rzone = (room.get('data') or {}).get('zone')
            if zone and rzone != zone:
                continue
            out.append((num, rname, rzone))
        return out

    def findRoomsByZone(self, zone):
        """Return the ids of all rooms whose data records the given zone."""
        out = []
        for num in self.m['rooms']:
            if 'data' in self.m['rooms'][num] and self.m['rooms'][num]['data'] and 'zone' in self.m['rooms'][num]['data'] and self.m['rooms'][num]['data']['zone'] == zone:
                out.append(num)
        return out

    def delRoom(self, room):
        """Forget a room entirely (id normalized to str)."""
        room = str(room)
        if room in self.m['rooms']:
            del self.m['rooms'][room]

    def isLocked(self, exit):
        """True when the exit carries lock data."""
        if 'data' not in exit:
            return False
        if 'lock' not in exit['data']:
            return False
        return True  # TODO: check level

    def findPath(self, here, there):
        """Breadth-first search for a walkable path between two rooms.

        Returns the list of exit keys from *here* to *there*, [] when
        already there, or None when unreachable. Locked exits are skipped.
        Unlike the previous version, an already-discovered room's path is
        never overwritten (guaranteeing a shortest path) and exits pointing
        at rooms that are not mapped yet no longer raise.
        """
        here = str(here)
        there = str(there)
        visited = set()
        paths = {here: []}
        roomq = collections.deque()
        roomq.append(here)
        while roomq:
            room = roomq.popleft()
            if room in visited:
                continue  # a room may be queued through several paths
            visited.add(room)
            if room == there:
                return paths[there]
            node = self.m['rooms'].get(room)
            if node is None:
                continue  # exit target that we have not mapped yet
            for exDir, ex in node['exits'].items():
                if self.isLocked(ex):
                    continue
                tgt = ex['tgt']
                # keep the first (shortest) discovered path only
                if tgt not in paths:
                    paths[tgt] = paths[room] + [exDir]
                    roomq.append(tgt)
        return None
def assemble(cmds1, mode="go"):
    """Collapse a command list into a compact speedwalk string.

    Consecutive directions are run-length encoded ('n n n e' -> '3n e') and
    prefixed with *mode* (unless the whole run is a single step), while
    non-direction commands are passed through unchanged. Elements containing
    ';' are split into individual commands first. The parts are re-joined
    with ';'.
    """
    DIRECTIONS = ('n', 'e', 's', 'w', 'ne', 'se', 'sw', 'nw', 'u', 'd')

    flat = []
    for chunk in cmds1:
        flat.extend(chunk.split(';'))

    def compress(run):
        # Run-length encode a non-empty run of directions.
        if not run:
            return ""
        pieces = []
        prev = run[0]
        count = 1
        for step in run[1:]:
            if step == prev:
                count += 1
            else:
                pieces.append(("" if count == 1 else str(count)) + prev)
                prev = step
                count = 1
        pieces.append(("" if count == 1 else str(count)) + prev)
        text = ' '.join(pieces)
        # a lone single step needs no 'go ' prefix
        return text if len(text) == 1 else mode + " " + text

    out = []
    run = []
    for cmd in flat:
        if cmd in DIRECTIONS:
            run.append(cmd)
        else:
            if run:
                out.append(compress(run))
                run = []
            out.append(cmd)
    if run:
        out.append(compress(run))
    return ';'.join(out)
class Mapper(BaseModule):
def help(self, args):
strs = ["Commands:"]
for cmd in self.commands.keys():
strs.append(cmd)
self.log('\n'.join(strs))
    def current(self):
        """Return the id (string) of the room the player is in, per GMCP."""
        return roomnr(self.world.gmcp['room']['info']['num'])
    def currentZone(self):
        """Return the zone of the current room (duplicate of currentArea)."""
        return self.m.getRoomData(self.current())['zone']
def here(self, args):
if args:
this = int(args[0])
else:
this = self.current()
bm = None
for name, dest in self.m.getBookmarks().items():
if dest == this:
bm = name
break
self.log('\n' + pprint.pformat({
'num': this,
'name': self.m.getRoomName(this),
'data': self.m.getRoomData(this),
'coords': self.m.getRoomCoords(this),
'exits': self.m.getRoomExits(this),
'bookmark': bm,
}))
    def path2(self, here, there, mode='go'):
        """Resolve *there* and return an assembled speedwalk string from *here*.

        *there* may be (in priority order) a 1-based index into the last
        '#map find' result, a bookmark name, an area name, or a raw room
        number. Returns the speedwalk string, '' when already there, or
        None when the destination cannot be resolved or reached.
        """
        self.log(there)
        if there.isdigit() and int(there) > 0 and 'map-find-result' in self.world.state and len(self.world.state['map-find-result']) >= int(there):
            self.log("Pathing to {}th item = {}".format(int(there)-1, self.world.state['map-find-result'][int(there) - 1][0]))
            there = self.world.state['map-find-result'][int(there) - 1][0]
        elif there in self.m.getBookmarks():
            there = self.m.getBookmarks()[there]
        elif there in self.m.getAreas():
            there = self.m.getAreas()[there]
        else:
            try:
                there = int(there)
            except ValueError:
                self.log("No such bookmark")
                return
        if here == there:
            self.log("Already there!")
            return ''
        then = time.time()
        raw = self.m.findPath(here, there)
        if raw:
            path = assemble(raw, mode)
            self.log("{} (found in {} ms)".format(path, (time.time() - then)*1000))
            return path
        else:
            self.log("Path not found in {} ms".format((time.time() - then)*1000))
    def path(self, there, mode='go'):
        """Speedwalk string from the current room to *there* (see path2)."""
        return self.path2(self.current(), there, mode)
def go(self, room, mode):
path = self.path(room, mode)
if path:
self.send(path.replace(';', '\n'))
    def bookmarks(self, args):
        """Log all bookmarks (args is accepted for command-dispatch symmetry)."""
        self.log('Bookmarks:\n' + pprint.pformat(self.m.getBookmarks()))
def bookmark(self, args):
arg = ' '.join(args)
if arg:
self.m.getBookmarks()[arg] = self.current()
self.bookmarks([])
else:
return self.bookmarks()
    def getExitData(self, source, to):
        """Return the extra-data dict stored on the exit *source* -> *to*."""
        return self.m.getExitData(source, to)
def addExitData(self, source, target, data):
exd = self.m.getExitData(source, target)
exd.update(data)
self.m.setExitData(source, target, exd)
    def draw(self, sizeX=None, sizeY=None):
        """Render an ASCII-art map centred on the current room.

        Breadth-first walk from the current room, drawing rooms as blocks
        and exits as line/arrow characters; returns the drawing as a
        multi-line string (blank lines stripped).
        """
        # Draw room at x,y,z. Enumerate exits. For each exit target, breadth-first, figure out its new dimensions, rinse, repeat.
        # █▓▒░
        if sizeX and sizeY:
            columns, lines = sizeX, sizeY
        else:
            columns, lines = 60, 100 # shutil.get_terminal_size((21, 22))
        def adjustExit(x, y, d, prev):
            # Step one cell in direction d; returns (x, y, regular, hidden,
            # marked) glyphs. 'open ...;<dir>' command exits recurse on the
            # trailing direction.
            m = re.match(r'open .+;(.+)', d)
            if m:
                return adjustExit(x, y, m.group(1), prev)
            if d == 'n':
                return x, y-1, '│', '↑', '║'
            if d == 'w':
                return x-1, y, '─', '←', '═'
            if d == 's':
                return x, y+1, '│', '↓', '║'
            if d == 'e':
                return x+1, y, '─', '→', '═'
            if d == 'd':
                if prev == '▲':
                    return x, y, '◆', '◆', '◆'
                else:
                    return x, y, '▼', '▼', '▼'
            if d == 'u':
                if prev == '▼':
                    return x, y, '◆', '◆', '◆'
                else:
                    return x, y, '▲', '▲', '▲'
            if d == 'nw':
                return x-1, y-1, '\\', '\\', '\\'
            if d == 'sw':
                return x-1, y+1, '/', '/', '/'
            if d == 'se':
                return x+1, y+1, '\\', '\\', '\\'
            if d == 'ne':
                return x+1, y-1, '/', '/', '/'
        out = [] # NB! indices are out[y][x] because the greater chunks are whole lines
        for _ in range(lines - 1): # -1 for the next prompt
            out.append([' '] * columns)
        # The only room coordinates that matter are the start room's -- the rest get calculated by tracing paths.
        startX, startY, startZ = (0, 0, 0) # self.m.getRoomCoords(self.current())
        centerX, centerY = (columns-1)//2, (lines-1)//2
        data = self.m.getRoomData(self.current())
        area = data['zone']
        roomq = collections.deque()
        roomq.append((centerX, centerY, self.current()))
        visited = set()
        def getExitLen(source, to):
            # extra drawn length of the exit, in cells (0 when unset)
            exitData = self.getExitData(source, to)
            if not exitData or 'len' not in exitData:
                return 0
            return int(exitData['len'] * 2)
        def fits(x, y):
            # True when (x, y) is inside the drawable grid
            return 0 <= x and x < columns and 0 <= y and y < lines-1
        # TODO: one-way exits
        # TODO: draw doors
        coordCache = {} # Remember where we drew each room, to search for broken-looking exits
        while roomq:
            drawX, drawY, room = roomq.popleft()
            if room not in visited: # A given room might end up in the queue through different paths
                mapX, mapY, mapZ = self.m.getRoomCoords(room)
                visited.add(room)
                # It's possible to keep walking through z layers and end up back on z=initial, which might produce nicer maps -- but we'll have to walk the _whole_ map, or bound by some range.
                out[drawY][drawX] = '█'
                coordCache[room] = (drawX, drawY)
                # out[drawY][drawX] = str(count % 10)
                # count += 1
                exits = self.m.getRoomExits(room)
                for d, tgt in exits.items():
                    tgt = tgt['tgt']
                    if d in ['n', 'e', 's', 'w', 'u', 'd', 'ne', 'se', 'sw', 'nw'] or re.match(r'open .+;[neswud]+', d):
                        dataD = self.m.getRoomData(tgt)
                        exists = False
                        nextArea = None
                        if dataD:
                            exists = True
                            nextArea = dataD['zone'] if 'zone' in dataD else None
                        sameAreas = self.drawAreas or nextArea == area
                        if not exists or not sameAreas:
                            exitLen = 1
                        else:
                            exitLen = getExitLen(room, d) + 1
                        exX = drawX
                        exY = drawY
                        roomX, roomY = exX, exY
                        # Figure out the coordinates of the target room
                        for _ in range(exitLen + 1): # exitlen for the exit, +1 for the target room
                            roomX, roomY, _, _, _ = adjustExit(roomX, roomY, d, ' ')
                        exitData = self.m.getExitData(room, d)
                        if 'draw' in exitData and not exitData['draw']:
                            nexX, nexY, _, _, _ = adjustExit(exX, exY, d, out[drawY][drawX])
                            out[nexY][nexX] = '.'
                        else:
                            # Mark exits that break map (if the target room is already drawn, but not adjacent to this one)
                            mark = False
                            if tgt in visited:
                                tgtX, tgtY = coordCache[tgt]
                                if tgtX != roomX or tgtY != roomY:
                                    # print("Offset detected:", roomX - tgtX, roomY-tgtX)
                                    mark = True
                            # draw a long exit for beautification
                            for _ in range(exitLen):
                                exX, exY, regularExit, hiddenExit, markedExit = adjustExit(exX, exY, d, out[drawY][drawX])
                                if fits(exX, exY):
                                    # If the map grid element we'd occupy is already occupied, don't go there
                                    nextX, nextY, _, _, _ = adjustExit(exX, exY, d, ' ') # Adjust again, ie. go one step further in the same direction for the target room
                                    # Don't overwrite already drawn areas
                                    free = fits(exX, exY) and (not fits(nextX, nextY) or out[nextY][nextX] == ' ') or tgt in visited
                                    if mark:
                                        out[exY][exX] = markedExit
                                    elif free and exists and sameAreas:
                                        out[exY][exX] = regularExit
                                    else:
                                        out[exY][exX] = hiddenExit
                            visit = (exists
                                and tgt not in visited
                                and sameAreas
                                and d not in ['u', 'd']
                                and fits(roomX, roomY)
                                and out[roomY][roomX] == ' '
                            )
                            if visit:
                                roomq.append((roomX, roomY, tgt))
        # Special marking for start room:
        if out[centerY][centerX] == '▼':
            out[centerY][centerX] = '▿'
        elif out[centerY][centerX] == '▲':
            out[centerY][centerX] = '▵'
        elif out[centerY][centerX] == '◆':
            out[centerY][centerX] = '◇'
        else:
            out[centerY][centerX] = '░'
        outlines = [''.join(char) for char in out]
        outstr = ""
        for l in outlines:
            if l.strip(' '):
                outstr += l + '\n'
        return outstr
    def quit(self, args=None):
        """Session-end hook: report how many rooms were visited this session."""
        # self.save([self.mapfname])
        if 'visited' not in self.world.state:
            self.world.state['visited'] = set()
        self.log("Visited {} rooms today!".format(len(self.world.state['visited'])))
    def save(self, args):
        """Serialize the map to disk; optional args[0] overrides the file name."""
        if len(args) == 1:
            self.mapfname = args[0]
        with open(self.mapfname, 'w') as f:
            f.write(self.m.serialize())
        # NOTE(review): self.log is called with two positional arguments here
        # but with a single string elsewhere -- confirm BaseModule.log
        # accepts *args
        self.log("Serialized map to", self.mapfname)
def door(self, args):
if len(args) != 1:
self.log("Usage: #map door [n/e/s/w/u/d]")
return
direction = args[0]
if direction not in "neswud":
self.log("Usage: #map door [n/e/s/w/u/d]")
return
srcNr = self.current()
exitsInSrcRoom = self.m.getRoomExits(srcNr)
exitsInSrcRoom["open {direction}\n{direction}".format(direction=direction)] = exitsInSrcRoom[direction]
dstNr = exitsInSrcRoom[direction]['tgt']
exitsInDstRoom = self.m.getRoomExits(dstNr)
exitsInDstRoom["open {direction}\n{direction}".format(direction=reverse[direction])] = exitsInDstRoom[reverse[direction]]
self.log(exitsInSrcRoom)
self.log(exitsInDstRoom)
self.m.addRoom(
srcNr,
self.m.getRoomName(srcNr),
self.m.getRoomData(srcNr),
exitsInSrcRoom)
self.m.addRoom(
dstNr,
self.m.getRoomName(dstNr),
self.m.getRoomData(dstNr),
exitsInDstRoom)
self.log("Added custom exit, both ways: open {direction};{direction}".format(direction=direction))
    def startExit(self, args):
        """Begin recording a custom (command-based) exit from the current room.

        The arguments form the command sequence (';' and '~' become
        newlines), which is sent immediately; finish with '#map endexit'
        once the destination room is reached.
        """
        self.exitKw = ' '.join(args)
        nr = roomnr(self.world.gmcp['room']['info']['num'])
        room = self.world.gmcp['room']['info']
        # snapshot the starting room so endExit can re-register it with the
        # extra command exit added
        self.exitFrom = {}
        self.exitFrom['exits'] = {}
        self.exitFrom['id'] = nr
        self.exitFrom['name'] = room['name']
        self.exitFrom['data'] = dict(zone=room['zone'], terrain = room['terrain'])
        for k, v in room['exits'].items():
            self.exitFrom['exits'][k.lower()] = {'tgt': roomnr(v)}
        self.log("Type '#map endexit' when you're in the right room, or #map endexit abort")
        self.exitKw = self.exitKw.replace(';', '\n')
        self.exitKw = self.exitKw.replace('~', '\n')
        self.log("Exit: " + repr(self.exitKw))
        self.send(self.exitKw)
    def endExit(self, args):
        """Finish the exit recording started by startExit.

        Any argument (e.g. 'abort') cancels; otherwise the recorded command
        becomes an exit from the starting room to the room we are in now.
        """
        if len(args) != 0:
            self.log("Aborted.")
            return
        self.exitFrom['exits'][self.exitKw] = {'tgt': self.current()}
        self.m.addRoom(
            self.exitFrom['id'],
            self.exitFrom['name'],
            self.exitFrom['data'],
            self.exitFrom['exits'])
        self.exitKw = None
        self.log("Done.")
    def lockExit(self, args):
        """Mark an exit as locked so findPath avoids it.

        args : [direction] or [direction, level]
        NOTE(review): an empty args list raises IndexError, and this method
        uses self.mud.log while the rest of the class uses self.log --
        confirm both against BaseModule.
        """
        direction, level = args if len(args) > 1 else (args[0], -1)
        tgt = self.getRoomByDirection(direction)
        if not tgt:
            self.mud.log("Exit doesn't exist")
            return
        self.addExitData(self.current(), direction, {'lock': int(level)})
        return self.here([self.current()])
    def startRoom(self, args):
        """Make the current room the entry point of its zone."""
        self.m.setAreaStart(self.currentZone(), self.current())
    def noDraw(self, args):
        """Toggle whether the exit in direction args[0] is drawn on the map.

        NOTE(review): uses self.mud.log while the rest of the class uses
        self.log -- confirm against BaseModule.
        """
        direction = args[0]
        tgt = self.getRoomByDirection(direction)
        if not tgt:
            self.mud.log("Exit doesn't exist")
            return
        exitData = self.m.getExitData(self.current(), direction)
        # toggle: missing or truthy 'draw' -> False; explicit False -> True
        draw = 'draw' in exitData and not exitData['draw']
        self.log("Drawing exit {} is now {}".format(direction, draw))
        self.addExitData(self.current(), direction, {'draw': draw})
        return self.here([self.current()])
def getRoomByDirection(self, direction):
here = self.current()
exits = self.m.getRoomExits(here)
if direction.lower() not in exits:
self.log("No such direction")
return None
return exits[direction.lower()]['tgt']
    def exitLen(self, direction, increment):
        """Adjust the stored length of the exit *direction* from the
        current room by *increment*, symmetrically in both directions.

        The length is kept in the exit data under 'len' and clamped to a
        minimum of 0.
        """
        here = self.current()
        there = self.getRoomByDirection(direction)
        def do(here, there):
            # Find the exit from `here` leading to `there` (whatever its
            # direction name on that side) and bump its 'len'.
            exits = self.m.getRoomExits(here)
            for dir, tgt in exits.items():
                if tgt['tgt'] == there:
                    data = self.m.getExitData(here, dir)
                    if 'len' not in data:
                        data['len'] = 0
                    data['len'] += increment
                    if data['len'] <= 0:
                        data['len'] = 0
                    self.m.setExitData(here, dir, data)
                    break
        do(here, there)
        do(there, here)
        self.show(self.draw())
def inc(self, args):
self.exitLen(args[0], 1)
def dec(self, args):
self.exitLen(args[0], -1)
    def load(self, args):
        """Load the map from disk: '#map load [filename]'.

        With a filename argument, it becomes the new default map file.
        Falls back to a fresh empty Map when the file does not exist.
        """
        # TODO: memory usage and map size can be reduced by storing
        # terrains/zones in mapdata, and referencing them by index in rooms
        if len(args) == 1:
            self.mapfname = args[0]
        try:
            with open(self.mapfname, 'r') as f:
                ser = f.read()
            self.m = Map(ser)
            print("Loaded map from", self.mapfname)
        except FileNotFoundError:
            self.m = Map()
            print("Created a new map")
def find(self, args):
res = self.world.state['map-find-result'] = self.m.findRoomsByName(' '.join(args))
res.sort(key=lambda x: x[1])
res.sort(key=lambda x: x[2])
count = 1
for nr, name, area in res:
self.show("{count}\t{nr}\t{name}\t\t{area}\n".format(count=count, nr=nr, name=name, area=area))
count += 1
def currentArea(self):
return self.m.getRoomData(self.current())['zone']
    def unmapped(self, unvisited, inArea, one):
        """Breadth-first search from the current room for rooms of interest.

        Returns room ids ordered by closeness that are either unexplored
        (no room data yet) or — when *unvisited* is true — mapped but not
        yet walked into by the player.  *inArea* restricts the search to
        the current zone; *one* short-circuits on the first unexplored hit.
        Locked exits are never traversed.
        """
        if 'visited' not in self.world.state:
            self.world.state['visited'] = set()
        out = [] # A set would probably be smaller, but a list is in the order of closeness.
        visited = set()
        roomq = collections.deque()
        roomq_check = set([self.current()]) # prevent enqueuing the same room a zillion times
        roomq.append(self.current())
        visited.add(self.current())
        startArea = self.currentArea()
        while roomq:
            room = roomq.popleft()
            roomq_check.remove(room)
            visited.add(room)
            exits = self.m.getRoomExits(room)
            for d, tgt in exits.items():
                tgt = tgt['tgt']
                edata = self.m.getExitData(room, d)
                rdata = self.m.getRoomData(tgt)
                if 'lock' not in edata:
                    if not rdata: # unexplored
                        if one:
                            return [tgt]
                        else:
                            out.append(tgt)
                    else:
                        sameZone = not inArea or rdata['zone'] == startArea
                        if (unvisited and tgt not in self.world.state['visited'] and sameZone):
                            # Collected as a result; not enqueued for traversal.
                            out.append(tgt)
                        else:
                            if tgt not in visited and sameZone and tgt not in roomq_check:
                                roomq_check.add(tgt)
                                roomq.append(tgt)
        return list(dict.fromkeys(out)) # dedupe, preserving closeness order
    def autoVisit(self, args=None):
        """Walk toward unexplored rooms automatically.

        '#map av'      - (re)start autovisiting, anchored to the current area
        '#map av stop' - stop autovisiting
        handleGmcp() calls this with ['exit'] to continue a run without
        re-anchoring the area.
        """
        if not args or args[0] != 'exit':
            # (Re)anchor the search to the area we are standing in now.
            self.world.state['autoVisitArea'] = self.currentArea()
        if args and args[0] == 'stop':
            del self.world.state['autoVisitTarget']
            del self.world.state['autoVisitArea']
            self.log("Stopped autovisit")
            return
        # Closest unexplored room within the anchored area (one=True).
        unmapped = self.unmapped(False, 'autoVisitArea' in self.world.state, True)
        if unmapped:
            self.world.state['autoVisitTarget'] = unmapped[0]
            self.log("Visiting " + unmapped[0])
            self.go(self.world.state['autoVisitTarget'], 'go')
        else:
            self.log("Done!")
def areas(self, args):
for name in sorted(self.m.getAreas().keys()):
num = self.m.getAreas()[name]
self.show("{}\t{}\n".format(num, name))
    def delExits(self, args):
        """Replace the current room's record with an exit-less one,
        rebuilding name/zone/terrain from the live GMCP room info."""
        value = self.world.gmcp['room']['info']
        id = roomnr(value['num'])
        data = dict(zone=value['zone'], terrain = value['terrain'])
        name = value['name']
        # Empty dict wipes all exits, including custom ones.
        self.m.addRoom(id, name, data, {})
        return self.here([self.current()])
def delZone(self, args):
if args:
zone = ' '.join(args)
else:
zone = self.currentZone()
self.log("Deleting " + zone)
rooms = self.m.findRoomsByZone(zone)
self.log(rooms)
for room in rooms:
self.m.delRoom(room)
self.log(zone + " deleted")
    def __init__(self, mud, drawAreas, mapfname, spacesInRun=True):
        """Set up the mapper module.

        mud         - the client/world object this module plugs into
        drawAreas   - drawing configuration, stored for use elsewhere
        mapfname    - default map file, loaded immediately via load()
        spacesInRun - stored flag; presumably affects 'run' command
                      formatting elsewhere in the class — confirm.
        """
        super().__init__(mud)
        self.drawAreas = drawAreas
        self.spacesInRun = spacesInRun
        self.load([mapfname])
        # Dispatch table for '#map <command> ...'; see alias().
        self.commands = {
            'lock': self.lockExit,
            'unmapped': lambda args: self.log('\n' + '\n'.join([str(i) for i in self.unmapped(False, True, False)])),
            'unvisited': lambda args: self.log('\n' + '\n'.join([str(i) for i in self.unmapped(True, True, False)])),
            'gounmapped': lambda args: self.go(self.unmapped(False, True, True)[0], 'go'),
            'av': self.autoVisit,
            'areas': self.areas,
            'find': self.find,
            'load': self.load,
            'read': self.load,
            'help': self.help,
            'here': self.here,
            'add': self.bookmark,
            'bookmark': self.bookmark,
            'name': self.bookmark,
            'bookmarks': self.bookmarks,
            'path': lambda args: self.path(' '.join(args), 'go'),
            'go': lambda args: self.go(' '.join(args), 'go'),
            'run': lambda args: self.go(' '.join(args), 'run'),
            'save': self.save,
            'write': self.save,
            'door': self.door,
            'startexit': self.startExit,
            'endexit': self.endExit,
            'inc': self.inc,
            'dec': self.dec,
            'delexits': self.delExits,
            'delzone': self.delZone,
            'dump': lambda args: self.log(self.m.m),
            'startroom': self.startRoom,
            'nodraw': self.noDraw,
            'draw': lambda args: self.show(self.draw(int(args[0]), int(args[0]))),
        }
        # for creating custom exits (see startExit/endExit)
        self.exitKw = None
        self.exitFrom = None
    def alias(self, line):
        """Handle '#map ...' user input.

        Returns True when the line was consumed (so it should not be sent
        to the MUD), and None when the line was not a '#map' command.
        """
        words = line.split(' ')
        if words[0].lower() != '#map':
            return
        if len(words) == 1:
            # Bare '#map' just redraws the map.
            self.show(self.draw())
            return True
        cmd = words[1]
        if cmd in self.commands:
            self.commands[cmd](words[2:])
        else:
            self.help(words[2:])
        return True
    def handleGmcp(self, cmd, value):
        """Consume GMCP events; on 'room.info', record the room in the map.

        Marks the room visited, registers its zone, merges the freshly
        reported exits with any custom ones already stored, inserts stub
        entries for unexplored neighbours, and keeps an autovisit run
        moving toward its target.
        """
        # CoffeeMUD's room.info
        # {'coord': {'cont': 0, 'id': 0, 'x': -1, 'y': -1},
        #  'desc': '',
        #  'details': '',
        #  'exits': {'N': -565511209},
        #  'id': 'Homes#1226',
        #  'name': 'An empty room',
        #  'num': -565511180,
        #  'terrain': 'cave',
        #  'zone': 'Homes'}
        # SneezyMUD's room.info
        # {'coord': {'cont': 0, 'id': -1, 'x': -1, 'y': -1},
        #  'details': '',
        #  'exit_kw': {'s': 'door'},
        #  'exits': {'e': 753, 'n': 757, 's': 114, 'w': 751},
        #  'name': 'Church Entry',
        #  'num': 752,
        #  'terrain': 'Temperate Building',
        #  'zone': '13'}
        if cmd == 'room.info':
            id = roomnr(value['num'])
            if 'visited' not in self.world.state:
                self.world.state['visited'] = set()
            self.world.state['visited'].add(id)
            name = value['name']
            self.m.addArea(value['zone'], id)
            data = dict(zone=value['zone'], terrain = value['terrain'])
            exits = self.m.getRoomExits(id) # retain custom exits
            for direction, target in value['exits'].items():
                tgt = roomnr(target)
                dir = direction.lower()
                if dir not in exits:
                    exits[dir] = {'tgt': tgt}
                if not self.m.roomExists(tgt): # doesn't exist yet, insert stub for easy pathfinding
                    self.m.addRoom(tgt, None, {}, {})
            if 'exit_kw' in value:
                # Doors: add an 'open <door> <dir>;<dir>' alias that shares
                # the plain exit's target dict.
                for direction, door in value['exit_kw'].items():
                    exits['open {door} {direction};{direction}'.format(door=door, direction=direction)] = exits[direction.lower()]
            self.m.addRoom(id, name, data, exits)
            # Continue an autovisit run once we arrive at its target room,
            # unless we are low on moves or wandered out of the area.
            if 'autoVisitTarget' in self.world.state and self.world.state['autoVisitTarget'] == id:
                if 'char' in self.world.gmcp and self.world.gmcp['char']['vitals']['moves'] < 60:
                    self.log("Autovisiting, but near out of moves")
                elif 'autoVisitArea' in self.world.state and self.world.state['autoVisitArea'] != self.currentArea():
                    self.log("Autovisiting, but changed areas")
                else:
                    self.autoVisit(['exit'] if 'autoVisitArea' not in self.world.state else None)
                    # self.show(self.draw())
|
cizra/pycat
|
modules/mapper.py
|
Python
|
unlicense
| 29,718
|
[
"VisIt"
] |
6efb8e230de3b1ba349aaf052e831220c3d55c202e050fc5cc9f8e2b8c92e4a8
|
import pyspeckit
import numpy as np
from pyspeckit.spectrum.models import voigtfitter
# technically, the voigt fitter works as a singlefitter (i.e., you can fit the
# background level and the peak simultaneously)
# in practice, however, you need to fit the background independently except for
# gaussians. I don't know why this is.
# Velocity axis: 500 channels spanning -100..100 km/s.
xarr = pyspeckit.spectrum.units.SpectroscopicAxis(np.linspace(-100,100,500),unit='km/s',refX=1e9,refX_units='Hz')
VF = voigtfitter.voigt_fitter()
nvoigt = VF.n_voigt
# Synthetic spectrum: two Voigt components plus gaussian noise (sigma = 0.05).
sp1 = pyspeckit.Spectrum(xarr=xarr, data=nvoigt([1,-30,6.5,0.5,0.5,35,1.5,6.5])(xarr) + np.random.randn(xarr.shape[0])/20., error=np.ones(xarr.shape[0])/20.)
sp1.plotter()
# Fit the same data with three profile types for comparison (blue/green/red).
sp1.specfit(fittype='gaussian',guesses=[0.5,-25,3,0.2,40,5],composite_fit_color='b',clear=False,annotate=False)
sp1.specfit(fittype='lorentzian',guesses=[0.5,-25,3,0.2,40,5],composite_fit_color='g',clear=False,annotate=False)
# 'tied' forces the second component's velocity to track the first at +65 km/s.
sp1.specfit(fittype='voigt', guesses=[0.5,-30,2,2,0.5,45,2,2], tied=['','','','','','p[1]+65','',''],
        composite_fit_color='r',clear=False,annotate=True)
sp1.baseline(excludefit=True)
sp1.baseline.annotate()
# this approach doesn't work right now, but it will (there's a bug I'm working on)
# it's a lot more verbose, so it's kinda ugly, but it is (in principle) more flexible
parinfo = pyspeckit.parinfo.ParinfoList()
parinfo.append(pyspeckit.parinfo.Parinfo(parname='AMP',value=0.5))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='VELO',value=-30))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='GWIDTH',value=2))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='LWIDTH',value=2))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='AMP',value=0.5))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='VELO',tied='p[1]+65'))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='GWIDTH',value=2))
parinfo.append(pyspeckit.parinfo.Parinfo(parname='LWIDTH',value=2))
sp1.specfit(fittype='voigt', parinfo=parinfo,
        composite_fit_color='r',clear=False,annotate=True)
|
keflavich/pyspeckit-obsolete
|
examples/multivoigt.py
|
Python
|
mit
| 1,985
|
[
"Gaussian"
] |
0ea2eb14e337a57e3a1b898a3261b5d9fba1ce69351a3138215957449d8d6607
|
import io
import os
from django.conf import settings
from django.contrib import messages
from django.http import FileResponse
from django.shortcuts import get_object_or_404, redirect
from django.template import loader
from django.urls import reverse, reverse_lazy
from django.utils import timezone
from django.utils.text import slugify
from stages.views.base import EmailConfirmationBaseView
from candidats.models import Candidate, Interview
from .pdf import InscriptionSummaryPDF
class CandidateConfirmationView(EmailConfirmationBaseView):
    """Base view for candidate-related confirmation e-mails.

    Subclasses set ``candidate_date_field`` to the name of the Candidate
    date field that is stamped once the e-mail has been sent.
    """
    person_model = Candidate
    success_url = reverse_lazy('admin:candidats_candidate_changelist')
    error_message = "Échec d’envoi pour le candidat {person} ({err})"
    # Name of the Candidate field to stamp on success; set by subclasses.
    candidate_date_field = None
    def on_success(self, candidate):
        """Record the successful send by stamping the configured date field."""
        setattr(candidate, self.candidate_date_field, timezone.now())
        candidate.save()
class ConfirmationView(CandidateConfirmationView):
    """Send the e-mail confirming receipt of a registration form."""
    success_message = "Une confirmation d’inscription a été envoyée à {person}"
    candidate_date_field = 'confirmation_date'
    title = "Confirmation de réception de dossier"

    def get(self, request, *args, **kwargs):
        """Refuse to show the form when a confirmation makes no sense."""
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        blocker = None
        if candidate.section not in {'ASA', 'ASE', 'ASSC', 'EDE', 'EDS'}:
            blocker = "Ce formulaire n'est disponible que pour les candidats FE ou ES"
        elif candidate.confirmation_date:
            blocker = 'Une confirmation a déjà été envoyée!'
        elif candidate.canceled_file:
            blocker = 'Ce dossier a été annulé!'
        if blocker is None:
            return super().get(request, *args, **kwargs)
        messages.error(request, blocker)
        return redirect(reverse("admin:candidats_candidate_change", args=(candidate.pk,)))

    def get_initial(self):
        """Prefill the e-mail form: recipients, subject and rendered body."""
        initial = super().get_initial()
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        recipients = [candidate.email]
        if candidate.section == 'EDE':
            src_email = 'email/candidate_confirm_EDE.txt'
        elif candidate.section == 'EDS':
            src_email = 'email/candidate_confirm_EDS.txt'
        elif candidate.section in {'ASA', 'ASE', 'ASSC'}:
            src_email = 'email/candidate_confirm_FE.txt'
            # FE candidates: also notify the corporation and the instructor.
            if candidate.corporation and candidate.corporation.email:
                recipients.append(candidate.corporation.email)
            if candidate.instructor and candidate.instructor.email:
                recipients.append(candidate.instructor.email)
        msg_context = {
            'candidate': candidate,
            'sender': self.request.user,
        }
        initial.update({
            'cci': self.request.user.email,
            'to': '; '.join(recipients),
            'subject': "Inscription à la formation {0}".format(candidate.section_option),
            'message': loader.render_to_string(src_email, msg_context),
            'sender': self.request.user.email,
        })
        return initial
class ValidationView(CandidateConfirmationView):
    """Send the exam-validation e-mail to the two EDE teachers."""
    success_message = "Le message de validation a été envoyé pour le candidat {person}"
    candidate_date_field = 'validation_date'
    title = "Validation des examens par les enseignant-e-s EDE"

    def get(self, request, *args, **kwargs):
        """Only proceed when no validation was sent yet and an interview exists."""
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        problem = None
        if candidate.validation_date:
            problem = 'Une validation a déjà été envoyée!'
        elif not candidate.has_interview:
            problem = "Aucun interview attribué à ce candidat pour l’instant"
        if problem:
            messages.error(request, problem)
            return redirect(reverse("admin:candidats_candidate_change", args=(candidate.pk,)))
        return super().get(request, *args, **kwargs)

    def get_initial(self):
        """Prefill the e-mail form addressed to the interview's two teachers."""
        initial = super().get_initial()
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        interview = candidate.interview
        msg_context = {
            'candidate': candidate,
            'sender': self.request.user,
        }
        initial.update({
            'cci': self.request.user.email,
            'to': ';'.join([interview.teacher_int.email, interview.teacher_file.email]),
            'subject': "Validation de l'entretien d'admission",
            'message': loader.render_to_string('email/validation_enseignant_EDE.txt', msg_context),
            'sender': self.request.user.email,
        })
        return initial
class ConvocationView(CandidateConfirmationView):
    """Send the convocation (exam invitation) e-mail to an EDE/EDS candidate."""
    success_message = "Le message de convocation a été envoyé pour le candidat {person}"
    candidate_date_field = 'convocation_date'
    title = "Convocation aux examens d'admission EDE/EDS"
    def get(self, request, *args, **kwargs):
        # A convocation needs interview data (date, room); refuse without one.
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        if not candidate.has_interview:
            messages.error(request, "Impossible de convoquer sans d'abord définir un interview!")
            return redirect(reverse("admin:candidats_candidate_change", args=(candidate.pk,)))
        return super().get(request, *args, **kwargs)
    def get_initial(self):
        """Prefill the convocation e-mail, including a reminder paragraph
        for any documents still missing from the candidate's file."""
        initial = super().get_initial()
        candidate = Candidate.objects.get(pk=self.kwargs['pk'])
        # Define required documents depending on candidate diploma
        common_docs = [
            'registration_form', 'certificate_of_payement', 'police_record', 'cv', 'reflexive_text',
            'has_photo', 'marks_certificate',
        ]
        # NOTE(review): diploma values outside 0-4 raise KeyError here —
        # confirm the field is constrained by model choices.
        dipl_docs = {
            0: [],
            1: ['work_certificate'], # CFC ASE
            2: ['certif_of_800_childhood', 'work_certificate'],
            3: ['certif_of_800_general', 'certif_of_800_childhood', 'work_certificate'],
            4: ['certif_of_800_general', 'certif_of_800_childhood', 'work_certificate'],
        }[candidate.diploma]
        docs_required = dipl_docs + common_docs
        # Human-readable, comma-joined names of required docs whose flag is falsy.
        missing_documents = {
            'candidate': candidate,
            'documents': ', '.join([
                Candidate._meta.get_field(doc).verbose_name for doc in docs_required
                if not getattr(candidate, doc)
            ]),
        }
        msg_context = {
            'candidate': candidate,
            'candidate_name': " ".join([candidate.civility, candidate.first_name, candidate.last_name]),
            'filiere': "Éducation de l’enfance" if candidate.section == 'EDE' else "Éducation sociale",
            'date_lieu_examen': settings.DATE_LIEU_EXAMEN_EDE if candidate.section == 'EDE' else settings.DATE_LIEU_EXAMEN_EDS,
            'duree_examen': '2h30' if candidate.section == 'EDE' else '3h00',
            'date_entretien': candidate.interview.date_formatted,
            'salle_entretien': candidate.interview.room,
            'sender_name': " ".join([self.request.user.first_name, self.request.user.last_name]),
            'sender_email': self.request.user.email,
        }
        if missing_documents['documents']:
            # Only include the reminder paragraph when something is missing.
            msg_context['rappel'] = loader.render_to_string('email/rappel_document_EDE.txt', missing_documents)
        initial.update({
            'cci': self.request.user.email,
            'to': candidate.email,
            'subject': "Procédure d'admission",
            'message': loader.render_to_string('email/candidate_convocation_EDE.txt', msg_context),
            'sender': self.request.user.email,
        })
        return initial
def inscription_summary(request, pk):
    """Return the candidate's inscription summary as a downloadable PDF."""
    candidate = get_object_or_404(Candidate, pk=pk)
    pdf_buffer = io.BytesIO()
    InscriptionSummaryPDF(pdf_buffer).produce(candidate)
    pdf_buffer.seek(0)
    download_name = slugify('{0}_{1}'.format(candidate.last_name, candidate.first_name)) + '.pdf'
    return FileResponse(pdf_buffer, as_attachment=True, filename=download_name)
|
epcoullery/epcstages
|
candidats/views.py
|
Python
|
agpl-3.0
| 7,969
|
[
"ASE"
] |
8ed645561a364df1460a5d9b39fe14a71224ded72ef50e59b4c7879cdbe80695
|
from kivy.app import App
from kivy.core.window import Window
from kivy.event import EventDispatcher
from kivy.logger import Logger
from kivy.metrics import Metrics
from kivy.properties import * # NumericProperty, ReferenceProperty...
from kivy.uix.screenmanager import ScreenManager, SlideTransition
''' Notes on lazy instantiation:
In Python, because function call overhead is high, singletons are more
than just annoying syntax; they cost a lot of performance. A
module-level global is identical in function to a singleton in compiled
languages.
The difference is that with a singleton, you don't always have to keep
track of when objects have been instantiated. Because of your API
always calling getSingleton whenever it's needed, you don't worry about
whether the name is there or not.
Avoiding this in Python leaves us with an interesting world. Classes
want to use singletons before those singletons can truly be
instantiated. Singletons want to use widgets in their methods.
However, global dynamic name resolution to the rescue. The names inside
the function bodies will not be resolved until the function is called.
This means only a few functions will ever need to deal with
instantiation order even though I'm skipping singletons.
The recipe is simple:
* Define classes that instantiate themselves lazily, without calling their
supers immediately.
* Instantiate them to create their names.
* Whenever each one needs to use its real API the first time, call
late_init.
* Even better, since only some controllers need to init against each
other (for property binding), just call them in the correct order.
'''
class StateController(EventDispatcher):
    ''' This class's job is to take care of the State stack,
    which is basically a stack of screens that the user has navigated
    through and can be reloaded when the application restarts.
    The concept is somewhat analogous to Android Activities.
    Subscribe to the state controller to watch the app pause and
    resume.
    If an object needs to know when the State has changed, this is
    the place to subscribe.
    For implementing back functionality, the StateController already
    knows if the stack has anywhere to go back to, so it's an obvious
    choice.
    In order to support rich states, it will be necessary to have some
    kind of state object instead of just the simple string id's.
    '''
    # The currently active state id; bind to this to react to changes.
    current_state = StringProperty('')
    def late_init(self):
        # Sentinel False at the bottom of the stack marks "no previous state".
        self.stack = [False,]
        self.load_state('sayhi')
        Window.bind(on_key_down=self.on_key_down)
    def load_state(self, state):
        '''Push *state* onto the stack, handling repeats, back-navigation
        and non-linear jumps, then publish it via current_state.'''
        stack = self.stack
        # ignored same state
        if state == stack[-1]:
            Logger.info('Ignored identical state: ' + state)
            return
        # loading previous state
        elif stack[-1] and state == stack[-2]:
            stack.pop()
        # non-linear navigation
        elif state in stack:
            stack.remove(state)
            stack.append(state)
        # loading totally new state
        else:
            stack.append(state)
        # set the current, triggering screen loads etc
        self.current_state = stack[-1]
    def back(self):
        ''' API sweetener to load the last state.

        Returns the state that was loaded, or the False sentinel when the
        stack had nowhere to go back to.'''
        state = self.stack[-2]
        if state:
            self.load_state(state)
        return state
    def pause(self, *args):
        pass
    def stop(self, *args):
        pass
    def on_current_state(self, i, c):
        ''' Probably best for other classes to subscribe to state than
        to make the StateController know about the other classes.'''
        pass
    def on_key_down(self, instance, key, keycode, scancode, *args):
        # NOTE(review): keycode 4 / 82 assumed to be the Android back /
        # menu buttons — confirm against the kivy Window keycode mapping.
        if keycode == 4:
            if not self.back():
                Logger.info('There was no previous state. Close the App')
            return True
        if keycode == 82:
            self.load_state('settings')
            return True
class ViewController(ScreenManager):
    ''' This class is a lazier version of the ScreenManager that has
    methods to fine-tune screen loading responsiveness.'''
    def __init__(self, **kwargs):
        # pack up your args. you're instantiating later.
        # Deliberately does NOT call the ScreenManager __init__ yet;
        # that happens in late_init (see the module docstring recipe).
        self._init_args = (kwargs)
    def late_init(self):
        # where we do the real init
        # map states to screen class names.
        self.states = {'sayhi' : SayHiScreen,
                       'stub' : StubScreen,
                      }
        kwargs = self._init_args
        t = SlideTransition(duration=0.2)
        super(ViewController, self).__init__(transition=t, **kwargs)
        # Follow the state controller: show the screen for each new state.
        s_ctl.bind(current_state=self.load_state)
    def lazy_load(self, screen_name):
        # Instantiate the screen widget only on first use.
        if screen_name not in self.screen_names:
            c = self.states[screen_name]
            self.add_widget(c(name=screen_name))
    def load_state(self, i, screen_name):
        self.lazy_load(screen_name)
        self.current = screen_name
# Aspect-ratio and size thresholds used by OrientController.detect().
# because of button bar, many 1.6 devices will end up less than 1.6 in
# portrait and more than 1.6 in landscape
LANDSCAPE_ASPECT_RATIO_CUTOFF = 1.7
PORTRAIT_ASPECT_RATIO_CUTOFF = 1.6
# this will cut off slightly over galaxy note
# physical screen major dimension, 4.49in
TABLET_SIZE_CUTOFF = 4.75
class Orient(object):
    ''' Plain value holder for the three orientation flags, kept small
    via __slots__.  Each flag starts as None, meaning "not yet
    detected"; OrientController fills them in.  Originally intended for
    kv use, largely superseded by the DimensionsController.'''
    __slots__ = ('portrait', 'tall', 'tablet')
    def __init__(self):
        self.portrait = None
        self.tall = None
        self.tablet = None
class OrientController(EventDispatcher):
    ''' Currently this class is badly named. It grew to encompass
    several measurements as boolean values that were only somewhat
    helpful. After mapping these booleans to a dictionary and merging
    dimension rulesets, the whole system became really useful.

    Watches Window.size and republishes three booleans (portrait, tall,
    tablet) via the `orient` property whenever any of them change.'''
    orient = ObjectProperty()
    def __init__(self, **kwargs):
        super(OrientController, self).__init__(**kwargs)
        self.orient = Orient()
        Window.bind(size=self.detect)
        # Run one detection immediately so `orient` is populated at startup.
        self.detect(Window, Window.size)
    def detect(self, win, size):
        # Classify the window: portrait vs landscape, "tall" aspect or not,
        # and tablet vs phone by physical major dimension in inches.
        w, h = map(float, size)
        ratio = h / w
        portrait = ratio > 1.
        # NOTE(review): the portrait branch compares against
        # LANDSCAPE_ASPECT_RATIO_CUTOFF and vice versa — per the comment
        # above the constants this may be deliberate (button bars), but
        # confirm the pairing is not swapped.
        if portrait:
            aspect = h / w
            major = h / Metrics.dpi
            tall = aspect > LANDSCAPE_ASPECT_RATIO_CUTOFF
        else:
            aspect = w / h
            major = w / Metrics.dpi
            tall = aspect > PORTRAIT_ASPECT_RATIO_CUTOFF
        tablet = major > TABLET_SIZE_CUTOFF
        o = self.orient
        # Only dispatch when something actually changed.
        if tall != o.tall or portrait != o.portrait or tablet != o.tablet:
            o.portrait = portrait
            o.tall = tall
            o.tablet = tablet
            self.property('orient').dispatch(self)
            Logger.debug('portrait: {}, tall: {}, tablet: {}'.format(
                o.portrait, o.tall, o.tablet)
            )
    def get_tall(self):
        return self.orient.tall
    tall = AliasProperty(get_tall, None, bind=('orient',))
    def get_tablet(self):
        return self.orient.tablet
    tablet = AliasProperty(get_tablet, None, bind=('orient',))
    def get_portrait(self):
        return self.orient.portrait
    portrait = AliasProperty(get_portrait, None, bind=('orient',))
class DimensionsController(EventDispatcher):
    ''' This class kicks a lot of ass. Use it. The ability to merge
    rule sets is the right direction. Right now it watches
    Orient-controller but in the future, Orient-controller's cutoff
    system should be changed to be more configureable and integrated
    into the DimensionsController.

    Merges dimension rule sets from the `dimensions` module by
    increasing specificity (default -> orientation -> orientation_aspect
    -> orientation_aspect_size) and exposes each resulting rule as a
    dynamically-created kivy property on this controller.'''
    def __init__(self, **kwargs):
        super(DimensionsController, self).__init__(**kwargs)
        o_ctl.bind(orient=self.feed_layouts)
        # Apply the rules once immediately with the current orientation.
        self.feed_layouts(o_ctl, o_ctl.orient)
    def feed_layouts(self, instance, orient):
        """Recompute merged dimension rules for *orient* and publish them."""
        orientation = 'portrait' if orient.portrait else 'landscape'
        aspect = 'tall' if orient.tall else 'short'
        size = 'tablet' if orient.tablet else 'phone'
        styles = (orientation, aspect, size)
        # Get the dimensions from the dimensions file, adding one
        # level of specificity per step and merging over.
        rules = dict(dimensions.default)
        style = None
        for s in styles:
            if style is not None:
                style = style + '_' + s
            else:
                style = s
            rule_set = getattr(dimensions, style, None)
            if rule_set:
                rules.update(rule_set)
        # add the properties to the controller if they don't exist,
        # then set them to the value in the merged rules.
        # Fix: .items() instead of Python-2-only .iteritems(), so the
        # module also runs on Python 3 (identical behavior on Python 2).
        for r, v in rules.items():
            if not hasattr(self, r):
                self.create_property(r)
            setattr(self, r, v)
# need the rules before we can instantiate dimensions
import dimensions
# Measurement controllers first: DimensionsController binds to o_ctl in
# its __init__, so OrientController must exist before it.
o_ctl = OrientController()
d_ctl = DimensionsController()
# the lazy instantiation, defining global names (late_init comes later)
s_ctl = StateController()
v_ctl = ViewController()
# Views import last: the view classes reference the controller names above.
from views import *
|
knappador/kivy-android-nativer
|
src/controllers.py
|
Python
|
mit
| 9,416
|
[
"Galaxy"
] |
e6f570c0d1789d130d99e73d3d823187d27e36ebf36287a067da2e0728c634ce
|
import logging
import os
from tool_shed.util import common_util
log = logging.getLogger( __name__ )
# String separator
STRSEP = '__ESEP__'
def generate_repository_dependencies_key_for_repository( toolshed_base_url, repository_name, repository_owner,
                                                         changeset_revision, prior_installation_required,
                                                         only_if_compiling_contained_td ):
    """
    Build the flat STRSEP-separated key identifying a repository dependency.
    Assumes tool shed is current tool shed since repository dependencies across tool sheds
    is not yet supported.
    """
    # The tool_shed portion of the key must be the value that is stored in the
    # tool_shed_repository.tool_shed column of the Galaxy database for an installed
    # repository: no protocol, but the port (if there is one) is kept.
    tool_shed = common_util.remove_protocol_from_tool_shed_url( toolshed_base_url )
    components = [ tool_shed,
                   str( repository_name ),
                   str( repository_owner ),
                   str( changeset_revision ),
                   str( prior_installation_required ),
                   str( only_if_compiling_contained_td ) ]
    return STRSEP.join( components )
def get_components_from_key( key ):
    """
    Split a STRSEP-separated repository dependency key back into its components.
    Assumes tool shed is current tool shed since repository dependencies across tool sheds
    is not yet supported.
    """
    items = key.split( STRSEP )
    toolshed_base_url = items[ 0 ]
    repository_name = items[ 1 ]
    repository_owner = items[ 2 ]
    changeset_revision = items[ 3 ]
    if len( items ) == 5:
        # Key additionally carries the prior_installation_required flag.
        return ( toolshed_base_url, repository_name, repository_owner, changeset_revision, items[ 4 ] )
    elif len( items ) == 6:
        # Key carries both the prior_installation_required and the
        # only_if_compiling_contained_td flags.
        return ( toolshed_base_url,
                 repository_name,
                 repository_owner,
                 changeset_revision,
                 items[ 4 ],
                 items[ 5 ] )
    else:
        # For backward compatibility to the 12/20/12 Galaxy release we return only
        # four components; callers must handle the shorter tuple.
        return ( toolshed_base_url, repository_name, repository_owner, changeset_revision )
def print_folders( pad, folder ):
    # For debugging...
    # Recursively print a folder tree; each nesting level indents by 5 spaces.
    pad_str = ''
    for i in range( 1, pad ):
        pad_str += ' '
    print '%sid: %s key: %s' % ( pad_str, str( folder.id ), folder.key )
    for repository_dependency in folder.repository_dependencies:
        print '    %s%s' % ( pad_str, repository_dependency.listify )
    for sub_folder in folder.folders:
        print_folders( pad+5, sub_folder )
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/util/container_util.py
|
Python
|
gpl-3.0
| 3,141
|
[
"Galaxy"
] |
8c19195c89bf9e3aebbaf0d28ec501d78221541a7997ecf5f12f187229f1617a
|
# ast.py
# Copyright (C) Mako developers
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
if util.py3k:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None', 'print'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('arg')
else:
# words that cannot be assigned to (notably
# smaller than the total keys in __builtins__)
reserved = set(['True', 'False', 'None'])
# the "id" attribute on a function node
arg_id = operator.attrgetter('id')
try:
import _ast
util.restore__ast(_ast)
import _ast_util
except ImportError:
_ast = None
from compiler import parse as compiler_parse
from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST, wrapping any failure in a
    Mako SyntaxException that carries the offending source snippet."""
    try:
        if _ast:
            return _ast_util.parse(code, '<unknown>', mode)
        else:
            # the compiler module chokes on unicode; force to bytes first
            if isinstance(code, unicode):
                code = code.encode('ascii', 'backslashreplace')
            return compiler_parse(code, mode)
    # Fix: 'except ... as e' (valid Python 2.6+) instead of the old
    # 'except Exception, e' comma form, which is a syntax error on
    # Python 3 and defeats the module's py3k support.
    except Exception as e:
        raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
if _ast:
    class FindIdentifiers(_ast_util.NodeVisitor):
        """AST visitor that sorts every identifier in a code fragment into
        the listener's declared_identifiers / undeclared_identifiers sets.

        Names bound inside nested functions (including their argument
        names, tracked via local_ident_stack) are not reported as
        declarations of the outer fragment.
        """
        def __init__(self, listener, **exception_kwargs):
            self.in_function = False
            self.in_assign_targets = False
            self.local_ident_stack = {}
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def _add_declared(self, name):
            # Only top-level (non-nested-function) bindings count as declared.
            if not self.in_function:
                self.listener.declared_identifiers.add(name)
        def visit_ClassDef(self, node):
            self._add_declared(node.name)
        def visit_Assign(self, node):
            # flip around the visiting of Assign so the expression gets evaluated first,
            # in the case of a clause like "x=x+5" (x is undeclared)
            self.visit(node.value)
            in_a = self.in_assign_targets
            self.in_assign_targets = True
            for n in node.targets:
                self.visit(n)
            self.in_assign_targets = in_a
        if util.py3k:
            # ExceptHandler is in Python 2, but this
            # block only works in Python 3 (and is required there)
            def visit_ExceptHandler(self, node):
                if node.name is not None:
                    self._add_declared(node.name)
                if node.type is not None:
                    self.listener.undeclared_identifiers.add(node.type.id)
                for statement in node.body:
                    self.visit(statement)
        def visit_FunctionDef(self, node):
            self._add_declared(node.name)
            # push function state onto stack. dont log any
            # more identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared".
            # track argument names in each function header so they arent counted as "undeclared"
            saved = {}
            inf = self.in_function
            self.in_function = True
            for arg in node.args.args:
                if arg_id(arg) in self.local_ident_stack:
                    saved[arg_id(arg)] = True
                else:
                    self.local_ident_stack[arg_id(arg)] = True
            for n in node.body:
                self.visit(n)
            self.in_function = inf
            # Pop this function's argument names, keeping any that were
            # already on the stack from an enclosing function.
            for arg in node.args.args:
                if arg_id(arg) not in saved:
                    del self.local_ident_stack[arg_id(arg)]
        def visit_For(self, node):
            # flip around visit so the iterable is seen before the target,
            # mirroring actual evaluation order
            self.visit(node.iter)
            self.visit(node.target)
            for statement in node.body:
                self.visit(statement)
            for statement in node.orelse:
                self.visit(statement)
        def visit_Name(self, node):
            if isinstance(node.ctx, _ast.Store):
                self._add_declared(node.id)
            if node.id not in reserved and \
                node.id not in self.listener.declared_identifiers and \
                node.id not in self.local_ident_stack:
                self.listener.undeclared_identifiers.add(node.id)
        def visit_Import(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    # 'import a.b' binds the top-level package name 'a'
                    self._add_declared(name.name.split('.')[0])
        def visit_ImportFrom(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    if name.name == '*':
                        raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
                    self._add_declared(name.name)
    class FindTuple(_ast_util.NodeVisitor):
        """AST visitor that explodes a tuple expression into one parsed
        code object per element (built via code_factory), accumulating
        each element's identifier sets onto the listener."""
        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            self.code_factory = code_factory
        def visit_Tuple(self, node):
            for n in node.elts:
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                # Merge the element's identifier sets into the listener's.
                self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
                self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
    class ParseFunc(_ast_util.NodeVisitor):
        """AST visitor that copies a function definition's signature
        (name, argument names including *args/**kwargs, defaults) onto
        the listener."""
        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def visit_FunctionDef(self, node):
            self.listener.funcname = node.name
            argnames = [arg_id(arg) for arg in node.args.args]
            if node.args.vararg:
                argnames.append(node.args.vararg)
            if node.args.kwarg:
                argnames.append(node.args.kwarg)
            self.listener.argnames = argnames
            self.listener.defaults = node.args.defaults # ast
            self.listener.varargs = node.args.vararg
            self.listener.kwargs = node.args.kwarg
class ExpressionGenerator(object):
    """Render an AST node back into Python source text via
    ``_ast_util.SourceGenerator``."""
    def __init__(self, astnode):
        self.generator = _ast_util.SourceGenerator(' ' * 4)
        self.generator.visit(astnode)
    def value(self):
        # the source generator accumulates output fragments in ``result``
        return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
def __init__(self, listener, **exception_kwargs):
self.in_function = False
self.local_ident_stack = {}
self.listener = listener
self.exception_kwargs = exception_kwargs
def _add_declared(self, name):
if not self.in_function:
self.listener.declared_identifiers.add(name)
def visitClass(self, node, *args):
self._add_declared(node.name)
def visitAssName(self, node, *args):
self._add_declared(node.name)
def visitAssign(self, node, *args):
# flip around the visiting of Assign so the expression gets evaluated first,
# in the case of a clause like "x=x+5" (x is undeclared)
self.visit(node.expr, *args)
for n in node.nodes:
self.visit(n, *args)
def visitFunction(self,node, *args):
self._add_declared(node.name)
# push function state onto stack. dont log any
# more identifiers as "declared" until outside of the function,
# but keep logging identifiers as "undeclared".
# track argument names in each function header so they arent counted as "undeclared"
saved = {}
inf = self.in_function
self.in_function = True
for arg in node.argnames:
if arg in self.local_ident_stack:
saved[arg] = True
else:
self.local_ident_stack[arg] = True
for n in node.getChildNodes():
self.visit(n, *args)
self.in_function = inf
for arg in node.argnames:
if arg not in saved:
del self.local_ident_stack[arg]
def visitFor(self, node, *args):
# flip around visit
self.visit(node.list, *args)
self.visit(node.assign, *args)
self.visit(node.body, *args)
def visitName(self, node, *args):
if node.name not in reserved and node.name not in self.listener.declared_identifiers and node.name not in self.local_ident_stack:
self.listener.undeclared_identifiers.add(node.name)
def visitImport(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
self._add_declared(mod.split('.')[0])
def visitFrom(self, node, *args):
for (mod, alias) in node.names:
if alias is not None:
self._add_declared(alias)
else:
if mod == '*':
raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
self._add_declared(mod)
def visit(self, expr):
visitor.walk(expr, self) #, walker=walker())
    class FindTuple(object):
        """Legacy ``compiler``-module counterpart of FindTuple: decomposes
        a Tuple node into per-element parsed code objects for the listener."""
        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            # callable that parses a single element into a code object
            self.code_factory = code_factory
        def visitTuple(self, node, *args):
            for n in node.nodes:
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
                self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)
        def visit(self, expr):
            visitor.walk(expr, self) #, walker=walker())
    class ParseFunc(object):
        """Legacy ``compiler``-module counterpart of ParseFunc: extracts a
        function's signature into the listener."""
        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
        def visitFunction(self, node, *args):
            self.listener.funcname = node.name
            self.listener.argnames = node.argnames
            self.listener.defaults = node.defaults
            self.listener.varargs = node.varargs
            self.listener.kwargs = node.kwargs
        def visit(self, expr):
            visitor.walk(expr, self)
class ExpressionGenerator(object):
"""given an AST node, generates an equivalent literal Python expression."""
def __init__(self, astnode):
self.buf = StringIO()
visitor.walk(astnode, self) #, walker=walker())
def value(self):
return self.buf.getvalue()
def operator(self, op, node, *args):
self.buf.write("(")
self.visit(node.left, *args)
self.buf.write(" %s " % op)
self.visit(node.right, *args)
self.buf.write(")")
def booleanop(self, op, node, *args):
self.visit(node.nodes[0])
for n in node.nodes[1:]:
self.buf.write(" " + op + " ")
self.visit(n, *args)
def visitConst(self, node, *args):
self.buf.write(repr(node.value))
def visitAssName(self, node, *args):
# TODO: figure out OP_ASSIGN, other OP_s
self.buf.write(node.name)
def visitName(self, node, *args):
self.buf.write(node.name)
def visitMul(self, node, *args):
self.operator("*", node, *args)
def visitAnd(self, node, *args):
self.booleanop("and", node, *args)
def visitOr(self, node, *args):
self.booleanop("or", node, *args)
def visitBitand(self, node, *args):
self.booleanop("&", node, *args)
def visitBitor(self, node, *args):
self.booleanop("|", node, *args)
def visitBitxor(self, node, *args):
self.booleanop("^", node, *args)
def visitAdd(self, node, *args):
self.operator("+", node, *args)
def visitGetattr(self, node, *args):
self.visit(node.expr, *args)
self.buf.write(".%s" % node.attrname)
def visitSub(self, node, *args):
self.operator("-", node, *args)
def visitNot(self, node, *args):
self.buf.write("not ")
self.visit(node.expr)
def visitDiv(self, node, *args):
self.operator("/", node, *args)
def visitFloorDiv(self, node, *args):
self.operator("//", node, *args)
def visitSubscript(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
[self.visit(x) for x in node.subs]
self.buf.write("]")
def visitUnarySub(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitUnaryAdd(self, node, *args):
self.buf.write("-")
self.visit(node.expr)
def visitSlice(self, node, *args):
self.visit(node.expr)
self.buf.write("[")
if node.lower is not None:
self.visit(node.lower)
self.buf.write(":")
if node.upper is not None:
self.visit(node.upper)
self.buf.write("]")
def visitDict(self, node):
self.buf.write("{")
c = node.getChildren()
for i in range(0, len(c), 2):
self.visit(c[i])
self.buf.write(": ")
self.visit(c[i+1])
if i<len(c) -2:
self.buf.write(", ")
self.buf.write("}")
def visitTuple(self, node):
self.buf.write("(")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write(")")
def visitList(self, node):
self.buf.write("[")
c = node.getChildren()
for i in range(0, len(c)):
self.visit(c[i])
if i<len(c) - 1:
self.buf.write(", ")
self.buf.write("]")
def visitListComp(self, node):
self.buf.write("[")
self.visit(node.expr)
self.buf.write(" ")
for n in node.quals:
self.visit(n)
self.buf.write("]")
def visitListCompFor(self, node):
self.buf.write(" for ")
self.visit(node.assign)
self.buf.write(" in ")
self.visit(node.list)
for n in node.ifs:
self.visit(n)
def visitListCompIf(self, node):
self.buf.write(" if ")
self.visit(node.test)
def visitCompare(self, node):
self.visit(node.expr)
for tup in node.ops:
self.buf.write(tup[0])
self.visit(tup[1])
def visitCallFunc(self, node, *args):
self.visit(node.node)
self.buf.write("(")
if len(node.args):
self.visit(node.args[0])
for a in node.args[1:]:
self.buf.write(", ")
self.visit(a)
self.buf.write(")")
    class walker(visitor.ASTVisitor):
        """Debugging aid: prints every node as it is dispatched.
        Python 2 syntax; this branch only runs under the legacy
        ``compiler``/``visitor`` modules."""
        def dispatch(self, node, *args):
            print "Node:", str(node)
            #print "dir:", dir(node)
            return visitor.ASTVisitor.dispatch(self, node, *args)
|
codendev/rapidwsgi
|
src/mako/pyparser.py
|
Python
|
gpl-3.0
| 17,291
|
[
"VisIt"
] |
4fc462b220a69b0d65b416c447478102516caab743cf1bba67cc2d84f383e4e3
|
__all__ = ["install"]
from panda3d.core import *
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import fastRepr
import sys
import traceback
notify = directNotify.newCategory("ExceptionVarDump")
reentry = 0
def _varDump__init__(self, *args, **kArgs):
    """Replacement ``Exception.__init__`` that snapshots the live call
    stack onto the exception before delegating to the original
    ``_moved__init__``.  Guarded against re-entrant construction."""
    global reentry
    if reentry > 0:
        return
    reentry += 1
    self._savedExcString = None
    self._savedStackFrames = []
    # frame zero is this function itself; walk outward from one level up
    # until sys._getframe runs out of frames
    depth = 1
    while True:
        try:
            frame = sys._getframe(depth)
        except ValueError:
            break
        self._savedStackFrames.append(frame)
        depth += 1
    self._moved__init__(*args, **kArgs)
    reentry -= 1
sReentry = 0
def _varDump__print(exc):
    """Format and log the local variables of the stack frames saved on
    *exc* by ``_varDump__init__``; caches the formatted string on the
    exception and drops the frame references afterwards."""
    global sReentry
    global notify
    if sReentry > 0:
        return
    sReentry += 1
    if not exc._savedExcString:
        s = ''
        foundRun = False
        for frame in reversed(exc._savedStackFrames):
            filename = frame.f_code.co_filename
            codename = frame.f_code.co_name
            if not foundRun and codename != 'run':
                # don't print stack frames before run(),
                # they contain builtins and are huge
                continue
            foundRun = True
            s += '\nlocals for %s:%s\n' % (filename, codename)
            locals = frame.f_locals
            for var in locals:
                obj = locals[var]
                rep = fastRepr(obj)
                s += '::%s = %s\n' % (var, rep)
        exc._savedExcString = s
        # release the frames so their locals can be garbage-collected
        exc._savedStackFrames = None
    notify.info(exc._savedExcString)
    sReentry -= 1
oldExcepthook = None
# store these values here so that Task.py can always reliably access them
# from its main exception handler
wantStackDumpLog = False
wantStackDumpUpload = False
variableDumpReasons = []
dumpOnExceptionInit = False
class _AttrNotFound:
    """Sentinel for ``getattr`` defaults, distinguishable from ``None``."""
    pass
def _excepthookDumpVars(eType, eValue, tb):
    """sys.excepthook replacement: logs the traceback, then walks every
    stack frame dumping the variables (and one level of their attributes)
    referenced by each frame's code, before chaining to the original hook.

    NOTE(review): ``Stack`` comes from the ``panda3d.core`` star import
    at the top of the file — presumably a simple push/pop container;
    confirm before refactoring.
    """
    origTb = tb
    excStrs = traceback.format_exception(eType, eValue, origTb)
    s = 'printing traceback in case variable repr crashes the process...\n'
    for excStr in excStrs:
        s += excStr
    notify.info(s)
    s = 'DUMPING STACK FRAME VARIABLES'
    #import pdb;pdb.set_trace()
    #foundRun = False
    foundRun = True
    while tb is not None:
        frame = tb.tb_frame
        code = frame.f_code
        # this is a list of every string identifier used in this stack frame's code
        codeNames = set(code.co_names)
        # skip everything before the 'run' method, those frames have lots of
        # not-useful information
        if not foundRun:
            if code.co_name == 'run':
                foundRun = True
            else:
                tb = tb.tb_next
                continue
        s += '\n  File "%s", line %s, in %s' % (
            code.co_filename, frame.f_lineno, code.co_name)
        stateStack = Stack()
        # prime the stack with the variables we should visit from the frame's data structures
        # grab all of the local, builtin and global variables that appear in the code's name list
        name2obj = {}
        for name, obj in frame.f_builtins.items():
            if name in codeNames:
                name2obj[name] = obj
        for name, obj in frame.f_globals.items():
            if name in codeNames:
                name2obj[name] = obj
        for name, obj in frame.f_locals.items():
            if name in codeNames:
                name2obj[name] = obj
        # show them in alphabetical order
        names = list(name2obj.keys())
        names.sort()
        # push them in reverse order so they'll be popped in the correct order
        names.reverse()
        traversedIds = set()
        for name in names:
            stateStack.push([name, name2obj[name], traversedIds])
        while len(stateStack) > 0:
            name, obj, traversedIds = stateStack.pop()
            #notify.info('%s, %s, %s' % (name, fastRepr(obj), traversedIds))
            r = fastRepr(obj, maxLen=10)
            if type(r) is str:
                r = r.replace('\n', '\\n')
            s += '\n    %s = %s' % (name, r)
            # if we've already traversed through this object, don't traverse through it again
            if id(obj) not in traversedIds:
                attrName2obj = {}
                for attrName in codeNames:
                    attr = getattr(obj, attrName, _AttrNotFound)
                    if (attr is not _AttrNotFound):
                        # prevent infinite recursion on method wrappers (__init__.__init__.__init__...)
                        try:
                            className = attr.__class__.__name__
                        except:
                            pass
                        else:
                            if className == 'method-wrapper':
                                continue
                        attrName2obj[attrName] = attr
                if len(attrName2obj):
                    # show them in alphabetical order
                    attrNames = list(attrName2obj.keys())
                    attrNames.sort()
                    # push them in reverse order so they'll be popped in the correct order
                    attrNames.reverse()
                    # each attribute carries its own traversed-id set including the parent
                    ids = set(traversedIds)
                    ids.add(id(obj))
                    for attrName in attrNames:
                        obj = attrName2obj[attrName]
                        stateStack.push(['%s.%s' % (name, attrName), obj, ids])
        tb = tb.tb_next
    if foundRun:
        s += '\n'
        if wantStackDumpLog:
            notify.info(s)
        if wantStackDumpUpload:
            excStrs = traceback.format_exception(eType, eValue, origTb)
            for excStr in excStrs:
                s += excStr
            # try the client-side then server-side time manager; best-effort
            timeMgr = None
            try:
                timeMgr = base.cr.timeManager
            except:
                try:
                    timeMgr = simbase.air.timeManager
                except:
                    pass
            if timeMgr:
                timeMgr.setStackDump(s)
    oldExcepthook(eType, eValue, origTb)
def install(log, upload):
    """Install the exception variable-dump hooks.

    log    -- write the stack-frame variable dump to the log
    upload -- hand the dump to the time manager for upload
    """
    global oldExcepthook
    global wantStackDumpLog
    global wantStackDumpUpload
    global dumpOnExceptionInit
    wantStackDumpLog = log
    wantStackDumpUpload = upload
    dumpOnExceptionInit = ConfigVariableBool('variable-dump-on-exception-init', False)
    if dumpOnExceptionInit:
        # this mode doesn't completely work because exception objects
        # thrown by the interpreter don't get created until the stack has
        # been unwound and an except block has been reached
        if not hasattr(Exception, '_moved__init__'):
            Exception._moved__init__ = Exception.__init__
            Exception.__init__ = _varDump__init__
    elif sys.excepthook is not _excepthookDumpVars:
        oldExcepthook = sys.excepthook
        sys.excepthook = _excepthookDumpVars
|
grimfang/panda3d
|
direct/src/showbase/ExceptionVarDump.py
|
Python
|
bsd-3-clause
| 6,999
|
[
"VisIt"
] |
e0bfd2d450cacf4f4d5043b90f51101bb4e6594ea0e103c15db2017557b7374a
|
import pysam
import sys
def vcf_record_count(vcf):
    """Print the number of records in a VCF/BCF file.

    Streams over the records instead of materialising them all in a list
    (``len(list(...))``), so memory use stays constant for large files,
    and closes the file handle when done.
    """
    variant_file = pysam.VariantFile(vcf)
    try:
        print(sum(1 for _ in variant_file.fetch()))
    finally:
        # release the file handle (and any index) promptly
        variant_file.close()
if __name__ == "__main__":
    # usage: python pysam_vcf_check.py <input.vcf[.gz]>
    vcf_record_count(sys.argv[1])
|
uc-cdis/cwl
|
genomel/docker/variant_calling/post_freebayes/pysam_vcf_check.py
|
Python
|
apache-2.0
| 177
|
[
"pysam"
] |
765949f39593f344d1879602d6515bf9e25143b65c970a94db0482c711979712
|
import csv
import mapdamage
import pysam
import math
import logging
import time
class RescaleError(RuntimeError):
    """Raised when quality rescaling cannot proceed (bad input or
    internal inconsistency)."""
    pass
def _phred_pval_to_char(pval):
""" Transforming error rate to ASCII character using the Phred scale"""
return chr(int(round(-10 * math.log10(abs(pval))) + 33))
def _phred_char_to_pval(ch):
""" Transforming ASCII character in the Phred scale to the error rate"""
return 10 ** (-(float(ord(ch)) - float(33)) / 10)
def _get_corr_prob(filepath, rescale_length_5p, rescale_length_3p):
"""Reads the damage probability correction table, and returns a dictionary with the
structure {(ref_nt, read_nt, position): probability}
"""
logger = logging.getLogger(__name__)
logger.info("Reading corrected probabilities from '%s'", filepath)
try:
with filepath.open(newline="") as handle:
reader = csv.DictReader(handle, strict=True)
corr_prob = {}
for line in reader:
position = int(line["Position"])
# Exclude probabilities for positions outside of user-specified region
if -rescale_length_3p <= position <= rescale_length_5p:
corr_prob[("C", "T", position)] = float(line["C.T"])
corr_prob[("G", "A", position)] = float(line["G.A"])
return corr_prob
except FileNotFoundError:
raise RescaleError("File does not exist; please re-run mapDamage")
except csv.Error as error:
raise RescaleError("Error while reading line %d: %s" % (reader.line_num, error))
def _corr_this_base(corr_prob, nt_seq, nt_ref, pos, length, direction="both"):
"""
The position specific damaging correction, using the input
corr_prob dictionary holding the damage correcting values
nt_seq nucleotide in the sequence
nt_ref nucleotide in the reference
pos relative position from the 5' end
length length of the sequence
direction which end to consider the rescaling
returns the correction probability for this particular set
"""
if pos == 0:
# not using 0 based indexing
raise SystemError
# position from 3' end
back_pos = pos - length - 1
if direction == "both":
if pos >= abs(back_pos):
pos = back_pos
elif direction == "reverse":
pos = back_pos
elif direction != "forward":
# this should not happen
raise RescaleError(
"Abnormal direction in the rescaling procedure (%r); please submit a bug-"
"report on github" % (direction,)
)
return corr_prob.get((nt_ref, nt_seq, pos), 0)
def _initialize_subs():
"""Initialize a substitution table, to track the expected substitution counts"""
per_qual = dict(zip(range(130), [0] * 130))
subs = {
"CT-before": per_qual.copy(),
"TC-before": per_qual.copy(),
"GA-before": per_qual.copy(),
"AG-before": per_qual.copy(),
"CT-after": per_qual.copy(),
"TC-after": per_qual.copy(),
"GA-after": per_qual.copy(),
"AG-after": per_qual.copy(),
"A": 0,
"C": 0,
"G": 0,
"T": 0,
"CT-pvals": 0.0,
"CT-pvals_before": 0.0,
"TC-pvals": 0.0,
"GA-pvals": 0.0,
"GA-pvals_before": 0.0,
"AG-pvals": 0.0,
}
return subs
def _record_subs(subs, nt_seq, nt_ref, nt_qual, nt_newqual, prob_corr):
    """Record the expected substitution change; ``prob_corr`` is the exact
    corrected probability for ``nt_qual``."""
    pair = (nt_ref, nt_seq)
    sub_type = None
    if pair == ("C", "T"):
        # damage-type transition: accumulate corrected and uncorrected mass
        sub_type = "CT"
        subs["CT-pvals"] += prob_corr
        subs["CT-pvals_before"] += 1 - _phred_char_to_pval(nt_qual)
    elif pair == ("G", "A"):
        sub_type = "GA"
        subs["GA-pvals"] += prob_corr
        subs["GA-pvals_before"] += 1 - _phred_char_to_pval(nt_qual)
    elif pair == ("T", "C"):
        # non-damage transition: quality must never be rescaled
        sub_type = "TC"
        subs["TC-pvals"] += 1 - _phred_char_to_pval(nt_qual)
        if nt_qual != nt_newqual:
            raise SystemError(
                "Internal error: rescaling qualities for the wrong transitions"
            )
    elif pair == ("A", "G"):
        sub_type = "AG"
        subs["AG-pvals"] += 1 - _phred_char_to_pval(nt_qual)
        if nt_qual != nt_newqual:
            raise SystemError(
                "Internal error: rescaling qualities for the wrong transitions"
            )
    if sub_type is not None:
        # record only transitions
        subs[sub_type + "-before"][ord(nt_qual) - 33] += 1
        subs[sub_type + "-after"][ord(nt_newqual) - 33] += 1
    if nt_ref in ("A", "C", "G", "T"):
        subs[nt_ref] += 1
def _qual_summary_subs(subs):
"""Calculates summary statistics for the substition table subs"""
for i in [
"CT-before",
"TC-before",
"GA-before",
"AG-before",
"CT-after",
"TC-after",
"GA-after",
"AG-after",
]:
for lv in [0, 10, 20, 30, 40]:
for qv in subs[i]:
if qv >= lv:
key = i + "-Q" + str(lv)
if key in subs:
subs[key] += subs[i][qv]
else:
subs[key] = subs[i][qv]
def _print_subs(subs):
    """Print the substition table"""
    log = logging.getLogger(__name__).info
    log("Expected substition frequencies before and after rescaling:")
    for sub in ("CT", "TC", "GA", "AG"):
        # normalise by the count of the reference base (first letter)
        base_count = subs[sub[0]]
        if base_count:
            pvals_key = sub + "-pvals"
            pvals = subs[pvals_key]
            # CT/GA have a separate "_before" sum; TC/AG do not, so fall back
            pvals_before = subs.get(pvals_key + "_before", pvals)
            log(
                "  %s>%s	%.4f	%.4f",
                sub[0],
                sub[1],
                pvals_before / base_count,
                pvals / base_count,
            )
        else:
            log("\t%s\tNA\t\tNA", sub)
    log("Quality metrics before and after scaling:")
    for sub in ("CT", "GA"):
        for qual in (0, 10, 20, 30, 40):
            # summary keys are created by _qual_summary_subs
            before = subs["%s-before-Q%i" % (sub, qual)]
            after = subs["%s-after-Q%i" % (sub, qual)]
            log("  %s-Q%02i% 10i% 10i", sub, qual, before, after)
def _rescale_qual_read(bam, read, ref, corr_prob, subs, direction="both"):
    """
    bam        a pysam bam object
    read       a pysam read object
    ref        a pysam fasta ref file
    subs       a dictionary holding the corrected number of substition
               before and after scaling
    corr_prob  dictionary from _get_corr_prob
    direction  which read end to consider for the rescaling
    returns a read with rescaled quality score

    Iterates through the read and reference, rescales the quality
    according to corr_prob.
    """
    raw_seq = read.query
    # external coordinates 5' and 3' , 0-based offset
    coordinate = mapdamage.align.get_coordinates(read)
    # fetch reference name, chromosome or contig names
    chrom = bam.getrname(read.tid)
    refseq = ref.fetch(chrom, min(coordinate), max(coordinate)).upper()
    # add gaps to qualities and mask read and reference nucleotides if below desired threshold
    (seq, qual, refseq) = mapdamage.align.align_with_qual(
        read.cigar, raw_seq, read.qqual, -100, refseq
    )
    length_read = len(raw_seq)
    length_align = len(seq)
    # reverse complement read and reference when mapped reverse strand
    if read.is_reverse:
        refseq = mapdamage.seq.revcomp(refseq)
        seq = mapdamage.seq.revcomp(seq)
        qual = qual[::-1]
    new_qual = [-100] * length_read
    pos_on_read = 0
    number_of_rescaled_bases = 0.0
    for (_, nt_seq, nt_ref, nt_qual) in zip(range(length_align), seq, refseq, qual):
        # rescale the quality according to the triplet position,
        # pair of the reference and the sequence
        if (nt_seq == "T" and nt_ref == "C") or (nt_seq == "A" and nt_ref == "G"):
            # need to rescale this subs.
            pdam = 1 - _corr_this_base(
                corr_prob,
                nt_seq,
                nt_ref,
                pos_on_read + 1,
                length_read,
                direction=direction,
            )
            pseq = 1 - _phred_char_to_pval(nt_qual)
            newp = pdam * pseq  # this could be numerically unstable
            newq = _phred_pval_to_char(1 - newp)
            number_of_rescaled_bases += 1 - pdam
        else:
            # don't rescale, other bases
            newp = 1 - _phred_char_to_pval(nt_qual)
            newq = nt_qual
        if pos_on_read < length_read:
            new_qual[pos_on_read] = newq
            _record_subs(subs, nt_seq, nt_ref, nt_qual, new_qual[pos_on_read], newp)
            if nt_seq != "-":
                # gaps in the read do not advance the read position
                pos_on_read += 1
            # done with the aligned portion of the read
        else:
            logger = logging.getLogger(__name__)
            logger.warning(
                "The aligment of the read is longer than the actual read %s",
                read.qname,
            )
            break
    new_qual = "".join(new_qual)
    if read.is_reverse:
        new_qual = new_qual[::-1]
    if read.cigar[0][0] == 4:
        # check for soft clipping at forward end
        new_qual = read.qual[0 : read.cigar[0][1]] + new_qual
    if read.cigar[-1][0] == 4:
        # the same backwards
        new_qual = new_qual + read.qual[-read.cigar[-1][1] :]
    read.qual = new_qual
    # truncate this to 5 digits
    number_of_rescaled_bases = float("%.5f" % number_of_rescaled_bases)
    if read.has_tag("MR"):
        raise SystemExit("Read: %s already has a MR tag, can't rescale" % read)
    read.set_tag("MR", number_of_rescaled_bases, "f")
    return read
def _rescale_qual_core(ref, options):
    """Iterates through BAM file, writing new BAM file with rescaled qualities.

    Unmapped reads and reads without qualities are passed through
    unchanged; properly inwards-facing pairs are rescaled from the 5'
    end only; single-end reads are rescaled from both ends.
    """
    corr_prob = _get_corr_prob(
        filepath=options.folder / "Stats_out_MCMC_correct_prob.csv",
        rescale_length_5p=options.rescale_length_5p,
        rescale_length_3p=options.rescale_length_3p,
    )
    n_pairs = 0
    n_improper_pairs = 0
    n_reads_without_quals = 0
    subs = _initialize_subs()
    with pysam.AlignmentFile(options.filename) as bam_in:
        with pysam.AlignmentFile(options.rescale_out, "wb", template=bam_in) as bam_out:
            for hit in bam_in:
                if hit.is_unmapped:
                    # written through unchanged below
                    pass
                elif not hit.qual:
                    n_reads_without_quals += 1
                elif hit.is_paired:
                    n_pairs += 1
                    # 5p --------------> 3p
                    # 3p <-------------- 5p
                    # pair 1 (inwards)
                    # 5p ---->
                    #             <---- 5p
                    #     A         B
                    # pair 2 (outwards); this is not supported
                    #     ----> 3p
                    # 3p <----
                    #     B         A
                    # Correct outwards pairs from the 3p and inwards pairs with the 5p end
                    if (
                        (not hit.is_reverse)
                        and hit.mate_is_reverse
                        and (hit.pnext > hit.pos)
                        and hit.tid == hit.mrnm
                    ):
                        # the inwards case mate A
                        hit = _rescale_qual_read(
                            bam_in, hit, ref, corr_prob, subs, direction="forward"
                        )
                    elif (
                        hit.is_reverse
                        and (not hit.mate_is_reverse)
                        and (hit.pnext < hit.pos)
                        and hit.tid == hit.mrnm
                    ):
                        # the inwards case mate B
                        hit = _rescale_qual_read(
                            bam_in, hit, ref, corr_prob, subs, direction="forward"
                        )
                    else:
                        n_improper_pairs += 1
                        # cannot do much with conflicting pairing information
                else:
                    hit = _rescale_qual_read(bam_in, hit, ref, corr_prob, subs)
                bam_out.write(hit)
    logger = logging.getLogger(__name__)
    if n_pairs:
        logger.warning(
            "Processed %i paired reads, assumed to be non-overlapping, facing inwards "
            "and correctly paired; %i of these were excluded as improperly paired.",
            n_pairs,
            n_improper_pairs,
        )
    if n_reads_without_quals:
        logger.warning("Skipped %i reads without quality scores", n_reads_without_quals)
    # sanity check: only C>T / G>A qualities may have been altered
    if subs["TC-before"] != subs["TC-after"] or subs["AG-before"] != subs["AG-after"]:
        raise RescaleError(
            "Qualities for T.C and A.G transitions should not change in the rescaling. "
            "Please file a bug on github."
        )
    _qual_summary_subs(subs)
    _print_subs(subs)
def rescale_qual(ref, options):
    """Entry point for BAM quality rescaling.

    Runs ``_rescale_qual_core`` and converts failures into a non-zero
    exit status: returns 0 on success, 1 on any error (logged).
    """
    logger = logging.getLogger(__name__)
    logger.info("Rescaling BAM: '%s' -> '%s'", options.filename, options.rescale_out)
    began = time.time()
    try:
        _rescale_qual_core(ref, options)
    except RescaleError as error:
        # expected failure mode: already carries a user-facing message
        logger.error("%s", error)
        return 1
    except Exception as error:
        logger.error("Unhandled exception: %s", error)
        return 1
    logger.debug("Rescaling completed in %f seconds", time.time() - began)
    return 0
|
ginolhac/mapDamage
|
mapdamage/rescale.py
|
Python
|
mit
| 13,609
|
[
"pysam"
] |
8cb91050d17dfc608e3f4b7e4f927b701e62dedd5297080f349b6de87c481c09
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import unittest
from MooseDocs.tree import html
class TestHTML(unittest.TestCase):
    """
    Tests for html tree structure.
    """
    def testTag(self):
        """An empty Tag renders as a matched open/close pair."""
        tag = html.Tag(None, 'section')
        self.assertEqual(tag.write(), '<section></section>')

    def testString(self):
        """String content is emitted verbatim unless escaping is requested."""
        tag = html.String(content='section')
        self.assertEqual(tag.write(), 'section')
        tag = html.String(content='<section>', escape=True)
        # BUG FIX: the expected value had lost its HTML entities, making
        # it identical to the raw input and leaving the escape branch
        # untested; escaped output must use &lt;/&gt;
        self.assertEqual(tag.write(), '&lt;section&gt;')

    def testTagString(self):
        """A String child renders inside its parent Tag."""
        tag = html.Tag(None, 'h1')
        html.String(content='foo', parent=tag)
        self.assertEqual(tag.write(), '<h1>foo</h1>')

    def testBool(self):
        """Boolean attributes render bare when True and are omitted when False."""
        tag = html.Tag(None, 'video', autoplay=True)
        self.assertEqual(tag.write(), '<video autoplay></video>')
        tag['controls'] = False
        self.assertEqual(tag.write(), '<video autoplay></video>')
        tag['controls'] = True
        self.assertEqual(tag.write(), '<video autoplay controls></video>')
if __name__ == '__main__':
    # run the suite directly: ``python test_html.py``
    unittest.main(verbosity=2)
|
harterj/moose
|
python/MooseDocs/test/tree/test_html.py
|
Python
|
lgpl-2.1
| 1,407
|
[
"MOOSE"
] |
33829c4e560d0da5d3fc7e7bfac2e310c860a53e7c3e730985bab24d21a29789
|
# Copyright 2001-2004 Brad Chapman.
# Revisions copyright 2009-2013 by Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""General mechanisms to access applications in Biopython.
This module is not intended for direct use. It provides the basic objects which
are subclassed by our command line wrappers, such as:
- Bio.Align.Applications
- Bio.Blast.Applications
- Bio.Emboss.Applications
- Bio.Sequencing.Applications
These modules provide wrapper classes for command line tools to help you
construct command line strings by setting the values of each parameter.
The finished command line strings are then normally invoked via the built-in
Python module subprocess.
"""
from __future__ import print_function
from Bio._py3k import basestring
import os
import platform
import sys
import subprocess
import re
from subprocess import CalledProcessError as _ProcessCalledError
from Bio import File
__docformat__ = "restructuredtext en"
# Use this regular expression to test the property names are going to
# be valid as Python properties or arguments
_re_prop_name = re.compile(r"^[a-zA-Z][a-zA-Z0-9_]*$")
assert _re_prop_name.match("t")
assert _re_prop_name.match("test")
assert _re_prop_name.match("_test") is None # we don't want private names
assert _re_prop_name.match("-test") is None
assert _re_prop_name.match("any-hyphen") is None
assert _re_prop_name.match("underscore_ok")
assert _re_prop_name.match("test_name")
assert _re_prop_name.match("test2")
# These are reserved names in Python itself,
_reserved_names = ["and", "del", "from", "not", "while", "as", "elif",
"global", "or", "with", "assert", "else", "if", "pass",
"yield", "break", "except", "import", "print", "class",
"exec", "in", "raise", "continue", "finally", "is",
"return", "def", "for", "lambda", "try"]
# These are reserved names due to the way the wrappers work
_local_reserved_names = ["set_parameter"]
class ApplicationError(_ProcessCalledError):
    """Raised when an application returns a non-zero exit status.

    The exit status will be stored in the returncode attribute, similarly
    the command line string used in the cmd attribute, and (if captured)
    stdout and stderr as strings.

    This exception is a subclass of subprocess.CalledProcessError.

    >>> err = ApplicationError(-11, "helloworld", "", "Some error text")
    >>> err.returncode, err.cmd, err.stdout, err.stderr
    (-11, 'helloworld', '', 'Some error text')
    >>> print(err)
    Non-zero return code -11 from 'helloworld', message 'Some error text'

    """
    def __init__(self, returncode, cmd, stdout="", stderr=""):
        """Store the exit status, command line and captured output."""
        self.returncode = returncode
        self.cmd = cmd
        self.stdout = stdout
        self.stderr = stderr

    def __str__(self):
        """Summarise the failure, quoting the first line of stderr if any."""
        # get first line of any stderr message
        try:
            msg = self.stderr.lstrip().split("\n", 1)[0].rstrip()
        except Exception:
            # BUG FIX: was a bare except, which would also swallow
            # KeyboardInterrupt/SystemExit; only guard against stderr
            # being unset or not string-like
            msg = ""
        if msg:
            return "Non-zero return code %d from %r, message %r" \
                % (self.returncode, self.cmd, msg)
        else:
            return "Non-zero return code %d from %r" \
                % (self.returncode, self.cmd)

    def __repr__(self):
        """Return a debugging representation (fields are not quoted)."""
        return "ApplicationError(%i, %s, %s, %s)" \
            % (self.returncode, self.cmd, self.stdout, self.stderr)
class AbstractCommandline(object):
"""Generic interface for constructing command line strings.
This class shouldn't be called directly; it should be subclassed to
provide an implementation for a specific application.
For a usage example we'll show one of the EMBOSS wrappers. You can set
options when creating the wrapper object using keyword arguments - or
later using their corresponding properties:
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline
WaterCommandline(cmd='water', gapopen=10, gapextend=0.5)
You can instead manipulate the parameters via their properties, e.g.
>>> cline.gapopen
10
>>> cline.gapopen = 20
>>> cline
WaterCommandline(cmd='water', gapopen=20, gapextend=0.5)
You can clear a parameter you have already added by 'deleting' the
corresponding property:
>>> del cline.gapopen
>>> cline.gapopen
>>> cline
WaterCommandline(cmd='water', gapextend=0.5)
Once you have set the parameters you need, you can turn the object into
a string (e.g. to log the command):
>>> str(cline)
Traceback (most recent call last):
...
ValueError: You must either set outfile (output filename), or enable filter or stdout (output to stdout).
In this case the wrapper knows certain arguments are required to construct
a valid command line for the tool. For a complete example,
>>> from Bio.Emboss.Applications import WaterCommandline
>>> water_cmd = WaterCommandline(gapopen=10, gapextend=0.5)
>>> water_cmd.asequence = "asis:ACCCGGGCGCGGT"
>>> water_cmd.bsequence = "asis:ACCCGAGCGCGGT"
>>> water_cmd.outfile = "temp_water.txt"
>>> print(water_cmd)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> water_cmd
WaterCommandline(cmd='water', outfile='temp_water.txt', asequence='asis:ACCCGGGCGCGGT', bsequence='asis:ACCCGAGCGCGGT', gapopen=10, gapextend=0.5)
You would typically run the command line via a standard Python operating
system call using the subprocess module for full control. For the simple
case where you just want to run the command and get the output:
stdout, stderr = water_cmd()
Note that by default we assume the underlying tool is installed on the
system $PATH environment variable. This is normal under Linux/Unix, but
may need to be done manually under Windows. Alternatively, you can specify
the full path to the binary as the first argument (cmd):
>>> from Bio.Emboss.Applications import WaterCommandline
>>> water_cmd = WaterCommandline("C:\Program Files\EMBOSS\water.exe",
... gapopen=10, gapextend=0.5,
... asequence="asis:ACCCGGGCGCGGT",
... bsequence="asis:ACCCGAGCGCGGT",
... outfile="temp_water.txt")
>>> print(water_cmd)
"C:\Program Files\EMBOSS\water.exe" -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
Notice that since the path name includes a space it has automatically
been quoted.
"""
# TODO - Replace the above example since EMBOSS doesn't work properly
# if installed into a folder with a space like "C:\Program Files\EMBOSS"
#
# Note the call example above is not a doctest as we can't handle EMBOSS
# (or any other tool) being missing in the unit tests.
parameters = None # will be a list defined in subclasses
    def __init__(self, cmd, **kwargs):
        """Create a new instance of a command line wrapper object.

        cmd is the name (or full path) of the executable; any keyword
        arguments are treated as parameter names and assigned via
        set_parameter.  This base __init__ must be called AFTER the
        subclass has populated self.parameters, e.g.:

            def __init__(self, cmd="muscle", **kwargs):
                self.parameters = [...]
                AbstractCommandline.__init__(self, cmd, **kwargs)

        It validates all parameter names/aliases and installs a Python
        property for each parameter on the subclass at run time.

        Raises AttributeError if the subclass forgot to define
        self.parameters, and ValueError for duplicate or unusable
        parameter names.
        """
        self.program_name = cmd
        try:
            parameters = self.parameters
        except AttributeError:
            raise AttributeError("Subclass should have defined self.parameters")
        # Create properties for each parameter at run time
        aliases = set()
        for p in parameters:
            if not p.names:
                # Only the fixed (read-only) argument type may be nameless.
                assert isinstance(p, _StaticArgument), p
                continue
            # Every alias must be unique across the whole parameter list.
            for name in p.names:
                if name in aliases:
                    raise ValueError("Parameter alias %s multiply defined"
                                     % name)
                aliases.add(name)
            # The last alias is the "human readable" one used as the
            # property name and keyword argument, so it must be a valid,
            # non-reserved Python identifier.
            name = p.names[-1]
            if _re_prop_name.match(name) is None:
                raise ValueError("Final parameter name %s cannot be used as "
                                 "an argument or property name in python"
                                 % repr(name))
            if name in _reserved_names:
                raise ValueError("Final parameter name %s cannot be used as "
                                 "an argument or property name because it is "
                                 "a reserved word in python" % repr(name))
            if name in _local_reserved_names:
                raise ValueError("Final parameter name %s cannot be used as "
                                 "an argument or property name due to the "
                                 "way the AbstractCommandline class works"
                                 % repr(name))
            # These factory functions exist to bind the CURRENT value of
            # `name` at call time; a bare lambda in the loop would close
            # over the loop variable and every property would end up
            # referring to the last parameter (late-binding pitfall).
            def getter(name):
                return lambda x: x._get_parameter(name)
            def setter(name):
                return lambda x, value: x.set_parameter(name, value)
            def deleter(name):
                return lambda x: x._clear_parameter(name)
            doc = p.description
            if isinstance(p, _Switch):
                doc += "\n\nThis property controls the addition of the %s " \
                       "switch, treat this property as a boolean." % p.names[0]
            else:
                doc += "\n\nThis controls the addition of the %s parameter " \
                       "and its associated value. Set this property to the " \
                       "argument value required." % p.names[0]
            prop = property(getter(name), setter(name), deleter(name), doc)
            # NOTE: this installs the property on the CLASS, not the
            # instance, so it is shared by all instances of the subclass.
            setattr(self.__class__, name, prop)
        for key, value in kwargs.items():
            self.set_parameter(key, value)
def _validate(self):
"""Make sure the required parameters have been set (PRIVATE).
No return value - it either works or raises a ValueError.
This is a separate method (called from __str__) so that subclasses may
override it.
"""
for p in self.parameters:
# Check for missing required parameters:
if p.is_required and not(p.is_set):
raise ValueError("Parameter %s is not set."
% p.names[-1])
# Also repeat the parameter validation here, just in case?
def __str__(self):
"""Make the commandline string with the currently set options.
e.g.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline.asequence = "asis:ACCCGGGCGCGGT"
>>> cline.bsequence = "asis:ACCCGAGCGCGGT"
>>> cline.outfile = "temp_water.txt"
>>> print(cline)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> str(cline)
'water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5'
"""
self._validate()
commandline = "%s " % _escape_filename(self.program_name)
for parameter in self.parameters:
if parameter.is_set:
# This will include a trailing space:
commandline += str(parameter)
return commandline.strip() # remove trailing space
def __repr__(self):
"""Return a representation of the command line object for debugging.
e.g.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5)
>>> cline.asequence = "asis:ACCCGGGCGCGGT"
>>> cline.bsequence = "asis:ACCCGAGCGCGGT"
>>> cline.outfile = "temp_water.txt"
>>> print(cline)
water -outfile=temp_water.txt -asequence=asis:ACCCGGGCGCGGT -bsequence=asis:ACCCGAGCGCGGT -gapopen=10 -gapextend=0.5
>>> cline
WaterCommandline(cmd='water', outfile='temp_water.txt', asequence='asis:ACCCGGGCGCGGT', bsequence='asis:ACCCGAGCGCGGT', gapopen=10, gapextend=0.5)
"""
answer = "%s(cmd=%s" % (self.__class__.__name__, repr(self.program_name))
for parameter in self.parameters:
if parameter.is_set:
if isinstance(parameter, _Switch):
answer += ", %s=True" % parameter.names[-1]
else:
answer += ", %s=%s" \
% (parameter.names[-1], repr(parameter.value))
answer += ")"
return answer
def _get_parameter(self, name):
"""Get a commandline option value."""
for parameter in self.parameters:
if name in parameter.names:
if isinstance(parameter, _Switch):
return parameter.is_set
else:
return parameter.value
raise ValueError("Option name %s was not found." % name)
def _clear_parameter(self, name):
"""Reset or clear a commandline option value."""
cleared_option = False
for parameter in self.parameters:
if name in parameter.names:
parameter.value = None
parameter.is_set = False
cleared_option = True
if not cleared_option:
raise ValueError("Option name %s was not found." % name)
def set_parameter(self, name, value=None):
"""Set a commandline option for a program (OBSOLETE).
Every parameter is available via a property and as a named
keyword when creating the instance. Using either of these is
preferred to this legacy set_parameter method which is now
OBSOLETE, and likely to be DEPRECATED and later REMOVED in
future releases.
"""
set_option = False
for parameter in self.parameters:
if name in parameter.names:
if isinstance(parameter, _Switch):
if value is None:
import warnings
warnings.warn("For a switch type argument like %s, "
"we expect a boolean. None is treated "
"as FALSE!" % parameter.names[-1])
parameter.is_set = bool(value)
set_option = True
else:
if value is not None:
self._check_value(value, name, parameter.checker_function)
parameter.value = value
parameter.is_set = True
set_option = True
if not set_option:
raise ValueError("Option name %s was not found." % name)
def _check_value(self, value, name, check_function):
"""Check whether the given value is valid.
No return value - it either works or raises a ValueError.
This uses the passed function 'check_function', which can either
return a [0, 1] (bad, good) value or raise an error. Either way
this function will raise an error if the value is not valid, or
finish silently otherwise.
"""
if check_function is not None:
is_good = check_function(value) # May raise an exception
assert is_good in [0, 1, True, False]
if not is_good:
raise ValueError("Invalid parameter value %r for parameter %s"
% (value, name))
def __setattr__(self, name, value):
"""Set attribute name to value (PRIVATE).
This code implements a workaround for a user interface issue.
Without this __setattr__ attribute-based assignment of parameters
will silently accept invalid parameters, leading to known instances
of the user assuming that parameters for the application are set,
when they are not.
>>> from Bio.Emboss.Applications import WaterCommandline
>>> cline = WaterCommandline(gapopen=10, gapextend=0.5, stdout=True)
>>> cline.asequence = "a.fasta"
>>> cline.bsequence = "b.fasta"
>>> cline.csequence = "c.fasta"
Traceback (most recent call last):
...
ValueError: Option name csequence was not found.
>>> print(cline)
water -stdout -asequence=a.fasta -bsequence=b.fasta -gapopen=10 -gapextend=0.5
This workaround uses a whitelist of object attributes, and sets the
object attribute list as normal, for these. Other attributes are
assumed to be parameters, and passed to the self.set_parameter method
for validation and assignment.
"""
if name in ['parameters', 'program_name']: # Allowed attributes
self.__dict__[name] = value
else:
self.set_parameter(name, value) # treat as a parameter
    def __call__(self, stdin=None, stdout=True, stderr=True,
                 cwd=None, env=None):
        """Executes the command, waits for it to finish, and returns output.

        Runs the command line tool and waits for it to finish. If it returns
        a non-zero error level, an exception is raised. Otherwise two strings
        are returned containing stdout and stderr.

        The optional stdin argument should be a string of data which will be
        passed to the tool as standard input.

        The optional stdout and stderr argument may be filenames (string),
        but otherwise are treated as a booleans, and control if the output
        should be captured as strings (True, default), or ignored by sending
        it to /dev/null to avoid wasting memory (False). If sent to a file
        or ignored, then empty string(s) are returned.

        The optional cwd argument is a string giving the working directory
        to run the command from. See Python's subprocess module documentation
        for more details.

        The optional env argument is a dictionary setting the environment
        variables to be used in the new process. By default the current
        process' environment variables are used. See Python's subprocess
        module documentation for more details.

        As of Biopython 1.56, when the program called returns a non-zero error
        level, a custom ApplicationError exception is raised. This includes
        any stdout and stderr strings captured as attributes of the exception
        object, since they may be useful for diagnosing what went wrong.
        """
        # NOTE(review): os, sys, platform, subprocess and the Python 2
        # name `basestring` are imported/defined at the top of the file,
        # outside this view - confirm before running under Python 3.
        if not stdout:
            stdout_arg = open(os.devnull, "w")
        elif isinstance(stdout, basestring):
            stdout_arg = open(stdout, "w")
        else:
            stdout_arg = subprocess.PIPE
        if not stderr:
            stderr_arg = open(os.devnull, "w")
        elif isinstance(stderr, basestring):
            if stdout == stderr:
                stderr_arg = stdout_arg  # Write both to the same file
            else:
                stderr_arg = open(stderr, "w")
        else:
            stderr_arg = subprocess.PIPE
        # We may not need to supply any piped input, but we setup the
        # standard input pipe anyway as a work around for a python
        # bug if this is called from a Windows GUI program. For
        # details, see http://bugs.python.org/issue1124861
        #
        # Using universal newlines is important on Python 3, this
        # gives unicode handles rather than bytes handles.
        # Windows 7, 8 and 8.1 want shell = True
        # TODO: Test under Windows 10 and revisit platform detection.
        if sys.platform != "win32":
            use_shell = True
        else:
            win_ver = platform.win32_ver()[0]
            if win_ver in ["7", "8", "post2012Server"]:
                use_shell = True
            else:
                use_shell = False
        child_process = subprocess.Popen(str(self), stdin=subprocess.PIPE,
                                         stdout=stdout_arg, stderr=stderr_arg,
                                         universal_newlines=True,
                                         cwd=cwd, env=env,
                                         shell=use_shell)
        # Use .communicate as can get deadlocks with .wait(), see Bug 2804
        stdout_str, stderr_str = child_process.communicate(stdin)
        # When output was redirected (file or /dev/null) communicate()
        # should have captured nothing.
        if not stdout:
            assert not stdout_str, stdout_str
        if not stderr:
            assert not stderr_str, stderr_str
        return_code = child_process.returncode
        # Particularly important to close handles on Jython and PyPy
        # (where garbage collection is less predictable) and on Windows
        # (where cannot delete files with an open handle):
        if not stdout or isinstance(stdout, basestring):
            # We opened /dev/null or a file
            stdout_arg.close()
        if not stderr or (isinstance(stderr, basestring) and stdout != stderr):
            # We opened /dev/null or a file
            stderr_arg.close()
        if return_code:
            raise ApplicationError(return_code, str(self),
                                   stdout_str, stderr_str)
        return stdout_str, stderr_str
class _AbstractParameter(object):
    """Base class describing one commandline parameter (PRIVATE).

    Never instantiate this directly; use one of the concrete
    subclasses (_Option, _Switch, _Argument, _ArgumentList,
    _StaticArgument), each of which knows how to render itself
    as a fragment of the command string via __str__.
    """
    def __init__(self):
        raise NotImplementedError
    def __str__(self):
        raise NotImplementedError
class _Option(_AbstractParameter):
    """Represent an option that can be set for a program.

    This models UNIX-style options which expect a value, such as
    --append=yes or -a yes.  For flag-style options that take no
    value (e.g. clustalw's -kimura) use _Switch instead.

    Attributes:
    o names -- list of string aliases (typically two), e.g.
      ["-a", "--append", "append"].  The FIRST entry is written on the
      command line; the LAST is the human readable name, which must be
      a valid Python identifier since it becomes the property name and
      keyword argument (PEP8 naming recommended).
    o description -- text used as the property docstring.
    o filename -- True if the value is a filename that should be
      quoted automatically when it contains spaces.
    o checker_function -- optional callable validating a candidate
      value; it may raise, or return a truthy/falsy verdict.
    o equate -- whether "name=value" (True) or "name value" (False)
      is emitted.
    o is_required -- whether the option must be set before running.
    o is_set -- whether the option currently has a value.
    o value -- the stored value (None until set).
    """
    def __init__(self, names, description, filename=False, checker_function=None,
                 is_required=False, equate=True):
        self.names = names
        assert isinstance(description, basestring), \
            "%r for %s" % (description, names[-1])
        self.is_filename = filename
        self.checker_function = checker_function
        self.description = description
        self.equate = equate
        self.is_required = is_required
        self.is_set = False
        self.value = None
    def __str__(self):
        """Render this option as a commandline fragment (trailing space).

        The equate flag decides between "--name=value " and
        "--name value "; a None value emits just "--name ".
        """
        if self.value is None:
            return "%s " % self.names[0]
        rendered = _escape_filename(self.value) if self.is_filename \
            else str(self.value)
        template = "%s=%s " if self.equate else "%s %s "
        return template % (self.names[0], rendered)
class _Switch(_AbstractParameter):
    """Represent an optional valueless argument switch for a program.

    Models UNIX-style flags like clustalw's -kimura which are either
    present on the command line or omitted entirely.

    o names -- list of string aliases (typically two), e.g.
      ["-a", "--append", "append"].  The FIRST entry is written on the
      command line; the LAST is the human readable name, which must be
      a valid Python identifier since it becomes the property name and
      keyword argument (PEP8 naming recommended).
    o description -- text used as the property docstring.
    o is_set -- whether the switch is currently enabled.

    NOTE - There is no value attribute; use is_set instead.
    """
    def __init__(self, names, description):
        self.names = names
        self.description = description
        self.is_set = False
        self.is_required = False
    def __str__(self):
        """Render the switch (with trailing space) or an empty string."""
        # A switch must never have acquired a value attribute.
        assert not hasattr(self, "value")
        return "%s " % self.names[0] if self.is_set else ""
class _Argument(_AbstractParameter):
    """Represent a positional argument on a commandline.

    The names argument should be a single-entry list whose string is a
    valid Python identifier (it becomes the property name and keyword
    argument, so PEP8 naming is recommended).
    """
    def __init__(self, names, description, filename=False,
                 checker_function=None, is_required=False):
        self.names = names
        assert isinstance(description, basestring), \
            "%r for %s" % (description, names[-1])
        self.is_filename = filename
        self.checker_function = checker_function
        self.description = description
        self.is_required = is_required
        self.is_set = False
        self.value = None
    def __str__(self):
        """Render the argument value followed by a trailing space."""
        if self.value is None:
            return " "
        if self.is_filename:
            # Quote filenames containing spaces.
            return "%s " % _escape_filename(self.value)
        return "%s " % self.value
class _ArgumentList(_Argument):
    """A variable-length run of arguments, e.g. several input filenames."""
    # TODO - Option to require at least one value? e.g. min/max count?
    def __str__(self):
        """Render all values space separated, ending in a trailing space.

        The trailing space stops the following parameter from merging
        with the last value, e.g.
        "samtools cat in1.bam in2.bam -o out.sam" rather than the
        broken "samtools cat in1.bam in2.bam-o out.sam".
        """
        assert isinstance(self.value, list), \
            "Arguments should be a list"
        assert self.value, "Requires at least one filename"
        if self.is_filename:
            rendered = [_escape_filename(entry) for entry in self.value]
        else:
            rendered = self.value
        return " ".join(rendered) + " "
class _StaticArgument(_AbstractParameter):
    """A fixed, read-only argument baked into the command line (PRIVATE).

    Unlike the other parameter types this is never exposed as a named
    argument or property of the wrapper object - it is always present
    (is_set is permanently True) and has no aliases.
    """
    def __init__(self, value):
        self.value = value
        self.names = []
        self.is_set = True
        self.is_required = False
    def __str__(self):
        """Render the fixed value followed by a trailing space."""
        return "%s " % self.value
def _escape_filename(filename):
    """Quote filenames containing spaces (PRIVATE).

    Filenames without spaces, and filenames that are already wrapped
    in double quotes, are returned unchanged:

    >>> print((_escape_filename('example with spaces')))
    "example with spaces"
    >>> print((_escape_filename('"example with spaces"')))
    "example with spaces"
    """
    if " " not in filename:
        return filename
    already_quoted = filename.startswith('"') and filename.endswith('"')
    if already_quoted:
        return filename
    # Double quoting works on Windows, Mac OS X, Linux etc.
    return '"%s"' % filename
def _test():
    """Run the Bio.Application module's doctests (verbose output)."""
    import doctest
    doctest.testmod(verbose=1)
if __name__ == "__main__":
    # Run the doctests when this module is executed as a script
    _test()
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/Application/__init__.py
|
Python
|
apache-2.0
| 30,870
|
[
"BLAST",
"Biopython"
] |
6e4156ec8652798fabe19158003743cddbe34c10eb96ca64d53ddf8d6a4e2db6
|
import cv2
import numpy as np
import sys
def order_points(pts):
    """Return the four input points ordered TL, TR, BR, BL.

    pts is a (4, 2) array of x/y corner coordinates in arbitrary
    order.  The top-left corner has the smallest x+y sum and the
    bottom-right the largest; along the y-x difference the top-right
    is smallest and the bottom-left largest.  Returns a float32
    (4, 2) array ordered top-left, top-right, bottom-right,
    bottom-left.
    """
    ordered = np.zeros((4, 2), dtype="float32")
    sums = pts.sum(axis=1)
    diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(sums)]   # top-left: smallest x + y
    ordered[2] = pts[np.argmax(sums)]   # bottom-right: largest x + y
    ordered[1] = pts[np.argmin(diffs)]  # top-right: smallest y - x
    ordered[3] = pts[np.argmax(diffs)]  # bottom-left: largest y - x
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral given by *pts* to a top-down view.

    The four points are first ordered (TL, TR, BR, BL), the output
    size is taken from the longest opposing edges, and a perspective
    transform maps the region onto an axis-aligned rectangle,
    yielding a "birds eye view" of the document.
    """
    rect = order_points(pts)
    (tl, tr, br, bl) = rect

    def edge_length(p, q):
        # Euclidean distance between two corners.
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # Output width: the longer of the two horizontal edges.
    max_width = max(int(edge_length(br, bl)), int(edge_length(tr, tl)))
    # Output height: the longer of the two vertical edges.
    max_height = max(int(edge_length(tr, br)), int(edge_length(tl, bl)))
    # Destination corners, in the same TL, TR, BR, BL order.
    dst = np.array([
        [0, 0],
        [max_width - 1, 0],
        [max_width - 1, max_height - 1],
        [0, max_height - 1]], dtype="float32")
    transform = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, transform, (max_width, max_height))
def check_include(centre_list, x_centre, y_centre):
    """Return True when (x_centre, y_centre) is not yet in centre_list.

    A candidate counts as a duplicate (returns False) when an existing
    centre lies within 10 pixels of it on BOTH axes.
    """
    return not any(abs(px - x_centre) < 10 and abs(py - y_centre) < 10
                   for (px, py) in centre_list)
def find_centre(cnts):
    """Return the bounding-box centre of every contour in *cnts*.

    Each contour is an OpenCV-style iterable of [[x, y]] points.  The
    centre is the midpoint of the min/max x and y coordinates.
    Near-duplicate centres (within 10 px on both axes of one already
    collected, as judged by check_include) are skipped.
    """
    centre_list = []
    for contour in cnts:
        xs = [pt[0][0] for pt in contour]
        ys = [pt[0][1] for pt in contour]
        # Midpoint of the extremes on each axis.
        cx = int((min(xs) + max(xs)) / 2)
        cy = int((min(ys) + max(ys)) / 2)
        if check_include(centre_list, cx, cy):
            centre_list.append((cx, cy))
    return centre_list
def process_centre_list(centre_list):
    """Group rectangle centres (sorted by y) into rows of the answer area.

    centre_list is assumed to be ordered by increasing y coordinate.
    Within one row the y values are nearly equal, so a running average
    of consecutive y deltas is kept; a delta jumping to more than 3x
    the previous average is taken as the boundary between two rows.
    Returns a list of rows, each a list of centre points.

    NOTE(review): the final row's column_list is never appended to
    row_list, and the loop stops at len(centre_list) - 1, so the last
    centre point never appears in the output - presumably compensated
    for by the caller; verify before reuse.
    """
    # current_total_delta is the running sum of y deltas in this row;
    # the _copy variable preserves the previous value so it can be
    # restored when a row boundary is detected.
    current_total_delta = 0
    current_total_delta_copy = 0
    current_average_number = 1
    # current_average_delta = current_total_delta/current_average_number;
    # the _copy variable holds the previous iteration's average.
    current_average_delta = 0
    current_average_delta_copy = 0
    # row_list collects completed rows; column_list collects the
    # centre points of the row currently being built.
    row_list = []
    column_list = []
    for i in range(len(centre_list) - 1):
        delta_y1 = (centre_list[i + 1][1] - centre_list[i][1])
        current_total_delta_copy = current_total_delta
        current_total_delta += delta_y1
        # 1.0 * forces float division (Python 2 compatible).
        current_average_delta = 1.0 * current_total_delta / current_average_number
        current_average_number += 1
        if current_average_delta > current_average_delta_copy * 3 and current_average_delta_copy != 0:
            # Row boundary: the y jump dwarfs the in-row average.
            current_total_delta = current_total_delta_copy  # restore total delta from copy
            column_list.append(centre_list[i])
            row_list.append(column_list)
            column_list = []
            # Reset the running statistics for the next row.
            current_total_delta = 0
            current_total_delta_copy = 0
            current_average_number = 1
            continue
        column_list.append(centre_list[i])
        current_average_delta_copy = current_average_delta
    return row_list
# This function finds the answers the student chose by inspecting
# pixel darkness around each rectangle centre.
# centre_list: list of (x, y) centre coordinates of the rectangles.
# thresh1: thresholded image (2D array indexed [y, x]).
def find_answer(centre_list, thresh1):
    # For each centre, sum the pixel values over an 80x80 square; a low
    # sum means dark (pencil-marked) pixels are present in that region.
    for point in centre_list:
        px = 0
        x_start, x_end = point[0] - 40, point[0] + 40
        y_start, y_end = point[1] - 40, point[1] + 40
        for x in range(x_start, x_end):
            for y in range(y_start, y_end):
                px += thresh1[y, x]
        # 1532000 is an empirical threshold: sums below it indicate
        # handwriting in this region (an all-white 80x80 patch of a
        # 0..255 image sums to 1,632,000).
        if px < 1532000:
            # NOTE(review): x and y here are the LEAKED loop variables
            # (the last scanned pixel), so the circle is drawn near the
            # bottom-right of the region, not at the rectangle centre -
            # presumably point[0]/point[1] were intended; confirm.
            cv2.circle(thresh1, (x - 40, y - 40), 40, (0, 0, 0))
# This function tries to reconstruct answer rectangles that
# cv2.findContours failed to detect, by comparing each row against the
# longest (most complete) row.
def find_missing_rectangle(centre_list, centre_list_col, x_uncertainty, y_uncertainty):
    """Fill in missing rectangle centres row by row.

    centre_list_col must be sorted by y; points whose y lies within
    y_uncertainty of the current row base are grouped into one row.
    The longest row then serves as the x-coordinate template, and any
    shorter row has placeholder centres inserted (also appended to
    centre_list, which is mutated in place).  Returns the completed
    list of rows.

    NOTE(review): base_list is initialised to [] while max_length
    starts at len(total_list[0]) with a strict '>' comparison, so if
    the FIRST row is the (joint) longest, base_list stays empty and
    the except branch below raises IndexError - confirm callers never
    hit this.
    """
    row_list = []
    total_list = []
    base = centre_list_col[0][1]  # use first column point's y as the base
    y_max = base + y_uncertainty  # upper y bound for the current row
    for i in range(len(centre_list_col)):
        if centre_list_col[i][1] < y_max:
            row_list.append(centre_list_col[i])
        else:
            # This point starts a new line: move the base down and
            # flush the finished row.
            y_max = centre_list_col[i][1] + y_uncertainty
            total_list.append(row_list)
            row_list = []  # renew the row_list
            # add the first element of next line into new row_list
            row_list.append(centre_list_col[i])
    # add final row list into total list.
    total_list.append(row_list)
    # Find the longest row; it becomes the x-coordinate template
    # (base_list) used to detect missing rectangles in shorter rows.
    max_length = len(total_list[0])
    base_list = []
    for row_list in total_list:
        if len(row_list) > max_length:
            max_length = len(row_list)
            base_list = row_list
    total_list_copy = []
    # sort base list by x so slots line up left to right
    base_list = sorted(base_list)
    for row_list in total_list:
        row_list = sorted(row_list)
        if len(row_list) == max_length:
            # Row already complete: keep as-is.
            total_list_copy.append(row_list)
            continue
        for i in range(max_length):
            try:
                # NOTE(review): base is the template x MINUS the
                # uncertainty, so a present point (x close to the
                # template x) also satisfies 'row_list[i][0] > base';
                # the intended comparison may have been against
                # template x PLUS x_uncertainty - verify.
                base = base_list[i][0] - x_uncertainty
                if row_list[i][0] > base:
                    x_axis = base_list[i][0]
                    y_axis = row_list[0][1]
                    row_list.insert(i, (x_axis, y_axis))
                    centre_list.append((x_axis, y_axis))
                    if len(row_list) == max_length:
                        total_list_copy.append(row_list)
                        break
            except:
                # NOTE(review): bare except - intended to catch the
                # IndexError when row_list is shorter than i, but it
                # hides every other error too; narrow to IndexError.
                x_axis = base_list[i][0]
                y_axis = row_list[0][1]
                row_list.insert(i, (x_axis, y_axis))
                centre_list.append((x_axis, y_axis))
                if len(row_list) == max_length:
                    total_list_copy.append(row_list)
                    break
    return total_list_copy
# answer_list is a list of rows of the answer sheet; each row is a list
# of rectangle centre points.
def find_answer2(answer_list,number_of_choice,thresh1,pixel=40, number_of_question=40):
    """Detect the marked choice for each question, column by column.

    For every question the pixel sums of its choice rectangles are
    compared against an adaptive darkness boundary, which is nudged up
    (x1.01) when no rectangle qualifies and down when one does, until
    exactly one choice is detected.  Returns the list of chosen column
    indices as strings.

    NOTE(review): several suspect spots -
    * 'len(answer_list[0])/number_of_choice' is true division on
      Python 3 (float), breaking range(column); should be '//'.
    * 'range(i*4, ...)' hard-codes 4 rectangles per question although
      number_of_choice is a parameter.
    * after a choice is found the while loop is only left when
      number_of_question reaches 40, so termination relies on the
      sheet having exactly 40 questions - confirm.
    * the number_of_question parameter is immediately overwritten.
    """
    column = len(answer_list[0])/number_of_choice
    assert(column == 3)
    answer = []
    number_of_question = 0
    number_of_answer = 0
    for i in range(column):
        if number_of_answer==40:
            break
        for j in range(len(answer_list)):
            # Start from the empirical darkness threshold and adapt.
            boundary = 1532000
            number_of_answer = 0
            while(True):
                for k in range(i*4,i*4+number_of_choice):
                    point = answer_list[j][k]
                    px = 0
                    # Sum pixel values over a (2*pixel)^2 square
                    # centred on the rectangle.
                    x_start, x_end = point[0] - pixel, point[0] + pixel
                    y_start, y_end = point[1] - pixel, point[1] + pixel
                    for x in range(x_start, x_end):
                        for y in range(y_start, y_end):
                            px += thresh1[y, x]
                    # A sum below the boundary means handwriting is
                    # present in this region.
                    if px < boundary:
                        # NOTE(review): x, y are leaked loop variables
                        # (last pixel scanned), not the centre.
                        cv2.circle(thresh1, (x - pixel, y - pixel), 40, (0, 0, 0))
                        number_of_answer += 1
                        choice = str(k)
                        if number_of_answer == 1:
                            number_of_question += 1
                            answer.append(choice)
                            break
                if number_of_question==40:
                    break
                if number_of_answer == 0:
                    # Nothing detected: relax the boundary upwards.
                    boundary = boundary * (1.01)
                    number_of_answer = 0
                else:
                    # Something detected: tighten the boundary.
                    boundary = boundary / 1.01
                    number_of_answer = 0
            if number_of_answer==40:
                break
    return answer
# answers holds the detected column numbers (string or list of strings);
# num_of_choice is the number of choices per question.  This converts
# each column number into its A/B/C/D letter.
def change_num_into_choice(answers, num_of_choice):
    """Map answer-sheet column indices onto choice letters.

    Each entry of *answers* is reduced modulo num_of_choice to a
    choice index 0..n-1, whose decimal digit is then shifted by 17 in
    ASCII ('0' -> 'A', '1' -> 'B', ...).  Returns the concatenated
    letter string.
    """
    letters = []
    for raw in answers:
        index = int(raw) % num_of_choice
        # ord('0'..'9') is 48..57; adding 17 lands on 'A'..'J'.
        letters.append(chr(ord(str(index)) + 17))
    return "".join(letters)
def grade_answer(correct_answer, answer):
    """Compare the student's answers against the answer key.

    Non-letter characters (question numbers, spacing) are stripped
    from *correct_answer* first.  Returns a list of
    [question_index, student_letter, correct_letter] triples for every
    mismatch (empty list when all correct), or None when the filtered
    key and the answers differ in length.
    """
    key = "".join(ch for ch in correct_answer if ch.isalpha())
    print(len(key))
    print(len(answer))
    if len(key) != len(answer):
        print("The number of answer is inconsistent with correct answer.")
        return None
    mistakes = []
    for idx, (got, expected) in enumerate(zip(answer, key)):
        if got != expected:
            mistakes.append([idx, got, expected])
    return mistakes
def grading(image1, answer_file_name):
    """Grade a photographed answer sheet against an answer-key text file.

    Parameters:
        image1: filename (inside the "upload/" folder) of the student's
            answer-sheet image.
        answer_file_name: filename (inside "upload/") of the answer key.

    Returns:
        The mismatch list produced by grade_answer(), or None when the
        number of detected answers does not match the key.
    """
    name = "upload/" + image1
    image = cv2.imread(name)
    # Read the whole answer key and close the handle promptly
    # (the original left the file open).
    with open("upload/" + answer_file_name) as f:
        correct_answer = f.read()

    # convert image to grayscale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # blur the image slightly to remove noise.
    gray = cv2.bilateralFilter(gray, 11, 17, 17)
    # canny edge detection
    edged = cv2.Canny(gray, 30, 200)
    # Gaussian adaptive threshold keeps pencil marks dark on a white sheet.
    thresh1 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)

    # findContours returns 2 values in OpenCV 2.x/4.x and 3 values in 3.x,
    # hence the try/except fallback.
    try:
        (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    except Exception:
        (_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    # keep only the largest contours by area
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:1000]
    # all rectangle contours found on the sheet
    cnts_rect = []
    for c in cnts:
        peri = cv2.arcLength(c, True)
        # approxPolyDP yields the polygon's vertices; k tunes the tolerance
        # (usable range roughly 0.005 .. 0.1)
        k = 0.1
        approx = cv2.approxPolyDP(c, k * peri, True)
        # four vertices plus a large area => one of the answer rectangles
        if len(approx) == 4 and cv2.contourArea(c) > 15000:
            cnts_rect.append(approx)

    cv2.drawContours(image, cnts_rect, -1, (0, 255, 0), 3)

    # height/length of a reference rectangle, used to locate missing ones
    height = cnts_rect[0][2][0][1] - cnts_rect[0][0][0][1]
    length = cnts_rect[0][2][0][0] - cnts_rect[0][0][0][0]

    centre_list = find_centre(cnts_rect)
    centre_list_col = sorted(centre_list, key=lambda point: point[1])
    # answer_list: one list of rectangle-centre points per row of the sheet
    answer_list = find_missing_rectangle(centre_list, centre_list_col, length // 2, height // 2)

    number_of_choice = 4
    answer = find_answer2(answer_list, number_of_choice, thresh1, pixel=40, number_of_question=40)
    answer = change_num_into_choice(answer, number_of_choice)
    result = grade_answer(correct_answer, answer)
    print(result)
    # NOTE(review): the original carried an unreachable perspective-transform
    # and imwrite tail after this return; that dead code was removed.
    return result
# Script entry point: the interactive driver below is disabled (pass),
# presumably because this module is imported by the web app, which calls
# grading() directly — TODO confirm against the caller.
if __name__ == '__main__':
    pass
    # image_file = raw_input("Please input the name of student answer sheet(image file) -> ")
    # answer_file = raw_input("Please input the name of answer(text file) -> ")
    #
    # answer = grading(image, answer_file)
    # print("This is the output of the main function ", answer)
    #
    #
    #
|
Hubert51/AutoGrading
|
Web/main.py
|
Python
|
mit
| 20,714
|
[
"Gaussian"
] |
d0d1f1f843797365832fceaf383542824b229bf69071ef836010936467390538
|
import logging
from pytopol.parsers import blocks
lgr = logging.getLogger('mainapp.utils')
def build_res_chain(m):
    """Populate ``m.residues`` and ``m.chains`` from ``m.atoms``.

    Walks the atom list in order, starting a new Residue whenever the
    (resname, resnumb, chain) triple changes, then groups consecutive
    residues into Chains by chain name.  Each atom receives a back-reference
    to its residue and each residue to its chain.
    """
    R = None        # current residue being filled
    residues = []
    for a in m.atoms:
        # a new residue starts when any identifying field changes
        if R is None or (a.resname != R.name or
                         a.resnumb != R.number or
                         a.chain != R.chain_name):
            R = blocks.Residue()
            R.name = a.resname
            R.number = a.resnumb
            R.chain_name = a.chain
            residues.append(R)
        R.atoms.append(a)
        a.residue = R
    m.residues = residues

    # chains
    C = None        # current chain object
    chains = []
    for r in m.residues:
        if C is None or (r.chain_name != C.name):
            C = blocks.Chain()
            C.name = r.chain_name
            chains.append(C)
        C.residues.append(r)
        r.chain = C
    m.chains = chains
def build_pairs(m, format):
    """Build 1-4 pair interactions for molecule *m* from its dihedrals.

    The (first, fourth) atoms of each dihedral become a pair unless the two
    atoms are already directly bonded (1-2), already the outer atoms of an
    angle (1-3), or the pair was seen before in either order.  Appends
    ``blocks.InteractionType`` instances to ``m.pairs``.

    :param format: force-field flavour, one of 'charmm', 'gromacs' or None
    """
    assert format in ('charmm', 'gromacs', None)

    # Atom-number pairs already connected through a bond or an angle
    # (sets built directly instead of list-then-set as before).
    _bonds = {(bond.atom1.number, bond.atom2.number) for bond in m.bonds}
    _angles = {(ang.atom1.number, ang.atom3.number) for ang in m.angles}

    _pairs = set()
    for dih in m.dihedrals:
        p1 = dih.atom1.number
        p4 = dih.atom4.number
        # skip 1-4 pairs that are really 1-2 or 1-3 contacts
        if (p1, p4) in _bonds or (p1, p4) in _angles or \
           (p4, p1) in _bonds or (p4, p1) in _angles:
            continue
        # skip duplicates in either orientation
        if (p1, p4) in _pairs or (p4, p1) in _pairs:
            continue
        _pairs.add((p1, p4))

        thispair = blocks.InteractionType(format)
        thispair.atom1 = dih.atom1
        thispair.atom2 = dih.atom4
        m.pairs.append(thispair)
|
resal81/PyTopol
|
pytopol/parsers/utils.py
|
Python
|
gpl-3.0
| 2,017
|
[
"CHARMM",
"Gromacs"
] |
b2e2ad5b4bb9af08417c77aec69321555270998404fa0fe811e80256d533a1d7
|
"""
==============
PopVis Example
==============
"""
# Authors: Mayank Agrawal <mayankagrawal96@gmail.com>
#
# License: MIT
########################################################
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from spykes.plot.neurovis import NeuroVis
from spykes.plot.popvis import PopVis
from spykes.io.datasets import load_reward_data
import random
########################################################
# 0 Initialization
# -----------------------------
#
# 0.1 Download Data
# ~~~~~~~~~~~~~
#
# Download all files [`here
# <https://figshare.com/articles/Ramkumar_et_al_2016_Premotor_and_motor_cortices_encode_reward/3573447>`__]
# However, we'll only be looking at Mihili_08062013.mat (Monkey M, Session 4)
#
# 0.2 Read In Data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Load the example reward dataset (second return value is the .mat contents).
_, mat = load_reward_data()
########################################################
#
# 0.3 Initialize Variables
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
event = 'rewardTime'        # trial column to align spikes on
condition = 'rewardBool'    # grouping condition for the PSTHs
window = [-500, 1500]       # peri-event window (presumably ms — TODO confirm)
binsize = 10                # PSTH bin width (same unit as window)
########################################################
# 1 PopVis
# -----------------------------
#
# 1.1 Initiate all Neurons
# ~~~~~~~~~~~~~
def get_spike_time(raw_data, neuron_number):
    """Return the spike times of one PMd unit (1-based *neuron_number*)."""
    units = raw_data['alldays'][0]['PMd_units'][0][:]
    # drop the first entry, then unwrap each single-element row
    raw_times = units[neuron_number - 1][0][1:]
    return [row[0] for row in raw_times]
########################################################
def initiate_neurons(raw_data):
    """Build one NeuroVis object per PMd unit found in *raw_data*."""
    n_units = (raw_data['alldays'][0]['PMd_units'][0][:]).shape[0]
    neurons = []
    for idx in range(1, n_units + 1):
        # instantiate one neuron, named by its 1-based unit index
        spikes = get_spike_time(raw_data, idx)
        neurons.append(NeuroVis(spikes, name='PMd %d' % idx))
    return neurons
########################################################
# Build the full list of NeuroVis objects for this session.
neuron_list = initiate_neurons(mat)
########################################################
#
# 1.2 Get Event Times
# ~~~~~~~~~~~~~
def create_data_frame(raw_data):
    """Assemble a trial DataFrame and keep only usable reward trials.

    Trials are kept when their uncertainty condition is 5.0 or 50.0 and the
    gap between the previous reward onset and the current trial start is at
    most 1.5 (consecutive trials).
    """
    # column name -> column index inside each day's trial table ('tt')
    cols = {'uncertaintyCondition': 2, 'centerTargetTime': 3,
            'rewardTime': 6, 'rewardOutcome': 7}
    collected = {name: [] for name in cols}
    for day in range(raw_data['alldays'].shape[0]):
        meta_data = raw_data['alldays'][day]['tt'][0]
        for name, col in cols.items():
            collected[name].append(meta_data[:, col])

    data_df = pd.DataFrame()
    for name in cols:
        data_df[name] = np.concatenate(collected[name])

    # outcome code 32 marks a rewarded trial
    data_df['rewardBool'] = data_df['rewardOutcome'].map(lambda s: s == 32)

    # time between the previous reward onset and the start of the current
    # trial must not exceed 1.5 for the trial to count as consecutive
    start_times = data_df['centerTargetTime']
    last_reward_times = np.roll(data_df['rewardTime'], 1)
    diffs = start_times - last_reward_times
    diffs[0] = 0
    data_df['consecutiveBool'] = diffs.map(lambda s: s <= 1.5)

    return data_df[((data_df['uncertaintyCondition'] == 5.0) |
                    (data_df['uncertaintyCondition'] == 50.0)) &
                   data_df['consecutiveBool']]
########################################################
# Build the filtered trial table used for all PSTHs below.
data_df = create_data_frame(mat)
print(len(data_df))
data_df.head()
########################################################
#
# 1.3 Create PopVis Object
# ~~~~~~~~~~~~~
neuron_list = initiate_neurons(mat)[:10]  # let's just look at first 10 neurons
pop = PopVis(neuron_list)
########################################################
#
# 1.3.1 Plot Heat Map
# ^^^^^^^^^^^^^^^^^^^
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.3)
all_psth = pop.get_all_psth(
    event=event, df=data_df, conditions=condition, window=window,
    binsize=binsize, plot=True)
########################################################
#
# 1.3.2 Plot Heat Map. Sort by Peak Latency
# ^^^^^^^^^^^^^^^^^^^
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.3)
pop.plot_heat_map(all_psth, sortby='latency')
########################################################
#
# 1.3.3 Plot Heat Map. Sort by Avg Firing Rate in Ascending Order.
# ^^^^^^^^^^^^^^^^^^^
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.3)
pop.plot_heat_map(all_psth, sortby='rate', sortorder='ascend')
########################################################
#
# 1.3.4 Plot Heat Map. Normalize Each Neuron Individually.
# ^^^^^^^^^^^^^^^^^^^
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.3)
pop.plot_heat_map(all_psth, normalize='each')
########################################################
#
# 1.3.5 Plot Heat Map. Normalize All Neurons and Sort in Specified Order.
# ^^^^^^^^^^^^^^^^^^^
# BUG FIX: range() is immutable in Python 3, so random.shuffle() would raise
# a TypeError; materialize it as a list first.
random_list = list(range(10))
random.shuffle(random_list)
print(random_list)
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(hspace=.3)
pop.plot_heat_map(all_psth, normalize='all', sortby=random_list)
########################################################
#
# 1.3.5. Plot Population PSTH
# ^^^^^^^^^^^^^^^^^^^
plt.figure(figsize=(10, 5))
pop.plot_population_psth(all_psth=all_psth)
|
codekansas/spykes
|
examples/plot_popvis_example.py
|
Python
|
mit
| 5,338
|
[
"NEURON"
] |
9e195ede43901b5a3d7b00c5b5fe60eaea82984ee7bc5f5696dc5f03aebfb37f
|
""" Code used for running the package directly from the Command Line """
import argparse
import logging
import os.path
import sys
import time

from .db import AvailabilityDB
from .core import AvailabilityInfo
from .reports import ALL_REPORTS
from .settings import get_settings
def run():
    """Parse command-line arguments and dispatch to the requested action.

    Builds the argument parser (one sub-command per action), configures log
    verbosity from -v/-q, opens the availability database named in the
    settings file, and executes the selected action.  Exits the process via
    sys.exit() when an operation is cancelled or nothing remains to do.
    """
    # -------------------------- Logging Settings ---------------------------
    logger = logging.getLogger(__name__)
    main_logger = logging.getLogger('dn_availability')
    _handler = logging.StreamHandler()
    _formatter = logging.Formatter('%(levelname)s(%(name)s): %(message)s')
    _handler.setFormatter(_formatter)
    main_logger.addHandler(_handler)

    # -------------------------- Argument Parser -----------------------------
    parser = argparse.ArgumentParser(
        description='A utility for managing available numbers for a Cisco UCM system',
        epilog='For more information, visit the project page at: https://github.com/supernathan23/dn_availability')

    subparsers = parser.add_subparsers(title='Actions', dest='action',
        metavar='<action>',
        description='You can enter "<action> -h" for details '
                    'on that action',
        help='Available actions: add_report backup example_settings export '
             'gen_report import init list restore')

    # global args
    parser.add_argument('-f', dest='settings_file',
        help='Settings File. See the example_settings.cfg file for details')
    parser.add_argument('-c', '--confirm', action='store_true',
        dest='confirm',
        help='Prompt for comfirmation before doing anything. '
             'Default is to only prompt when deleting data')
    parser.add_argument('-q', '--quiet', action='store_true',
        help='Do not prompt for confirmations')
    parser.add_argument('-v', '--verbose', action='count',
        help='Display log messages. (Will override -q | --quiet)')

    # Example Settings
    parser_settings = subparsers.add_parser('example_settings')
    parser_settings.add_argument('-o', '--output_file',
        help='Output filename (will be overwritten if it exists!!)')

    # list subcommand
    parser_list = subparsers.add_parser('list')
    parser_list.add_argument('-t', '--table',
        help='Table to list data from, if not provided will display a list of '
             'tables')

    # init subcommand
    parser_init = subparsers.add_parser('init_db')
    parser_init.add_argument('-D', '--drop', action='store_true', default=False,
        help='Drops existing tables, erasing existing data, before initializing')

    # import subcommand
    parser_import = subparsers.add_parser('import')
    parser_import.add_argument('table',
        help='Table to store the imported data. (use the list command to get a '
             'list of the available tables)')
    parser_import.add_argument('filename',
        help='CSV filename to import')

    # export subcommand
    parser_export = subparsers.add_parser('export')
    parser_export.add_argument('table',
        help='Table to export. (use the list command to get a list of the '
             'available tables)')
    parser_export.add_argument('filename',
        help='Destination filename (will be overwritten if it exists!!)')

    # backup subcommand
    parser_backup = subparsers.add_parser('backup')
    parser_backup.add_argument('filename',
        help='Destination filename (will be overwritten if it exists!!)')

    # restore subcommand
    parser_restore = subparsers.add_parser('restore')
    parser_restore.add_argument('filename',
        help='Source filename')
    parser_restore.add_argument('-D', '--drop', action='store_true', default=False,
        help='Drops existing tables, erasing existing data, before restoring backup')

    # add_report subcommand
    parser_add_report = subparsers.add_parser('add_report')
    group_add_report = parser_add_report.add_mutually_exclusive_group(required=True)
    group_add_report.add_argument('-t', '--timestamp',
        help='Timestamp of when the report was generated.')
    group_add_report.add_argument('-a', '--auto_timestamp',
        action='store_true', default=False,
        help='Obtain the timestamp from the file\'s creation date. Will prompt to '
             'confirm that the timestamp is correct.')
    parser_add_report.add_argument('-c', '--confirm_timestamp',
        action='store_true', default=False,
        help='Prompts to confirm the timestamp is correct. Timestamp is shown '
             'in the systems standard format to make things easier. (Enabled by '
             'default when -a (--auto_timestamp) is used')
    parser_add_report.add_argument('system_id',
        help='Phone System ID (can be obtained by using "list -t PhoneSystem" '
             'subcommand')
    parser_add_report.add_argument('filename',
        help='Device report filename to be added to the system')

    # gen_report subcommand
    parser_gen_report = subparsers.add_parser('gen_report')
    parser_gen_report.add_argument('report_name',
        choices=ALL_REPORTS.keys(), metavar='report_name',
        help='Name of the report. Available Reports: {}'.format(
            ', '.join(ALL_REPORTS.keys()))
        )
    parser_gen_report.add_argument('-s', '--system_id', action='append',
        help='System ID (use the list -t PhoneSystem" subcommand for a list of'
             ' systems)')
    parser_gen_report.add_argument('-g', '--number_group', action='append',
        help='Number Group ID (use the "list -t NumberGroup" subcommand for a '
             'list of number groups')
    parser_gen_report.add_argument('-o', '--output_filename',
        help='Destination filename (will be overwritten if it exists!!)')

    # ---------------------------Setup----------------------------------------
    args = parser.parse_args()

    # -v wins over -q; more than one -v enables debug output
    if args.verbose:
        log_level = logging.INFO
        if args.verbose > 1:
            log_level = logging.DEBUG
    elif args.quiet:
        log_level = logging.ERROR
    else:
        log_level = logging.WARNING
    main_logger.setLevel(log_level)
    logger.info('Log verbosity set to %s', log_level)

    app_settings = get_settings(args.settings_file)
    db = AvailabilityDB(app_settings['DEFAULT']['db_url'])
    info = AvailabilityInfo(db)

    # -------------------------- Actions -------------------------------------
    if args.action == 'list':
        if not args.table:
            logger.info('Listing tables')
            print('Active Tables:')
            print('\n'.join(info.db.metadata.tables.keys()))
            sys.exit()
        logger.info('Listing records for table %s', args.table)
        conn = info.db.connect()
        table = info.db.get_table(args.table)
        results = conn.execute(table.select())
        for row in results:
            print(row)

    if args.action == 'example_settings':
        from pkg_resources import resource_string
        settings_data = resource_string('dn_availability', 'example_settings.cfg').decode()
        if args.output_file:
            if args.confirm:
                print('About to export an example settings file to "{}". (If file '
                      'exists it will be overwritten)'.format(args.output_file))
                if not confirmation():
                    logger.info('Operation cancelled')
                    sys.exit()
            logger.info('Exporting example settings file to "%s"', args.output_file)
            with open(args.output_file, 'w') as f:
                f.write(settings_data)
        else:
            print(settings_data)

    elif args.action == 'init_db':
        if args.drop:
            if not args.quiet:
                print('You are about to re-initialize the DB, '
                      'ALL EXISTING DATA WILL BE ERASED!!!')
                if not confirmation():
                    logger.info('Operation cancelled')
                    sys.exit()
            info.db.teardown_db()
            logger.info('DB torn down')
        info.db.setup_db()
        logger.info('DB initialized')

    elif args.action == 'import':
        if args.confirm:
            print('About to import data from "{}" into table "{}"'.format(
                args.filename, args.table))
            if not confirmation():
                logger.info('Operation cancelled')
                sys.exit()
        logger.info('Importing data from "%s" into table %s', args.table,
                    args.filename)
        info.db.csv_import(args.table, args.filename)
        logger.info('Import complete')

    elif args.action == 'export':
        if args.confirm:
            print('About to export data from table {} to "{}". (If file exists it '
                  'will be overwritten)'.format(args.table, args.filename))
            if not confirmation():
                logger.info('Operation cancelled')
                sys.exit()
        logger.info('Exporting data from table %s to file "%s"', args.table,
                    args.filename)
        info.db.csv_export(args.table, args.filename)
        logger.info('Export complete')

    elif args.action == 'backup':
        if args.confirm:
            print('About to backup data to file "{}". (If file exists it will be '
                  'overwritten)'.format(args.filename))
            if not confirmation():
                logger.info('Operation cancelled')
                sys.exit()
        logger.info('Backing up date to file %s', args.filename)
        info.db.backup_data(args.filename)
        logger.info('Backup complete')

    elif args.action == 'restore':
        if args.confirm or args.drop:
            # BUG FIX: .format() used to be inside the string literal, so the
            # filename was never substituted
            print('About to restore data from backup "{}"'.format(args.filename))
            if args.drop:
                print('Existing data will be ERASED!')
            else:
                print('Existing data will be maintained, but may interfere with '
                      'the backup data')
            if not confirmation():
                logger.info('Operation cancelled')
                sys.exit()
        # BUG FIX: the %s placeholder previously had no argument
        logger.info('Restoring data from file %s', args.filename)
        if args.drop:
            logger.info('Existing data will be ERASED')
        info.db.restore_data(args.filename, args.drop)
        logger.info('Restore complete.')

    elif args.action == 'add_report':
        timestamp = args.timestamp
        if args.auto_timestamp:
            timestamp = os.path.getctime(args.filename)
            logger.debug('Received timestamp %s from file', timestamp)
        print('About to add device report "{}" for system ID {} and timestamp '
              '{}'.format(args.filename, args.system_id, timestamp))
        if (args.auto_timestamp and not args.quiet) or args.confirm:
            # requires the 'time' module (added to the imports)
            prompt = 'Is timestamp "{}" correct?'.format(time.ctime(timestamp))
            if not confirmation(prompt):
                logger.info('Next time, use the -t (--timestamp) option to manually '
                            'specify the timestamp')
                logger.info('Operation cancelled')
                sys.exit()
        logger.info('Adding device report "%s" for system ID %s and timestamp '
                    '%s', args.filename, args.system_id, timestamp)
        info.add_device_report(args.filename, args.system_id, timestamp)
        logger.info('Report added.')

    elif args.action == 'gen_report':
        ReportDef = ALL_REPORTS[args.report_name]
        if args.output_filename:
            if args.confirm:
                print('About to generate report {} to file "{}". (If file '
                      'exists it will be overwritten!)'.format(
                          args.report_name, args.output_filename))
                if not confirmation():
                    logger.info('Operation cancelled')
                    sys.exit()
            f = open(args.output_filename, 'w')
        else:
            f = sys.stdout
        try:
            ReportDef(
                avail_info_obj=info,
                output_file=f,
                args=args)
        finally:
            # close only real files, never sys.stdout
            if f is not sys.stdout:
                f.close()
def confirmation(question='Are you sure?', default=None):
    """Prompt the user for a yes/no confirmation and return the answer.

    *question* is the text shown to the user.  *default* is returned when
    the user just presses enter; when it is None the prompt repeats until an
    acceptable answer is given.

    (based on http://code.activestate.com/recipes/577058/)
    """
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        suffix = ' [y/n] '
    else:
        suffix = ' [Y/n] ' if default else ' [y/N] '
    while True:
        response = input(question + suffix).lower()
        if not response and default is not None:
            return default
        if response in valid:
            return valid[response]
        print('YES OR NO!?')
# Allow running this module directly as a script.
if __name__ == '__main__':
    run()
|
supernathan23/dn_availability
|
dn_availability/cli.py
|
Python
|
mit
| 13,300
|
[
"VisIt"
] |
96494def86ae796a7b7d2b4709d12c470f738bcefec968ac9c06bf18e3cb6499
|
"""
The Grid module contains several utilities for grid operations
"""
import os
import re
from DIRAC.Core.Utilities.Os import sourceEnv
from DIRAC.FrameworkSystem.Client.ProxyManagerClient import gProxyManager
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.ConfigurationSystem.Client.Helpers import Local
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
from DIRAC.Core.Utilities.Subprocess import systemCall, shellCall
import DIRAC.Core.Utilities.Glue2 as Glue2
__RCSID__ = "$Id$"
def executeGridCommand(proxy, cmd, gridEnvScript=None):
  """Execute *cmd* (a command tuple) after sourcing the grid environment.

  :param proxy: falsy to use the current proxy, a path string to an existing
                proxy file, or a proxy object to be dumped to a file
  :param cmd: command tuple passed to systemCall
  :param gridEnvScript: optional environment script; defaults to the CS value
  :return: S_OK/S_ERROR structure returned by systemCall
  """
  currentEnv = dict(os.environ)

  if not gridEnvScript:
    # if not passed as argument, use default from CS Helpers
    gridEnvScript = Local.gridEnv()

  if gridEnvScript:
    command = gridEnvScript.split()
    ret = sourceEnv(10, command)
    if not ret['OK']:
      return S_ERROR('Failed sourcing GridEnv: %s' % ret['Message'])
    gridEnv = ret['outputEnv']

    # Preserve some current settings if they are there
    # ('in' replaces the deprecated dict.has_key)
    if 'X509_VOMS_DIR' in currentEnv:
      gridEnv['X509_VOMS_DIR'] = currentEnv['X509_VOMS_DIR']
    if 'X509_CERT_DIR' in currentEnv:
      gridEnv['X509_CERT_DIR'] = currentEnv['X509_CERT_DIR']
  else:
    gridEnv = currentEnv

  if not proxy:
    res = getProxyInfo()
    if not res['OK']:
      return res
    gridEnv['X509_USER_PROXY'] = res['Value']['path']
  elif isinstance(proxy, basestring):
    if os.path.exists(proxy):
      gridEnv['X509_USER_PROXY'] = proxy
    else:
      return S_ERROR('Can not treat proxy passed as a string')
  else:
    ret = gProxyManager.dumpProxyToFile(proxy)
    if not ret['OK']:
      return ret
    gridEnv['X509_USER_PROXY'] = ret['Value']

  result = systemCall(120, cmd, env=gridEnv)
  return result
def ldapsearchBDII(filt=None, attr=None, host=None, base=None, selectionString="Glue"):
  """ Python wrapper for ldapsearch at bdii.

  :param filt: filter used to search ldap, default '' means select all
  :param attr: attributes returned by ldapsearch, default '' means return all
  :param host: host used for ldapsearch, default 'lcg-bdii.cern.ch:2170'
  :param base: search base, default 'Mds-Vo-name=local,o=grid'
  :param selectionString: only attribute lines starting with this prefix are kept
  :return: standard DIRAC answer with Value equal to the list of ldapsearch
           responses; each element is a dict with keys 'dn' (distinguished
           name), 'objectClass' (list of classes) and 'attr' (attribute dict)
  """
  if filt is None:
    filt = ''
  if attr is None:
    attr = ''
  if host is None:
    host = 'lcg-bdii.cern.ch:2170'
  if base is None:
    base = 'Mds-Vo-name=local,o=grid'

  if isinstance(attr, list):
    attr = ' '.join(attr)

  cmd = 'ldapsearch -x -LLL -o ldif-wrap=no -h %s -b %s "%s" %s' % (host, base, filt, attr)
  result = shellCall(0, cmd)

  response = []

  if not result['OK']:
    return result

  status = result['Value'][0]
  stdout = result['Value'][1]
  stderr = result['Value'][2]

  if status != 0:
    return S_ERROR(stderr)

  # Re-join lines that ldapsearch wrapped: continuation lines start with a space
  lines = []
  for line in stdout.split("\n"):
    if line.find(" ") == 0:
      lines[-1] += line.strip()
    else:
      lines.append(line.strip())

  record = None
  for line in lines:
    if line.find('dn:') == 0:
      # start of a new record
      record = {'dn': line.replace('dn:', '').strip(),
                'objectClass': [],
                'attr': {'dn': line.replace('dn:', '').strip()}}
      response.append(record)
      continue
    if record:
      if line.find('objectClass:') == 0:
        record['objectClass'].append(line.replace('objectClass:', '').strip())
        continue
      if line.find(selectionString) == 0:
        index = line.find(':')
        if index > 0:
          attr = line[:index]
          value = line[index + 1:].strip()
          # repeated attributes are collected into a list
          # ('in' replaces the deprecated dict.has_key)
          if attr in record['attr']:
            if isinstance(record['attr'][attr], list):
              record['attr'][attr].append(value)
            else:
              record['attr'][attr] = [record['attr'][attr], value]
          else:
            record['attr'][attr] = value

  return S_OK(response)
def ldapSite( site, attr = None, host = None ):
""" Site information from bdii.
:param site: Site as it defined in GOCDB or part of it with globing, for example: \UKI-*
:return: standard DIRAC answer with Value equals to list of sites.
Each site is dictionary which contains attributes of site.
For example result['Value'][0]['GlueSiteLocation']
"""
filt = '(GlueSiteUniqueID=%s)' % site
result = ldapsearchBDII( filt, attr, host )
if not result['OK']:
return result
sites = []
for value in result['Value']:
sites.append( value['attr'] )
return S_OK( sites )
def ldapCluster(ce, attr=None, host=None):
  """ CE (really SubCluster in the bdii definition) information from bdii.

  :param ce: ce or part of it with globbing, e.g. "ce0?.tier2.hep.manchester*"
  :return: standard DIRAC answer with Value equal to the list of clusters;
           each cluster is a dictionary of ce attributes, e.g.
           result['Value'][0]['GlueHostBenchmarkSI00']
  """
  result = ldapsearchBDII('(GlueClusterUniqueID=%s)' % ce, attr, host)
  if not result['OK']:
    return result
  return S_OK([entry['attr'] for entry in result['Value']])
def ldapCE(ce, attr=None, host=None):
  """ CE host information from bdii, selected through the cluster chunk key.

  :param ce: ce or part of it with globbing, e.g. "ce0?.tier2.hep.manchester*"
  :return: standard DIRAC answer with Value equal to the list of CE entries;
           each entry is a dictionary of ce attributes, e.g.
           result['Value'][0]['GlueHostBenchmarkSI00']
  """
  result = ldapsearchBDII('(GlueChunkKey=GlueClusterUniqueID=%s)' % ce, attr, host)
  if not result['OK']:
    return result
  return S_OK([entry['attr'] for entry in result['Value']])
def ldapCEState(ce, vo, attr=None, host=None):
  """ CEState information from bdii, restricted to CEs accessible to *vo*.

  :param ce: ce or part of it with globbing, e.g. "ce0?.tier2.hep.manchester*"
  :return: standard DIRAC answer with Value equal to the list of ceStates;
           each ceState is a dictionary of ce attributes, e.g.
           result['Value'][0]['GlueCEStateStatus']
  """
  voFilters = ''.join(['(GlueCEAccessControlBaseRule=VOMS:/%s/*)' % vo,
                       '(GlueCEAccessControlBaseRule=VOMS:/%s)' % vo,
                       '(GlueCEAccessControlBaseRule=VO:%s)' % vo])
  filt = '(&(GlueCEUniqueID=%s*)(|%s))' % (ce, voFilters)
  result = ldapsearchBDII(filt, attr, host)
  if not result['OK']:
    return result
  return S_OK([entry['attr'] for entry in result['Value']])
def ldapCEVOView(ce, vo, attr=None, host=None):
  """ CEVOView information from bdii, restricted to CEs accessible to *vo*.

  :param ce: ce or part of it with globbing, e.g. "ce0?.tier2.hep.manchester*"
  :return: standard DIRAC answer with Value equal to the list of ceVOViews;
           each ceVOView is a dictionary of ce attributes, e.g.
           result['Value'][0]['GlueCEStateRunningJobs']
  """
  voFilters = ''.join(['(GlueCEAccessControlBaseRule=VOMS:/%s/*)' % vo,
                       '(GlueCEAccessControlBaseRule=VOMS:/%s)' % vo,
                       '(GlueCEAccessControlBaseRule=VO:%s)' % vo])
  ceFilt = '(&(GlueCEUniqueID=%s*)(|%s))' % (ce, voFilters)
  result = ldapsearchBDII(ceFilt, attr, host)
  if not result['OK']:
    return result
  ces = result['Value']

  # one sub-search per CE, rooted at the CE's dn
  viewFilt = '(&(objectClass=GlueVOView)(|%s))' % (voFilters)
  views = []
  for ceEntry in ces:
    subResult = ldapsearchBDII(viewFilt, attr, host, base=ceEntry['dn'])
    if subResult['OK']:
      views.append(subResult['Value'][0]['attr'])  # pylint: disable=unsubscriptable-object
  return S_OK(views)
def ldapSE( site, vo, attr = None, host = None ):
  """ SE/SA information from bdii.

  :param site: site with globing, for example, "ce0?.tier2.hep.manchester*" or just "*"
  :param vo: VO name with globing, "*" if all VOs
  :return: standard DIRAC answer with Value equals to list of SE/SA merged items.
           Each SE is dictionary which contains attributes of SE and corresponding SA.
           For example result['Value'][0]['GlueSESizeFree']
  """
  # First collect the storage areas (SA) granting access to the VO
  voFilters = '(GlueSAAccessControlBaseRule=VOMS:/%s/*)' % vo
  voFilters += '(GlueSAAccessControlBaseRule=VOMS:/%s)' % vo
  voFilters += '(GlueSAAccessControlBaseRule=VO:%s)' % vo
  filt = '(&(objectClass=GlueSA)(|%s))' % voFilters
  result = ldapsearchBDII( filt, attr, host )
  if not result['OK']:
    return result
  sas = result['Value']
  saDict = {}
  seIDFilter = ''
  for sa in sas:
    # GlueChunkKey links the SA back to its parent SE
    chunk = sa['attr'].get('GlueChunkKey','')
    if chunk:
      seID = chunk.replace('GlueSEUniqueID=','')
      saDict[seID] = sa['attr']
      seIDFilter += '(GlueSEUniqueID=%s)' % seID
  if vo == "*":
    filt = '(&(objectClass=GlueSE)(GlueForeignKey=GlueSiteUniqueID=%s))' % site
  else:
    # restrict SEs to the ones whose SA matched the VO filter above
    filt = '(&(objectClass=GlueSE)(|%s)(GlueForeignKey=GlueSiteUniqueID=%s))' % ( seIDFilter, site )
  result = ldapsearchBDII( filt, attr, host )
  if not result['OK']:
    return result
  ses = result['Value']
  seDict = {}
  for se in ses:
    seID = se['attr']['GlueSEUniqueID']
    seDict[seID] = se['attr']
    siteName = se['attr']['GlueForeignKey'].replace('GlueSiteUniqueID=','')
    seDict[seID]['GlueSiteUniqueID'] = siteName
    # merge in the SA attributes when we have them
    if seID in saDict:
      seDict[seID].update( saDict[seID] )
  # materialize to a real list: dict.values() is only a view in Python 3,
  # while the documented contract promises a list
  seList = list( seDict.values() )
  return S_OK( seList )
def ldapSEAccessProtocol( se, attr = None, host = None ):
  """ Fetch access protocol records for a given SE from the BDII.

  :param se: SE name or glob pattern, e.g. "ce0?.tier2.hep.manchester*"
  :return: S_OK with Value being a list of access protocol attribute dictionaries
  """
  filt = '(&(objectClass=GlueSEAccessProtocol)(GlueChunkKey=GlueSEUniqueID=%s))' % se
  result = ldapsearchBDII( filt, attr, host )
  if not result['OK']:
    return result
  return S_OK( [entry['attr'] for entry in result['Value']] )
def ldapService( serviceID = '*', serviceType = '*', vo = '*', attr = None, host = None):
  """ Query service records from the BDII for a given VO.

  :param serviceID: service unique ID, globbing allowed
  :param serviceType: service type, e.g. SRM
  :param vo: VO name, '*' for any VO
  :return: S_OK with Value being a list of service attribute dictionaries
  """
  accessRules = [
      '(GlueServiceAccessControlBaseRule=VOMS:/%s/*)' % vo,
      '(GlueServiceAccessControlBaseRule=VOMS:/%s)' % vo,
      '(GlueServiceAccessControlBaseRule=VO:%s)' % vo,
  ]
  filt = '(&(GlueServiceType=%s)(GlueServiceUniqueID=%s)(|%s))' % ( serviceType, serviceID, ''.join( accessRules ) )
  result = ldapsearchBDII( filt, attr, host )
  if not result['OK']:
    return result
  return S_OK( [entry['attr'] for entry in result['Value']] )
def ldapSEVOInfo( vo, seID, attr = ( "GlueVOInfoPath", "GlueVOInfoAccessControlBaseRule" ), host = None ):
  """ VOInfo for a given SE

  :param vo: VO name
  :param seID: SE unique ID, globbing allowed
  :param attr: iterable of GlueVOInfo attribute names to retrieve
  :return: S_OK with Value being a list of VOInfo attribute dictionaries
  """
  filt = '(GlueChunkKey=GlueSEUniqueID=%s)' % seID
  filt += '(GlueVOInfoAccessControlBaseRule=VO:%s*)' % vo
  filt += '(objectClass=GlueVOInfo)'
  filt = '(&%s)' % filt
  # The default is now an immutable tuple (avoids the shared mutable default
  # argument pitfall); convert to a fresh list so downstream code sees the
  # same type as before.
  result = ldapsearchBDII( filt, list( attr ) if attr is not None else None, host )
  if not result['OK']:
    return result
  voInfo = []
  for value in result['Value']:
    voInfo.append( value['attr'] )
  return S_OK( voInfo )
def getBdiiCEInfo(vo, host=None, glue2=False):
  """ Get information for all the CEs/queues for a given VO

  :param str vo: BDII VO name
  :param str host: url to query for information
  :param bool glue2: if True query the GLUE2 information schema
  :return: result structure: result['Value'][siteID]['CEs'][ceID]['Queues'][queueName]. For
           each siteID, ceID, queueName all the BDII/Glue parameters are retrieved
  """
  if glue2:
    return Glue2.getGlue2CEInfo(vo, host=host)
  # One CEState record per queue; '' matches any CE name
  result = ldapCEState( '', vo, host = host )
  if not result['OK']:
    return result
  siteDict = {}
  ceDict = {}
  queueDict = {}
  for queue in result['Value']:
    queue = dict(queue)
    clusterID = queue.get('GlueForeignKey','').replace('GlueClusterUniqueID=','')
    # the CE host name is the part of the unique ID before the port
    ceID = queue.get('GlueCEUniqueID','').split(':')[0]
    queueDict[queue['GlueCEUniqueID']] = queue
    queueDict[queue['GlueCEUniqueID']]['CE'] = ceID
    if not ceID in ceDict:
      # first time we see this CE: fetch cluster, CE and site records
      result = ldapCluster( clusterID, host = host )
      if not result['OK']:
        continue
      if not result['Value']:
        continue
      ce = result['Value'][0]
      ceDict[ceID] = ce
      fKey = ce['GlueForeignKey'] #pylint: disable=unsubscriptable-object
      siteID = ''
      for key in fKey:
        if key.startswith('GlueSiteUniqueID'):
          siteID = key.replace('GlueSiteUniqueID=','')
      ceDict[ceID]['Site'] = siteID
      # merge the GlueCE attributes on top of the cluster attributes
      result = ldapCE( clusterID, host = host )
      ce = {}
      if result['OK'] and result['Value']:
        ce = result['Value'][0]
      ceDict[ceID].update( ce )
      if not siteID in siteDict:
        site = {}
        result = ldapSite( siteID, host = host )
        if result['OK'] and result['Value']:
          site = result['Value'][0]
        siteDict[siteID] = site
  # attach CEs to their sites
  for ceID in ceDict:
    siteID = ceDict[ceID]['Site']
    if siteID in siteDict:
      siteDict[siteID].setdefault('CEs',{})
      siteDict[siteID]['CEs'][ceID] = ceDict[ceID]
  # attach queues to their CEs; queue name is the part after "<port>/"
  for queueID in queueDict:
    ceID = queueDict[queueID]['CE']
    siteID = ceDict[ceID]['Site']
    siteDict[siteID]['CEs'][ceID].setdefault('Queues',{})
    queueName = re.split( r':\d+/', queueDict[queueID]['GlueCEUniqueID'] )[1]
    siteDict[siteID]['CEs'][ceID]['Queues'][queueName] = queueDict[queueID]
  return S_OK( siteDict )
def getBdiiSEInfo( vo, host = None ):
  """ Get information for all the SEs for a given VO

  :param vo: BDII VO name
  :return: result structure: result['Value'][siteID]['SEs'][seID]. For
           each siteID, seID all the BDII/Glue SE/SA parameters are retrieved
  """
  result = ldapSE( '*', vo, host = host )
  if not result['OK']:
    return result
  ses = result['Value']
  # map SE unique ID -> VO-specific storage path taken from the VOInfo DN
  pathDict = {}
  result = ldapSEVOInfo( vo, '*' )
  if result['OK']:
    for entry in result['Value']:
      voPath = entry['GlueVOInfoPath']
      seID = ''
      for en in entry['dn'].split(','):
        if en.startswith( 'GlueSEUniqueID=' ):
          seID = en.replace( 'GlueSEUniqueID=', '' )
          break
      if seID:
        pathDict[seID] = voPath
  siteDict = {}
  for se in ses:
    siteName = se['GlueSiteUniqueID']
    siteDict.setdefault( siteName, { "SEs": {} } )
    seID = se['GlueSEUniqueID']
    siteDict[siteName]["SEs"][seID] = se
    result = ldapSEAccessProtocol( seID, host = host )
    siteDict[siteName]["SEs"][seID]['AccessProtocols'] = {}
    if result['OK']:
      for entry in result['Value']:
        apType = entry['GlueSEAccessProtocolType']
        if apType in siteDict[siteName]["SEs"][seID]['AccessProtocols']:
          # duplicate protocol type: disambiguate with a ".<n>" suffix
          count = 0
          for p in siteDict[siteName]["SEs"][seID]['AccessProtocols']:
            if p.startswith( apType+'.' ):
              count += 1
          apType = '%s.%d' % ( apType, count + 1 )
          siteDict[siteName]["SEs"][seID]['AccessProtocols'][apType] = entry
        else:
          siteDict[siteName]["SEs"][seID]['AccessProtocols'][apType] = entry
    else:
      continue
    # VOPath is only attached when the access protocol query succeeded
    if seID in pathDict:
      siteDict[siteName]["SEs"][seID]['VOPath'] = pathDict[seID]
  return S_OK( siteDict )
|
andresailer/DIRAC
|
Core/Utilities/Grid.py
|
Python
|
gpl-3.0
| 15,874
|
[
"DIRAC"
] |
f3cac8a3db307ff3e51bf893255d839dc0dc8be0d081a3ff031d674a50a5cc0a
|
# -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
    # Python 3: all str are unicode already, no conversion needed.
    sixu = lambda s: s
else:
    # Python 2: decode escape sequences into a unicode object.
    sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
    # Signature line is kept verbatim by the parser.
    signature = doc['Signature']
    assert signature.startswith('numpy.multivariate_normal(')
    assert signature.endswith('spam=None)')
def test_summary():
    summary = doc['Summary']
    assert summary[0].startswith('Draw values')
    assert summary[-1].endswith('covariance.')
def test_extended_summary():
    first_line = doc['Extended Summary'][0]
    assert first_line.startswith('The multivariate normal')
def test_parameters():
    params = doc['Parameters']
    assert_equal(len(params), 3)
    assert_equal([name for name, _, _ in params], ['mean','cov','shape'])
    arg, arg_type, desc = params[1]
    assert_equal(arg_type, '(N, N) ndarray')
    assert desc[0].startswith('Covariance matrix')
    # math directive body is kept (indented) in the description lines
    assert params[0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
    other = doc['Other Parameters']
    assert_equal(len(other), 1)
    assert_equal([name for name, _, _ in other], ['spam'])
    arg, arg_type, desc = other[0]
    assert_equal(arg_type, 'parrot')
    assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
    returns = doc['Returns']
    assert_equal(len(returns), 2)
    # first return value: named with a type
    arg, arg_type, desc = returns[0]
    assert_equal(arg, 'out')
    assert_equal(arg_type, 'ndarray')
    assert desc[0].startswith('The drawn samples')
    assert desc[-1].endswith('distribution.')
    # second return value: anonymous (type only, no name)
    arg, arg_type, desc = returns[1]
    assert_equal(arg, 'list of str')
    assert_equal(arg_type, '')
    assert desc[0].startswith('This is not a real')
    assert desc[-1].endswith('anonymous return values.')
def test_notes():
    notes = doc['Notes']
    assert notes[0].startswith('Instead')
    assert notes[-1].endswith('definite.')
    assert_equal(len(notes), 17)
def test_references():
    refs = doc['References']
    assert refs[0].startswith('..')
    assert refs[-1].endswith('2001.')
def test_examples():
    examples = doc['Examples']
    assert examples[0].startswith('>>>')
    assert examples[-1].endswith('True]')
def test_index():
    index = doc['index']
    assert_equal(index['default'], 'random')
    assert_equal(len(index), 2)
    assert_equal(len(index['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
    """Compare two text blocks line by line, ignoring blank lines and
    trailing whitespace; raise AssertionError on the first difference.

    Both inputs are dedented first, so common leading indentation is
    irrelevant.  Previously a length mismatch either crashed with an
    IndexError (``a`` longer) or passed silently (``a`` shorter); now it
    raises a clear AssertionError in both cases.
    """
    a = [l.rstrip() for l in textwrap.dedent(a).split('\n') if l.strip()]
    b = [l.rstrip() for l in textwrap.dedent(b).split('\n') if l.strip()]
    if len(a) != len(b):
        raise AssertionError("Different number of non-blank lines: %d != %d"
                             % (len(a), len(b)))
    for n, (line_a, line_b) in enumerate(zip(a, b)):
        if line_a != line_b:
            raise AssertionError("Lines %s of a and b differ: "
                                 "\n>>> %s\n<<< %s\n" %
                                 (n, line_a, line_b))
def test_str():
    # The plain-text rendering of the parsed docstring must reproduce every
    # section (See Also rendered as links, sections in canonical order).
    non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
    Mean of the N-dimensional distribution.
    .. math::
       (1+2+3)/3
cov : (N, N) ndarray
    Covariance matrix of the distribution.
shape : tuple of ints
    Given a shape of, for example, (m,n,k), m*n*k samples are
    generated, and packed in an m-by-n-by-k arrangement. Because
    each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
    The drawn samples, arranged according to `shape`. If the
    shape given is (m,n,...), then the shape of `out` is is
    (m,n,...,N).
    In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
    value drawn from the distribution.
list of str
    This is not a real return value. It exists to test
    anonymous return values.
Other Parameters
----------------
spam : parrot
    A parrot off its mortal coil.
Raises
------
RuntimeError
    Some error
Warns
-----
RuntimeWarning
    Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
    relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
       Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
       2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
   :refguide: random;distributions, random;gauss""")
def test_sphinx_str():
    # Sphinx rendering uses field lists, rubrics and directives instead of
    # underlined section headers.
    sphinx_doc = SphinxDocString(doc_txt)
    non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
   single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
    **mean** : (N,) ndarray
        Mean of the N-dimensional distribution.
        .. math::
           (1+2+3)/3
    **cov** : (N, N) ndarray
        Covariance matrix of the distribution.
    **shape** : tuple of ints
        Given a shape of, for example, (m,n,k), m*n*k samples are
        generated, and packed in an m-by-n-by-k arrangement. Because
        each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
    **out** : ndarray
        The drawn samples, arranged according to `shape`. If the
        shape given is (m,n,...), then the shape of `out` is is
        (m,n,...,N).
        In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
        value drawn from the distribution.
    list of str
        This is not a real return value. It exists to test
        anonymous return values.
:Other Parameters:
    **spam** : parrot
        A parrot off its mortal coil.
:Raises:
    **RuntimeError**
        Some error
:Warns:
    **RuntimeWarning**
        Some warning
.. warning::
    Certain warnings apply.
.. seealso::
    :obj:`some`, :obj:`other`, :obj:`funcs`
    :obj:`otherfunc`
        relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
       Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
       2nd ed., Wiley, 2001.
.. only:: latex
   [1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
# Fixture: parameters whose descriptions fit on the type line only.
doc2 = NumpyDocString("""
    Returns array of indices of the maximum values of along the given axis.
    Parameters
    ----------
    a : {array_like}
        Array to look in.
    axis : {None, integer}
        If None, the index is into the flattened array, otherwise along
        the specified axis""")

def test_parameters_without_extended_description():
    assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
    # Stars in the signature must be backslash-escaped so Sphinx does not
    # interpret them as emphasis markup.  Use a raw string for the expected
    # value: '\*' in a normal string literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxWarning in newer Pythons) with the same value.
    signature = str(doc3).split('\n')[0]
    assert_equal(signature, r'my_signature(\*params, \*\*kwds)')
# Fixture: summary only, no extended summary section at all.
doc4 = NumpyDocString(
    """a.conj()
    Return an array with all complex-valued elements conjugated.""")

def test_empty_extended_summary():
    assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
    raises = doc5['Raises']
    assert_equal(len(raises), 1)
    name, _, desc = raises[0]
    assert_equal(name, 'LinAlgException')
    assert_equal(desc, ['If array is singular.'])
def test_warns():
    warns = doc5['Warns']
    assert_equal(len(warns), 1)
    name, _, desc = warns[0]
    assert_equal(name, 'SomeWarning')
    assert_equal(desc, ['If needed'])
def test_see_also():
    # The See Also parser must handle plain names, names with descriptions,
    # dotted names, multi-line descriptions, explicit roles (:meth:, :obj:,
    # :class:) and continuation lines.
    doc6 = NumpyDocString(
    """
    z(x,theta)
    See Also
    --------
    func_a, func_b, func_c
    func_d : some equivalent func
    foo.func_e : some other func over
        multiple lines
    func_f, func_g, :meth:`func_h`, func_j,
    func_k
    :obj:`baz.obj_q`
    :class:`class_j`: fubar
        foobar
    """)
    assert len(doc6['See Also']) == 12
    for func, desc, role in doc6['See Also']:
        # bare references carry no description
        if func in ('func_a', 'func_b', 'func_c', 'func_f',
                    'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
            assert(not desc)
        else:
            assert(desc)
        # explicit roles are preserved; plain names get role None
        if func == 'func_h':
            assert role == 'meth'
        elif func == 'baz.obj_q':
            assert role == 'obj'
        elif func == 'class_j':
            assert role == 'class'
        else:
            assert role is None
        if func == 'func_d':
            assert desc == ['some equivalent func']
        elif func == 'foo.func_e':
            assert desc == ['some other func over', 'multiple lines']
        elif func == 'class_j':
            assert desc == ['fubar', 'foobar']
def test_see_also_print():
    # FunctionDoc(role=...) must render See Also entries with the given role.
    class Dummy(object):
        """
        See Also
        --------
        func_a, func_b
        func_c : some relationship
            goes here
        func_d
        """
        pass
    obj = Dummy()
    s = str(FunctionDoc(obj, role='func'))
    assert(':func:`func_a`, :func:`func_b`' in s)
    assert('    some relationship' in s)
    assert(':func:`func_d`' in s)
# Fixture: the summary does not start on the first line of the docstring.
doc7 = NumpyDocString("""
        Doc starts on second line.
        """)

def test_empty_first_line():
    assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
    # Rendering a docstring with no summary line must not raise.
    rendered = SphinxDocString("""
    Parameters
    ----------""")
    str(rendered)
def test_unicode():
    # Non-ASCII docstrings must parse and the summary must be a native str.
    doc = SphinxDocString("""
    öäöäöäöäöåååå
    öäöäöäööäååå
    Parameters
    ----------
    ååå : äää
        ööö
    Returns
    -------
    ååå : ööö
        äää
    """)
    assert isinstance(doc['Summary'][0], str)
    assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
    # With use_plots enabled, matplotlib examples are wrapped in a plot
    # directive -- but only when one is not already present.
    cfg = dict(use_plots=True)
    doc = SphinxDocString("""
    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> plt.plot([1,2,3],[4,5,6])
    >>> plt.show()
    """, config=cfg)
    assert 'plot::' in str(doc), str(doc)
    doc = SphinxDocString("""
    Examples
    --------
    .. plot::
        import matplotlib.pyplot as plt
        plt.plot([1,2,3],[4,5,6])
        plt.show()
    """, config=cfg)
    # an existing plot directive must not be duplicated
    assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
    # show_class_members controls whether methods/properties are listed in
    # the rendered class docstring (autosummary for the Sphinx variant).
    class Dummy(object):
        """
        Dummy class.
        """
        def spam(self, a, b):
            """Spam\n\nSpam spam."""
            pass
        def ham(self, c, d):
            """Cheese\n\nNo cheese."""
            pass
        @property
        def spammity(self):
            """Spammity index"""
            return 0.95
        class Ignorable(object):
            """local class, to be ignored"""
            pass
    for cls in (ClassDoc, SphinxClassDoc):
        doc = cls(Dummy, config=dict(show_class_members=False))
        assert 'Methods' not in str(doc), (cls, str(doc))
        assert 'spam' not in str(doc), (cls, str(doc))
        assert 'ham' not in str(doc), (cls, str(doc))
        assert 'spammity' not in str(doc), (cls, str(doc))
        assert 'Spammity index' not in str(doc), (cls, str(doc))
        doc = cls(Dummy, config=dict(show_class_members=True))
        assert 'Methods' in str(doc), (cls, str(doc))
        assert 'spam' in str(doc), (cls, str(doc))
        assert 'ham' in str(doc), (cls, str(doc))
        assert 'spammity' in str(doc), (cls, str(doc))
        if cls is SphinxClassDoc:
            assert '.. autosummary::' in str(doc), str(doc)
        else:
            assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
    # Duplicate function signatures occur e.g. in ufuncs, when the
    # automatic mechanism adds one, and a more detailed comes from the
    # docstring itself.  The docstring's (last) signature must win.
    parsed = NumpyDocString(
        """
    z(x1, x2)
    z(a, theta)
    """)
    assert parsed['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
    # Plain-text class rendering: sections reordered (Examples before
    # Attributes/Methods) and a trailing index directive appended.
    doc = ClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo
    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Aaa.
    jac : callable ``jac(t, y, *jac_args)``
        Bbb.
    Examples
    --------
    For usage examples, see `ode`.
    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.
    Methods
    -------
    a
    b
    c
    .. index::
    """)
def test_class_members_doc_sphinx():
    # Sphinx class rendering: field lists plus rubrics; Attributes and
    # Methods are rendered as simple tables.
    doc = SphinxClassDoc(None, class_doc_txt)
    non_blank_line_by_line_compare(str(doc),
    """
    Foo
    :Parameters:
        **f** : callable ``f(t, y, *f_args)``
            Aaa.
        **jac** : callable ``jac(t, y, *jac_args)``
            Bbb.
    .. rubric:: Examples
    For usage examples, see `ode`.
    .. rubric:: Attributes
    === ==========
    t   (float) Current time.
    y   (ndarray) Current variable values.
    === ==========
    .. rubric:: Methods
    === ==========
    a
    b
    c
    === ==========
    """)
if __name__ == "__main__":
    # Allow running this test module directly through nose.
    import nose
    nose.run()
|
AdaptiveApplications/carnegie
|
tarc_bus_locator_client/numpy-1.8.1/doc/sphinxext/numpydoc/tests/test_docscrape.py
|
Python
|
mit
| 18,326
|
[
"Gaussian"
] |
472280f74ebd9cca80d80caa85764c70465a63e5b9a18c07112c276461c05116
|
"""
Author: Remi Lafage <remi.lafage@onera.fr>
This package is distributed under New BSD license.
Mixture of Experts
"""
# TODO : support for best number of clusters
# TODO : implement verbosity 'print_global'
# TODO : documentation
import numpy as np
import warnings
# Compatibility shim: scikit-learn < 0.20.0 shipped the Gaussian mixture
# model as GMM; newer versions call it GaussianMixture.
OLD_SKLEARN = False
try:  # scikit-learn < 0.20.0
    from sklearn.mixture import GMM as GaussianMixture
    OLD_SKLEARN = True
except ImportError:  # modern scikit-learn; a bare except would hide real errors
    from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
from smt.utils.options_dictionary import OptionsDictionary
from smt.applications.application import SurrogateBasedApplication
from smt.utils.misc import compute_rms_error
from smt.surrogate_models.surrogate_model import SurrogateModel
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Surrogate model names that may serve as local experts in the mixture.
# Only the ones actually registered in SurrogateBasedApplication are used
# (see MOE.AVAILABLE_EXPERTS below).
MOE_EXPERT_NAMES = [
    "KRG",
    "KPLS",
    "KPLSK",
    "LS",
    "QP",
    "RBF",
    "IDW",
    "RMTB",
    "RMTC",
]
class MOESurrogateModel(SurrogateModel):
    """Wrapper class exposing MOE features as a SurrogateModel subclass."""
    name = "MOE"
    def _initialize(self):
        super(MOESurrogateModel, self)._initialize()
        # Copy over options from MOE object so they can be set through the
        # SurrogateModel interface; non-None defaults are copied as values.
        self.moe = moe = MOE()
        for key, data in moe.options._declared_entries.items():
            self.options._declared_entries[key] = data
            value = moe.options[key]
            if value is not None:
                self.options[key] = value
    def _setup(self):
        # Push wrapper option values down to the wrapped MOE instance.
        for key in self.moe.options._declared_entries:
            if key in self.options:
                self.moe.options[key] = self.options[key]
        # self.supports['derivatives'] = self.options['derivatives_support'] # Interface not yet implemented
        self.supports["variances"] = self.options["variances_support"]
    def train(self):
        # Fall back to the xt/yt options when no training points were set.
        if len(self.training_points) == 0:
            xt = self.options["xt"]
            yt = self.options["yt"]
            self.set_training_values(xt, yt)
        super(MOESurrogateModel, self).train()
    def _train(self):
        # Forward all registered training sets to the wrapped MOE and train it.
        self._setup()
        for name in self.training_points:
            xt, yt = self.training_points[name][0]
            self.moe.set_training_values(xt, yt, name=name)
        self.moe.train()
    def _predict_values(self, x: np.ndarray) -> np.ndarray:
        # Delegate prediction to the wrapped MOE.
        return self.moe.predict_values(x)
    def _predict_variances(self, x: np.ndarray) -> np.ndarray:
        # Delegate variance prediction to the wrapped MOE.
        return self.moe.predict_variances(x)
class MOE(SurrogateBasedApplication):
    # Names of experts available to be part of the mixture
    # (restricted to surrogate types actually registered in the application)
    AVAILABLE_EXPERTS = [
        name
        for name in MOE_EXPERT_NAMES
        if name in SurrogateBasedApplication._surrogate_type
    ]
    def _initialize(self):
        """Declare all MOE options and reset the internal training state."""
        super(MOE, self)._initialize()
        declare = self.options.declare
        declare("xt", None, types=np.ndarray, desc="Training inputs")
        declare("yt", None, types=np.ndarray, desc="Training outputs")
        declare(
            "ct",
            None,
            types=np.ndarray,
            desc="Training derivative outputs used for clustering",
        )
        declare("xtest", None, types=np.ndarray, desc="Test inputs")
        declare("ytest", None, types=np.ndarray, desc="Test outputs")
        declare("n_clusters", 2, types=int, desc="Number of clusters")
        declare(
            "smooth_recombination",
            True,
            types=bool,
            desc="Continuous cluster transition",
        )
        declare(
            "heaviside_optimization",
            False,
            types=bool,
            desc="Optimize Heaviside scaling factor when smooth recombination is used",
        )
        declare(
            "derivatives_support",
            False,
            types=bool,
            desc="Use only experts that support derivatives prediction",
        )
        declare(
            "variances_support",
            False,
            types=bool,
            desc="Use only experts that support variance prediction",
        )
        declare(
            "allow",
            [],
            desc="Names of allowed experts to be possibly part of the mixture. "
            "Empty list corresponds to all surrogates allowed.",
        )
        declare(
            "deny",
            [],
            desc="Names of forbidden experts",
        )
        # Training state, populated by set_training_values()/train()
        self.x = None
        self.y = None
        self.c = None
        self.n_clusters = None
        self.smooth_recombination = None
        self.heaviside_optimization = None
        self.heaviside_factor = 1.0
        # dictionary {name: class} of possible experts wrt to options
        self._enabled_expert_types = self._get_enabled_expert_types()
        # list of experts after MOE training
        self._experts = []
        self.xt = None
        self.yt = None
@property
def enabled_experts(self):
"""
Returns the names of enabled experts after taking into account MOE options
"""
self._enabled_expert_types = self._get_enabled_expert_types()
return list(self._enabled_expert_types.keys())
def set_training_values(self, xt, yt, name=None):
"""
Set training data (values).
Parameters
----------
xt : np.ndarray[nt, nx] or np.ndarray[nt]
The input values for the nt training points.
yt : np.ndarray[nt, ny] or np.ndarray[nt]
The output values for the nt training points.
name : str or None
An optional label for the group of training points being set.
This is only used in special situations (e.g., multi-fidelity applications).
"""
self.xt = xt
self.yt = yt
def train(self):
"""
Supports for surrogate model API.
Build and train the mixture of experts surrogate.
"""
if self.xt is not None and self.yt is not None:
# set_training_values has been called
self.x = x = self.xt
self.y = y = self.yt
else:
self.x = x = self.options["xt"]
self.y = y = self.options["yt"]
self.c = c = self.options["ct"]
if not self.c:
self.c = c = y
self.n_clusters = self.options["n_clusters"]
self.smooth_recombination = self.options["smooth_recombination"]
self.heaviside_optimization = (
self.options["smooth_recombination"]
and self.options["heaviside_optimization"]
)
self.heaviside_factor = 1.0
self._check_inputs()
self._enabled_expert_types = self._get_enabled_expert_types()
self._experts = []
# Set test values and trained values
xtest = self.options["xtest"]
ytest = self.options["ytest"]
values = np.c_[x, y, c]
test_data_present = xtest is not None and ytest is not None
if test_data_present:
self.test_values = np.c_[xtest, ytest]
self.training_values = values
else:
self.test_values, self.training_values = self._extract_part(values, 10)
self.ndim = nx = x.shape[1]
xt = self.training_values[:, 0:nx]
yt = self.training_values[:, nx : nx + 1]
ct = self.training_values[:, nx + 1 :]
# Clustering
self.cluster = GaussianMixture(
n_components=self.n_clusters, covariance_type="full", n_init=20
)
self.cluster.fit(np.c_[xt, ct])
if not self.cluster.converged_:
raise Exception("Clustering not converged")
# Choice of the experts and training
self._fit(xt, yt, ct)
xtest = self.test_values[:, 0:nx]
ytest = self.test_values[:, nx : nx + 1]
# Heaviside factor
if self.heaviside_optimization and self.n_clusters > 1:
self.heaviside_factor = self._find_best_heaviside_factor(xtest, ytest)
print("Best Heaviside factor = {}".format(self.heaviside_factor))
self.distribs = self._create_clusters_distributions(self.heaviside_factor)
if not test_data_present:
# if we have used part of data to validate, fit on overall data
self._fit(x, y, c, new_model=False)
def predict_values(self, x):
"""
Predict the output values at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
Returns
-------
y : np.ndarray[nt, ny]
Output values at the prediction points.
"""
if self.smooth_recombination:
y = self._predict_smooth_output(x)
else:
y = self._predict_hard_output(x)
return y
def predict_variances(self, x):
"""
Predict the output variances at a set of points.
Parameters
----------
x : np.ndarray[nt, nx] or np.ndarray[nt]
Input values for the prediction points.
Returns
-------
y : np.ndarray[nt, ny]
Output variances at the prediction points.
"""
if not self.options["variances_support"]:
raise RuntimeError(
"Experts not selected taking variance support into account: use variances_support=True "
"when creating MOE"
)
if self.smooth_recombination:
y = self._predict_smooth_output(x, output_variances=True)
else:
y = self._predict_hard_output(x, output_variances=True)
return y
def _check_inputs(self):
"""
Check the input data given by the client is correct.
raise Value error with relevant message
"""
if self.x is None or self.y is None:
raise ValueError("check x and y values")
if self.x.shape[0] != self.y.shape[0]:
raise ValueError(
"The number of input points %d doesn t match with the number of output points %d."
% (self.x.shape[0], self.y.shape[0])
)
if self.y.shape[0] != self.c.shape[0]:
raise ValueError(
"The number of output points %d doesn t match with the number of criterion weights %d."
% (self.y.shape[0], self.c.shape[0])
)
# choice of number of cluster
max_n_clusters = int(len(self.x) / 10) + 1
if self.n_clusters > max_n_clusters:
print("Number of clusters should be inferior to {0}".format(max_n_clusters))
raise ValueError(
"The number of clusters is too high considering the number of points"
)
def _get_enabled_expert_types(self):
"""
Select relevant surrogate models (experts) regarding MOE feature options
"""
prototypes = {
name: smclass()
for name, smclass in self._surrogate_type.items()
if name in MOE_EXPERT_NAMES
}
if self.options["derivatives_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["derivatives"]
}
if self.options["variances_support"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if proto.supports["variances"]
}
if self.options["allow"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if name in self.options["allow"]
}
if self.options["deny"]:
prototypes = {
name: proto
for name, proto in prototypes.items()
if name not in self.options["deny"]
}
if not prototypes:
ValueError(
"List of possible experts is empty: check support, allow and deny options wrt"
)
return {name: self._surrogate_type[name] for name in prototypes}
    def _fit(self, x_trained, y_trained, c_trained, new_model=True):
        """
        Find the best model for each cluster (clustering already done) and train it if new_model is True
        otherwise train the points given (choice of best models by cluster already done)

        Arguments
        ---------
        - x_trained: array_like
            Input training samples
        - y_trained: array_like
            Output training samples
        - c_trained: array_like
            Clustering training samples
        - new_model : bool (optional)
            Set true to search the best local model
        """
        # Rebuild the per-cluster membership distributions with the current factor.
        self.distribs = self._create_clusters_distributions(self.heaviside_factor)
        # Hard-assign each training point to a cluster in the joint (x, c) space.
        cluster_classifier = self.cluster.predict(np.c_[x_trained, c_trained])
        # sort trained_values for each cluster
        clusters = self._cluster_values(np.c_[x_trained, y_trained], cluster_classifier)
        # find model for each cluster
        for i in range(self.n_clusters):
            if new_model:
                # NOTE(review): appends to self._experts — assumes the list is
                # empty (or meant to be extended) on entry; confirm callers.
                model = self._find_best_model(clusters[i])
                self._experts.append(model)
            else:  # retrain the experts with the
                # NOTE(review): rebinds the x_trained/y_trained parameters with
                # this cluster's subset; an empty cluster would make the
                # slicing below fail — presumably callers guarantee every
                # cluster is non-empty. TODO confirm.
                trained_values = np.array(clusters[i])
                x_trained = trained_values[:, 0 : self.ndim]
                y_trained = trained_values[:, self.ndim]
                self._experts[i].set_training_values(x_trained, y_trained)
                self._experts[i].train()
def _predict_hard_output(self, x, output_variances=False):
"""
This method predicts the output of a x samples for a
discontinuous recombination.
Arguments
---------
- x : array_like
x samples
Return
------
- predicted_values : array_like
predicted output
"""
predicted_values = []
probs = self._proba_cluster(x)
sort_cluster = np.apply_along_axis(np.argmax, 1, probs)
for i in range(len(sort_cluster)):
model = self._experts[sort_cluster[i]]
if output_variances:
predicted_values.append(model.predict_variances(np.atleast_2d(x[i]))[0])
else:
predicted_values.append(model.predict_values(np.atleast_2d(x[i]))[0])
predicted_values = np.array(predicted_values)
return predicted_values
def _predict_smooth_output(self, x, distribs=None, output_variances=False):
"""
This method predicts the output of x with a smooth recombination.
Arguments:
----------
- x: np.ndarray
x samples
- distribs: distribution list (optional)
array of membership distributions (use self ones if None)
Returns
-------
- predicted_values : array_like
predicted output
"""
predicted_values = []
if distribs is None:
distribs = self.distribs
sort_proba = self._proba_cluster(x, distribs)
for i in range(len(sort_proba)):
recombined_value = 0
for j in range(len(self._experts)):
if output_variances:
expert_value = (
self._experts[j].predict_variances(np.atleast_2d(x[i]))[0]
* sort_proba[i][j] ** 2
)
else:
expert_value = (
self._experts[j].predict_values(np.atleast_2d(x[i]))[0]
* sort_proba[i][j]
)
recombined_value += expert_value
predicted_values.append(recombined_value)
predicted_values = np.array(predicted_values)
return predicted_values
@staticmethod
def _extract_part(values, quantile):
"""
Divide the values list in quantile parts to return one part
of (num/quantile) values out of num values.
Arguments
----------
- values : np.ndarray[num, -1]
the values list to extract from
- quantile : int
the quantile
Returns
-------
- extracted, remaining : np.ndarray, np.ndarray
the extracted values part, the remaining values
"""
num = values.shape[0]
indices = np.arange(0, num, quantile) # uniformly distributed
mask = np.zeros(num, dtype=bool)
mask[indices] = True
return values[mask], values[~mask]
def _find_best_model(self, clustered_values):
"""
Find the best model which minimizes the errors.
Arguments :
------------
- clustered_values: array_like
training samples [[X1,X2, ..., Xn, Y], ... ]
Returns :
---------
- model : surrogate model
best trained surrogate model
"""
dim = self.ndim
clustered_values = np.array(clustered_values)
scores = {}
sms = {}
# validation with 10% of the training data
test_values, training_values = self._extract_part(clustered_values, 10)
for name, sm_class in self._enabled_expert_types.items():
kwargs = {}
if name in ["RMTB", "RMTC"]:
# Note: RMTS checks for xlimits,
# we take limits on all x (not just the trained_values ones) as
# the surrogate is finally re-trained on the whole x set.
xlimits = np.zeros((dim, 2))
for i in range(dim):
xlimits[i][0] = np.amin(self.x[:, i])
xlimits[i][1] = np.amax(self.x[:, i])
kwargs = {"xlimits": xlimits}
sm = sm_class(**kwargs)
sm.options["print_global"] = False
sm.set_training_values(training_values[:, 0:dim], training_values[:, dim])
sm.train()
expected = test_values[:, dim]
actual = sm.predict_values(test_values[:, 0:dim]).reshape(-1)
l_two = np.linalg.norm(expected - actual, 2)
# l_two_rel = l_two / np.linalg.norm(expected, 2)
# mse = (l_two**2) / len(expected)
# rmse = mse ** 0.5
scores[sm.name] = l_two
print(sm.name, l_two)
sms[sm.name] = sm
best_name = None
best_score = None
for name, rmse in scores.items():
if best_score is None or rmse < best_score:
best_name, best_score = name, rmse
print("Best expert = {}".format(best_name))
return sms[best_name]
def _find_best_heaviside_factor(self, x, y):
"""
Find the best heaviside factor to smooth approximated values.
Arguments
---------
- x: array_like
input training samples
- y: array_like
output training samples
Returns
-------
hfactor : float
best heaviside factor wrt given samples
"""
heaviside_factor = 1.0
if self.n_clusters > 1:
hfactors = np.linspace(0.1, 2.1, num=21)
errors = []
for hfactor in hfactors:
distribs = self._create_clusters_distributions(hfactor)
ypred = self._predict_smooth_output(x, distribs)
err_rel = np.linalg.norm(y - ypred, 2) / np.linalg.norm(y, 2)
errors.append(err_rel)
if max(errors) < 1e-6:
heaviside_factor = 1.0
else:
min_error_index = errors.index(min(errors))
heaviside_factor = hfactors[min_error_index]
return heaviside_factor
"""
Functions related to clustering
"""
def _create_clusters_distributions(self, heaviside_factor=1.0):
"""
Create an array of frozen multivariate normal distributions (distribs).
Arguments
---------
- heaviside_factor: float
Heaviside factor used to scale covariance matrices
Returns:
--------
- distribs: array_like
Array of frozen multivariate normal distributions
with clusters means and covariances
"""
distribs = []
dim = self.ndim
means = self.cluster.means_
if OLD_SKLEARN:
cov = heaviside_factor * self.cluster.covars_
else:
cov = heaviside_factor * self.cluster.covariances_
for k in range(self.n_clusters):
meansk = means[k][0:dim]
covk = cov[k][0:dim, 0:dim]
mvn = multivariate_normal(meansk, covk, allow_singular=True)
distribs.append(mvn)
return distribs
def _cluster_values(self, values, classifier):
"""
Classify values regarding the given classifier info.
Arguments
---------
- values: array_like
values to cluster
- classifier: array_like
Cluster corresponding to each point of value in the same order
Returns
-------
- clustered: array_like
Samples sort by cluster
Example:
---------
values:
[[ 1.67016597e-01 5.42927264e-01 9.25779645e+00]
[ 5.20618344e-01 9.88223010e-01 1.51596837e+02]
[ 6.09979830e-02 2.66824984e-01 1.17890707e+02]
[ 9.62783472e-01 7.36979149e-01 7.37641826e+01]
[ 3.01194132e-01 8.58084068e-02 4.88696602e+01]
[ 6.40398203e-01 6.91090937e-01 8.91963162e+01]
[ 7.90710374e-01 1.40464471e-01 1.89390766e+01]
[ 4.64498124e-01 3.61009635e-01 1.04779656e+01]]
cluster_classifier:
[1 0 0 2 1 2 1 1]
clustered:
[[array([ 0.52061834, 0.98822301, 151.59683723]),
array([ 6.09979830e-02, 2.66824984e-01, 1.17890707e+02])]
[array([ 0.1670166 , 0.54292726, 9.25779645]),
array([ 0.30119413, 0.08580841, 48.86966023]),
array([ 0.79071037, 0.14046447, 18.93907662]),
array([ 0.46449812, 0.36100964, 10.47796563])]
[array([ 0.96278347, 0.73697915, 73.76418261]),
array([ 0.6403982 , 0.69109094, 89.19631619])]]
"""
num = len(classifier)
assert values.shape[0] == num
clusters = [[] for n in range(self.n_clusters)]
for i in range(num):
clusters[classifier[i]].append(values[i])
return clusters
def _proba_cluster_one_sample(self, x, distribs):
"""
Compute membership probabilities to each cluster for one sample.
Arguments
---------
- x: array_like
a sample for which probabilities must be calculated
- distribs: multivariate_normal objects list
array of normal distributions
Returns
-------
- prob: array_like
x membership probability for each cluster
"""
weights = np.array(self.cluster.weights_)
rvs = np.array([distribs[k].pdf(x) for k in range(len(weights))])
probs = weights * rvs
rad = np.sum(probs)
if rad > 0:
probs = probs / rad
return probs
def _proba_cluster(self, x, distribs=None):
"""
Calculate membership probabilities to each cluster for each sample
Arguments
---------
- x: array_like
samples where probabilities must be calculated
- distribs : multivariate_normal objects list (optional)
array of membership distributions. If None, use self ones.
Returns
-------
- probs: array_like
x membership probabilities to each cluster.
Examples :
----------
x:
[[ 0. 0.]
[ 0. 1.]
[ 1. 0.]
[ 1. 1.]]
prob:
[[ 1.49050563e-02 9.85094944e-01]
[ 9.90381299e-01 9.61870088e-03]
[ 9.99208990e-01 7.91009759e-04]
[ 1.48949963e-03 9.98510500e-01]]
"""
if distribs is None:
distribs = self.distribs
if self.n_clusters == 1:
probs = np.ones((x.shape[0], 1))
else:
probs = np.array(
[self._proba_cluster_one_sample(x[i], distribs) for i in range(len(x))]
)
return probs
|
relf/smt
|
smt/applications/moe.py
|
Python
|
bsd-3-clause
| 24,509
|
[
"MOE"
] |
d76eea468a68aed612afcde83145bcf00622d9b41ada42f6526dda8ce67cda9a
|
#!/usr/bin/python
#
# Copyright (C) 2018-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# CI helper: when the formatting check leaves the git working tree dirty,
# post (or refresh) a comment with a "style.patch" suggestion on the
# matching GitHub pull request.
import os
import requests
import subprocess
# Only act on pull-request pipelines; their CI ref names look like "PR-123".
if not os.environ['CI_COMMIT_REF_NAME'].startswith('PR-'):
    exit(0)
PR = os.environ['CI_COMMIT_REF_NAME'][3:]
# GitHub comments endpoint for this PR (pull requests are issues in the API).
URL = 'https://api.github.com/repos/espressomd/espresso/issues/' + \
    PR + '/comments?access_token=' + os.environ['GITHUB_TOKEN']
# Patches longer than this many characters are linked as an artifact
# instead of being inlined into the comment body.
SIZELIMIT = 10000
# Delete all existing comments
# (only those posted by the bot account and mentioning style.patch).
comments = requests.get(URL)
for comment in comments.json():
    if comment['user']['login'] == 'espresso-ci' and 'style.patch' in comment['body']:
        requests.delete(comment['url'] + '?access_token=' +
                        os.environ['GITHUB_TOKEN'])
# If the working directory is not clean, post a new comment
if subprocess.call(["git", "diff-index", "--quiet", "HEAD", "--"]) != 0:
    comment = 'Your pull request does not meet our code formatting rules. '
    # NOTE(review): subprocess.check_output returns bytes on Python 3, so the
    # str-typed replace()/concatenation below assumes a Python 2 interpreter —
    # confirm, or the patch would need a .decode() first.
    patch = subprocess.check_output(['git', '--no-pager', 'diff'])
    if len(patch) <= SIZELIMIT:
        comment += 'Specifically, I suggest you make the following changes:\n'
        comment += '```diff\n'
        # Escape backticks so the diff does not break out of the code fence.
        comment += patch.replace('`', r'\`').strip()
        comment += '\n```\n'
        comment += 'To apply these changes, please do one of the following:\n'
    else:
        comment += 'To fix this, please do one of the following:\n'
    comment += '- You can download a patch with my suggested changes '
    comment += '[here](' + os.environ['CI_JOB_URL'] + \
        '/artifacts/raw/style.patch), '
    comment += 'inspect it and make changes manually.\n'
    comment += '- You can directly apply it to your repository by running '
    comment += '`curl ' + os.environ['CI_JOB_URL'] + \
        '/artifacts/raw/style.patch | git apply -`.\n'
    comment += '- You can run `maintainer/CI/fix_style.sh` to automatically fix your coding style. This is the same command that I have executed to generate the patch above, but it requires certain tools to be installed on your computer.\n\n'
    comment += 'You can run `gitlab-runner exec docker style` afterwards to check if your changes worked out properly.\n\n'
    comment += 'Please note that there are often multiple ways to correctly format code. As I am just a robot, I sometimes fail to identify the most aesthetically pleasing way. So please look over my suggested changes and adapt them where the style does not make sense.'
    if len(patch) > 0:
        requests.post(URL, json={'body': comment})
|
mkuron/espresso
|
maintainer/gh_post_style_patch.py
|
Python
|
gpl-3.0
| 3,132
|
[
"ESPResSo"
] |
b772a298f2dc7899e33d5911f3c9a126ef1776bbab07a12a8341bc234233f521
|
import discord, asyncio, logging, time, threading, markovify, psutil, posixpath, platform, re, requests, os, time, shutil, glob, textwrap, datetime, json
from pprint import pprint
import random
from random import randint
import PIL
from PIL import ImageFont
from PIL import Image
from PIL import ImageDraw
from pyfiglet import Figlet
logging.basicConfig(level=logging.INFO)
# Ensure the working directories used by the bot exist before anything else runs.
if not os.path.exists("images/"):
    os.makedirs("images")
if not os.path.exists("fonts/"):
    os.makedirs("fonts")
if not os.path.exists("server/"):
    os.makedirs("server")
print("created general folder structure")
# Settings schema version; per-server settings files with a different
# version are regenerated/migrated elsewhere in this module.
settings_ver = 6
client = discord.Client()
# Handle on our own process, used for resource/uptime reporting.
p = psutil.Process(os.getpid())
# NOTE(review): return value is discarded — presumably called to warm the
# cached process creation time for later uptime queries; confirm.
p.create_time()
@client.event
async def on_ready():
    """Print the bot's identity banner once the Discord connection is ready."""
    banner = ('Discord: Logged in as', client.user.name, client.user.id, '------')
    for entry in banner:
        print(entry)
    await status()
@client.event
async def on_message(message):
print("'" + message.clean_content + "'")
message_to_bot = False
image_in_message = False
settings = ""
old_settings = ""
bot_message = ""
if not message.author.bot:
for mention in message.mentions:
if mention.bot:
print("message sent to bot")
message_to_bot = True
print("message sent by " + message.author.name)
if message.server:
serverid = message.server.id
owner = message.server.owner
if os.path.exists("server/" + message.server.name):
os.rename("server/" + message.server.name, "server/" + serverid)
if not os.path.exists("server/" + serverid):
os.makedirs("server/" + serverid)
os.makedirs("server/" + serverid + "/images")
os.makedirs("server/" + serverid + "/output")
print("created server folder structure for " + serverid)
if os.path.isfile("server/" + serverid + "/settings.json"):
with open("server/" + serverid + "/settings.json", "r") as settings_file:
settings = settings_file.read()
settings = json.loads(settings)
if settings != "":
current_settings_ver = settings["version"]
else:
current_settings_ver = 0
if current_settings_ver != settings_ver:
with open("server/" + serverid + "/settings.json", "w") as settings_file:
settings_file.write('{"version" : '+str(settings_ver)+', "farewell": false, "farewell_text": "**Hope to see you soon again, $member <3**", "greetings": false, "greetings_text" : "**Welcome $mention to __$server__**!", "fakku": true, "sadpanda": true,"animated": "'+message.server.default_role.id+'", "meme_txt": "'+message.server.default_role.id+'", "meme_img": "'+message.server.default_role.id+'", "image": "'+message.server.default_role.id+'", "say": "'+message.server.owner.top_role.id+'", "ascii": "'+message.server.default_role.id+'", "rate": true, "sleep": true, "question": true, "info": "'+message.server.default_role.id+'", "help": "'+message.server.default_role.id+'", "options": "'+message.server.owner.top_role.id+'", "slot_machine": [":pizza:", ":frog:", ":alien:", ":green_apple:", ":heart:"] }')
print("created new settings_file")
if message.server.me.nick:
my_name = message.server.me.nick
else:
my_name = client.user.name
if os.path.isfile("server/" + serverid + "/settings.json"):
if current_settings_ver != settings_ver:
old_settings = settings
if os.path.isfile("server/" + serverid + "/settings.json"):
with open("server/" + serverid + "/settings.json", "r") as settings_file:
settings = settings_file.read()
settings = json.loads(settings)
if old_settings != "":
for key, value in old_settings.items():
if key != "version":
settings[key] = old_settings[key]
with open("server/" + serverid + "/settings.json", "w") as settings_file:
json.dump(settings, settings_file)
print("settings updated")
else:
serverid = "None"
owner = message.author
my_name = client.user.name
settings = json.loads('{"slot_machine": [":pizza:", ":frog:", ":alien:", ":green_apple:", ":heart:"], "sadpanda": true, "fakku": true, "question": true }')
if not os.path.exists("server/" + serverid + "/images/last_image.png"):
file_download(random.choice(requests.get('https://pastebin.com/raw/90WCeZp9').text.split()), "server/" + serverid + "/images/", "last_image.png")
with open("server/" + serverid + "/log.txt", "a") as myfile:
if message.clean_content.endswith(".") or message.clean_content.endswith("!") or message.clean_content.endswith("?") or message.clean_content.endswith("="):
myfile.write(message.clean_content.replace("@", ""))
elif message.clean_content.startswith(("?", "!", "=", "`", "´", "^", ";", "~", "+", "\/", "\\", "]", "}", ")", ":", "<")):
print("message sent to bot")
message_to_bot = True
else:
myfile.write(message.clean_content.replace("@", "") + ". ")
for attachment in message.attachments:
file_download(attachment["proxy_url"], "server/" + serverid + "/", "last_image.png")
image_in_message = True
images = re.findall('(?i)https?:\/\/.*\.(?:png|jpg|jpeg|gif)', message.content)
for image in images:
file_download(image, "server/" + serverid + "/", "last_image.png")
if not message_to_bot:
sadpanda = re.findall('(?i)https?:\/\/(?:ex|g.e-)hentai.org\/g\/(\S{6})\/(\S{10})', message.content)
fakku = re.findall('(?i)https:\/\/(?:www\.)fakku\.net\/(?:hentai|manga)\/\S*', message.content)
if settings["sadpanda"] == True and sadpanda:
await client.send_typing(message.channel)
gidlist = []
manga_info = ""
payload = json.loads('{"method" : "gdata", "gidlist" : [], "namespace": 1 }')
for index, manga in enumerate(sadpanda):
gid = int(manga[0])
gt = manga[1]
payload["gidlist"].append([gid, gt])
url = 'http://g.e-hentai.org/api.php'
header = {'Content-type' : 'application/json'}
print("creating json request for mangas")
ex_response = requests.post(url, data=json.dumps(payload), headers=header)
if ex_response.status_code == 200:
brackets = re.compile(r'(?:\(|\[|\{)[^(?:\)|\]|\})]*(?:\)|\]|\})')
manga_info = ex_response.json()
print(manga_info)
for manga in manga_info["gmetadata"]:
title_eng = re.sub(brackets, '', re.sub(brackets, '', manga["title"])).strip()
title_jpn = re.sub(brackets, '', re.sub(brackets, '', manga["title_jpn"])).strip()
date = datetime.datetime.fromtimestamp(
int(manga["posted"])
).strftime('%Y-%m-%d %H:%M')
artists, male_tags, female_tags, misc_tags, parodies, groups, characters, languages = ([] for i in range(8))
artist, male, female, misc, parody, group, character, language = ("" for i in range(8))
for tag in manga["tags"]:
if "artist:" in tag:
artists.append(tag[7:])
elif "female:" in tag:
female_tags.append(tag[7:])
elif "male:" in tag and not "female:" in tag:
male_tags.append(tag[5:])
elif "parody:" in tag:
parodies.append(tag[7:])
elif "group:" in tag:
groups.append(tag[6:])
elif "character:" in tag:
characters.append(tag[10:])
elif "language:" in tag:
languages.append(tag[9:])
else:
misc_tags.append(tag)
if artists:
artist = "\n **Artist:** " + str(artists)[:-1][1:].replace("'", "")
if groups:
group = "\n **Group:** " + str(groups)[:-1][1:].replace("'", "")
if parodies:
parody = "\n **Parody:** " + str(parodies)[:-1][1:].replace("'", "")
if characters:
character = "\n **Character:** " + str(characters)[:-1][1:].replace("'", "")
if female_tags:
female = "\n **Female:** " + str(female_tags)[:-1][1:].replace("'", "")
if male_tags:
male = "\n **Male:** " + str(male_tags)[:-1][1:].replace("'", "")
if misc_tags:
misc = "\n **Misc:** " + str(misc_tags)[:-1][1:].replace("'", "")
if languages:
language = "\n **Language:** " + str(languages)[:-1][1:].replace("'", "")
rating = ""
for i in range(round(float(manga["rating"])*2)):
rating += ":star:"
if title_jpn != title_eng:
title = "__" + title_eng + "** / **" + title_jpn + "__"
else:
title = "__" + title_eng + "__"
bot_message += ":information_source: " + title + "\n **Category:** " + manga["category"] + language + artist + group + "\n **Posted:** " + date + "\n **Rating:** " + rating + " (" + manga["rating"] + ")\n **Tags:** " + parody + character + female + male + misc + "\n **Thumb:** " + manga["thumb"]
print("posting manga info")
await client.send_message(message.channel, bot_message)
else:
manga_info = None
print(ex_response)
if settings["fakku"] == True and fakku:
await client.send_typing(message.channel)
for manga in fakku:
manga = manga.replace("hentai", "manga")
manga = manga.split('fakku.net', 1)[-1]
manga = "https://api.fakku.net" + manga
data = requests.get(manga).json()
data = data["content"]
date = datetime.datetime.fromtimestamp(
int(data["content_date"])
).strftime('%Y-%m-%d %H:%M')
tags = "["
for tag in data["content_tags"]:
tags += "'" + tag["attribute"] + "', "
tags = tags[:-2] + "]"
if len(data["content_artists"]) > 1:
artist_tag = "\n **Artists:** "
else:
artist_tag = "\n **Artist:** "
for artist in data["content_artists"]:
artist_tag += artist["attribute"] + ", "
artist_tag = artist_tag[:-2]
if len(data["content_series"]) > 1:
parody_tag = "\n **Parodies:** "
else:
parody_tag = "\n **Parody:** "
for serie in data["content_series"]:
parody_tag += serie["attribute"] + ", "
parody_tag = parody_tag[:-2]
if "content_description" in data:
description = "\n **Description:** " + data["content_description"]
else:
description = ""
bot_message += ":information_source: __" + data["content_name"] + "__\n **Category:** " + data["content_category"] + artist_tag + parody_tag + "\n **Posted:** " + date + "\n **Favorites:** " + str(data["content_favorites"]) + " :heart:" + description + "\n **Tags: **" + tags[:-1][1:].replace("'", "")
await client.send_message(message.channel, bot_message)
if client.user.mentioned_in(message) or serverid == "None":
if "roles" in message.content.lower() and message.author.id == (await client.application_info()).owner.id:
if serverid == "None":
await client.send_message(message.channel, "This only works on servers.")
else:
bot_message = "**top role:** " + str(owner.top_role.name) + " **position:** " + str(owner.top_role.position) + "\n**default role:** " + str(message.server.default_role.name) + " **position:** " + str(message.server.default_role.position) + "\n**your roles are:** " + str(user_role_ids(message.author))
bot_message += " \n__role hierachy:__\n"
for roles in message.server.role_hierarchy:
bot_message += "**name:** " + roles.name + " **position: **" + str(roles.position) + " **ID: **" + str(roles.id) + "\n"
await client.send_message(message.channel, bot_message)
elif " raw" in message.content.lower() or "raw " in message.content.lower():
await client.send_message(message.channel, ":page_with_curl: `" + message.content + "`")
elif ( "settings" in message.content.lower() ) and ( message.author.id == owner.id or settings["options"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["options"]).position <= message.author.top_role.position ):
await client.send_typing(message.channel)
REMOVE_LIST = [client.user.mention[:2] + '!' + client.user.mention[2:], client.user.mention, "settings"]
if " set " in message.content.lower():
REMOVE_LIST.append("set")
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.content.lower()).strip().split(' ', 1)
if text[0] == "version":
await client.send_message(message.channel, ":x: **Error:** you can't change the version.")
elif text[0] not in settings:
await client.send_message(message.channel, ":x: **Error:** this option doesn't exist.")
elif text[0] == "slot_machine":
settings["slot_machine"] = text[1].split()
else:
if text[1] == "true":
settings[text[0]] = True
elif text[1] == "false":
settings[text[0]] = False
elif text[1] == "@everyone":
settings[text[0]] = message.server.default_role.id
elif text[1].startswith("<"):
settings[text[0]] = text[1][3:21]
else:
settings[text[0]] = text[1]
if serverid == "None":
await client.send_message(message.channel, ":x: **Error:** You can not change settings in privat messages.")
else:
await client.send_message(message.channel, ":white_check_mark: **Success:** set `" + text[0] + "` to `" + text[1] + "`.")
with open("server/" + serverid + "/settings.json", "w") as settings_file:
json.dump(settings, settings_file)
print("updated settings_file")
elif " show" in message.content.lower():
await client.send_typing(message.channel)
REMOVE_LIST.append("show")
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.content.lower()).strip().split(' ', 1)
bot_message = "__Settings:__\n"
for key, value in settings.items():
if isinstance(value, (int, bool, list)):
value = str(value)
elif key != "farewell_text" and key != "greetings_text":
value = "<@&" + value + ">"
if not text == ['']:
if text[0] == "quiet":
bot_message += "`Option: " + key + " Value: " + value + "`\n"
elif text[0] not in settings:
bot_message = ":x: **Error:** this option doesn't exist.\n"
break
else:
if isinstance(settings[text[0]], (int, bool, list)):
value = str(settings[text[0]])
elif text[0] != "farewell_text" and text[0] != "greetings_text":
value = "<@&" + settings[text[0]] + ">"
else:
value = settings[text[0]]
if len(text) > 2:
bot_message = "**Error**: too many arguments\n"
elif len(text) > 1 and text[1] == "quiet":
bot_message = "`Option: " + text[0] + " Value: " + value + "`\n"
elif len(text) > 1 and text[1] != "quiet":
bot_message = "**Error**: wrong argument\n"
else:
bot_message = "**Option:** " + text[0] + " **Value:** " + value + "\n"
break
else:
bot_message += "**Option:** " + key + " **Value:** " + value + "\n"
await client.send_message(message.channel, bot_message)
elif " json dump" in message.content.lower():
await client.send_message(message.server.owner, ":floppy_disk: These were your old settings for your server: __"+message.server.name+"__\n```js\n" + json.dumps(settings) + "```")
if message.server:
sent_message = await client.send_message(message.channel, ":incoming_envelope: **Sent!**")
await asyncio.sleep(100)
await client.delete_message(sent_message)
if message.channel.permissions_for(message.server.me).manage_messages:
await client.delete_message(message)
elif " settings set" in message.content.lower():
for key, value in settings.items():
bot_message += key + "\n"
await client.send_message(message.channel, ":x: **Error:** You need to choose one of the following options: \n**" + bot_message + "**\nFor current values use `settings show`")
else:
await client.send_message(message.channel, ":x: **Error:** Further arguments are needed, eg: `show, set`")
elif ( "say" in message.content.lower() and message.channel_mentions ) and ( message.author.id == owner.id or settings["say"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["say"]).position <= message.author.top_role.position ):
REMOVE_LIST = ["@" + my_name, "@", "say" ,"#"]
for channel in message.channel_mentions:
REMOVE_LIST.append(channel.name)
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.clean_content).strip()
for channel in message.channel_mentions:
await client.send_message(channel, ":mega: " + text)
elif ( "animated" in message.content.lower() ) and ( message.author.id == owner.id or settings["animated"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["animated"]).position <= message.author.top_role.position ):
if "dick" in message.content.lower():
bot_message = "8"
dick = await client.send_message(message.channel, bot_message)
for i in range(randint(4,15)):
bot_message += "="
await asyncio.sleep(1)
await client.edit_message(dick, bot_message)
bot_message += "D"
await client.edit_message(dick, bot_message)
elif "slot" in message.content.lower() and "machine" in message.content.lower():
choice = ["","",""]
for y in range(3):
choice[y] = random.choice(settings["slot_machine"])
bot_message = "[" + choice[0] + "][" + choice[1] + "][" + choice[2] + "]"
slot_machine = await client.send_message(message.channel, bot_message)
for x in range(5):
for y in range(3):
choice[y] = random.choice(settings["slot_machine"])
bot_message = "[" + choice[0] + "][" + choice[1] + "][" + choice[2] + "]"
await asyncio.sleep(1)
await client.edit_message(slot_machine, bot_message)
if choice[0]==choice[1] and choice[0]==choice[2]:
await client.send_message(message.channel, ":trophy: **"+message.author.name.upper()+" WON!** :trophy:")
else:
await client.send_message(message.channel, "maybe next time.")
else:
await client.send_message(message.channel, ":x: **Error:** You need to choose one of the following memes: \n **dick**\n **slot_machine**")
elif ( "meme_text" in message.content.lower() or "meme_txt" in message.content.lower() ) and ( ( message.author.id == owner.id or settings["meme_txt"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["meme_txt"]).position <= message.author.top_role.position ) ):
await client.send_typing(message.channel)
REMOVE_LIST = ["@" + my_name, "@", "meme_text", "meme_txt"]
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.clean_content).strip()
if len(text) == 0:
print("no text entered")
text = shitpost(serverid)
print("Text: " + text)
await client.send_file(message.channel, meme_text(text, serverid))
elif ( "meme_image" in message.content.lower() or "meme_img" in message.content.lower() ) and (( message.author.id == owner.id or settings["meme_img"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["meme_img"]).position <= message.author.top_role.position )):
await client.send_typing(message.channel)
REMOVE_LIST = ["@" + my_name, "@", "meme_image", "meme_img", "(?i)https?:\/\/.*\.(?:png|jpg|jpeg|gif)"]
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.clean_content).strip().lower()
data = requests.get('https://pastebin.com/raw/fAHJ6gbC').json()
print(text)
if text in data["memes_images"]:
print("Meme: " + text)
await client.send_file(message.channel, meme_image("last_image.png", text, serverid))
else:
for memes in data["memes_images"]:
bot_message += " " + memes + "\n"
await client.send_message(message.channel, ":x: **Error:** You need to choose one of the following memes: \n**" + bot_message + "**")
elif "ascii" in message.content.lower() and (( message.author.id == owner.id or settings["ascii"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["ascii"]).position <= message.author.top_role.position )):
emoji = re.findall('<(:\S*:)\d*>', message.clean_content)
print(emoji)
REMOVE_LIST = ["@" + my_name, "@", "ascii", "<:\S*:\d*>"]
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
text = regex.sub("", message.clean_content).strip().upper()
f = Figlet(font='alphabet')
if not text:
await client.send_message(message.channel, ":x: **Error:** You need to write some text")
else:
await client.send_message(message.channel, ":a: :regional_indicator_s: :regional_indicator_c: :regional_indicator_i: :regional_indicator_i: \n```" + f.renderText(text) + "```")
elif "random" in message.content.lower() and "gallery" in message.content.lower():
await client.send_message(message.channel, ":link:http://pururin.us/gallery/" + str(randint(1,30000)))
elif (" rate" in message.content.lower() or "rate " in message.content.lower()) and (settings["rate"] == True):
rating = randint(1,10)
if rating == 10:
bot_message = "i r8 8/8 m8.\n"
else:
bot_message =str(rating) + "/10 " + random.choice(["memepoints", "points", "goodboipoints", "faggotpoints"]) + ".\n"
bot_message += ":star:" * rating
await client.send_message(message.channel, bot_message)
elif (" sleep" in message.content.lower() or "sleep " in message.content.lower() or " night" in message.content.lower() or "night " in message.content.lower()) and (settings["sleep"] == True):
await client.send_typing(message.channel)
kaga = requests.get('https://pastebin.com/raw/4DxVcG4n').text.split()
kaga_posting = random.choice (kaga)
await client.send_message(message.channel, ":sleeping_accommodation: " + kaga_posting)
elif message.content.endswith('?') and (settings["question"] == True):
if " or " in message.content.lower():
REMOVE_LIST = ["@" + my_name, "@", "\?", "should I rather", "should I", "would you rather", "what do you prefer", "who do you prefer", "do you prefer", "what is better", "what should I do", "what could I do" , "would you prefer", "decide between", "what do you like more", "decide for me between"]
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
shitdecision = re.split('; |, | Or | oR | or | OR |\n', regex.sub("", message.clean_content))
shitdecision = " ".join(random.choice(shitdecision).split())
await client.send_message(message.channel, ":heavy_check_mark: " + shitdecision)
elif " who" in message.content.lower() or "who " in message.content.lower() or "who?" in message.content.lower():
if message.server:
await client.send_message(message.channel, ":bust_in_silhouette: " + random.choice(list(message.server.members)).display_name)
else:
await client.send_message(message.channel, ":bust_in_silhouette: " + random.choice(["you", "I"]))
else:
yesno = requests.get('https://pastebin.com/raw/90WCeZp9').text.split()
shitanswer = random.choice (yesno)
await client.send_message(message.channel, ":link:" + shitanswer)
elif "info" in message.content.lower() and ( ( message.author.id == owner.id or settings["info"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["info"]).position <= message.author.top_role.position ) ):
await client.send_typing(message.channel)
if serverid == "None":
servername = "None"
else:
servername = message.server.name
num_lines = 0
num_words = 0
num_chars = 0
with open("server/" + serverid + "/log.txt", 'r') as f:
for line in f:
words = line.split()
num_lines += 1
num_words += len(words)
num_chars += len(line)
user_count = 0
for server in client.servers:
user_count += server.member_count
await client.send_message(message.channel, """:information_source: *Information:*
I'm :robot: **""" + client.user.name + "**. I'm running on " + platform.dist()[0] + " *" + platform.dist()[1] + "* with :snake: python *" + platform.python_version() + "* using discord.py *" + discord.__version__ + """*.
I've been online since :clock1: *""" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.create_time())) + "* on **" + str(len(client.servers)) + """** servers with a total user count of :busts_in_silhouette: **""" + str(user_count) + """**.
The :card_box: log file for **""" + servername + "** is currently **" + str(num_words) + "** words and **" + str(num_chars) + """** characters long.
This bot was created by **""" + (await client.application_info()).owner.name + "**#" + (await client.application_info()).owner.discriminator + " with :heart:\nSource: :link:https://github.com/ZerataX/ebooks")
elif "help" in message.content.lower() and ( ( message.author.id == owner.id or settings["help"] in user_role_ids(message.author) or discord.utils.get(message.server.roles, id=settings["help"]).position <= message.author.top_role.position ) ):
await client.send_typing(message.channel)
await client.send_message(message.author, """:exclamation: __Mention me with one of the following commands:__
**set username** to change my username *(this only works twice per hour)*
`""" + client.user.mention + """" set username newusername`
**set avatar** to change my avatar
`""" + client.user.mention + """ set avatar http(s)://website.tld/imageurl`
**settings set** to change an option
`""" + client.user.mention + """ settings set option value`
**settings show** shows the currently set options
`""" + client.user.mention + """ settings show`
**meme_text** to get a dank meme *(if no text is given a random sentence will be generated)*
`""" + client.user.mention + """ meme_text sentence`
**meme_image** to get a meme_image *(uses the last posted image on the server)*
`""" + client.user.mention + """ meme_image`
**ascii** ascii turns your message into huge ascii letters
`""" + client.user.mention + """ ascii string`
**say** sends a message to all mentioned channels
`""" + client.user.mention + """ say string #channel-mention1 #channel-mention2 … #channel-mentionN`
**information** sends the current stats
`""" + client.user.mention + """ info`
**invite** to receive an invite link for another server
`""" + client.user.mention + """ invite`
A more detailed :page_facing_up: documentation is available here: :link:https://github.com/ZerataX/ebooks/blob/master/README.md
""")
if message.server:
sent_message = await client.send_message(message.channel, ":incoming_envelope: **Sent!**")
await asyncio.sleep(100)
await client.delete_message(sent_message)
if message.channel.permissions_for(message.server.me).manage_messages:
await client.delete_message(message)
elif "invite" in message.content.lower():
await client.send_typing(message.channel)
if "all" in message.content.lower() and message.author.id == (await client.application_info()).owner.id:
print("creating invites")
bot_message = ":love_letter: __Invites:__\n"
for server in client.servers:
print("trying to create invite")
try:
if server.me.server_permissions.create_instant_invite:
print("succeeded")
invite = await client.create_invite(server, max_age=60, max_uses=1)
bot_message += "**Server:** " + server.name + " **Invite**: " + invite.url + "\n"
else:
print("failed")
bot_message += "**Server:** " + server.name + " **Invite**: could not create invite *(missing permission)*\n"
except:
pass
await client.send_message(message.channel, bot_message)
elif "delete" in message.content.lower() and message.author.id == (await client.application_info()).owner.id:
invites = re.findall("(https?:\/\/discord\.gg\/[\da-zA-Z]+)", message.content)
for invite in invites:
print("trying to delete invite: '" + invite + "'")
invite = await client.get_invite(invite)
if invite.inviter == client.user:
await client.delete_invite(invite)
print("succeeded")
await client.send_message(message.channel, ":white_check_mark: **Success:** deleted invite: " + invite.url)
else:
print("failed")
await client.send_message(message.channel, ":x: **Error:** could not deleted invite: " + invite.url)
else:
await client.send_message(message.author, discord.utils.oauth_url((await client.application_info())[0], permissions=None, server=None))
if message.server:
sent_message = await client.send_message(message.channel, ":incoming_envelope: **Sent!**")
await asyncio.sleep(100)
await client.delete_message(sent_message)
if message.channel.permissions_for(message.server.me).manage_messages:
await client.delete_message(message)
elif "set avatar " in message.content.lower():
await client.send_typing(message.channel)
if message.author.id == (await client.application_info()).owner.id:
if image_in_message:
with open("server/images/last_image.png", 'rb') as f:
await client.edit_profile(password=None,avatar=f.read())
await client.send_message(message.author, ":white_check_mark: **Success:** Avatar set!")
else:
await client.send_message(message.channel, ":x: **Error:** No image given!")
else:
await client.send_message(message.channel, ":x: **Error:** You are not allowed to do that!")
elif "set username " in message.content.lower():
await client.send_typing(message.channel)
if message.author.id == (await client.application_info()).owner.id:
REMOVE_LIST = ["@", my_name , " set username "]
remove = '|'.join(REMOVE_LIST)
regex = re.compile(r'('+remove+')', flags=re.IGNORECASE)
name = regex.sub("", message.clean_content)
print("New username: " + name)
await client.edit_profile(password=None, username=name)
await client.send_message(message.channel, ":white_check_mark: **Success:** Username set!")
else:
await client.send_message(message.channel, ":x: **Error:** **You are not allowed to do that!")
else:
await client.send_typing(message.channel)
await client.send_message(message.channel, shitpost(serverid))
else:
print("message sent by bot")
@client.event
async def on_server_join(server):
    # Fired when the bot is added to a new server: post a greeting in the
    # default channel and DM the server owner with usage instructions.
    await client.send_message(server.default_channel, "**Yahallo!** :heart: Please don't expect me to talk right away, I'm *very* shy :3\nFor help please read: :link:https://github.com/ZerataX/ebooks or mention me with `help`")
    await client.send_message(server.owner, "I was just added to your server, "+server.name+". Most interactions with me work by :speech_balloon: mentioning me. For me to work properly I need some time to gather enough text. For more :question: help on command-usage use:\n`" + client.user.mention + " help`\n\nFor a :page_facing_up: documentation or more help visit:\n:link:https://github.com/ZerataX/ebooks\nor message the creator of this :robot: bot **" + (await client.application_info()).owner.name + "**#" + (await client.application_info()).owner.discriminator + ".")
@client.event
async def on_server_remove(server):
    """DM the server owner after the bot is removed from a server.

    Sends a re-invite link plus contact/issue-tracker pointers.
    """
    # Fetch the application info once instead of three separate awaits,
    # and use the explicit ``.id`` attribute instead of indexing the
    # AppInfo namedtuple with ``[0]``.
    app_info = await client.application_info()
    await client.send_message(server.owner, ":broken_heart: WHY DON'T YOU LOVE ME! ;_;")
    await client.send_message(server.owner, "You can :envelope_with_arrow: invite me again with:\n:link:" + discord.utils.oauth_url(app_info.id, permissions=None, server=server))
    await client.send_message(server.owner, "If you had any *trouble* or just want to give feedback you can message: " + app_info.owner.mention + "\nor open an issue::link:https://github.com/ZerataX/ebooks/issues/new")
@client.event
async def on_member_join(member):
    """Greet a newly joined member in the server's default channel.

    The greeting is configured per server in settings.json and may use the
    placeholders ``$member``, ``$mention`` and ``$server``.
    """
    with open("server/" + member.server.id + "/settings.json", "r") as settings_file:
        # json.load reads straight from the file object; no need for the
        # intermediate read() + loads() round trip.
        settings = json.load(settings_file)
    # "== True" is kept deliberately: only a literal JSON true enables the
    # feature (truthy non-bool values like role-id strings must not).
    if settings["greetings"] == True:
        await client.send_message(member.server.default_channel, str(settings["greetings_text"]).replace("$member", member.name).replace("$mention", member.mention).replace("$server", member.server.name))
@client.event
async def on_member_remove(member):
    """Announce a departing member in the default channel if configured.

    The farewell is configured per server in settings.json and may use the
    placeholders ``$member``, ``$mention`` and ``$server``.
    """
    with open("server/" + member.server.id + "/settings.json", "r") as settings_file:
        # json.load reads straight from the file object; no need for the
        # intermediate read() + loads() round trip.
        settings = json.load(settings_file)
    # "== True" kept deliberately; see on_member_join.
    if settings["farewell"] == True:
        await client.send_message(member.server.default_channel, str(settings["farewell_text"]).replace("$member", member.name).replace("$mention", member.mention).replace("$server", member.server.name))
def user_role_ids(user):
    """Return the ids of every role the given member has."""
    return [role.id for role in user.roles]
def substract(a, b):
    """Return *a* with every occurrence of *b* removed.

    Replaces the obscure ``"".join(a.rsplit(b))`` idiom with the direct
    equivalent ``str.replace``.
    """
    return a.replace(b, "")
def file_download(url, dir, filename=None):
    """Download *url* into directory *dir*.

    If *filename* is given the file is saved under that name, otherwise
    the basename of the URL path is used.  imgur URLs are skipped (they
    block direct hot-link downloads).  Non-200 responses are ignored.
    """
    if "imgur" in url:
        print("imgur sucks ass")
        return  # explicit: nothing is downloaded for imgur links
    print("Downloading: " + url)
    # The two original branches differed only in how the target name was
    # chosen; compute it once instead.
    if filename is None:
        filename = posixpath.basename(url.split('/')[-1])
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        print("Saving to: " + dir + filename)
        with open(dir + filename, 'wb') as f:
            # Let requests transparently decompress gzip/deflate bodies
            # before streaming them to disk.
            r.raw.decode_content = True
            shutil.copyfileobj(r.raw, f)
def getUptime():
    """Return the number of seconds since the bot process started."""
    now = time.time()
    return now - startTime
def shitpost(serverid):
    """Generate a short markov-chain sentence from the server's log file.

    The model occasionally fails to produce a sentence (returns None), so
    up to two attempts are made before falling back to a canned reply.
    The original duplicated the attempt code; a bounded loop is clearer.
    """
    print("Creating shitpost for server " + serverid)
    with open("server/" + serverid + "/log.txt") as f:
        text = f.read()
    text_model = markovify.Text(text)
    for _ in range(2):
        sentence = text_model.make_short_sentence(50)
        if sentence is not None:
            return sentence
    return "fuck off~"
def meme_text(text, serverid):
    """Render *text* onto a randomly chosen meme template image.

    Template geometry (text box, font, colors) comes from info.json;
    missing template images/fonts are downloaded on demand.  Returns the
    path of the rendered image under the server's output directory.
    """
    with open("info.json", "r") as info_file:
        data = info_file.read()
        data = json.loads(data)
    # Pick a random text-meme template.
    meme = randint(0,(len(data["memes_text"]) -1))
    image_name = data["memes_text"][meme]["image"]
    margin = data["memes_text"][meme]["size"]["left"]
    offset = data["memes_text"][meme]["size"]["up"]
    style = data["memes_text"][meme]["style"]
    print("Creating meme " + data["memes_text"][meme]["image"] + " for server " + serverid)
    # Fetch the template image and its font if not cached locally.
    if not os.path.isfile("images/" + image_name):
        print("Downloading new Images")
        file_download(data["memes_text"][meme]["image_url"], "images/", image_name)
    if not os.path.isfile("fonts/" + data["styles"][style]["font"]):
        print("Downloading new Font")
        file_download(data["styles"][style]["font_url"], "fonts/", data["styles"][style]["font"])
    meme_font = ImageFont.truetype("fonts/" + data["styles"][style]["font"], data["styles"][style]["font_size"])
    base = Image.open("images/" + image_name).convert('RGBA')
    width, height = base.size
    # Draw the text on a transparent layer, then alpha-composite it over
    # the template so the font color's alpha is respected.
    txt = Image.new('RGBA', base.size, (255,255,255,0))
    d = ImageDraw.Draw(txt)
    # Wrap the (whitespace-normalized) text to roughly the width of the
    # template's text box.
    dif = (data["memes_text"][meme]["size"]["right"] - data["memes_text"][meme]["size"]["left"])
    wrap = textwrap.wrap(" ".join(text.split()), width=dif/data["styles"][style]["font_size"])
    # Vertically center the wrapped block inside the box, but never start
    # above the box's top edge.
    offset += (data["memes_text"][meme]["size"]["bottom"]-offset)/2-(meme_font.getsize(wrap[0])[1]*len(wrap)/2)
    if offset < data["memes_text"][meme]["size"]["up"]:
        offset = data["memes_text"][meme]["size"]["up"]
    for line in wrap:
        # Horizontally center each line around the template's "center" x.
        d.text((margin+(data["memes_text"][meme]["size"]["center"]-meme_font.getsize(line)[0])/2, offset), line, font=meme_font, fill=data["styles"][style]["font_color"])
        # NOTE(review): this advances by the height of the *whole input
        # text*, not the current line -- probably meant getsize(line);
        # confirm before changing, existing templates may rely on it.
        offset += meme_font.getsize(text)[1]
        # Stop once the next line would spill past the bottom of the box.
        if offset > data["memes_text"][meme]["size"]["bottom"] - meme_font.getsize(line)[1]:
            break
    out = Image.alpha_composite(base, txt)
    out.save("server/" + serverid + "/output/" + image_name);
    print("Meme saved to: server/" + serverid + "/output/" + image_name)
    return "server/" + serverid + "/output/" + image_name
def meme_image(image_name, memename, serverid):
    """Combine the server's last posted image with the *memename* template.

    Depending on the template's "background" flag the picture is either
    scaled and center-cropped into the template's box (frame on top), or
    the frame is pasted onto the scaled picture.  Returns the output path.
    """
    print("Creating " + memename + " meme using " + image_name + " for server " + serverid)
    with open("info.json", "r") as info_file:
        data = info_file.read()
        data = json.loads(data)
    # Fetch the template image if not cached locally.
    if not os.path.isfile("images/" + data["memes_images"][memename]["image"]):
        print("Downloading new Images")
        file_download(data["memes_images"][memename]["image_url"], "images/", data["memes_images"][memename]["image"])
    frame = Image.open("images/" + data["memes_images"][memename]["image"]).convert("RGBA")
    pic = Image.open("server/" + serverid + "/" + image_name).convert("RGBA")
    if data["memes_images"][memename]["background"] == True:
        # The picture goes *behind* the frame: scale it so it covers the
        # template's box [left, up, width, height], then center-crop.
        box = data["memes_images"][memename]["box"]
        if pic.size[0] < pic.size[1]:
            # Portrait: fit the box width first, then ensure the height.
            scale = (box[2]/pic.size[0])
            pic = pic.resize((box[2],int(pic.size[1]*scale)), PIL.Image.ANTIALIAS)
            if pic.size[1] < box[3] - box[1]:
                scale = (box[3]/pic.size[1])
                pic = pic.resize(((int(pic.size[0]*scale),box[3])), PIL.Image.ANTIALIAS)
        else:
            # Landscape: fit the box height first, then ensure the width.
            scale = (box[3]/pic.size[1])
            pic = pic.resize(((int(pic.size[0]*scale),box[3])), PIL.Image.ANTIALIAS)
            if pic.size[0] < box[2] - box[0]:
                scale = (box[2]/pic.size[0])
                pic = pic.resize((box[2],int(pic.size[1]*scale)), PIL.Image.ANTIALIAS)
        # Center-crop the scaled picture to exactly the box size.
        center = [(pic.size[0]-box[2])/2, (pic.size[1]-box[3])/2]
        pic = pic.crop((center[0],center[1],center[0]+box[2],center[1]+box[3]))
        frame.paste(pic,(box[0],box[1]))
        # NOTE(review): the JSON key is spelled "backgrond_color" (sic);
        # it must stay misspelled here to match info.json.
        background = Image.new('RGBA', frame.size, (data["memes_images"][memename]["backgrond_color"][0],data["memes_images"][memename]["backgrond_color"][1],data["memes_images"][memename]["backgrond_color"][2],data["memes_images"][memename]["backgrond_color"][3]))
        frame = Image.alpha_composite(background, frame)
        frame.save("server/" + serverid + "/output/"+ data["memes_images"][memename]["image"]);
    else:
        # The frame goes *on top* of the picture: repeatedly upscale the
        # picture until it is at least as large as the frame in both
        # dimensions (two rounds to absorb integer rounding).
        if pic.size[1] < frame.size[1]:
            scale = (frame.size[1]/pic.size[1])
            pic = pic.resize(((int(pic.size[0]*scale),frame.size[1])), PIL.Image.ANTIALIAS)
        if pic.size[0] < frame.size[0]:
            scale = (frame.size[0]/pic.size[0])
            pic = pic.resize((frame.size[0],int(pic.size[1]*scale)), PIL.Image.ANTIALIAS)
        if pic.size[1] < frame.size[1]:
            scale = (frame.size[1]/pic.size[1])
            pic = pic.resize(((int(pic.size[0]*scale),frame.size[1])), PIL.Image.ANTIALIAS)
        if pic.size[0] < frame.size[0]:
            scale = (frame.size[0]/pic.size[0])
            pic = pic.resize((frame.size[0],int(pic.size[1]*scale)), PIL.Image.ANTIALIAS)
        # Paste the frame near the bottom-left corner of the picture.
        pic.paste(frame, (10, pic.size[1]-frame.size[1]-30),frame)
        # NOTE(review): "backgrond_color" (sic) again -- keep as-is.
        background = Image.new('RGBA', pic.size, (data["memes_images"][memename]["backgrond_color"][0],data["memes_images"][memename]["backgrond_color"][1],data["memes_images"][memename]["backgrond_color"][2],data["memes_images"][memename]["backgrond_color"][3]))
        pic = Image.alpha_composite(background, pic)
        pic.save("server/" + serverid + "/output/"+ data["memes_images"][memename]["image"]);
    print(memename + " meme saved to: server/" + serverid + "/output/" + data["memes_images"][memename]["image"])
    return("server/" + serverid + "/output/" + data["memes_images"][memename]["image"])
async def status():
    """Background task: report the server count and refresh the presence.

    Posts the current server count to bots.discord.pw and sets a freshly
    generated sentence as the "playing" status, every five minutes.

    Rewritten as a loop: the original tail-called ``await status()``,
    which grows the await chain forever and would eventually exhaust the
    interpreter's recursion limit.
    """
    # Loop-invariant request parts, hoisted out of the loop.
    url = 'https://bots.discord.pw/api/bots/189777680982474753/stats'
    # NOTE(review): this API token is hard-coded in source; move it to
    # configuration and revoke the committed token.
    header = {'Content-type' : 'application/json', 'Authorization' : "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VySUQiOiIxNDIzMTcyNDU1MTc3OTEyMzIiLCJyYW5kIjoyNjIsImlhdCI6MTQ3NjY0MDYzM30.aLttxHmTn1v8mn8kIASyrNqFwfgS2gMSdKEc0XuRDbk" }
    while True:
        print("updating status")
        payload = { "server_count": len(client.servers) }
        bots_discord_response = requests.post(url, data=json.dumps(payload), headers=header)
        await client.change_presence(game=discord.Game( name=shitpost("None")))
        await asyncio.sleep(300)
# Blocking call: start the bot's event loop.  'token' is a placeholder
# and must be replaced with the real bot token before running.
client.run('token')
|
ZerataX/ebooks
|
ebooks.py
|
Python
|
gpl-3.0
| 39,538
|
[
"VisIt"
] |
c7903bde6ffad2aa94048c4e127a9344306f57ef4ace190a7dbbe849f1ecc072
|
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node base classes.
These classes provide the generic base classes available for nodes.
"""
from nuitka import Options, Tracing, TreeXML, Variables
from nuitka.__past__ import iterItems
from nuitka.containers.odict import OrderedDict
from nuitka.containers.oset import OrderedSet
from nuitka.PythonVersions import python_version
from nuitka.utils.InstanceCounters import counted_del, counted_init
from nuitka.VariableRegistry import addVariableUsage
class NodeCheckMetaClass(type):
    """ Meta class that enforces node class conventions.

        Every concrete node class (name not ending in "Base") must declare
        a unique string "kind".  From the kind an "is<KindName>" checker
        method is generated and attached to NodeBase, so every node can be
        queried for every kind.
    """
    # All kinds registered so far; used to guarantee uniqueness.
    kinds = set()
    def __new__(cls, name, bases, dictionary):
        # This is in conflict with either PyDev or Pylint, pylint: disable=C0204
        assert len(bases) == len(set(bases))
        return type.__new__(cls, name, bases, dictionary)
    def __init__(cls, name, bases, dictionary):
        if not name.endswith("Base"):
            assert ("kind" in dictionary), name
            kind = dictionary["kind"]
            assert type(kind) is str, name
            assert kind not in NodeCheckMetaClass.kinds, name
            NodeCheckMetaClass.kinds.add(kind)
            # Map e.g. "EXPRESSION_CALL" to the CamelCase part
            # "ExpressionCall"; the short words AND/OR/NOT keep their
            # all-caps spelling.
            def convert(value):
                if value in ("AND", "OR", "NOT"):
                    return value
                else:
                    return value.title()
            kind_to_name_part = "".join(
                [convert(x) for x in kind.split('_')]
            )
            # The class name must end with the camel-cased kind.
            assert name.endswith(kind_to_name_part), \
                (name, kind_to_name_part)
            # Automatically add checker methods for everything to the common
            # base class
            checker_method = "is" + kind_to_name_part
            def checkKind(self):
                return self.kind == kind
            if not hasattr(NodeBase, checker_method):
                setattr(NodeBase, checker_method, checkKind)
        type.__init__(cls, name, bases, dictionary)
# For every node type, there is a test, and then some more members,
# For Python2/3 compatible source, we create a base class that has the metaclass
# used and doesn't require making a choice (Python 2 "__metaclass__" vs.
# Python 3 "metaclass=" syntax).
NodeMetaClassBase = NodeCheckMetaClass("NodeMetaClassBase", (object,), {})
class NodeBase(NodeMetaClassBase):
    """ Abstract base of every node in the tree.

        Provides parent/child navigation, cloning, XML dumping, and the
        default (most pessimistic) answers for the optimization query
        methods that subclasses overload.
    """
    # Set per concrete class by convention, checked by NodeCheckMetaClass.
    kind = None
    @counted_init
    def __init__(self, source_ref):
        # The base class has no __init__ worth calling.
        # Check source reference to meet basic standards, so we note errors
        # when they occur.
        assert source_ref is not None
        assert source_ref.line is not None
        self.parent = None
        self.source_ref = source_ref
    __del__ = counted_del()
    def __repr__(self):
        # This is to avoid crashes, because of bugs in detail.
        # pylint: disable=W0703
        try:
            detail = self.getDetail()
        except Exception as e:
            detail = "detail raises exception %s" % e
        if not detail:
            return "<Node %s>" % self.getDescription()
        else:
            return "<Node %s %s>" % (self.getDescription(), detail)
    def getDescription(self):
        """ Description of the node, intended for use in __repr__ and
            graphical display.
        """
        return "%s at %s" % (self.kind, self.source_ref.getAsString())
    def getDetails(self):
        """ Details of the node, intended for re-creation.

            We are not using the pickle mechanisms, but this is basically
            part of what the constructor call needs. Real children will
            also be added.
        """
        # Virtual method, pylint: disable=R0201
        return {}
    def getDetailsForDisplay(self):
        """ Details of the node, intended for use in __repr__ and dumps.

            This is also used for XML.
        """
        return self.getDetails()
    def getDetail(self):
        """ Details of the node, intended for use in __repr__ and graphical
            display.
        """
        # Strip the surrounding "{" and "}" of the dict repr.
        return str(self.getDetails())[1:-1]
    def makeClone(self):
        try:
            # Using star dictionary arguments here for generic use.
            return self.__class__(
                source_ref = self.source_ref,
                **self.getDetails()
            )
        except TypeError:
            print("Problem cloning", self.__class__)
            raise
    def makeCloneAt(self, source_ref):
        """ Clone the node, but give the clone the provided source reference. """
        result = self.makeClone()
        result.source_ref = source_ref
        return result
    def getParent(self):
        """ Parent of the node. Every node except modules have to have a parent.
        """
        if self.parent is None and not self.isPythonModule():
            # print self.getVisitableNodesNamed()
            assert False, (self, self.source_ref)
        return self.parent
    def getParents(self):
        """ Parents of the node. Up to module level.
        """
        result = []
        current = self
        while True:
            current = current.getParent()
            result.append(current)
            if current.isPythonModule() or current.isExpressionFunctionBody():
                break
        assert None not in result, self
        # Collected child-to-parent; return outermost first.
        result.reverse()
        return result
    def getChildName(self):
        """ Return the role in the current parent, subject to changes.
        """
        parent = self.getParent()
        for key, value in parent.child_values.items():
            if self is value:
                return key
            if type(value) is tuple:
                if self in value:
                    # Tuple members are identified as (role, index).
                    return key, value.index(self)
        # TODO: Not checking tuples yet
        return None
    def getParentFunction(self):
        """ Return the parent that is a function.
        """
        parent = self.getParent()
        while parent is not None and not parent.isExpressionFunctionBody():
            parent = parent.getParent()
        return parent
    def getParentModule(self):
        """ Return the parent that is module.
        """
        parent = self
        while not parent.isPythonModule():
            if hasattr(parent, "provider"):
                # After we checked, we can use it, will be much faster route
                # to take.
                parent = parent.provider
            else:
                parent = parent.getParent()
        return parent
    def isParentVariableProvider(self):
        # Check if it's a closure giver, in which cases it can provide variables,
        return isinstance(self, ClosureGiverNodeBase)
    def getParentVariableProvider(self):
        """ Return the closest parent that can provide variables. """
        parent = self.getParent()
        while not parent.isParentVariableProvider():
            parent = parent.getParent()
        return parent
    def getParentReturnConsumer(self):
        """ Return the closest parent that consumes "return" statements. """
        parent = self.getParent()
        while not parent.isParentVariableProvider() and \
              not parent.isExpressionOutlineBody():
            parent = parent.getParent()
        return parent
    def getParentStatementsFrame(self):
        """ Return the enclosing statements frame, or None if there is none
            below the variable provider / outline body.
        """
        current = self.getParent()
        while True:
            if current.isStatementsFrame():
                return current
            if current.isParentVariableProvider():
                return None
            if current.isExpressionOutlineBody():
                return None
            current = current.getParent()
    def getSourceReference(self):
        return self.source_ref
    def setCompatibleSourceReference(self, source_ref):
        """ Bug compatible line numbers information.

            As CPython outputs the last bit of bytecode executed, and not the
            line of the operation. For example calls, output the line of the
            last argument, as opposed to the line of the operation start.

            For tests, we want to be compatible. In improved mode, we are
            not being fully compatible, and just drop it altogether.
        """
        # Getting the same source reference can be dealt with quickly, so do
        # this first.
        if self.source_ref is not source_ref and \
           Options.isFullCompat() and \
           self.source_ref != source_ref:
            # An attribute outside of "__init__", so we save one memory for the
            # most cases. Very few cases involve splitting across lines.
            # pylint: disable=W0201
            self.effective_source_ref = source_ref
    def getCompatibleSourceReference(self):
        """ Bug compatible line numbers information.

            See above.
        """
        return getattr(self, "effective_source_ref", self.source_ref)
    def asXml(self):
        """ Return the node and its children as an XML element tree. """
        line = self.getSourceReference().getLineNumber()
        result = TreeXML.Element(
            "node",
            kind = self.__class__.__name__,
            line = "%s" % line
        )
        compat_line = self.getCompatibleSourceReference().getLineNumber()
        if compat_line != line:
            result.attrib["compat_line"] = str(compat_line)
        for key, value in iterItems(self.getDetailsForDisplay()):
            value = str(value)
            # Strip "<...>" wrappers that some details use in their repr.
            if value.startswith('<') and value.endswith('>'):
                value = value[1:-1]
            result.set(key, str(value))
        for name, children in self.getVisitableNodesNamed():
            if type(children) not in (list, tuple):
                children = (children,)
            role = TreeXML.Element(
                "role",
                name = name
            )
            result.append(role)
            for child in children:
                if child is not None:
                    role.append(
                        child.asXml()
                    )
        return result
    def asXmlText(self):
        xml = self.asXml()
        return TreeXML.toString(xml)
    def dump(self, level = 0):
        """ Print the node and its children, indented by nesting level. """
        Tracing.printIndented(level, self)
        Tracing.printSeparator(level)
        for visitable in self.getVisitableNodes():
            visitable.dump(level + 1)
        Tracing.printSeparator(level)
    @staticmethod
    def isPythonModule():
        # For overload by module nodes
        return False
    def isExpression(self):
        return self.kind.startswith("EXPRESSION_")
    def isStatement(self):
        return self.kind.startswith("STATEMENT_")
    def isExpressionBuiltin(self):
        return self.kind.startswith("EXPRESSION_BUILTIN_")
    def isStatementReraiseException(self):
        # Virtual method, pylint: disable=R0201
        return False
    def isExpressionMakeSequence(self):
        # Virtual method, pylint: disable=R0201
        return False
    def isIteratorMaking(self):
        # Virtual method, pylint: disable=R0201
        return False
    def isNumberConstant(self):
        # Virtual method, pylint: disable=R0201
        return False
    def isExpressionCall(self):
        # Virtual method, pylint: disable=R0201
        return False
    def visit(self, context, visitor):
        """ Apply *visitor* to this node and, depth-first, its children. """
        visitor(self)
        for visitable in self.getVisitableNodes():
            visitable.visit(context, visitor)
    def getVisitableNodes(self):
        # Virtual method, pylint: disable=R0201
        return ()
    def getVisitableNodesNamed(self):
        # Virtual method, pylint: disable=R0201
        return ()
    def replaceWith(self, new_node):
        """ Replace this node with *new_node* in its parent. """
        self.parent.replaceChild(
            old_node = self,
            new_node = new_node
        )
    def getName(self):
        # Virtual method, pylint: disable=R0201
        return None
    def mayHaveSideEffects(self):
        """ Unless we are told otherwise, everything may have a side effect. """
        # Virtual method, pylint: disable=R0201
        return True
    def isOrderRelevant(self):
        return self.mayHaveSideEffects()
    def mayHaveSideEffectsBool(self):
        """ Unless we are told otherwise, everything may have a side effect. """
        # Virtual method, pylint: disable=R0201
        return True
    def extractSideEffects(self):
        """ Unless defined otherwise, the expression is the side effect. """
        return (self,)
    def mayRaiseException(self, exception_type):
        """ Unless we are told otherwise, everything may raise everything. """
        # Virtual method, pylint: disable=R0201,W0613
        return True
    def mayRaiseExceptionBool(self, exception_type):
        """ Unless we are told otherwise, everything may raise being checked. """
        # Virtual method, pylint: disable=R0201,W0613
        return True
    def mayReturn(self):
        return "_RETURN" in self.kind
    def mayBreak(self):
        # For overload, pylint: disable=R0201
        return False
    def mayContinue(self):
        # For overload, pylint: disable=R0201
        return False
    def needsFrame(self):
        """ Unless we are told otherwise, this depends on exception raise. """
        return self.mayRaiseException(BaseException)
    def willRaiseException(self, exception_type):
        """ Unless we are told otherwise, nothing may raise anything. """
        # Virtual method, pylint: disable=R0201,W0613
        return False
    def isIndexable(self):
        """ Unless we are told otherwise, it's not indexable. """
        # Virtual method, pylint: disable=R0201
        return False
    def isStatementAborting(self):
        """ Is the node aborting, control flow doesn't continue after this node. """
        assert self.isStatement(), self.kind
        return False
    def needsLocalsDict(self):
        """ Node requires a locals dictionary by provider. """
        # Virtual method, pylint: disable=R0201
        return False
    def getIntegerValue(self):
        """ Node as integer value, if possible."""
        # Virtual method, pylint: disable=R0201
        return None
class CodeNodeBase(NodeBase):
    """ Base class for nodes that have a code name.

        Code names are unique identifiers for use in generated code; they
        are derived on demand from the provider chain and a per-kind UID.
    """
    def __init__(self, name, code_prefix, source_ref):
        assert name is not None
        NodeBase.__init__(self, source_ref = source_ref)
        self.name = name
        self.code_prefix = code_prefix
        # The code name is determined on demand only.
        self.code_name = None
        # The "UID" values of children kinds are kept here.
        self.uids = {}
    def getName(self):
        return self.name
    def getFullName(self):
        """ Concatenate the names of all named parents with "__". """
        result = self.getName()
        current = self
        while True:
            current = current.getParent()
            if current is None:
                break
            name = current.getName()
            if name is not None:
                result = "%s__%s" % (name, result)
        assert '<' not in result, result
        return result
    def getCodeName(self):
        """ Return the unique code name, computing and caching it on first use. """
        if self.code_name is None:
            provider = self.getParentVariableProvider()
            parent_name = provider.getCodeName()
            uid = "_%d" % provider.getChildUID(self)
            assert isinstance(self, CodeNodeBase)
            if self.name:
                name = uid + '_' + self.name
            else:
                name = uid
            self.code_name = "%s%s_of_%s" % (self.code_prefix, name, parent_name)
        return self.code_name
    def getChildUID(self, node):
        # Count children per kind, so UIDs are stable and start at 1.
        if node.kind not in self.uids:
            self.uids[ node.kind ] = 0
        self.uids[ node.kind ] += 1
        return self.uids[ node.kind ]
class ChildrenHavingMixin:
named_children = ()
checkers = {}
def __init__(self, values):
assert type(self.named_children) is tuple and len(self.named_children)
# Check for completeness of given values, everything should be there
# but of course, might be put to None.
assert set(values.keys()) == set(self.named_children)
self.child_values = dict(values)
for key, value in self.child_values.items():
if key in self.checkers:
value = self.child_values[key] = self.checkers[key](value)
assert type(value) is not list, key
if type(value) is tuple:
assert None not in value, key
for val in value:
val.parent = self
elif value is not None:
value.parent = self
elif value is None:
pass
else:
assert False, type(value)
def setChild(self, name, value):
""" Set a child value.
Do not overload, provider self.checkers instead.
"""
# Only accept legal child names
assert name in self.child_values, name
# Lists as inputs are OK, but turn them into tuples.
if type(value) is list:
value = tuple(value)
if name in self.checkers:
value = self.checkers[name](value)
# Re-parent value to us.
if type(value) is tuple:
for val in value:
val.parent = self
elif value is not None:
value.parent = self
# Determine old value, and inform it about loosing its parent.
old_value = self.child_values[name]
assert old_value is not value, value
self.child_values[name] = value
def getChild(self, name):
# Only accept legal child names
assert name in self.child_values, name
return self.child_values[name]
def hasChild(self, name):
return name in self.child_values
@staticmethod
def childGetter(name):
def getter(self):
return self.getChild(name)
return getter
@staticmethod
def childSetter(name):
def setter(self, value):
self.setChild(name, value)
return setter
def getVisitableNodes(self):
result = []
for name in self.named_children:
value = self.child_values[ name ]
if value is None:
pass
elif type(value) is tuple:
result += list(value)
elif isinstance(value, NodeBase):
result.append(value)
else:
raise AssertionError(
self,
"has illegal child", name, value, value.__class__
)
return tuple(result)
def getVisitableNodesNamed(self):
result = []
for name in self.named_children:
value = self.child_values[ name ]
result.append((name, value))
return result
def replaceChild(self, old_node, new_node):
    """ Swap old_node for new_node among our children.

        Returns the name of the child slot that was touched. When the
        old node lives inside a tuple child and new_node is None, the
        element is removed from the tuple entirely.
    """
    if new_node is not None and not isinstance(new_node, NodeBase):
        raise AssertionError(
            "Cannot replace with", new_node, "old", old_node, "in", self
        )

    # Find the replaced node, as an added difficulty, what might be
    # happening, is that the old node is an element of a tuple, in which we
    # may also remove that element, by setting it to None.
    for key, value in self.child_values.items():
        if value is None:
            pass
        elif type(value) is tuple:
            if old_node in value:
                if new_node is not None:
                    # Substitute the node inside the tuple, keeping order.
                    self.setChild(
                        key,
                        tuple(
                            (val if val is not old_node else new_node)
                            for val in
                            value
                        )
                    )
                else:
                    # Drop the old node from the tuple entirely.
                    self.setChild(
                        key,
                        tuple(
                            val
                            for val in
                            value
                            if val is not old_node
                        )
                    )

                return key
        elif isinstance(value, NodeBase):
            if old_node is value:
                self.setChild(key, new_node)

                return key
        else:
            assert False, (key, value, value.__class__)

    # Not finding the child is a hard error for callers.
    raise AssertionError(
        "Didn't find child",
        old_node,
        "in",
        self
    )
def makeClone(self):
    """ Deep copy this node, cloning all child nodes recursively. """
    values = {}

    for key, value in self.child_values.items():
        assert type(value) is not list, key

        if value is None:
            values[key] = None
        elif type(value) is tuple:
            values[key] = tuple(
                v.makeClone()
                for v in
                value
            )
        else:
            values[key] = value.makeClone()

    # Node specific extra constructor arguments are merged in.
    values.update(
        self.getDetails()
    )

    try:
        # Using star dictionary arguments here for generic use,
        # pylint: disable=E1123
        return self.__class__(
            source_ref = self.source_ref,
            **values
        )
    except TypeError:
        print("Problem cloning", self.__class__)

        raise
class ClosureGiverNodeBase(CodeNodeBase):
    """ Mix-in for nodes that provide variables for closure takers. """

    def __init__(self, name, code_prefix, source_ref):
        CodeNodeBase.__init__(
            self,
            name = name,
            code_prefix = code_prefix,
            source_ref = source_ref
        )

        # Variables provided for closure takers, keyed by variable name.
        self.providing = OrderedDict()

        self.keeper_variables = OrderedSet()

        # Temporary variables and scope counters allocated on this node.
        self.temp_variables = OrderedDict()

        self.temp_scopes = OrderedDict()

        self.preserver_id = 0

    def hasProvidedVariable(self, variable_name):
        return variable_name in self.providing

    def getProvidedVariable(self, variable_name):
        # Created lazily on first request.
        if variable_name not in self.providing:
            self.providing[variable_name] = self.createProvidedVariable(
                variable_name = variable_name
            )

        return self.providing[variable_name]

    def createProvidedVariable(self, variable_name):
        """ Overloaded by concrete node types to create their variable kind. """
        # Virtual method, pylint: disable=R0201
        assert type(variable_name) is str

        return None

    def registerProvidedVariables(self, *variables):
        for variable in variables:
            self.registerProvidedVariable(variable)

    def registerProvidedVariable(self, variable):
        assert variable is not None

        self.providing[variable.getName()] = variable

    def getProvidedVariables(self):
        return self.providing.values()

    def allocateTempScope(self, name, allow_closure = False):
        # Counter per scope name gives unique suffixes.
        self.temp_scopes[name] = self.temp_scopes.get(name, 0) + 1

        # TODO: Instead of using overly long code name, could just visit parents
        # and make sure to allocate the scope at the top.
        if allow_closure:
            return "%s_%s_%d" % (
                self.getCodeName(),
                name,
                self.temp_scopes[name]
            )
        else:
            return "%s_%d" % (
                name,
                self.temp_scopes[name]
            )

    def allocateTempVariable(self, temp_scope, name):
        if temp_scope is not None:
            full_name = "%s__%s" % (
                temp_scope,
                name
            )
        else:
            # Unscoped temporaries must not use the reserved name "result".
            assert name != "result"

            full_name = name

        del name

        assert full_name not in self.temp_variables, full_name

        result = Variables.TempVariable(
            owner = self,
            variable_name = full_name
        )

        self.temp_variables[full_name] = result

        addVariableUsage(result, self)

        return result

    def getTempVariable(self, temp_scope, name):
        if temp_scope is not None:
            full_name = "%s__%s" % (temp_scope, name)
        else:
            full_name = name

        return self.temp_variables[full_name]

    def getTempVariables(self):
        return tuple(self.temp_variables.values())

    def removeTempVariable(self, variable):
        del self.temp_variables[variable.getName()]

    def allocatePreserverId(self):
        # NOTE(review): fresh preserver ids are only handed out for
        # Python3; for Python2 this always returns 0 -- presumably
        # intentional, confirm against the code generation that uses it.
        if python_version >= 300:
            self.preserver_id += 1

        return self.preserver_id
class ClosureTakerMixin:
    """ Mixin for nodes that accept variables from closure givers. """

    def __init__(self, provider, early_closure):
        assert provider.isParentVariableProvider(), provider

        self.provider = provider
        self.early_closure = early_closure

        # Closure variables actually taken from the provider.
        self.taken = set()

        self.temp_variables = set()

    def getParentVariableProvider(self):
        return self.provider

    def getClosureVariable(self, variable_name):
        """ Ask the provider for the variable and register it as taken. """
        result = self.provider.getVariableForClosure(
            variable_name = variable_name
        )
        assert result is not None, variable_name

        # There is no maybe with closures. It means, it is closure variable in
        # this case.
        if result.isMaybeLocalVariable():
            result = result.getMaybeVariable()

        # Module variables are accessed directly and not taken as closure.
        if not result.isModuleVariable():
            self.addClosureVariable(result)

        return result

    def addClosureVariable(self, variable):
        self.taken.add(variable)

        return variable

    def getClosureVariables(self):
        # Sorted by name for deterministic code generation.
        return tuple(
            sorted(
                [
                    take
                    for take in
                    self.taken
                    if not take.isModuleVariable()
                ],
                key = lambda x : x.getName()
            )
        )

    def hasTakenVariable(self, variable_name):
        for variable in self.taken:
            if variable.getName() == variable_name:
                return True
        return False

    def getTakenVariable(self, variable_name):
        for variable in self.taken:
            if variable.getName() == variable_name:
                return variable
        return None

    def isEarlyClosure(self):
        """ Early closure taking means immediate binding of references.

            Normally it's good to lookup name references immediately, but not for
            functions. In case of a function body it is not allowed to do that,
            because a later assignment needs to be queried first. Nodes need to
            indicate via this if they would like to resolve references at the same
            time as assignments.
        """
        return self.early_closure
class ExpressionMixin:
    """ Mix-in with the common interface of all expression nodes.

        Provides compile time constant queries, truth value and
        iteration knowledge, plus the default "computeExpression*"
        hooks used by the optimization passes.
    """

    def isCompileTimeConstant(self):
        """ Has a value that we can use at compile time.

            Yes or no. If it has such a value, simulations can be applied at
            compile time and e.g. operations or conditions, or even calls may
            be executed against it.
        """
        # Virtual method, pylint: disable=R0201

        return False

    def getCompileTimeConstant(self):
        # Must only ever be called on nodes that claim to be constant.
        assert self.isCompileTimeConstant(), self

        assert False

    def getTruthValue(self):
        """ Return known truth value. The "None" value indicates unknown. """

        if self.isCompileTimeConstant():
            return bool(self.getCompileTimeConstant())
        else:
            return None

    def mayBeNone(self):
        """ Could this evaluate to be "None".

            Yes or no. Defaults to pessimistic yes."""
        # For overload, pylint: disable=R0201

        return True

    def isKnownToBeIterable(self, count):
        """ Can be iterated at all (count is None) or exactly count times.

            Yes or no. If it can be iterated a known number of times, it may
            be asked to unpack itself.
        """

        # Virtual method, pylint: disable=R0201,W0613
        return False

    def isKnownToBeIterableAtMin(self, count):
        # Virtual method, pylint: disable=R0201,W0613
        return False

    def isKnownToBeIterableAtMax(self, count):
        # Virtual method, pylint: disable=R0201,W0613
        return False

    def getIterationLength(self):
        """ Value that "len" or "PyObject_Size" would give, if known.

            Otherwise it is "None" to indicate unknown.
        """

        # Virtual method, pylint: disable=R0201
        return None

    def getStringValue(self):
        """ Node as integer value, if possible."""
        # Virtual method, pylint: disable=R0201
        return None

    def getStrValue(self):
        """ Value that "str" or "PyObject_Str" would give, if known.

            Otherwise it is "None" to indicate unknown. Users must not
            forget to take side effects into account, when replacing a
            node with its string value.
        """
        string_value = self.getStringValue()

        if string_value is not None:
            from .NodeMakingHelpers import makeConstantReplacementNode

            return makeConstantReplacementNode(
                node = self,
                constant = string_value
            )

        return None

    def isKnownToBeHashable(self):
        """ Is the value hashable, i.e. suitable for dictionary/set keying."""

        # Virtual method, pylint: disable=R0201
        # Unknown by default.
        return None

    def onRelease(self, constraint_collection):
        # print "onRelease", self
        pass

    def computeExpressionRaw(self, constraint_collection):
        """ Compute an expression.

            Default behavior is to just visit the child expressions first, and
            then the node "computeExpression". For a few cases this needs to
            be overloaded, e.g. conditional expressions.
        """
        # First apply the sub-expressions, as they are evaluated before.
        sub_expressions = self.getVisitableNodes()

        for sub_expression in sub_expressions:
            assert sub_expression.isExpression(), (self, sub_expression)

            constraint_collection.onExpression(
                expression = sub_expression
            )

        # Then ask ourselves to work on it.
        return self.computeExpression(
            constraint_collection = constraint_collection
        )

    def computeExpressionAttribute(self, lookup_node, attribute_name,
                                   constraint_collection):
        # By default, an attribute lookup may change everything about the lookup
        # source. Virtual method, pylint: disable=W0613
        constraint_collection.removeKnowledge(lookup_node)

        # Any code could be run, note that.
        constraint_collection.onControlFlowEscape(self)

        return lookup_node, None, None

    def computeExpressionSubscript(self, lookup_node, subscript,
                                   constraint_collection):
        # By default, an subscript may change everything about the lookup
        # source.
        constraint_collection.removeKnowledge(lookup_node)
        constraint_collection.removeKnowledge(subscript)

        # Any code could be run, note that.
        constraint_collection.onControlFlowEscape(self)

        return lookup_node, None, None

    def computeExpressionSlice(self, lookup_node, lower, upper,
                               constraint_collection):
        # By default, a slicing may change everything about the lookup source.
        # Virtual method, pylint: disable=R0201,W0613
        constraint_collection.removeKnowledge(lookup_node)

        return lookup_node, None, None

    def computeExpressionCall(self, call_node, constraint_collection):
        self.onContentEscapes(constraint_collection)

        return call_node, None, None

    def computeExpressionIter1(self, iter_node, constraint_collection):
        self.onContentEscapes(constraint_collection)

        assert iter_node.getValue() is self

        return iter_node, None, None

    def computeExpressionOperationNot(self, not_node, constraint_collection):
        # Virtual method, pylint: disable=R0201

        # The value of that node escapes and could change its contents.
        constraint_collection.removeKnowledge(not_node)

        # Any code could be run, note that.
        constraint_collection.onControlFlowEscape(not_node)

        return not_node, None, None

    def computeExpressionDrop(self, statement, constraint_collection):
        # Side effect free expression statements can simply be dropped.
        if not self.mayHaveSideEffects():
            return None, "new_statements", "Removed statement without effect."

        return statement, None, None

    def onContentEscapes(self, constraint_collection):
        pass
class CompileTimeConstantExpressionMixin(ExpressionMixin):
    """ Mix-in for expression nodes whose value is fully known at compile time. """

    # TODO: Do this for all computations, do this in the base class of all
    # nodes.
    computed_attribute = False

    def __init__(self):
        pass

    def isCompileTimeConstant(self):
        """ Has a value that we can use at compile time.

            Yes or no. If it has such a value, simulations can be applied at
            compile time and e.g. operations or conditions, or even calls may
            be executed against it.
        """
        return True

    def isMutable(self):
        # Virtual method, pylint: disable=R0201
        return False

    def mayHaveSideEffects(self):
        # Virtual method, pylint: disable=R0201
        return False

    def mayHaveSideEffectsBool(self):
        # Virtual method, pylint: disable=R0201
        return False

    def mayBeNone(self):
        # Constants are known exactly, so this is a precise answer.
        return self.getCompileTimeConstant() is None

    def computeExpressionOperationNot(self, not_node, constraint_collection):
        from .NodeMakingHelpers import getComputationResult

        return getComputationResult(
            node = not_node,
            computation = lambda : not self.getCompileTimeConstant(),
            description = """\
Compile time constant negation truth value pre-computed."""
        )

    def computeExpressionAttribute(self, lookup_node, attribute_name, constraint_collection):
        # Once an attribute lookup failed to pre-compute, do not retry.
        if self.computed_attribute:
            return lookup_node, None, None

        value = self.getCompileTimeConstant()

        from .NodeMakingHelpers import getComputationResult, isCompileTimeConstantValue

        # If it raises, or the attribute itself is a compile time constant,
        # then do execute it.
        if not hasattr(value, attribute_name) or \
           isCompileTimeConstantValue(getattr(value, attribute_name)):
            return getComputationResult(
                node = lookup_node,
                computation = lambda : getattr(value, attribute_name),
                description = "Attribute lookup to '%s' pre-computed." % (
                    attribute_name
                )
            )

        self.computed_attribute = True

        return lookup_node, None, None

    def computeExpressionSubscript(self, lookup_node, subscript, constraint_collection):
        from .NodeMakingHelpers import getComputationResult

        if subscript.isCompileTimeConstant():
            return getComputationResult(
                node = lookup_node,
                computation = lambda : self.getCompileTimeConstant()[ subscript.getCompileTimeConstant() ],
                description = "Subscript of constant with constant value."
            )

        # TODO: Look-up of subscript to index may happen.
        return lookup_node, None, None

    def computeExpressionSlice(self, lookup_node, lower, upper, constraint_collection):
        from .NodeMakingHelpers import getComputationResult

        # TODO: Could be happy with predictable index values and not require
        # constants.
        if lower is not None:
            if upper is not None:
                if lower.isCompileTimeConstant() and upper.isCompileTimeConstant():
                    return getComputationResult(
                        node = lookup_node,
                        computation = lambda : self.getCompileTimeConstant()[
                            lower.getCompileTimeConstant() : upper.getCompileTimeConstant()
                        ],
                        description = """\
Slicing of constant with constant indexes."""
                    )
            else:
                if lower.isCompileTimeConstant():
                    return getComputationResult(
                        node = lookup_node,
                        computation = lambda : self.getCompileTimeConstant()[
                            lower.getCompileTimeConstant() :
                        ],
                        description = """\
Slicing of constant with constant lower index only."""
                    )
        else:
            if upper is not None:
                if upper.isCompileTimeConstant():
                    return getComputationResult(
                        node = lookup_node,
                        computation = lambda : self.getCompileTimeConstant()[
                            : upper.getCompileTimeConstant()
                        ],
                        description = """\
Slicing of constant with constant upper index only."""
                    )
            else:
                return getComputationResult(
                    node = lookup_node,
                    computation = lambda : self.getCompileTimeConstant()[ : ],
                    description = "Slicing of constant with no indexes."
                )

        return lookup_node, None, None
class ExpressionSpecBasedComputationMixin(ExpressionMixin):
    """ Mix-in that pre-computes built-in calls through a builtin spec object. """

    builtin_spec = None

    def computeBuiltinSpec(self, given_values):
        assert self.builtin_spec is not None, self

        # All given arguments must be compile time constants for simulation.
        for value in given_values:
            if value is not None and not value.isCompileTimeConstant():
                return self, None, None

        if not self.builtin_spec.isCompileTimeComputable(given_values):
            return self, None, None

        from .NodeMakingHelpers import getComputationResult

        return getComputationResult(
            node = self,
            computation = lambda : self.builtin_spec.simulateCall(given_values),
            description = "Built-in call to '%s' pre-computed." % (
                self.builtin_spec.getName()
            )
        )
class ExpressionChildrenHavingBase(ChildrenHavingMixin, NodeBase,
                                   ExpressionMixin):
    """ Base class for expression nodes that carry named child nodes. """

    def __init__(self, values, source_ref):
        NodeBase.__init__(
            self,
            source_ref = source_ref
        )

        ChildrenHavingMixin.__init__(
            self,
            values = values
        )
class StatementChildrenHavingBase(ChildrenHavingMixin, NodeBase):
    """ Base class for statement nodes that carry named child nodes. """

    def __init__(self, values, source_ref):
        NodeBase.__init__(self, source_ref = source_ref)

        ChildrenHavingMixin.__init__(
            self,
            values = values
        )
class ExpressionBuiltinNoArgBase(NodeBase, ExpressionMixin):
    """ Base class for built-in calls that take no arguments at all. """

    def __init__(self, builtin_function, source_ref):
        NodeBase.__init__(
            self,
            source_ref = source_ref
        )

        # The actual built-in, simulated without arguments at compile time.
        self.builtin_function = builtin_function

    def computeExpression(self, constraint_collection):
        from .NodeMakingHelpers import getComputationResult

        # The lambda is there for make sure that no argument parsing will reach
        # the built-in function at all, pylint: disable=W0108
        return getComputationResult(
            node = self,
            computation = lambda : self.builtin_function(),
            description = "No arg %s built-in" % self.builtin_function.__name__
        )
class ExpressionBuiltinSingleArgBase(ExpressionChildrenHavingBase,
                                     ExpressionSpecBasedComputationMixin):
    """ Base class for built-in calls that accept one optional argument. """

    named_children = (
        "value",
    )

    def __init__(self, value, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self,
            values = {
                "value" : value,
            },
            source_ref = source_ref
        )

    getValue = ExpressionChildrenHavingBase.childGetter(
        "value"
    )

    def computeExpression(self, constraint_collection):
        value = self.getValue()

        assert self.builtin_spec is not None, self

        if value is None:
            # Called without argument, simulate the no-argument form.
            return self.computeBuiltinSpec(
                given_values = ()
            )
        else:
            if value.willRaiseException(BaseException):
                return value, "new_raise", """\
Built-in call raises exception while building argument."""

            return self.computeBuiltinSpec(
                given_values = (value,)
            )
class SideEffectsFromChildrenMixin:
    """ Mix-in for nodes whose side effects come only from their children. """

    def mayHaveSideEffects(self):
        # A single child with side effects makes us have them too.
        return any(
            child.mayHaveSideEffects()
            for child in
            self.getVisitableNodes()
        )

    def extractSideEffects(self):
        # No side effects at all but from the children.
        collected = []

        for child in self.getVisitableNodes():
            collected += child.extractSideEffects()

        return tuple(collected)
|
tempbottle/Nuitka
|
nuitka/nodes/NodeBases.py
|
Python
|
apache-2.0
| 41,638
|
[
"VisIt"
] |
2e8565e712e458882539bd9cc523eb5400de8b7a57649e9c5e9fd88502a390a5
|
########################################################################
# File : FC_Scaling_test
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Test suite for a generic File Catalog scalability tests
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC import S_OK
import sys, pprint, os, numpy
Script.setUsageMessage( """
Test suite for a generic File Catalog scalability tests
""" )
# Module level option holders, filled in by the switch callbacks below.
# Each callback must return S_OK for the DIRAC switch machinery.

testType = 'noTest'
def setTestType( value ):
  global testType
  testType = value
  return S_OK()

testDir = ''
def setTestDirectory( value ):
  global testDir
  testDir = value
  return S_OK()

nClients = 1
def setNumberOfClients( value ):
  global nClients
  nClients = int( value )
  return S_OK()

nQueries = 100
def setNumberOfQueries( value ):
  global nQueries
  nQueries = int( value )
  return S_OK()

lfnListFile = 'lfns_100.txt'
def setLFNListFile( value ):
  global lfnListFile
  lfnListFile = value
  return S_OK()

outputFile = "output.txt"
def setOutputFile( value ):
  global outputFile
  outputFile = value
  return S_OK()

catalog = 'AugerTestFileCatalog'
def setCatalog( value ):
  global catalog
  catalog = value
  return S_OK()

fullTest = False
def setFullTest( value ):
  # Flag switch: the value argument is unused.
  global fullTest
  fullTest = True
  return S_OK()

shortRange = False
def setShortRange( value ):
  # Flag switch: the value argument is unused.
  global shortRange
  shortRange = True
  return S_OK()

verbosity = 0
def setVerbosity( value ):
  # Repeatable flag: each -v raises the verbosity level by one.
  global verbosity
  verbosity += 1
  return S_OK()
# Command line switches; each one routes into the matching setter above.
Script.registerSwitch( "t:", "type=", "test type", setTestType )
Script.registerSwitch( "D:", "directory=", "test directory", setTestDirectory )
Script.registerSwitch( "N:", "clients=", "number of parallel clients", setNumberOfClients )
Script.registerSwitch( "Q:", "queries=", "number of queries in one test", setNumberOfQueries )
Script.registerSwitch( "C:", "catalog=", "catalog to use", setCatalog )
Script.registerSwitch( "L:", "lfnList=", "file with a list of LFNs", setLFNListFile )
Script.registerSwitch( "F", "fullTest", "run the full test", setFullTest )
Script.registerSwitch( "O:", "output=", "file with output result", setOutputFile )
# Fixed: the -v help text was a copy-paste of the -O description.
Script.registerSwitch( "v", "verbose", "increase verbosity level", setVerbosity )
Script.registerSwitch( "S", "shortRange", "run short parameter range", setShortRange )

Script.parseCommandLine( ignoreErrors = True )
# Imports that need an initialized DIRAC environment, after parseCommandLine.
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC import S_OK
import time

# Catalog client shared by all the test functions below.
fc = FileCatalog( catalogs=[catalog] )

# Per-client results collected by the "finalize" callback.
resultTest = []
def listDirectory( n_queries ):
global testDir
start = time.time()
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.listDirectory( testDir )
resultList.append( time.time() - start )
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, 'Average', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def getBulkReplicas( n_queries ):
  """ Time n_queries bulk getReplicas calls for the LFNs in lfnListFile.

      Returns S_OK( ( per-call times list, successes, failures ) ).
  """
  global lfnListFile, verbosity
  # Read the LFN list, normalizing accidental double slashes.
  lFile = open(lfnListFile)
  lfnList = [ l.strip().replace('//','/') for l in lFile.read().strip().split() ]
  lFile.close()

  start = time.time()
  sCount = 0
  fCount = 0
  resultList = []
  startTotal = time.time()
  for i in xrange( n_queries ) :
    start = time.time()
    result = fc.getReplicas( lfnList )
    resultList.append( time.time() - start )
    if verbosity >= 2:
      print "getReplicas: received lfns", len(result['Value']['Successful'])
      for lfn in result['Value']['Successful']:
        print result['Value']['Successful'][lfn]
        if verbosity >= 3:
          # At the highest verbosity, dump the first entry in full only.
          for lfn,res in result['Value']['Successful'].items():
            print lfn
            print res
          break
    if result['OK']:
      sCount += 1
    else:
      fCount += 1
  total = time.time() - startTotal
  average, error = doStats( resultList )
  if verbosity >= 1:
    print "getReplicas: Total time", total, 'Success', sCount, 'Failure', \
          fCount, 'Average', average, 'Stdvar', error
  result = S_OK( (resultList, sCount, fCount) )
  return result
def getDirectoryReplicas( n_queries ):
  """ Time n_queries getDirectoryReplicas calls on the global test directory.

      Returns S_OK( ( per-call times list, successes, failures ) ).
  """
  global testDir, verbosity

  sCount = 0
  fCount = 0
  resultList = []
  startTotal = time.time()
  for i in xrange( n_queries ) :
    start = time.time()
    result = fc.getDirectoryReplicas( testDir )
    resultList.append( time.time() - start )

    if verbosity >= 2:
      print "Returned values", len(result['Value']['Successful'][testDir])
      # Only the first entry is dumped as a sample.
      for lfn,res in result['Value']['Successful'][testDir].items():
        print lfn
        print res
        break
    if result['OK']:
      sCount += 1
    else:
      fCount += 1
  total = time.time() - startTotal
  average, error = doStats( resultList )
  if verbosity >= 1:
    print "getDirectoryReplicas: Total time", total, 'Success', sCount, 'Failure', \
          fCount, '\nAverage', average, 'Stdvar', error
  result = S_OK( (resultList, sCount, fCount) )
  return result
def finalize(task,result):
global resultTest, verbosity
if verbosity >= 2:
if result['OK']:
print "Test time ", result['Value'], task.getTaskID()
else:
print "Error:", result['Message']
resultTest.append( result['Value'] )
def doException( expt ):
  """ ProcessPool exception callback: just report the exception. """
  print "Exception", expt
def runTest( ):
global nClients, nQueries, testType, resultTest, testDir, lfnListFile
resultTest = []
pp = ProcessPool( nClients )
testFunction = eval( testType )
for c in xrange( nClients ):
pp.createAndQueueTask( testFunction, [nQueries],
callback=finalize,
exceptionCallback=doException )
pp.processAllResults(3600)
pp.finalize(0)
timeResult = []
for testTime,success,failure in resultTest:
#print testTime,success,failure
timeResult += testTime
averageTime, errorTime = doStats( timeResult )
rateResult = [ nClients/t for t in timeResult ]
averageRate, errorRate = doStats( rateResult )
if testDir:
print "\nTest results for clients %d, %s" % ( nClients, testDir )
else:
print "\nTest results for clients %d, %s" % ( nClients, lfnListFile )
print "Query time: %.2f +/- %.2f" % (averageTime, errorTime)
print "Query rate: %.2f +/- %.2f" % (averageRate, errorRate)
return( (averageTime, errorTime), (averageRate, errorRate) )
def doStats( testArray ):
  """ Return (mean, stddev) of the sample, trimming one min and one max.

      Fixed: the extremes are only dropped when enough points remain,
      so samples of one or two values no longer produce a NaN from an
      empty array.
  """
  array = list( testArray )
  # Delete min and max value first, when the trim leaves data behind.
  if len( array ) > 2:
    del array[ array.index(max(array)) ]
    del array[ array.index(min(array)) ]
  numArray = numpy.array( array )
  average = numpy.mean( numArray )
  stddev = numpy.std( numArray )
  return (average, stddev)
# Parameter scan tables for the full test; the "_short" variants are
# selected by the -S/--shortRange switch.
numberOfFilesList = [ 10, 100, 500, 1000, 2000, 5000, 10000, 15000, 20000 ]
numberOfFilesList_short = [ 100, 1000, 5000, 10000, 20000 ]
numberOfClientsList = [1,2,3,5,7,10,12,15,20,30,50,75]
numberOfClientsList_short = [1,5,10,20]

# ( number of files, directory path ) pairs of known test directories.
directoriesList = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
                    (24024, "/auger/prod/QGSjetII_gr20/2008/09/04/en17.500/th0.65"),
                    #(15205, "/auger/generated/2012-09-03"),
                    (18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
                    (9907, "/auger/prod/QGSjetII_gr20/2008/09/03/en17.500/th0.65"),
                    (5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
                    (2538, "/auger/prod/QGSjetII_gr21/2009/01/12/en18.500/th0.65"),
                    (1500, "/auger/prod/epos_gr03_sim/en17.500/th26.000"),
                    (502, "/auger/prod/REPLICATED20081014/epos_gr08/en21.250/th26.000")
                  ]
directoriesList_short = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
                          (18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
                          (5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
                          (1000, "/auger/prod/PhotonLib_gr22/2009/02/27/en17.500/th26.000")
                        ]

# Scan from the smallest to the largest directory.
directoriesList.reverse()
directoriesList_short.reverse()
def executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r ):
  """ Run one test point and record its results in the four summary dicts. """
  global nClients
  nClients = nc

  queryStats, rateStats = runTest()

  fileLabel = "%d files" % nf
  clientLabel = "%d clients" % nc

  # Keyed first by file count, then by client count ...
  queryDict.setdefault( fileLabel, {} )[nc] = queryStats
  rateDict.setdefault( fileLabel, {} )[nc] = rateStats

  # ... and also transposed: by client count, then file count.
  queryDict_r.setdefault( clientLabel, {} )[nf] = queryStats
  rateDict_r.setdefault( clientLabel, {} )[nf] = rateStats
def runFullTest():
  """ Scan client counts against file/directory sizes and dump summary dicts.

      Results go both to outputFile (as python assignments) and stdout.
  """
  global outputFile, nClients, testDir, lfnListFile, shortRange

  queryDict = {}
  rateDict = {}
  queryDict_r = {}
  rateDict_r = {}

  # Short range cuts down the parameter scan for quick runs.
  ncList = numberOfClientsList
  if shortRange:
    ncList = numberOfClientsList_short

  nfList = numberOfFilesList
  if shortRange:
    nfList = numberOfFilesList_short

  ndList = directoriesList
  if shortRange:
    ndList = directoriesList_short

  for nc in ncList:
    if testType in ['getBulkReplicas']:
      # The LFN list files are expected to exist per file count.
      for nf in nfList:
        lfnListFile = "lfns_%d.txt" % nf
        executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )
    elif testType in ['getDirectoryReplicas', "listDirectory"]:
      for nf, directory in ndList:
        testDir = directory
        executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )

  # Writing out result
  outFile = open( outputFile, "w" )
  outFile.write( "Test type %s \n" % testType )
  outFile.write( "Number of queries per unit test %d \n" % nQueries )
  outFile.write( "Results: \n\n\n" )
  outFile.write( 'data_f = ' + str( queryDict ) + '\n\n\n' )
  outFile.write( 'data_f_r = ' + str( rateDict ) + '\n\n\n' )
  outFile.write( 'data_c = ' + str( queryDict_r ) + '\n\n\n' )
  outFile.write( 'data_c_r = ' + str( rateDict_r ) + '\n\n\n' )
  outFile.close()

  pprint.pprint( queryDict )
  pprint.pprint( rateDict )
  pprint.pprint( queryDict_r )
  pprint.pprint( rateDict_r )
#########################################################################
if os.path.exists( outputFile ):
print "Output file %s already exists, exiting ..."
sys.exit(-1)
if fullTest:
runFullTest()
else:
runTest()
|
Andrew-McNab-UK/DIRAC
|
tests/Integration/DataManagementSystem/FC_scaling_test.py
|
Python
|
gpl-3.0
| 10,751
|
[
"DIRAC"
] |
54ef00daf22d0e615f9a37b5817e1cbd612e65ce08ca6af1eab9bbbb38836814
|
# First pass at loading and running a cell model
import os
os.environ['NUMPTHREADS'] = '1'
import moose
import proto18
def dumpPlots( fname ):
    """ Write the recorded soma Vm and Ca tables to fname in xplot format.

        An existing file of that name is removed first so each call
        starts a fresh plot file. Relies on the module level tables
        "tab" and "catab" defined below.
    """
    if ( os.path.exists( fname ) ):
        os.remove( fname )
    tab.xplot( fname, 'soma.Vm' )
    catab.xplot( fname, 'soma.Ca' )
# Build the channel prototype library under /library.
library = moose.Neutral( '/library' )
moose.setCwe( '/library' )
proto18.make_Ca()
proto18.make_Ca_conc()
proto18.make_K_AHP()
proto18.make_K_C()
proto18.make_Na()
proto18.make_K_DR()
proto18.make_K_A()
proto18.make_glu()
proto18.make_NMDA()
proto18.make_Ca_NMDA()
proto18.make_NMDA_Ca_conc()
proto18.make_axon()

# Load the cell morphology; it references the prototypes created above.
cellId = moose.loadModel( 'ca1_asym.p', '/cell', "hsolve" )
moose.le( cellId )
moose.le( '/cell/lat_14_1' )
#le( '/cell' )

# Recording tables for somatic Vm and Ca concentration, with a constant
# current injection into the soma.
graphs = moose.Neutral( '/graphs' )
tab = moose.Table( '/graphs/soma' )
catab = moose.Table( '/graphs/ca' )
soma = moose.element( '/cell/soma' )
soma.inject = 2e-10
moose.connect( tab, 'requestOut', soma, 'getVm' )
capool = moose.element( '/cell/soma/Ca_conc' )
moose.connect( catab, 'requestOut', capool, 'getCa' )
print 1

# Clock setup: 50 us steps for the cell, 200 us for the plot tables.
dt = 50e-6
moose.setClock( 0, dt )
moose.setClock( 1, dt )
moose.setClock( 2, dt )
moose.setClock( 3, 2e-4 )
moose.useClock( 0, '/cell/##[ISA=Compartment]', 'init' )
moose.useClock( 1, '/cell/##[ISA=Compartment]', 'process' )
moose.useClock( 2, '/cell/##[ISA!=Compartment]', 'process' )
moose.useClock( 3, '/graphs/soma,/graphs/ca', 'process' )
print 2
moose.reinit()
print 3

# First run: 100 ms of simulated time without the solver.
moose.start( 0.1 )
dumpPlots( '50usec.plot' )
print 4

# Second run: the same model under the HSolve solver, for comparison.
moose.reinit()
hsolve = moose.HSolve( '/cell/hsolve' )
moose.useClock( 1, '/cell/hsolve', 'process' )
hsolve.dt = dt
hsolve.target = '/cell/soma'
# NOTE(review): reinit is called twice in a row here; the second call
# looks redundant -- confirm against MOOSE HSolve setup requirements.
moose.reinit()
moose.reinit()
print 5
moose.start( 0.1 )
print 6
dumpPlots( 'h50usec.plot' )
print 7
|
dilawar/moose-full
|
moose-examples/snippets/MULTI/runcell18.py
|
Python
|
gpl-2.0
| 1,706
|
[
"MOOSE"
] |
ffde5db0038db9eb5f9c1ba23e36fe0d8cb6d400f98984693d8cea1fcd6e5c6e
|
# coding: utf8
{
' Quotas: %(quotas)s x%(quota_amount).2f': ' Quotas: %(quotas)s x%(quota_amount).2f',
' Transaction number: %s': ' Transaction number: %s',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je voliteľný výraz ako "field1=\'newvalue\'". Nemôžete upravovať alebo zmazať výsledky JOINu',
'%Y-%m-%d': '%d.%m.%Y',
'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S',
'%s rows deleted': '%s zmazaných záznamov',
'%s rows updated': '%s upravených záznamov',
'/absolute/folder/path': '/absolute/folder/path',
'Account': 'Account',
'Add article': 'Add article',
'Add check': 'Add check',
'Add item': 'Add item',
'Add payment method': 'Add payment method',
'Add tax': 'Add tax',
'Administrative interface': 'pre administrátorské rozhranie kliknite sem',
'Administrative panel': 'Administrative panel',
'All tables modified': 'All tables modified',
'Allocate': 'Allocate',
'Allocate orders': 'Allocate orders',
'Allocated': 'Allocated',
'Amount': 'Amount',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available databases and tables': 'Dostupné databázy a tabuľky',
'Bank': 'Bank',
'Bill': 'Bill',
'Bill checked': 'Bill checked',
'Blank for price list values': 'Blank for price list values',
'CSV parameters file: /absolute/path/file_name.csv': 'CSV parameters file: /absolute/path/file_name.csv',
'CSV table files path: /absolute/path/tables_folder': 'CSV table files path: /absolute/path/tables_folder',
'Calculate movements difference....': 'Calculate movements difference....',
'Calculated difference: %s': 'Calculated difference: %s',
'Cannot be empty': 'Nemôže byť prázdne',
'Cash/transfer': 'Cash/transfer',
'Change': 'Change',
'Change update taxes value to %s': 'Change update taxes value to %s',
'Check to delete': 'Označiť na zmazanie',
'Checks': 'Checks',
'Choose a concept': 'Choose a concept',
'Choose a document type': 'Choose a document type',
'Choose a price list': 'Choose a price list',
'Code': 'Code',
'Collect': 'Collect',
'Color': 'Color',
'Concept': 'Concept',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Could not change': 'Could not change',
'Could not process the operation': 'Could not process the operation',
'Could not process the operation: it is not editable': 'Could not process the operation: it is not editable',
'Could not process the receipt': 'Could not process the receipt',
'Create fee': 'Create fee',
'Create/Edit orders': 'Create/Edit orders',
'Credit': 'Credit',
'Current account': 'Current account',
'Current account calculated amount': 'Current account calculated amount',
'Current account list/payments': 'Current account list/payments',
'Current account payment data': 'Current account payment data',
'Current account payment options': 'Current account payment options',
'Current account quotas': 'Current account quotas',
'Current account report': 'Current account report',
'Current account value: %s': 'Current account value: %s',
'Current accounts payments': 'Current accounts payments',
'Current accounts type: %(at)s': 'Current accounts type: %(at)s',
'Current request': 'Aktuálna požiadavka',
'Current response': 'Aktuálna odpoveď',
'Current session': 'Aktuálne sedenie',
'Customer': 'Customer',
'Customer Panel': 'Customer Panel',
'Customer control panel': 'Customer control panel',
'Customer control panel (requires registration and login)': 'Customer control panel (requires registration and login)',
'Customer current account': 'Customer current account',
'Customer panel': 'Customer panel',
'Customer/Supplier data': 'Customer/Supplier data',
'DB Model': 'DB Model',
'Database': 'Databáza',
'Date': 'Date',
'Dates: ': 'Dates: ',
'Debit': 'Debit',
'Debt limit: %s': 'Debt limit: %s',
'Delete value is %s': 'Delete value is %s',
'Delete:': 'Zmazať:',
'Description': 'Popis',
'Difference': 'Difference',
'Difference: %s': 'Difference: %s',
'Discounts/Surcharges': 'Discounts/Surcharges',
'Document': 'Document',
'Documentation': 'Dokumentácia',
'Done': 'Done',
'Due date': 'Due date',
'Edit': 'Upraviť',
'Edit Profile': 'Upraviť profil',
'Edit current record': 'Upraviť aktuálny záznam',
'Edit in movements': 'Edit in movements',
'Edit order number': 'Edit order number',
'Ending': 'Ending',
'Entries': 'Entries',
'Entries: %s': 'Entries: %s',
'Entry': 'Entry',
'Erasing record %s': 'Erasing record %s',
'Error: could not calculate the total debt.': 'Error: could not calculate the total debt.',
'Errors': 'Errors',
'Esta es la plantilla accounting/offset_account.html': 'Esta es la plantilla accounting/offset_account.html',
'Exits: %s': 'Exits: %s',
'Family': 'Family',
'Fee': 'Fee',
'Fees': 'Fees',
'Fees list': 'Fees list',
'Firm': 'Firm',
'First name': 'Krstné meno',
'For purchases: %(pt)s payment is recorded as concept id %s(c)': 'For purchases: %(pt)s payment is recorded as concept id %s(c)',
'Form accepted': 'Form accepted',
'Form data: %(fd)s': 'Form data: %(fd)s',
'Form data: %s': 'Form data: %s',
'GestionLibre': 'GestionLibre',
'GestionLibre %(version)s': 'GestionLibre %(version)s',
'GestionLibre %s': 'GestionLibre %s',
'Group ID': 'ID skupiny',
'Hello World': 'Ahoj svet',
'ID': 'ID',
'Import legacy tables': 'Import legacy tables',
'Import/Export': 'Import/Export',
'Increase/Decrease stock values': 'Increase/Decrease stock values',
'Increase/decrease stock values': 'Increase/decrease stock values',
'Index': 'Index',
'Initialize': 'Initialize',
'Insert movements element': 'Insert movements element',
'Insert order element': 'Insert order element',
'Installment': 'Installment',
'Installment created': 'Installment created',
'Installments': 'Installments',
'Insufficient source stock quantity': 'Insufficient source stock quantity',
'Insufficient stock value.': 'Insufficient stock value.',
'Internal State': 'Vnútorný stav',
'Invalid Query': 'Neplatná otázka',
'Invalid email': 'Neplatný email',
'Invalid password': 'Nesprávne heslo',
'Item added': 'Item added',
'Item value input: %s': 'Item value input: %s',
'Journal Entries': 'Journal Entries',
'Journal Entry': 'Journal Entry',
'Journal entries': 'Journal entries',
'Journal entry total amount': 'Journal entry total amount',
'Last name': 'Priezvisko',
'Layout': 'Layout',
'List of operations': 'List of operations',
'List order allocation operations': 'List order allocation operations',
'List order allocations': 'List order allocations',
'Logged in': 'Prihlásený',
'Logged out': 'Odhlásený',
'Lost Password': 'Stratené heslo?',
'Menu Model': 'Menu Model',
'Modify movements element': 'Modify movements element',
'Modify operation item': 'Modify operation item',
'Modify sales order element': 'Modify sales order element',
'Move stock items': 'Move stock items',
'Movement (offset): %(mo)s: %(a)s': 'Movement (offset): %(mo)s: %(a)s',
'Movements (Operations)': 'Movements (Operations)',
'Movements detail': 'Movements detail',
'Movements list': 'Movements list',
'Movements panel': 'Movements panel',
'Movements process. Operation: %s': 'Movements process. Operation: %s',
'Moving to new record': 'Moving to new record',
'Name': 'Meno',
'New Record': 'Nový záznam',
'New customer': 'New customer',
'New fee': 'New fee',
'New installment': 'New installment',
'New operation': 'New operation',
'New operation (movements form)': 'New operation (movements form)',
'New operation check': 'New operation check',
'New operation item': 'New operation item',
'New operation tax': 'New operation tax',
'New option': 'New option',
'New option created.': 'New option created.',
'New order allocation': 'New order allocation',
'New packing slip from this allocation': 'New packing slip from this allocation',
'New password': 'Nové heslo',
'New subcustomer': 'New subcustomer',
'No databases in this application': 'V tejto aplikácii nie sú databázy',
'No tax id selected': 'No tax id selected',
'None selected': 'None selected',
'Number': 'Number',
'Old password': 'Staré heslo',
'Online examples': 'pre online príklady kliknite sem',
'Operation': 'Operation',
'Operation %s is not editable': 'Operation %s is not editable',
'Operation details: %s': 'Operation details: %s',
'Operation discounts and surcharges': 'Operation discounts and surcharges',
'Operation header': 'Operation header',
'Operation id(s): %s': 'Operation id(s): %s',
'Operation number %s': 'Operation number %s',
'Operation processed': 'Operation processed',
'Operation processing failed: debt limit reached': 'Operation processing failed: debt limit reached',
'Operation processing result': 'Operation processing result',
'Operation successfully processed': 'Operation successfully processed',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s',
'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s': 'Operation: %(o)s. Amount: %(a)s. Value: %(v)s. Concept: %(c)s, Quantity: %(q)s, Movement: %(m)s',
'Operations list': 'Operations list',
'Option modified.': 'Option modified.',
'Options': 'Options',
'Order allocation': 'Order allocation',
'Order allocation %s': 'Order allocation %s',
'Order allocation list': 'Order allocation list',
'Order list': 'Order list',
'Order number': 'Order number',
'Ordered': 'Ordered',
'Origin': 'Pôvod',
'Packing slip': 'Packing slip',
'Password': 'Heslo',
'Pay': 'Pay',
'Period': 'Period',
'Please choose different warehouses': 'Please choose different warehouses',
"Please insert your firm's tax id": "Please insert your firm's tax id",
'Populate tables': 'Populate tables',
'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s': 'Populate_with_legacy_db Insert Error: Table %(table)s, row %(n)s: %(e)s',
'Post registration form': 'Post registration form',
'Post-registration form': 'Post-registration form',
'Posted': 'Posted',
'Powered by': 'Powered by',
'Process operation': 'Process operation',
'Product': 'Product',
'Product billing': 'Product billing',
'Product code': 'Product code',
'Purchases': 'Purchases',
'Quantity': 'Quantity',
'Query:': 'Otázka:',
'Quota': 'Quota',
'Quotas': 'Quotas',
'RIA Create/Edit operations': 'RIA Create/Edit operations',
'RIA Product billing': 'RIA Product billing',
'RIA Receipt': 'RIA Receipt',
'RIA Stock': 'RIA Stock',
'RIA Stock main menu': 'RIA Stock main menu',
'Receipt number': 'Receipt number',
'Receipt processed': 'Receipt processed',
'Receipts list': 'Receipts list',
'Record ID': 'ID záznamu',
'Record updated': 'Record updated',
'Register': 'Zaregistrovať sa',
'Registration key': 'Registračný kľúč',
'Registration successful': 'Registration successful',
'Remember me (for 30 days)': 'Zapamätaj si ma (na 30 dní)',
'Reset Password key': 'Nastaviť registračný kľúč',
'Reset operation': 'Reset operation',
'Reset order': 'Reset order',
'Reset receipt': 'Reset receipt',
'Role': 'Rola',
'Rows in table': 'riadkov v tabuľke',
'Rows selected': 'označených riadkov',
'SCM': 'SCM',
'Sales': 'Sales',
'Sales contact': 'Sales contact',
'Select': 'Select',
'Select an operation type': 'Select an operation type',
'Select price list': 'Select price list',
'Select warehouse': 'Select warehouse',
'Selection action: %s': 'Selection action: %s',
'Session data: %s': 'Session data: %s',
'Set options': 'Set options',
'Setting offset concept to %s': 'Setting offset concept to %s',
'Setup': 'Setup',
'Starting': 'Starting',
'Stock': 'Stock',
'Stock item update': 'Stock item update',
'Stock list': 'Stock list',
'Stock query': 'Stock query',
'Stock updated': 'Stock updated',
'Stock value changed': 'Stock value changed',
'Stylesheet': 'Stylesheet',
'Subcustomer': 'Subcustomer',
'Subcustomer current account': 'Subcustomer current account',
'Submit': 'Odoslať',
'Supplier': 'Supplier',
'Sure you want to delete this object?': 'Ste si istí, že chcete zmazať tento objekt?',
'TAX ID': 'TAX ID',
'Table name': 'Názov tabuľky',
'Tables': 'Tables',
'Tax id': 'Tax id',
'Taxes are': 'Taxes are',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" je podmienka ako "db.table1.field1==\'value\'". Niečo ako "db.table1.field1==db.table2.field2" má za výsledok SQL JOIN.',
'The CSV data was stored at your web2py root folder': 'The CSV data was stored at your web2py root folder',
'The db load failed with these errors: ': 'The db load failed with these errors: ',
'The db records were uploaded correctly': 'The db records were uploaded correctly',
'The following operations were created': 'The following operations were created',
'The form has errors': 'The form has errors',
'The item specified was not found in the warehouse': 'The item specified was not found in the warehouse',
'The item will be removed without confirmation': 'The item will be removed without confirmation',
'The operation has current account movements: %s': 'The operation has current account movements: %s',
'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s': 'The operation processing failed. Booking ok: %(rs)s. Stock ok: %(st)s',
'The output of the file is a dictionary that was rendered by the view': 'Výstup zo súboru je slovník, ktorý bol zobrazený vo view',
'This is a copy of the scaffolding application': 'Toto je kópia skeletu aplikácie',
'This is the webapp index view of': 'This is the webapp index view of',
'Timestamp': 'Časová pečiatka',
'Total': 'Total',
'Total amount': 'Total amount',
'Total debt': 'Total debt',
'Trying with': 'Trying with',
'Update fee': 'Update fee',
'Update installment': 'Update installment',
'Update order allocation': 'Update order allocation',
'Update quota': 'Update quota',
'Update:': 'Upraviť:',
'Updating stock id: %(st)s as %(vl)s': 'Updating stock id: %(st)s as %(vl)s',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použite (...)&(...) pre AND, (...)|(...) pre OR a ~(...) pre NOT na poskladanie komplexnejších otázok.',
'User %(id)s Logged-in': 'Používateľ %(id)s prihlásený',
'User %(id)s Logged-out': 'Používateľ %(id)s odhlásený',
'User %(id)s Password changed': 'Používateľ %(id)s zmenil heslo',
'User %(id)s Profile updated': 'Používateľ %(id)s upravil profil',
'User %(id)s Registered': 'Používateľ %(id)s sa zaregistroval',
'User ID': 'ID používateľa',
"Valid firm tax id's": "Valid firm tax id's",
'Value': 'Value',
'Values: %s': 'Values: %s',
'Verify Password': 'Zopakujte heslo',
'View': 'Zobraziť',
'WARNING: JOURNAL ENTRY IS UNBALANCED': 'WARNING: JOURNAL ENTRY IS UNBALANCED',
'Warehouse': 'Warehouse',
'Warning! Wrong document type.': 'Warning! Wrong document type.',
'Web interface': 'Web interface',
'Welcome to web2py': 'Vitajte vo web2py',
'Which called the function': 'Ktorý zavolal funkciu',
'You are successfully running web2py': 'Úspešne ste spustili web2py',
'You can modify this application and adapt it to your needs': 'Môžete upraviť túto aplikáciu a prispôsobiť ju svojim potrebám',
"You have not specified you firm's TAX ID. Please visit the": "You have not specified you firm's TAX ID. Please visit the",
'You visited the url': 'Navštívili ste URL',
'and try again': 'and try again',
'appadmin is disabled because insecure channel': 'appadmin je zakázaný bez zabezpečeného spojenia',
'cache': 'cache',
'customize me!': 'prispôsob ma!',
'data uploaded': 'údaje naplnené',
'database': 'databáza',
'database %s select': 'databáza %s výber',
'db': 'db',
'design': 'návrh',
'does not update stock': 'does not update stock',
'done!': 'hotovo!',
'export as csv file': 'exportovať do csv súboru',
'filename.ext': 'filename.ext',
'from table': 'from table',
'i.e. third party payment transaction number': 'i.e. third party payment transaction number',
'insert new': 'vložiť nový záznam ',
'insert new %s': 'vložiť nový záznam %s',
'invalid request': 'Neplatná požiadavka',
'located in the file': 'nachádzajúci sa v súbore ',
'login': 'prihlásiť',
'logout': 'odhlásiť',
'lost password?': 'stratené heslo?',
'new record inserted': 'nový záznam bol vložený',
'next 100 rows': 'ďalších 100 riadkov',
'not updated': 'not updated',
'or import from csv file': 'alebo naimportovať z csv súboru',
'password': 'heslo',
'previous 100 rows': 'predchádzajúcich 100 riadkov',
'record': 'záznam',
'record does not exist': 'záznam neexistuje',
'record id': 'id záznamu',
'register': 'registrovať',
'selected': 'označených',
'session.difference :%s': 'session.difference :%s',
'state': 'stav',
'table': 'tabuľka',
'unable to parse csv file': 'nedá sa načítať csv súbor',
'updated': 'updated',
'updates stock': 'updates stock',
'with old record': 'with old record',
}
|
reingart/gestionlibre
|
languages/sk-sk.py
|
Python
|
agpl-3.0
| 16,860
|
[
"VisIt"
] |
d793213d63e43ae7c0f95d85412ca2f6fc65f01604d678fa180e61b76eff95f5
|
from __future__ import division

from collections import defaultdict
from math import log, sqrt

import numpy as np
from scipy.spatial.distance import cdist, cosine, pdist
from sklearn.utils import check_random_state
def inertia(X, labels, metric='sqeuclidean', p=2):
    """Sum of within-cluster mean distances to each cluster's centroid.

    For every cluster, computes sum(dist(x, center)) / (2 * cluster_size)
    and returns the total over clusters.

    Parameters
    ----------
    X : numpy array of shape (nb_data, nb_feature)
    labels : list of int of length nb_data
        labels[i] is the cluster assigned to X[i, :].
    metric : str
        A scipy.spatial distance name; the sklearn-style aliases 'l2',
        'l1' and 'manhattan' are also accepted.
    p : double
        The p-norm to apply (Minkowski-family metrics only).

    Returns
    -------
    inertia : float
    """
    if metric == 'l2':
        # Translate to something understood by scipy
        metric = 'euclidean'
    elif metric in ('l1', 'manhattan'):
        metric = 'cityblock'
    clusters = defaultdict(list)
    for i, label in enumerate(labels):
        clusters[label].append(i)
    # Only Minkowski-family metrics accept a p-norm; modern scipy raises
    # TypeError when `p` is passed to any other metric, so gate it here.
    extra = {'p': p} if metric in ('minkowski', 'wminkowski') else {}
    total = 0.0
    nb_feature = X.shape[1]
    for points in clusters.values():
        clu_points = X[points, :]
        clu_center = np.mean(clu_points, axis=0).reshape(1, nb_feature)
        total += (np.sum(cdist(clu_points, clu_center, metric=metric,
                               **extra)) /
                  (2 * len(clu_points)))
    return total
def normal_inertia(X, cluster_estimator, nb_draw=100,
                   metric='sqeuclidean', p=2, random_state=None,
                   mu=None, sigma=None):
    """Reference inertias of Gaussian data shaped like X.

    Draws ``nb_draw`` multivariate-normal samples with the same size,
    mean and covariance as X, clusters each with ``cluster_estimator``
    and records the resulting inertia.

    Parameters
    ----------
    X : numpy array of shape (nb_data, nb_feature)
    cluster_estimator : clustering object exposing ``fit_predict``
        (must already be configured with ``n_clusters``).
    nb_draw : int, number of random datasets to draw.
    metric : str, scipy.spatial distance name (see ``inertia``).
    p : double, p-norm for Minkowski-family metrics.
    random_state : seed / RandomState for reproducible draws.
    mu : optional mean of the drawn data.
    sigma : optional covariance matrix of the drawn data.

    Returns
    -------
    list of float : inertias obtained on each random dataset.
    """
    rng = check_random_state(random_state)
    nb_data, nb_feature = X.shape
    if mu is None:
        # The mean has no influence on inertia, so any value works.
        mu = np.zeros(nb_feature)
    if sigma is None:
        sigma = np.cov(X.transpose())
    inertias = []
    for _ in range(nb_draw):
        sample = rng.multivariate_normal(mu, sigma, size=nb_data)
        labels = cluster_estimator.fit_predict(sample)
        inertias.append(inertia(sample, labels, metric, p))
    return inertias
def uniform_inertia(X, cluster_estimator, nb_draw=100, val_min=None,
                    val_max=None, metric='sqeuclidean', p=2,
                    random_state=None):
    """Reference inertias of uniform data shaped like X.

    Draws ``nb_draw`` datasets uniformly inside the smallest axis-aligned
    hyperrectangle containing X, clusters each with ``cluster_estimator``
    and records the resulting inertia.

    Parameters
    ----------
    X : numpy array of shape (nb_data, nb_feature)
    cluster_estimator : clustering object exposing ``fit_predict``
        (must already be configured with ``n_clusters``).
    nb_draw : int, number of random datasets to draw.
    val_min : optional per-feature lower bounds (length nb_feature).
    val_max : optional per-feature upper bounds (length nb_feature).
    metric : str, scipy.spatial distance name (see ``inertia``).
    p : double, p-norm for Minkowski-family metrics.
    random_state : seed / RandomState for reproducible draws.

    Returns
    -------
    list of float : inertias obtained on each random dataset.
    """
    rng = check_random_state(random_state)
    if val_min is None:
        val_min = np.min(X, axis=0)
    if val_max is None:
        val_max = np.max(X, axis=0)
    span = val_max - val_min
    inertias = []
    for _ in range(nb_draw):
        sample = rng.uniform(size=X.shape) * span + val_min
        labels = cluster_estimator.fit_predict(sample)
        inertias.append(inertia(sample, labels, metric, p))
    return inertias
def gap_statistic(X, cluster_estimator, k_max=None, nb_draw=100,
                  random_state=None, draw_model='uniform',
                  metric='sqeuclidean', p=2):
    """Estimate the optimal number of clusters via the gap statistic.

    Compares the inertia of clustered real data with the inertia of
    clustered random data. With W_rand(k) the inertia of random data in
    k clusters and W_real(k) that of real data:

        Gap(k) = E(log(W_rand(k))) - log(W_real(k))

    The selected k is the smallest one such that
    Gap(k) >= Gap(k + 1) - s(k + 1), with
    s(k) = stdev(log(W_rand)) * sqrt(1 + 1 / nb_draw).

    Ref: R. Tibshirani, G. Walther, T. Hastie, "Estimating the number of
    clusters in a dataset via the Gap statistic", Journal of the Royal
    Statistical Society: Series B, 63(2), 411-423.

    Parameters
    ----------
    X : array of shape (nb_data, nb_feature)
    cluster_estimator : clustering object with an ``n_clusters`` parameter
        and a ``fit_predict`` method.
    k_max : int, largest number of clusters tried (default nb_data // 2).
    nb_draw : int, number of random datasets used to estimate
        E(log(W_rand(k))).
    random_state : seed / RandomState used for all random draws.
    draw_model : 'uniform' (Tibshirani et al.) or 'normal'.
    metric : str, scipy.spatial distance name (see ``inertia``).
    p : double, p-norm for Minkowski-family metrics.

    Returns
    -------
    k : int, number of clusters maximizing the gap statistic.
    """
    rng = check_random_state(random_state)
    # if no maximum number of clusters set, take datasize divided by 2
    if not k_max:
        k_max = X.shape[0] // 2
    if draw_model == 'uniform':
        val_min = np.min(X, axis=0)
        val_max = np.max(X, axis=0)
    elif draw_model == 'normal':
        mu = np.mean(X, axis=0)
        sigma = np.cov(X.transpose())
    else:
        raise ValueError(
            "For gap statistic, model for random data is unknown")
    old_gap = - float("inf")
    for k in range(2, k_max + 2):
        cluster_estimator.set_params(n_clusters=k)
        real_dist = inertia(X, cluster_estimator.fit_predict(X),
                            metric, p)
        # Expected inertia of reference (random) data. Pass `rng` down so
        # the whole procedure is reproducible for a given random_state
        # (previously rng was created but never used).
        if draw_model == 'uniform':
            rand_dist = uniform_inertia(X, cluster_estimator, nb_draw,
                                        val_min, val_max, metric, p,
                                        random_state=rng)
        else:
            rand_dist = normal_inertia(X, cluster_estimator, nb_draw,
                                       metric=metric, p=p,
                                       random_state=rng,
                                       mu=mu, sigma=sigma)
        rand_dist = np.log(rand_dist)
        exp_dist = np.mean(rand_dist)
        std_dist = np.std(rand_dist)
        gap = exp_dist - log(real_dist)
        safety = std_dist * sqrt(1 + 1 / nb_draw)
        if k > 2 and old_gap >= gap - safety:
            return k - 1
        old_gap = gap
    # if k was found, the function would have returned
    # no clusters were found -> only 1 cluster
    return 1
def adjacency_matrix(cluster_assignement):
    """Pairwise co-membership matrix of a clustering.

    Parameters
    ----------
    cluster_assignement : vector (n_samples) of int i, 0 <= i < k

    Returns
    -------
    adj_matrix : float matrix (n_samples, n_samples) of 0./1. with
        adj_matrix[i, j] = cluster_assignement[i] == cluster_assignement[j]
    """
    assignment = np.asarray(cluster_assignement)
    # Broadcasting replaces the original O(n^2) Python double loop with a
    # single vectorized comparison; cast keeps the 0./1. float output.
    return (assignment[:, np.newaxis] == assignment[np.newaxis, :]
            ).astype(float)
def fowlkes_mallows_index(clustering_1, clustering_2):
    """Measure the similarity of two clusterings of a set of points.

    Let:
    - TP be the number of pairs of points (x_i, x_j) that belong
      to the same cluster in both clustering_1 and clustering_2
    - FP be the number of pairs in the same cluster in clustering_1
      but not in clustering_2
    - FN be the number of pairs in the same cluster in clustering_2
      but not in clustering_1

    The Fowlkes-Mallows index is TP / sqrt((TP + FP) * (TP + FN)),
    which equals the cosine similarity of the flattened co-membership
    (adjacency) matrices computed here.

    Parameters
    ----------
    clustering_1 : list of int; clustering_1[i] = c assigns point i to c.
    clustering_2 : list of int; clustering_2[i] = c assigns point i to c.

    Returns
    -------
    float between 0 and 1; 1 means both clusterings match perfectly,
    0 that they are completely different.
    """
    # Bug fix: `cosine` was used without ever being imported (only cdist
    # and pdist were), so this function always raised NameError. It is now
    # imported at module level from scipy.spatial.distance.
    adj_mat_1 = adjacency_matrix(clustering_1)
    adj_mat_2 = adjacency_matrix(clustering_2)
    return 1 - cosine(adj_mat_1.flatten(), adj_mat_2.flatten())
def stability(X, cluster_estimator, k_max=None, nb_draw=100, prop_subset=.8,
              random_state=None, p=None, distance='fowlkes-mallows',
              verbose=False):
    """Stability-based selection of the number of clusters.

    For k from 2 to k_max, measures how stable ``cluster_estimator`` is
    when producing k clusters: two overlapping random subsets A and B of
    X are clustered, and the similarity of the two clusterings on their
    shared points is averaged over ``nb_draw`` draws. The k with the
    highest mean similarity is returned.

    Ref: Ben-Hur, Elisseeff, Guyon, "A stability based method for
    discovering structure in clustered data", 2002. Overview:
    von Luxburg, "Clustering stability: an overview".

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The observations to cluster.
    cluster_estimator : clustering object with an ``n_clusters`` parameter
        and a ``fit_predict`` method.
    k_max : int, maximum number of clusters (default n_samples // 2).
    nb_draw : int, number of subset pairs drawn per k.
    prop_subset : float in (0, 1), proportion of X taken in each subset.
    random_state : seed / RandomState used for the subset draws.
    p : double, p-norm (for Minkowski-family distances).
    distance : str naming a distance or cluster similarity; either
        'fowlkes-mallows' or a scipy.spatial distance name.
    verbose : bool, print the stability score for each k.

    Returns
    -------
    k : int
    """
    rng = check_random_state(random_state)
    cluster_similarity = function_cluster_similarity(distance, p)
    n_samples, n_features = X.shape
    if not k_max:
        k_max = n_samples // 2
    # Start from -inf: distance-based similarities are negative, so a 0
    # starting value could never be beaten and an invalid k=0 was returned.
    best_stab, best_k = -float('inf'), 0
    for k in range(2, k_max + 1):
        cluster_estimator.set_params(n_clusters=k)
        # Pass `rng` down so the draws honour random_state (previously rng
        # was created but never used, making results irreproducible).
        this_score = sum(
            _one_stability_measure(cluster_estimator, X, prop_subset,
                                   cluster_similarity, random_state=rng)
            for _ in range(nb_draw)) / nb_draw
        if verbose:
            print('for %d cluster, stability is %f' % (k, this_score))
        if this_score >= best_stab:
            best_stab = this_score
            best_k = k
    return best_k
def _one_stability_measure(cluster_estimator, X, prop_sample,
                           cluster_similarity, random_state=None):
    """Similarity of two clusterings computed on overlapping subsets of X.

    Two random subsets A and B are drawn from X (each point kept with
    probability ``prop_sample``), clustered independently, and the
    similarity of the two clusterings restricted to the shared points
    is returned.

    Parameters
    ----------
    cluster_estimator : clustering object exposing ``fit_predict``
        (must already be configured with ``n_clusters``).
    X : array of shape (n_samples, n_features)
    prop_sample : float in (0, 1), proportion of X taken in each subset.
    cluster_similarity : function (list, list) -> float
    random_state : seed / RandomState for the subset draws.
    """
    rng = check_random_state(random_state)
    n_sample = X.shape[0]
    mask_a = rng.uniform(size=n_sample) < prop_sample
    mask_b = rng.uniform(size=n_sample) < prop_sample
    count_a = count_b = 0
    sample_a, sample_b = [], []
    shared_a, shared_b = [], []
    for idx, (in_a, in_b) in enumerate(zip(mask_a, mask_b)):
        if in_a and in_b:
            # Remember each shared point's position inside both subsets.
            shared_a.append(count_a)
            shared_b.append(count_b)
        if in_a:
            sample_a.append(idx)
            count_a += 1
        if in_b:
            sample_b.append(idx)
            count_b += 1
    labels_a = cluster_estimator.fit_predict(X[np.ix_(sample_a)])
    labels_b = cluster_estimator.fit_predict(X[np.ix_(sample_b)])
    return cluster_similarity([labels_a[c] for c in shared_a],
                              [labels_b[c] for c in shared_b])
def function_cluster_similarity(metric='fowlkes-mallows', p=None):
    """Return a function estimating the similarity of two clusterings.

    Parameters
    ----------
    metric : str naming a distance or a cluster similarity; either
        'fowlkes-mallows' or a scipy.spatial distance name (the
        sklearn-style aliases 'l2', 'l1', 'manhattan' are accepted).
    p : double, p-norm (for Minkowski-family distances).

    Returns
    -------
    function (clustering_1, clustering_2) -> float, where
    clustering_k[i] = c means point x_i belongs to cluster c in
    clustering k. Distance-based results are negated so that larger
    always means more similar.
    """
    if metric == 'fowlkes-mallows':
        return fowlkes_mallows_index
    # Map sklearn-style aliases onto the names scipy understands.
    scipy_metric = {'l2': 'euclidean',
                    'l1': 'cityblock',
                    'manhattan': 'cityblock'}.get(metric, metric)

    def cluster_dist(clustering_1, clustering_2):
        flat_1 = adjacency_matrix(clustering_1).flatten()
        flat_2 = adjacency_matrix(clustering_2).flatten()
        return -pdist([flat_1, flat_2], metric=scipy_metric, p=p)

    return cluster_dist
def calinski_harabaz_index(X, labels):
    """
    Compute the Calinski and Harabasz (1974) score: a ratio between the
    between-cluster dispersion and the within-cluster dispersion

    CH(k) = trace(B_k) / (k -1) * (n - k) / trace(W_k)

    With B_k the between group dispersion matrix, W_k the within-cluster
    dispersion matrix

    B_k = \sum_q n_q (c_q - c) (c_q -c)^T
    W_k = \sum_q \sum_{x \in C_q} (x - c_q) (x - c_q)^T

    Ref: R.B.Calinsky, J.Harabasz: A dendrite method for cluster analysis 1974

    Parameter
    ---------
    X: numpy array of size (nb_data, nb_feature)
    labels: list of int of length nb_data: labels[i] is the cluster
        assigned to X[i, :]

    Return
    ------
    res: float: Calinski-Harabasz score of this clustering
    """
    # Group the row indices of X by their cluster label
    clusters = defaultdict(list)
    for idx, lab in enumerate(labels):
        clusters[lab].append(idx)

    nb_data, nb_feature = X.shape
    grand_center = np.mean(X, axis=0)
    within = np.zeros((nb_feature, nb_feature))
    between = np.zeros((nb_feature, nb_feature))

    for member_idx in clusters.values():
        members = X[member_idx, :]
        nb_member = members.shape[0]
        # np.cov is the unbiased estimate \sum (x - mean_x)^2 / (n - 1);
        # to obtain the plain dispersion sum, multiply back by (n - 1)
        within += np.cov(members, rowvar=0) * (nb_member - 1)
        offset = (np.mean(members, axis=0) - grand_center).reshape(
            (nb_feature, 1))
        between += np.multiply(offset, offset.transpose()) * nb_member

    nb_cluster = len(clusters)
    return (between.trace() * (nb_data - nb_cluster) /
            (within.trace() * (nb_cluster - 1)))
def calc_calinski_harabaz(X, cluster_estimator, n_clusters):
    """
    Compute the Calinski-Harabasz score of the clustering produced by
    cluster_estimator on X with n_clusters clusters.

    Parameter
    ---------
    X numpy array of size (nb_data, nb_feature)
    cluster_estimator: ClusterMixing estimator object.
        need parameter n_clusters
        need method fit_predict: X -> labels
    n_clusters: number of clusters
    """
    cluster_estimator.set_params(n_clusters=n_clusters)
    labels = cluster_estimator.fit_predict(X)
    return calinski_harabaz_index(X, labels)
def max_CH_index(X, cluster_estimator, k_max=None):
    """
    Select the number of clusters maximizing the Calinski and Harabasz
    (1974) score: a ratio between the within-cluster dispersion and the
    between-cluster dispersion.

    Ref: R.B.Calinsky, J.Harabasz: A dendrite method for cluster analysis 1974

    Parameters
    ----------
    X: numpy array of shape (nb_date, nb_features)
    cluster_estimator: ClusterMixing estimator object.
        need parameter n_clusters
        need method fit_predict: X -> labels
    k_max: int: maximum number of clusters

    Return
    ------
    k_star: int: optimal number of cluster
    """
    # if no maximum number of clusters set, take datasize divided by 2
    if not k_max:
        k_max = X.shape[0] // 2

    def score(k):
        return calc_calinski_harabaz(X, cluster_estimator, k)

    return max(range(2, k_max + 1), key=score)
def distortion(X, labels, distortion_meth='sqeuclidean', p=2):
    """
    Given data and their cluster assigment, compute the distortion D

    Parameter
    ---------
    X: numpy array of shape (nb_data, nb_feature)
    labels: list of int of length nb_data
    distortion_meth: can be a function X, labels -> float,
        can be a string naming a scipy.spatial distance. can be in
       ['euclidean', 'minkowski', 'seuclidean', 'sqeuclidean', 'chebyshev'
       'cityblock', 'cosine', 'correlation', 'hamming', 'jaccard',
       'Bray-Curtis', 'mahalanobis', 'yule', 'matching', 'dice', 'kulsinski',
       'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath',
       'canberra', 'wminkowski'])
    p : double
        The p-norm to apply (for Minkowski, weighted and unweighted)

    Return
    ------
    distortion: float
    """
    # A string selects one of the scipy.spatial distances; anything else is
    # assumed to be a user-supplied callable (X, labels) -> float.
    if isinstance(distortion_meth, str):
        return distortion_metrics(X, labels, distortion_meth, p)
    return distortion_meth(X, labels)
def distortion_metrics(X, labels, metric='sqeuclidean', p=2):
    """
    Given data and their cluster assigment, compute the distortion

    D = \sum_{x \in X} distance(x, c_x) / nb_feature

    With c_x the center of the cluster containing x, distance the distance
    defined by `metric`, and nb_feature the number of columns of X
    (the sum is normalised by the number of features).

    Parameter
    ---------
    X: numpy array of shape (nb_data, nb_feature)
    labels: list of int of length nb_data
    metric: string naming a scipy.spatial distance. metric can be in
       ['euclidean', 'minkowski', 'seuclidean', 'sqeuclidean', 'chebyshev'
       'cityblock', 'cosine', 'correlation', 'hamming', 'jaccard',
       'Bray-Curtis', 'mahalanobis', 'yule', 'matching', 'dice', 'kulsinski',
       'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath',
       'wminkowski', 'canberra']
    p : double
        The p-norm to apply (for Minkowski, weighted and unweighted)

    Return
    ------
    distortion: float
    """
    # Translate common aliases into names understood by scipy
    if metric == 'l2':
        metric = 'euclidean'
    elif metric in ('l1', 'manhattan'):
        metric = 'cityblock'

    # Group the row indices of X by their cluster label
    clusters = defaultdict(list)
    for idx, lab in enumerate(labels):
        clusters[lab].append(idx)

    nb_feature = X.shape[1]
    total = .0
    for member_idx in clusters.values():
        members = X[member_idx, :]
        center = np.mean(members, axis=0).reshape(1, nb_feature)
        total += np.sum(cdist(members, center, metric=metric, p=p))
    # note: the original normalises by the number of features (X.shape[1])
    return total / nb_feature
def distortion_jump(X, cluster_estimator, k_max=None,
                    distortion_meth='sqeuclidean', p=2):
    """
    Find the number of clusters that maximizes efficiency while minimizing
    error by information theoretic standards (wikipedia). For each number of
    cluster, it calculates the distortion reduction. Roughly, it selects k such
    as the difference between distortion with k clusters minus distortion with
    k-1 clusters is maximal.

    More precisely, let d(k) equals distortion with k clusters.
    Let Y=nb_feature/2, let D[k] = d(k)^{-Y}

    k^* = argmax(D[k] - D[k-1])

    Parameters
    ----------
    X: numpy array of shape (nb_date, nb_features)
    cluster_estimator: ClusterMixing estimator object.
        need parameter n_clusters
        need method fit_predict: X -> labels
    k_max: int: maximum number of clusters
    distortion_meth: can be a function X, labels -> float,
        can be a string naming a scipy.spatial distance. can be in
       ['euclidean', 'minkowski', 'seuclidean', 'sqeuclidean', 'chebyshev'
       'cityblock', 'cosine', 'correlation', 'hamming', 'jaccard',
       'Bray-Curtis', 'mahalanobis', 'yule', 'matching', 'dice', 'kulsinski',
       'rogerstanimoto', 'russellrao', 'sokalmichener', 'sokalsneath',
       'canberra', 'wminkowski'])
    p : double
        The p-norm to apply (for Minkowski, weighted and unweighted)

    Return
    ------
    k_star: int: optimal number of cluster
    """
    nb_data, nb_feature = X.shape
    # if no maximum number of clusters set, take datasize divided by 2
    if not k_max:
        k_max = nb_data // 2
    Y = - nb_feature / 2
    info_gain = 0
    # baseline: distortion with everything in one single cluster
    # (labels are all zeros)
    old_dist = pow(
        distortion(X, np.zeros(nb_data), distortion_meth, p) / nb_feature, Y)
    # Fallback so k_star is always bound; the original raised a NameError
    # when no k produced a non-negative jump.
    k_star = 2
    for k in range(2, k_max + 1):
        cluster_estimator.set_params(n_clusters=k)
        labs = cluster_estimator.fit_predict(X)
        new_dist = pow(
            distortion(X, labs, distortion_meth, p) / nb_feature, Y)
        # '>=' keeps the largest k among equal jumps (original behaviour)
        if new_dist - old_dist >= info_gain:
            k_star = k
            info_gain = new_dist - old_dist
        old_dist = new_dist
    return k_star
|
sarajcev/nowcasting
|
cluster_metrics.py
|
Python
|
gpl-3.0
| 24,358
|
[
"Gaussian"
] |
3bfc4925a5d928e740f535bb08286b95a3125586658006758396f4c5e32b18cc
|
# -*- encoding: utf-8 -*-
"""
synthetic.py : To obtain the characteristic size of the point spread function
(PSF) of a microscope system, and to generate simulated images containing one
or multiple spots (PSF's).
Copyright (C) 2021 Andries Effting, Delmic
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
USA.
"""
import math
from typing import List, Tuple, Union
import numpy
# Type aliases: a 2-d image shape (n, m) and a (j, i) pixel coordinate.
Shape2D = Tuple[int, int]
Coordinate = Tuple[float, float]
CoordinateList = List[Coordinate]
# Largest value representable by an unsigned 16-bit integer (65535); used to
# scale normalized intensities into the uint16 image range.
UINT16_MAX = numpy.iinfo(numpy.uint16).max
def psf_sigma_wffm(
    refractive_index: float, numerical_aperture: float, wavelength: float
) -> float:
    """
    Calculate the Gaussian approximation of a wide field fluorescence
    microscope point spread function.

    Parameters
    ----------
    refractive_index : float, >= 1
        Refractive index
    numerical_aperture: float, positive
        Numerical aperture of the optical system
    wavelength : float
        Wavelength.

    Returns
    -------
    sigma : float
        The standard deviation of the Gaussian approximation of a fluorescence
        microscope point spread function. Same units as `wavelength`.

    References
    ----------
    .. [1] Zhang, B., Zerubia, J., & Olivo-Marin, J. C. (2007). Gaussian
    approximations of fluorescence microscope point-spread function models.
    Applied optics, 46(10), 1819-1829.
    """
    # Validate the optical parameters before evaluating the model.
    if refractive_index < 1:
        raise ValueError("The refractive index should be greater than or equal to 1.")
    if numerical_aperture <= 0:
        raise ValueError("The numerical aperture should be positive.")
    if wavelength <= 0:
        raise ValueError("The wavelength should be positive.")
    if numerical_aperture >= refractive_index:
        raise ValueError(
            "The numerical aperture should be less than the refractive index."
        )
    wavenumber = 2 * math.pi / wavelength
    sin_alpha = numerical_aperture / refractive_index
    cos_alpha = math.sqrt(1 - sin_alpha ** 2)
    c32 = cos_alpha ** 1.5  # cos(alpha)^(3/2)
    c72 = cos_alpha ** 3.5  # cos(alpha)^(7/2)
    # Closed-form sigma from Zhang et al. (2007)
    return 1 / (refractive_index * wavenumber *
                math.sqrt((4 - 7 * c32 + 3 * c72) / (7 * (1 - c32))))
def psf_gaussian(
    shape: Shape2D, loc: Union[Coordinate, CoordinateList], sigma: float
) -> numpy.ndarray:
    """
    Return a synthetic spot image of a point-spread function (PSF) approximated
    by a 2-dimensional Gaussian function.

    Parameters
    ----------
    shape : tuple of ints
        Shape of the array, e.g. ``(9, 9)``.
    loc : tuple of floats, or list of tuple of floats
        Position of the maximum in pixel coordinates `(j0, i0)` relative to the
        center of the spot image.
    sigma : float, positive
        Standard deviation of the Gaussian.

    Returns
    -------
    image : ndarray, dtype=numpy.uint16
        Array with the image of the point spread function with the given shape
        and size and at the given location.
    """
    if sigma <= 0:
        raise ValueError("sigma should be positive")
    n, m = shape
    rows = numpy.arange(n, dtype=numpy.float64)
    cols = numpy.arange(m, dtype=numpy.float64)
    image = numpy.zeros((n, m), dtype=numpy.float64)
    # The 2-d Gaussian is separable: build it as the outer product of two
    # 1-d profiles, summing one spot per requested location.
    for j0, i0 in numpy.atleast_2d(loc):
        profile_j = numpy.exp(-0.5 * numpy.square((rows - j0) / sigma))
        profile_i = numpy.exp(-0.5 * numpy.square((cols - i0) / sigma))
        image += numpy.outer(profile_j, profile_i)
    # Saturate overlapping spots at 1, then scale and round into uint16 range.
    numpy.clip(image, 0, 1, out=image)
    numpy.rint(UINT16_MAX * image, out=image)
    return image.astype(numpy.uint16)
|
pieleric/odemis
|
src/odemis/util/synthetic.py
|
Python
|
gpl-2.0
| 4,092
|
[
"Gaussian"
] |
cc00b73e1a04b9ddbcb1f2e6194af2833f5f9f0b9395456d556731367adfd370
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# http://www.chem.ucsb.edu/~sheagroup
# License: 3-clause BSD License (See LICENSE.TXT)
# Copyright (c) 2011, Regents of the University of California
# All rights reserved.
"""
lttree_check.py
The original template file format supports any variable types or file names.
However if you plan to process template files using lttree.py to create
LAMMPS-readable input/data files, then variables and file names obey certain
naming conventions. This code attempts to ensure these conventions are obeyed
and to make sure that necessary variables are defined.
-- This code checks static variables (@) and basic LAMMPS syntax --
This program makes an attempt to check that the variables and file names
which appear in an "lttree" file are not misspelled (or miscapitalized).
It also attempts to check that LAMMPS syntax conventions are obeyed.
(It checks that the appropriate type of variable is located in each column).
It also attempts to check that all of the needed coeffs are defined.
-- This code does NOT check instance variables ($) --
This code does not check to make sure that all references to instance variables
(such as $atom, $bond, $angle, $dihedral, $improper or $mol variables) are valid
This means a user's input script command (like the "group" command) could refer
to an $atom or $mol which was never defined, and this code would not detect it.
(Why: Checking for instance variables requires building the entire instance tree
and checking references uses up additional memory after that. I do not do this
because memory is often very scarce after building the instance tree.)
Instead, we could check for these kinds of errors when post-processing of
the files generated by lttree.py or moltemplate.sh.
-- This is not the prettiest code I've ever written. --
"""
import sys
#from ttree import *
from lttree_styles import *
from lttree import *
from ttree_lex import InputError
# Lexicographic comparison of sys.version strings is unreliable
# (e.g. '2.10' < '2.6' is True); compare the version_info tuple instead.
if sys.version_info < (2, 6):
    raise InputError('Error: Alas, you must upgrade to a newer version of python.')
#g_no_check_msg = \
# "(If this error message is wrong, and/or you would like to continue anyway,\n"+\
# "try running moltemplate again using the \"-nocheck\" command-line-argument.)\n"
g_no_check_msg = \
'(To continue anyway, run moltemplate using the \"-nocheck\" argument.)\n'
def CheckCommonVarNames(prefix, descr_str, suffix, srcloc):
    """ Check the name of variables in a lttree-file to confirm
    that they follow the conventions used by lttree.
    Almost any variable/category name is permitted, except for
    names which closely match those reserved by lttree.

    """
    cat_name, cat_ptkns, leaf_ptkns = \
        DescrToCatLeafPtkns(descr_str,
                            srcloc)
    lowered = cat_name.lower()
    # Reserved category names mapped to the advice appended to the error
    # message when the capitalization does not match exactly.
    advice_by_category = {'mol': 'Perhaps you meant \"mol\"?',
                          'group': 'Perhaps you meant \"group\"?',
                          'fix': 'Use \"fix\" instead.',
                          'bond': 'Use \"bond\" instead.',
                          'angle': 'Use \"angle\" instead.',
                          'dihedral': 'Use \"dihedral\" instead.',
                          'improper': 'Use \"improper\" instead.'}
    if lowered == 'atom':
        # "atom" uses a slightly different wording than the other categories
        if cat_name != 'atom':
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Illegal lttree variable category: \"'+cat_name+'\"\n'+
                             'Use \"atom\" instead.')
    elif lowered in advice_by_category:
        if cat_name != lowered:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Variable category: \"'+cat_name+'\" does not match, yet overlaps\n'+
                             'closely with a reserved lttree variable category.\n'+
                             advice_by_category[lowered])
    else:
        sys.stderr.write('-----------------------------------------------------\n'+
                         'WARNING: in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         ' Unrecognised template variable category: \"'+cat_name+'\"\n'+
                         '-----------------------------------------------------\n')
def CheckDataFileNames(filename,
srcloc,
write_command,
fnames_found):
N_data_prefix = len(data_prefix)
#data_prefix_no_space = data_prefix.rstrip()
N_data_prefix_no_space = len(data_prefix)
section_name = filename[N_data_prefix:]
if ((section_name.lower() == 'atom') or
(section_name.lower() == 'atoms')):
if (filename != data_atoms):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_atoms+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'velocities') or
(section_name.lower() == 'velocity')):
if (filename != data_velocities):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_velocities+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'mass') or
(section_name.lower() == 'masses')):
if (filename != data_masses):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_masses+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'ellipsoids') or
(section_name.lower() == 'ellipsoid') or
(section_name.lower() == 'elipsoids') or
(section_name.lower() == 'elipsoid')):
if (filename != data_ellipsoids):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_ellipsoids+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'triangle') or
(section_name.lower() == 'triangles')):
if (filename != data_triangles):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_triangles+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'line') or
(section_name.lower() == 'lines')):
if (filename != data_lines):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_lines+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('pair coef') == 0) or
(section_name.lower().find('pair_coef') == 0) or
(section_name.lower().find('paircoef') == 0) or
(section_name.lower().find('pair by type') == 0) or
(section_name.lower().find('pair bytype') == 0) or
(section_name.lower().find('pair_by_type') == 0) or
(section_name.lower().find('pair_bytype') == 0) or
(section_name.lower().find('pairbytype') == 0)):
if (filename != data_pair_coeffs):
err_msg = 'Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+\
'Output file name (\"'+filename+'\") does not match,\n'+\
'yet overlaps closely with reserved lttree-file name.\n'+\
'Perhaps you meant \"'+data_pair_coeffs+'\"?'
if ((section_name.lower().find('by type') != -1) or
(section_name.lower().find('by_type') != -1) or
(section_name.lower().find('bytype') != -1)):
err_msg += '\n (Note: "pair" parameters are always assigned by type.\n'+\
' There\'s no need to specify \"by type\")'
raise InputError(err_msg)
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bond coef') == 0) or
(section_name.lower().find('bond_coef') == 0) or
(section_name.lower().find('bondcoef') == 0)):
if (filename != data_bond_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bond_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angle coef') == 0) or
(section_name.lower().find('angle_coef') == 0) or
(section_name.lower().find('anglecoef') == 0)):
if (filename != data_angle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('dihedral coef') == 0) or
(section_name.lower().find('dihedral_coef') == 0) or
(section_name.lower().find('dihedralcoef') == 0)):
if (filename != data_dihedral_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedral_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('improper coef') == 0) or
(section_name.lower().find('improper_coef') == 0) or
(section_name.lower().find('impropercoef') == 0)):
if (filename != data_improper_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_improper_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
# -- class2 data sections --
elif ((section_name.lower().find('bondbond coef') == 0) or
(section_name.lower().find('bondbond_coef') == 0) or
(section_name.lower().find('bondbondcoef') == 0)):
if (filename != data_bondbond_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondbond_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bondangle coef') == 0) or
(section_name.lower().find('bondangle_coef') == 0) or
(section_name.lower().find('bondanglecoef') == 0)):
if (filename != data_bondangle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondangle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('middlebondtorsion coef') == 0) or
(section_name.lower().find('middlebondtorsion_coef') == 0) or
(section_name.lower().find('middlebondtorsioncoef') == 0) or
(section_name.lower().find('middlebondtorision coef') == 0) or
(section_name.lower().find('middlebondtorision_coef') == 0) or
(section_name.lower().find('middlebondtorisioncoef') == 0)):
if (filename != data_middlebondtorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_middlebondtorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('endbondtorsion coef') == 0) or
(section_name.lower().find('endbondtorsion_coef') == 0) or
(section_name.lower().find('endbondtorsioncoef') == 0) or
(section_name.lower().find('endbondtorision coef') == 0) or
(section_name.lower().find('endbondtorision_coef') == 0) or
(section_name.lower().find('endbondtorisioncoef') == 0)):
if (filename != data_endbondtorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_endbondtorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angletorsion coef') == 0) or
(section_name.lower().find('angletorsion_coef') == 0) or
(section_name.lower().find('angletorsioncoef') == 0) or
(section_name.lower().find('angletorision coef') == 0) or
(section_name.lower().find('angletorision_coef') == 0) or
(section_name.lower().find('angletorisioncoef') == 0)):
if (filename != data_angletorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angletorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angleangletorsion coef') == 0) or
(section_name.lower().find('angleangletorsion_coef') == 0) or
(section_name.lower().find('angleangletorsioncoef') == 0) or
(section_name.lower().find('angleangletorision coef') == 0) or
(section_name.lower().find('angleangletorision_coef') == 0) or
(section_name.lower().find('angleangletorisioncoef') == 0)):
if (filename != data_angleangletorsion_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angleangletorsion_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bondbond13 coef') == 0) or
(section_name.lower().find('bondbond13_coef') == 0) or
(section_name.lower().find('bondbond13coef') == 0)):
if (filename != data_bondbond13_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bondbond13_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('angleangle coef') == 0) or
(section_name.lower().find('angleangle_coef') == 0) or
(section_name.lower().find('angleanglecoef') == 0)):
if (filename != data_angleangle_coeffs):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angleangle_coeffs+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'bonds by type') or
(section_name.lower() == 'bonds bytype') or
(section_name.lower() == 'bonds_by_type') or
(section_name.lower() == 'bonds_bytype') or
(section_name.lower() == 'bondsbytype') or
(section_name.lower() == 'bond by type') or
(section_name.lower() == 'bond bytype') or
(section_name.lower() == 'bond_by_type') or
(section_name.lower() == 'bond_bytype') or
(section_name.lower() == 'bondbytype')):
if (filename != data_bonds_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'angles by type') or
(section_name.lower() == 'angles bytype') or
(section_name.lower() == 'angles_by_type') or
(section_name.lower() == 'angles_bytype') or
(section_name.lower() == 'anglesbytype') or
(section_name.lower() == 'angle by type') or
(section_name.lower() == 'angle bytype') or
(section_name.lower() == 'angle_by_type') or
(section_name.lower() == 'angle_bytype') or
(section_name.lower() == 'anglebytype')):
if (filename != data_angles_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angles_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'dihedrals by type') or
(section_name.lower() == 'dihedrals bytype') or
(section_name.lower() == 'dihedrals_by_type') or
(section_name.lower() == 'dihedrals_bytype') or
(section_name.lower() == 'dihedralsbytype') or
(section_name.lower() == 'dihedral by type') or
(section_name.lower() == 'dihedral bytype') or
(section_name.lower() == 'dihedral_by_type') or
(section_name.lower() == 'dihedral_bytype') or
(section_name.lower() == 'dihedralbytype')):
if (filename != data_dihedrals_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedrals_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'impropers by type') or
(section_name.lower() == 'impropers bytype') or
(section_name.lower() == 'impropers_by_type') or
(section_name.lower() == 'impropers_bytype') or
(section_name.lower() == 'impropersbytype') or
(section_name.lower() == 'improper by type') or
(section_name.lower() == 'improper bytype') or
(section_name.lower() == 'improper_by_type') or
(section_name.lower() == 'improper_bytype') or
(section_name.lower() == 'improperbytype')):
if (filename != data_impropers_by_type):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_impropers_by_type+'\"?')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'bonds') or
(section_name.lower() == 'bond')):
if (filename != data_bonds):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower().find('bond list') == 0) or
(section_name.lower().find('bonds list') == 0) or
(section_name.lower().find('bond_list') == 0) or
(section_name.lower().find('bonds_list') == 0)):
if (filename != data_bond_list):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_bonds_by_type+'\"?')
elif (write_command != 'write'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'angles') or
(section_name.lower() == 'angle')):
if (filename != data_angles):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_angles+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'dihedrals') or
(section_name.lower() == 'dihedral')):
if (filename != data_dihedrals):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_dihedrals+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'impropers') or
(section_name.lower() == 'improper')):
if (filename != data_impropers):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_impropers+'\"?')
elif (write_command == 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write(\"'+filename+'\") instead.\n')
elif ((section_name.lower() == 'box boundaries') or
(section_name.lower() == 'box boundary') or
(section_name.lower() == 'boundaries') or
(section_name.lower() == 'boundary') or
(section_name.lower() == 'boundary conditions') or
(section_name.lower() == 'periodic boundaries') or
(section_name.lower() == 'periodic boundary conditions') or
(section_name.lower() == 'periodic_boundaries') or
(section_name.lower() == 'periodic_boundary_conditions') or
(section_name.lower() == 'pbc')):
if ((filename != data_boundary) and
(filename != data_pbc)):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'Output file name (\"'+filename+'\") does not match,\n'
'yet overlaps closely with reserved lttree-file name.\n'
'Perhaps you meant \"'+data_boundary+'\"?\n'
'(Specify periodic boundary conditions this way.)')
elif (write_command != 'write_once'):
raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
'When using moltemplate.sh to build LAMMPS input files, you probably do not\n'
'want to use the '+write_command+'() command with \"'+filename+'\".\n'
'You should probably use write_once(\"'+filename+'\") instead.\n')
elif (filename == data_pbc):
sys.stderr.write('WARNING: write_once(\"'+data_pbc+'\") is depreciated.\n'
' Use write_once(\"'+data_boundary+'\") instead.\n')
def CheckCommonFileNames(filename,
                         srcloc,
                         write_command,
                         filenames_found):
    """
    Check the write() or write_once() statements in a
    lttree-file to make sure that the files being written
    follow the conventions used by lttree.
    Almost any file name is permitted, except for file names
    which closely match those reserved by lttree.
    """
    filenames_found.add(filename)

    N_data_prefix = len(data_prefix)
    # NOTE(review): data_prefix_no_space appears to be a module-level
    # constant (data_prefix without the trailing space) -- confirm.
    N_data_prefix_no_space = len(data_prefix_no_space)
    fname_lc = filename.lower()

    # Common misspellings of the reserved file names, grouped by which
    # reserved name the user probably intended:
    pbc_synonyms = set(['box boundaries', 'box boundary',
                        'boundaries', 'boundary',
                        'boundary conditions',
                        'periodic boundaries',
                        'periodic boundary conditions',
                        'periodic_boundaries',
                        'periodic_boundary_conditions',
                        'pbc'])
    init_synonyms = set(['init', 'in init', 'ininit',
                         'initialize', 'in initialize', 'ininitialize'])
    settings_synonyms = set(['settings', 'in settings', 'insettings'])
    set_coords_synonyms = set(['set_coords', 'set coords', 'setcoords',
                               'in set_coords', 'in set coords',
                               'in setcoords'])

    # A case-insensitive match to the reserved data-file prefix with the
    # wrong capitalization is almost certainly a typo:
    head = filename[:N_data_prefix]
    if (head.lower() == data_prefix.lower()) and (head != data_prefix):
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'The beginning of output file (\"'+filename+'\")\n'
                         'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                         '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[N_data_prefix:]+'\"?')

    if filename[:N_data_prefix_no_space] == data_prefix_no_space:
        # The name begins with the data-file prefix (possibly minus the
        # space).  Either delegate to the data-file checker, or complain
        # that the space after the prefix was forgotten.
        if filename[:N_data_prefix] == data_prefix:
            CheckDataFileNames(filename,
                               srcloc,
                               write_command,
                               filenames_found)
        else:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'The beginning of output file (\"'+filename+'\")\n'
                             'does not match yet overlaps closely with a reserved lttree-file name prefix.\n'
                             '(\"'+data_prefix+'\"). Perhaps you meant \"'+data_prefix+filename[N_data_prefix_no_space:]+'\"?')
    elif fname_lc in pbc_synonyms:
        # In that case (for one thing) they forgot the data_prefix
        raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                         'Output file name (\"'+filename+'\") does not match,\n'
                         'yet overlaps closely with reserved lttree-file name.\n'
                         'Perhaps you meant \"'+data_boundary+'\"?\n'
                         '(Specify periodic boundary conditions this way.)')
    elif fname_lc in init_synonyms:
        if filename != in_init:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_init+'\"?')
    elif fname_lc in settings_synonyms:
        if filename != in_settings:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_settings+'\"?')
    elif fname_lc in set_coords_synonyms:
        if filename != in_set_coords:
            raise InputError('Probable typo in '+ErrorLeader(srcloc.infile,srcloc.lineno)+'\n\n'+
                             'Output file name (\"'+filename+'\") does not match,\n'
                             'yet overlaps closely with reserved lttree-file name.\n'
                             'Perhaps you meant \"'+in_set_coords+'\"?')
def CheckSyntaxCheap(lex):
    """
    Cheaply scan a lttree/moltemplate input stream for common mistakes
    without building the full tree of StaticObjs.
      - The "lex" argument is a file or input stream which has been
        converted to a "TemplateLexer" object (similar to python's
        built-in shlex lexer).
    For every write()/write_once() command this verifies the syntax of
    the parenthesized file-name argument and the '{' which must follow
    it, checks the file name itself (CheckCommonFileNames()) and every
    variable reference inside the template (CheckCommonVarNames()).
    After the whole stream has been read, it warns about important
    output files which were never written to.

    Raises InputError on a syntax error or a probable file-name typo.
    """
    fnames_found = set([])
    prematurely_read_token = None   # one-token push-back buffer
    while True:
        if prematurely_read_token is None:
            command = lex.get_token()
        else:
            command = prematurely_read_token
            prematurely_read_token = None
        if command == lex.eof:
            break
        if ((command == 'write') or (command == 'write_once')):
            open_paren = lex.get_token()
            if open_paren == '{':
                # ..then the user neglected to specify the "filename"
                # file-name argument.  In that case, supply the default, ''
                # (which is shorthand for the standard out in this case).
                open_curly = open_paren[0]
                open_paren = ''
                close_paren = ''
                filename = ''
            else:
                filename = lex.get_token()
                if filename == ')':
                    # The user wrote "write()" with empty parentheses.
                    # BUGFIX: this previously read "filename == ''" (a
                    # no-op comparison), leaving filename set to ')'.
                    filename = ''
                    close_paren = ')'
                else:
                    close_paren = lex.get_token()
                open_curly = lex.get_token()
            # The argument must be either "(name)" or nothing at all,
            # and must be followed by the opening '{' of the template:
            if ((open_curly != '{') or
                ((open_paren == '') and (close_paren != '')) or
                ((open_paren == '(') and (close_paren != ')'))):
                raise InputError('Error: in '+lex.error_leader()+'\n\n'
                                 'Syntax error at beginning of '+command+' command.')
            filename = RemoveOuterQuotes(filename, lex.quotes)
            # The previous line is similar to:
            #filename = filename.strip(lex.quotes)
            CheckCommonFileNames(filename, lex.GetSrcLoc(), command, fnames_found)
            tmpl_contents = lex.ReadTemplate()
            StaticObj.CleanupReadTemplate(tmpl_contents, lex)
            for entry in tmpl_contents:
                if (type(entry) is VarRef):
                    CheckCommonVarNames(entry.prefix,
                                        entry.descr_str,
                                        entry.suffix,
                                        entry.srcloc)

    # Warn about important output files which were never written to:
    if (data_atoms not in fnames_found):
        sys.stderr.write('WARNING: \"'+data_atoms+'\" file not found\n')
    if (data_masses not in fnames_found):
        sys.stderr.write('WARNING: \"'+data_masses+'\" file not found\n')
    if (in_init not in fnames_found):
        sys.stderr.write('WARNING: \"'+in_init+'\" file not found\n')
    if (in_settings not in fnames_found):
        sys.stderr.write('WARNING: \"'+in_settings+'\" file not found\n')
def CheckSyntaxStatic(context_node,
root_node,
atom_column_names,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands):
if search_instance_commands:
assert(isinstance(context_node, StaticObj))
commands = context_node.instance_commands
else:
# Note: Leaf nodes contain no commands, so skip them
if (not hasattr(context_node, 'commands')):
return
# Otherwise process their commands
commands = context_node.commands
for command in commands:
if isinstance(command, WriteFileCommand):
filename = command.filename
if filename == None: # (The "create_var" command causes this)
pass
elif (filename.find(in_prefix) == 0): #if filename begins with "In "
CheckInFileSyntax(command.tmpl_list,
root_node,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined)
elif filename == 'Data Atoms':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) == 0:
pass # skip blank lines
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass # skip comment lines
else:
syntax_err = False
if len(table[i]) < len(atom_column_names):
syntax_err = True
else:
syntax_err = False
for j in range(0, len(atom_column_names)):
if ((atom_column_names[j].lower() == 'atom-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$','${')) and
(ExtractCatName(table[i][j].descr_str) == 'atom')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'molecule-id') and
(not ((j < len(table[i])) and
isinstance(table[i][j], VarRef) and
(table[i][j].prefix in ('$','${')) and
(ExtractCatName(table[i][j].descr_str) == 'mol')))):
syntax_err = True
elif ((atom_column_names[j].lower() == 'atom-type') and
(not ((j < len(table[i])) and
(isinstance(table[i][j], VarRef)) and
(table[i][j].prefix in ('@', '@{')) and
(table[i][j].nptr.cat_name == 'atom') and
(table[i][j].nptr.cat_node == root_node)))):
syntax_err = True
if syntax_err:
correct_rows_list = [s for s in atom_column_names]
for j in range(0, len(correct_rows_list)):
if correct_rows_list[j].lower() == 'atom-id':
correct_rows_list[j] = '$atom:id'
elif correct_rows_list[j].lower() == 'atom-type':
correct_rows_list[j] = '@atom:type'
elif correct_rows_list[j].lower() == 'molecule-id':
correct_rows_list[j] = '$mol:id'
correct_rows_msg = ' '.join(correct_rows_list)
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Invalid "Data Atoms" syntax.\n'+
'Each line of the \"Data Atoms\" section should have this format:\n\n'
' '+correct_rows_msg+'\n\n'
'You may have forgotten to specify the LAMMPS atom_style.\n'+
'(You can do this running moltemplate with the -atom-style _style_ argument.)\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Bonds':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 4:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'bond') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bonds" syntax.\n'+
'Each line of the \"Data Bonds\" section should have this format:\n\n'
' $bond:id @bond:type $atom:id1 $atom:id2\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Bond List':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 3:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'bond'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bond List" syntax.\n'+
'Each lines in this section should have this format:\n\n'
' $bond:id $atom:id1 $atom:id2\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Angles':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 5:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'angle'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'angle') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Angles" syntax.\n'+
'Each line of the \"Data Angles\" section should have this format:\n\n'
' $angle:id @angle:type $atom:id1 $atom:id2 $atom:id3\n'+
'----------------------------------------------------\n\n'+
g_no_check_msg)
elif filename == 'Data Dihedrals':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'dihedral'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'dihedral') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Dihedrals" syntax.\n'+
'Each line of the \"Data Dihedrals\" section should have this format:\n\n'
' $dihedral:id @dihedral:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Impropers':
table = TableFromTemplate(command.tmpl_list,
[[' ', '\t', '\r'], '\n'],
[True, False])
for i in range(0, len(table)):
syntax_err = False
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass
else:
if len(table[i]) < 6:
syntax_err = True
table_entry = table[i][0]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'improper'))):
syntax_err = True
if len(table[i]) > 1:
table_entry = table[i][1]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('@', '@{')) and
(table_entry.nptr.cat_name == 'improper') and
(table_entry.nptr.cat_node == root_node))):
syntax_err = True
if len(table[i]) > 2:
table_entry = table[i][2]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 3:
table_entry = table[i][3]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 4:
table_entry = table[i][4]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if len(table[i]) > 5:
table_entry = table[i][5]
if (not ((isinstance(table_entry, VarRef)) and
(table_entry.prefix in ('$','${')) and
(ExtractCatName(table_entry.descr_str) == 'atom'))):
syntax_err = True
if syntax_err:
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Impropers" syntax.\n'+
'Each line of the \"Data Impropers\" section should have this format:\n\n'
' $improper:id @improper:type $atom:id1 $atom:id2 $atom:id3 $atom:id4\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
# A simple wildcard is the character "*" on its own.
# These are okay.
# A "compound" wildcard expression is something like
# 5*7 or
# 5* or
# *7 or
# @{bond:A}*@bond:B or
# @{bond:A}* or
# *@bond:B
# LAMMPS allows this but in moltemplate this causes
# unintended side-effects. Check for these now.
if filename in set(['Data Bond Coeffs',
'Data Angle Coeffs',
'Data Dihedral Coeffs',
'Data Improper Coeffs',
'Data Pair Coeffs']):
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
assert(hasattr(table[i], '__len__'))
if len(table[i]) > 0:
if (isinstance(table[i][0], TextBlock) and
table[i][0].text == '*'):
if filename == 'Data Bond Coeffs':
data_bond_coeffs_defined.add('*')
elif filename == 'Data Angle Coeffs':
data_angle_coeffs_defined.add('*')
elif filename == 'Data Dihedral Coeffs':
data_dihedral_coeffs_defined.add('*')
elif filename == 'Data Improper Coeffs':
data_improper_coeffs_defined.add('*')
elif filename == 'Data Pair Coeffs':
data_pair_coeffs_defined.add(('*','*'))
else:
compound_wildcard = False
if (len(table[i]) > 1):
if hasattr(table[i][0],'__len__'):
ltmpl = table[i][0]
else:
ltmpl = [table[i][0]]
for entry in ltmpl:
if (isinstance(entry, TextBlock) and
('*' in entry.text)):
compound_wildcard = True
elif (isinstance(entry, VarRef) and
('*' in entry.descr_str)):
compound_wildcard = True
if compound_wildcard:
raise InputError('--- Paranoid checking: ---\n'
' Possible error near '+
ErrorLeader(entry.srcloc.infile,
entry.srcloc.lineno)+'\n'
'The wildcard symbol, \"*\", is not recommended within \"'+filename+'\".\n'
'It is safer to specify the parameters for each type explicitly.\n'
'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
if filename == 'Data Bond Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Bond Coeffs" syntax.\n'
' Each line of the \"Data Bond Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @bond:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_bond_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Angle Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Angle Coeffs" syntax.\n'
' Each line of the \"Data Angle Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @angle:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_angle_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Dihedral Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Dihedral Coeffs" syntax.\n'
' Each line of the \"Data Dihedral Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @dihedral:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_dihedral_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Improper Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not (isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Improper Coeffs" syntax.\n'
' Each line of the \"Data Improper Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @improper:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_improper_coeffs_defined.add(table[i][0].binding)
elif filename == 'Data Pair Coeffs':
# Commenting the next line out. We did this already:
#table = TableFromTemplate(command.tmpl_list,
# [[' ','\t','\r'], '\n'],
# [True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(table[i][0].text == '*')):
pass # we dealt with this case earlier
elif ((len(table[i]) > 0) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) > 0) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'atom') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect "Data Pair Coeffs" syntax.\n'
' Each line of the \"Data Pair Coeffs\" section\n'
' should have the following syntax:\n\n'+
' @atom:type list-of-parameters...\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
else:
data_pair_coeffs_defined.add((table[i][0].binding,
table[i][0].binding))
elif filename == 'Data Bonds By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 3) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'bond') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Bonds By Type\" syntax.\n'
' Each line of the \"Data Bonds By Type\" section should begin with an\n'
' @bond:type variable followed by 2 atom types.\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Angles By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 4) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'angle') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Angles By Type\" syntax.\n'
' Each line of the \"Data Angles By Type\" section should begin with an\n'
' @angle:type variable followed by 3 atom types (and 2 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Dihedrals By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'dihedral') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Dihedrals By Type\" syntax.\n'
' Each line of the \"Data Dihedrals By Type\" section should begin with a\n\n'
' @dihedral:type variable followed by 4 atom types (and 3 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
elif filename == 'Data Impropers By Type':
table = TableFromTemplate(command.tmpl_list,
[[' ','\t','\r'], '\n'],
[True, False])
for i in range(0, len(table)):
if len(table[i]) == 0:
pass
elif ((len(table[i]) > 1) and
isinstance(table[i][0], TextBlock) and
(len(table[i][0].text) > 0) and
(table[i][0].text == '#')):
pass #Ignore comment lines (postprocessing removes them)
elif (not ((len(table[i]) >= 5) and
isinstance(table[i][0], VarRef) and
(table[i][0].prefix in ('@', '@{')) and
(table[i][0].nptr.cat_name == 'improper') and
(table[i][0].nptr.cat_node == root_node))):
raise InputError('----------------------------------------------------\n'+
' Syntax error near '+
ErrorLeader(table[i][0].srcloc.infile,
table[i][0].srcloc.lineno)+'\n'
' Incorrect \"Data Impropers By Type\" syntax.\n'
' Each line of the \"Data Impropers By Type\" section should begin with an\n\n'
' @improper:type variable followed by 4 atom types (and 3 optional bond types).\n'+
'----------------------------------------------------\n'+
g_no_check_msg)
# Recursively invoke AssignVarPtrs() on all (non-leaf) child nodes:
for child in context_node.children.values():
CheckSyntaxStatic(child,
root_node,
atom_column_names,
data_pair_coeffs_defined,
data_bond_coeffs_defined,
data_angle_coeffs_defined,
data_dihedral_coeffs_defined,
data_improper_coeffs_defined,
in_pair_coeffs_defined,
in_bond_coeffs_defined,
in_angle_coeffs_defined,
in_dihedral_coeffs_defined,
in_improper_coeffs_defined,
search_instance_commands)
def CheckInFileSyntax(tmpl_list,
                      root_node,
                      pair_coeffs_defined,
                      bond_coeffs_defined,
                      angle_coeffs_defined,
                      dihedral_coeffs_defined,
                      improper_coeffs_defined):
    """Check the syntax of coeff commands destined for a LAMMPS input script.

    Scans the template (tmpl_list) for "bond_coeff", "angle_coeff",
    "dihedral_coeff", "improper_coeff", and "pair_coeff" commands, and:
      - raises InputError on misspelled command names (eg "bondcoeff"),
      - raises InputError when the type argument is not a counted variable
        of the matching category (@bond:, @angle:, @dihedral:, @improper:,
        @atom:) belonging to root_node,
      - raises InputError ("paranoid checking") on compound wildcard
        expressions such as "5*7" or "@{bond:A}*" which LAMMPS accepts but
        which cause unintended side-effects in moltemplate,
      - records every type whose coeffs were defined by adding its binding
        (or '*', or the ('*','*') pair for pair_coeff) to the corresponding
        *_coeffs_defined set.  These sets are mutated in place; nothing is
        returned.
    """
    table = TableFromTemplate(tmpl_list,
                              [[' ','\t','\r'], '\n'],
                              [True, False])
    for i in range(0, len(table)):
        assert(hasattr(table[i], '__len__'))
        if len(table[i]) > 0:
            # --- First pass over this line: accept a lone "*" wildcard, and
            # --- reject "compound" wildcard expressions (eg "5*7", "5*").
            if ((isinstance(table[i][0], TextBlock)) and
                (table[i][0].text in set(['bond_coeff',
                                          'angle_coeff',
                                          'dihedral_coeff',
                                          'improper_coeff']))):
                if len(table[i]) > 1: # if not deal with error later
                    if (isinstance(table[i][1], TextBlock) and
                        table[i][1].text == '*'):
                        if table[i][0].text == 'bond_coeff':
                            bond_coeffs_defined.add('*')
                        elif table[i][0].text == 'angle_coeff':
                            angle_coeffs_defined.add('*')
                        elif table[i][0].text == 'dihedral_coeff':
                            dihedral_coeffs_defined.add('*')
                        elif table[i][0].text == 'improper_coeff':
                            improper_coeffs_defined.add('*')
                    else:
                        compound_wildcard = False
                        if (len(table[i]) > 1):
                            if hasattr(table[i][1], '__len__'):
                                ltmpl = table[i][1]
                            else:
                                ltmpl = [table[i][1]]
                            # A '*' embedded in either a text token or a
                            # variable reference makes this a compound
                            # wildcard expression.
                            for entry in ltmpl:
                                if (isinstance(entry, TextBlock) and
                                    ('*' in entry.text)):
                                    compound_wildcard = True
                                elif (isinstance(entry, VarRef) and
                                      ('*' in entry.descr_str)):
                                    compound_wildcard = True
                        if compound_wildcard:
                            raise InputError('---- Paranoid checking: ---\n'
                                             ' Possible error near '+
                                             ErrorLeader(entry.srcloc.infile,
                                                         entry.srcloc.lineno)+'\n'
                                             'The wildcard symbol, \"*\", is not recommended within a \"'+table[i][0].text+'\".\n'
                                             'command. It is safer to specify the parameters for each bond type explicitly.\n'
                                             'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                             'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
            # --- bond_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'bondcoeff') or
                 (table[i][0].text.lower() == 'bond_coeff'))):
                if table[i][0].text != 'bond_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"bond_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'bond') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"bond_coeff\" command.\n\n'+
                                     ' Each \"bond_coeff\" command should have the following syntax:\n\n'+
                                     ' bond_coeff @bond:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    bond_coeffs_defined.add(table[i][1].binding)
            # --- angle_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'anglecoeff') or
                 (table[i][0].text.lower() == 'angle_coeff'))):
                if table[i][0].text != 'angle_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"angle_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'angle') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"angle_coeff\" command.\n\n'+
                                     ' Each \"angle_coeff\" command should have the following syntax:\n\n'+
                                     ' angle_coeff @angle:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    angle_coeffs_defined.add(table[i][1].binding)
            # --- dihedral_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'dihedralcoeff') or
                 (table[i][0].text.lower() == 'dihedral_coeff'))):
                if table[i][0].text != 'dihedral_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"dihedral_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'dihedral') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"dihedral_coeff\" command.\n\n'+
                                     ' Each \"dihedral_coeff\" command should have the following syntax:\n\n'+
                                     ' dihedral_coeff @dihedral:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    dihedral_coeffs_defined.add(table[i][1].binding)
            # --- improper_coeff ---
            if ((isinstance(table[i][0], TextBlock)) and
                ((table[i][0].text.lower() == 'impropercoeff') or
                 (table[i][0].text.lower() == 'improper_coeff'))):
                if table[i][0].text != 'improper_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"improper_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if ((len(table[i]) > 1) and
                    isinstance(table[i][1], TextBlock) and
                    (table[i][1].text == '*')):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 1) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'improper') and
                           (table[i][1].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"improper_coeff\" command.\n\n'+
                                     ' Each \"improper_coeff\" command should have the following syntax:\n\n'+
                                     ' improper_coeff @improper:type [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    improper_coeffs_defined.add(table[i][1].binding)
            # --- pair_coeff (takes TWO atom-type arguments) ---
            elif ((isinstance(table[i][0], TextBlock)) and
                  ((table[i][0].text.lower() == 'paircoeff') or
                   (table[i][0].text.lower() == 'pair_coeff'))):
                if table[i][0].text != 'pair_coeff':
                    raise InputError('----------------------------------------------------\n'+
                                     ' Spelling error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Use \"pair_coeff\", not \"'+table[i][0].text+'\"\n'+
                                     '----------------------------------------------------\n'+
                                     g_no_check_msg)
                if len(table[i]) > 2: # if not, deal with error later
                    # BUGFIX: the second clause previously re-tested
                    # table[i][1] (a copy-paste slip), so any command whose
                    # FIRST argument was '*' registered ('*','*') and skipped
                    # the compound-wildcard check regardless of the second
                    # argument.  It must test table[i][2].
                    if ((isinstance(table[i][1], TextBlock) and
                         (table[i][1].text == '*')) and
                        (isinstance(table[i][2], TextBlock) and
                         (table[i][2].text == '*'))):
                        pair_coeffs_defined.add(('*','*'))
                    else:
                        compound_wildcard = False
                        assert(len(table[i]) > 1)
                        # Inspect BOTH atom-type arguments for embedded '*'.
                        if hasattr(table[i][1], '__len__'):
                            ltmpl = table[i][1]
                        else:
                            ltmpl = [table[i][1]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if hasattr(table[i][2], '__len__'):
                            ltmpl = table[i][2]
                        else:
                            ltmpl = [table[i][2]]
                        for entry in ltmpl:
                            if (isinstance(entry, TextBlock) and
                                ('*' in entry.text)):
                                compound_wildcard = True
                            elif (isinstance(entry, VarRef) and
                                  ('*' in entry.descr_str)):
                                compound_wildcard = True
                        if compound_wildcard:
                            raise InputError('---- Paranoid checking: ---\n'
                                             ' Possible error near '+
                                             ErrorLeader(entry.srcloc.infile,
                                                         entry.srcloc.lineno)+'\n'
                                             'The wildcard symbol, \"*\", is not recommended within a \"pair_coeff\" command.\n'
                                             'It is safer to specify the parameters for each bond type explicitly.\n'
                                             'You CAN use \"*\" wildcards, but you must disable syntax checking. To get\n'
                                             'past this error message, run moltemplate.sh using the \"-nocheck\" option.\n')
                if ((len(table[i]) > 2) and
                    (isinstance(table[i][1], TextBlock) and
                     (table[i][1].text == '*')) and
                    (isinstance(table[i][2], TextBlock) and
                     (table[i][2].text == '*'))):
                    pass # we dealt with this case earlier
                elif (not ((len(table[i]) > 2) and
                           (isinstance(table[i][1], VarRef)) and
                           (table[i][1].prefix in ('@', '@{')) and
                           (table[i][1].nptr.cat_name == 'atom') and
                           (table[i][1].nptr.cat_node == root_node) and
                           (isinstance(table[i][2], VarRef)) and
                           (table[i][2].prefix in ('@', '@{')) and
                           (table[i][2].nptr.cat_name == 'atom') and
                           (table[i][2].nptr.cat_node == root_node))):
                    raise InputError('----------------------------------------------------\n'+
                                     ' Syntax error near '+
                                     ErrorLeader(table[i][0].srcloc.infile,
                                                 table[i][0].srcloc.lineno)+'\n'
                                     ' Invalid \"pair_coeff\" command.\n\n'+
                                     ' Each \"pair_coeff\" command should have the following syntax:\n\n'+
                                     ' pair_coeff @atom:typeI @atom:typeJ [optional style] list-of-parameters...\n'+
                                     '----------------------------------------------------\n\n'+
                                     g_no_check_msg)
                else:
                    pair_coeffs_defined.add((table[i][1].binding, table[i][2].binding))
def LttreeCheckParseArgs(argv, settings):
    """Parse the command-line arguments and (when run as a script) open the
    input file and attach a TemplateLexer to *settings*.

    argv     -- the argument list (mutated in place: consumed args removed)
    settings -- an LttreeSettings instance; this function sets
                settings.infile and settings.lex when run as __main__
    Raises InputError when no file argument (or too many unrecognized
    arguments) remain after LttreeParseArgs() has consumed its options.
    Exits with status 1 if the input file cannot be opened.
    """
    # Delegate option parsing (shared with lttree.py) first; whatever it
    # leaves behind in argv should be just the input file name.
    LttreeParseArgs(argv, settings)
    if __name__ == "__main__":
        # Instantiate the lexer we will be using.
        #  (The lexer's __init__() function requires an opened file.
        #   Assuming __name__ == "__main__", then the name of that file should
        #   be the last remaining (unprocessed) argument in the argument list.)
        if len(argv) == 1:
            raise InputError('Error: This program requires at least one argument\n'
                             ' the name of a file containing ttree template commands\n')
        elif len(argv) == 2:
            settings.infile = argv[1]
            try:
                settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                             settings.infile) # Parse text from file
            except IOError:
                sys.stderr.write('Error: unable to open file\n'
                                 ' \"'+settings.infile+'\"\n'
                                 ' for reading.\n')
                sys.exit(1)
            # Remove the consumed file-name argument from argv.
            del(argv[1:2])
        else:
            # if there are more than 2 remaining arguments,
            problem_args = ['\"'+arg+'\"' for arg in argv[1:]]
            raise InputError('Syntax Error('+g_program_name+'):\n\n'
                             ' Unrecognized argument.\n'
                             ' (That or there is some other problem with the argument list.)\n'
                             ' The problem begins with these arguments:\n'
                             ' '+(' '.join(problem_args))+'\n\n'
                             ' (The actual problem may be earlier in the argument list.\n'
                             ' If these arguments are source files, then keep in mind\n'
                             ' that this program can not parse multiple source files.)\n'
                             ' Check the syntax of the entire argument list.\n')
####### control flow begins here: #######
if __name__ == "__main__":
    g_program_name = __file__.split('/')[-1] # = 'lttree_check.py'
    g_version_str = '0.76'
    g_date_str = '2014-12-19'
    sys.stderr.write(g_program_name+' v'+g_version_str+' '+g_date_str+'\n')
    try:
        # Parse the argument list and instantiate the lexer we will be using:
        #settings = BasicUISettings()
        #BasicUIParseArgs(sys.argv, settings)
        settings = LttreeSettings()
        LttreeCheckParseArgs(sys.argv, settings)
        # Invoke syntax checker pass:
        # This first check only checks for very simple mistakes
        # (mispelled versions of standard files or variable names).
        CheckSyntaxCheap(settings.lex)
        settings.lex.instream.close()
        # Now read the file again.
        # This time parse it using StaticObj.ReadTemplate().
        # (This will allow us to check for deeper problems.)
        del settings.lex
        settings.lex = TemplateLexer(open(settings.infile, 'r'),
                                     settings.infile)
        static_tree_root = StaticObj('', None) # The root of the static tree
                                               # has name '' (equivalent to '/')
        sys.stderr.write(g_program_name+': parsing the class definitions...')
        static_tree_root.Parse(settings.lex)
        sys.stderr.write(' done\n'+g_program_name+': looking up classes...')
        static_tree_root.LookupStaticRefs()
        sys.stderr.write(' done\n'+g_program_name+': looking up @variables...')
        AssignStaticVarPtrs(static_tree_root,
                            search_instance_commands=False)
        AssignStaticVarPtrs(static_tree_root,
                            search_instance_commands=True)
        sys.stderr.write(' done\n')
        #sys.stderr.write(' done\n\nclass_def_tree = ' + str(static_tree_root) + '\n\n')
        # Sets recording which interaction types had their coefficients
        # defined, either in a "Data ... Coeffs" section ("data_*") or via
        # in-file *_coeff commands ("in_*").  Filled in by CheckSyntaxStatic.
        data_pair_coeffs_defined = set([])
        data_bond_coeffs_defined = set([])
        data_angle_coeffs_defined = set([])
        data_dihedral_coeffs_defined = set([])
        data_improper_coeffs_defined = set([])
        in_pair_coeffs_defined = set([])
        in_bond_coeffs_defined = set([])
        in_angle_coeffs_defined = set([])
        in_dihedral_coeffs_defined = set([])
        in_improper_coeffs_defined = set([])
        # Now check the static syntax
        # Here we check the contents of the the "write_once()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=False)
        # Here we check the contents of the the "write()" commands:
        CheckSyntaxStatic(static_tree_root,
                          static_tree_root,
                          settings.column_names,
                          data_pair_coeffs_defined,
                          data_bond_coeffs_defined,
                          data_angle_coeffs_defined,
                          data_dihedral_coeffs_defined,
                          data_improper_coeffs_defined,
                          in_pair_coeffs_defined,
                          in_bond_coeffs_defined,
                          in_angle_coeffs_defined,
                          in_dihedral_coeffs_defined,
                          in_improper_coeffs_defined,
                          search_instance_commands=True)
        # For each category (bond/angle/dihedral/improper/atom): reject
        # mixing "Data ... Coeffs" sections with *_coeff commands, then
        # verify every referenced (non-deleted) type has coeffs defined.
        if 'bond' in static_tree_root.categories:
            if ((len(data_bond_coeffs_defined) > 0) and
                (len(in_bond_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"bond_coeff\" commands\n'+
                                 ' OR you can have a \"Data Bond Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_bond_coeffs_defined) > 0:
                bond_coeffs_defined = data_bond_coeffs_defined
            else:
                bond_coeffs_defined = in_bond_coeffs_defined
            bond_bindings = static_tree_root.categories['bond'].bindings
            for nd,bond_binding in bond_bindings.items():
                if not nd.IsDeleted():
                    if ((not (bond_binding in bond_coeffs_defined)) and
                        (not HasWildCard(bond_binding.full_name)) and
                        (not ('*' in bond_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing bond coeff.\n\n'+
                                         ' No coeffs for the \"'+bond_binding.full_name+'\" bond type have been\n'+
                                         'defined, but a reference to that bond type was discovered\n'+
                                         'near '+ErrorLeader(bond_binding.refs[0].srcloc.infile,
                                                             bond_binding.refs[0].srcloc.lineno)+'. Check this file and also check\n'
                                         'your \"bond_coeff\" commands or your \"Data Bond Coeffs" section.\n'
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        if 'angle' in static_tree_root.categories:
            if ((len(data_angle_coeffs_defined) > 0) and
                (len(in_angle_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"angle_coeff\" commands\n'+
                                 ' OR you can have a \"Data Angle Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_angle_coeffs_defined) > 0:
                angle_coeffs_defined = data_angle_coeffs_defined
            else:
                angle_coeffs_defined = in_angle_coeffs_defined
            angle_bindings = static_tree_root.categories['angle'].bindings
            for nd,angle_binding in angle_bindings.items():
                if not nd.IsDeleted():
                    # NOTE(review): unlike the 'bond' check above, the
                    # HasWildCard() test is commented out here (and for
                    # dihedrals/impropers below) -- presumably intentional;
                    # verify against upstream history.
                    if ((not (angle_binding in angle_coeffs_defined)) and
                        #(not HasWildCard(angle_binding.full_name)) and
                        (not ('*' in angle_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing angle coeff.\n\n'+
                                         ' No coeffs for the \"'+angle_binding.full_name+'\" angle type have been\n'+
                                         'defined, but a reference to that angle type was discovered\n'+
                                         'near '+ErrorLeader(angle_binding.refs[0].srcloc.infile,
                                                             angle_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"angle_coeff\" commands or your \"Data Angle Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        if 'dihedral' in static_tree_root.categories:
            if ((len(data_dihedral_coeffs_defined) > 0) and
                (len(in_dihedral_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"dihedral_coeff\" commands\n'+
                                 ' OR you can have a \"Data Dihedral Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_dihedral_coeffs_defined) > 0:
                dihedral_coeffs_defined = data_dihedral_coeffs_defined
            else:
                dihedral_coeffs_defined = in_dihedral_coeffs_defined
            dihedral_bindings = static_tree_root.categories['dihedral'].bindings
            for nd,dihedral_binding in dihedral_bindings.items():
                if not nd.IsDeleted():
                    if ((not (dihedral_binding in dihedral_coeffs_defined)) and
                        #(not HasWildCard(dihedral_binding.full_name)) and
                        (not ('*' in dihedral_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing dihedral coeff.\n\n'+
                                         ' No coeffs for the \"'+dihedral_binding.full_name+'\" dihedral type have been\n'+
                                         'defined, but a reference to that dihedral type was discovered\n'+
                                         'near '+ErrorLeader(dihedral_binding.refs[0].srcloc.infile,
                                                             dihedral_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"dihedral_coeff\" commands or your \"Data Dihedral Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        if 'improper' in static_tree_root.categories:
            if ((len(data_improper_coeffs_defined) > 0) and
                (len(in_improper_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"improper_coeff\" commands\n'+
                                 ' OR you can have a \"Data Improper Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_improper_coeffs_defined) > 0:
                improper_coeffs_defined = data_improper_coeffs_defined
            else:
                improper_coeffs_defined = in_improper_coeffs_defined
            improper_bindings = static_tree_root.categories['improper'].bindings
            for nd,improper_binding in improper_bindings.items():
                if not nd.IsDeleted():
                    if ((not (improper_binding in improper_coeffs_defined)) and
                        #(not HasWildCard(improper_binding.full_name)) and
                        (not ('*' in improper_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing improper coeff.\n\n'+
                                         ' No coeffs for the \"'+improper_binding.full_name+'\" improper type have been\n'+
                                         'defined, but a reference to that improper type was discovered\n'+
                                         'near '+ErrorLeader(improper_binding.refs[0].srcloc.infile,
                                                             improper_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"improper_coeff\" commands or your \"Data Improper Coeffs" section.\n'+
                                         '---------------------------------------------------------------------\n'+
                                         g_no_check_msg)
        if 'atom' in static_tree_root.categories:
            if ((len(data_pair_coeffs_defined) > 0) and
                (len(in_pair_coeffs_defined) > 0)):
                raise InputError('---------------------------------------------------------------------\n'+
                                 ' Syntax error: You can EITHER use \"pair_coeff\" commands\n'+
                                 ' OR you can have a \"Data Pair Coeffs\" section.\n'+
                                 ' LAMMPS will not allow both (...as of late 2012)\n'+
                                 '---------------------------------------------------------------------\n'+
                                 g_no_check_msg)
                #' If this is no longer true, to override this error message you must\n'+
                #' disable error checking by running moltemplate with the -nocheck option.\n')
            if len(data_pair_coeffs_defined) > 0:
                pair_coeffs_defined = data_pair_coeffs_defined
            else:
                pair_coeffs_defined = in_pair_coeffs_defined
            atom_bindings = static_tree_root.categories['atom'].bindings
            for nd,atom_binding in atom_bindings.items():
                if not nd.IsDeleted():
                    # pair coeffs are keyed by an (I,J) tuple; only the
                    # diagonal (self,self) entry is required here.
                    if ((not ((atom_binding,atom_binding)
                              in
                              pair_coeffs_defined)) and
                        (not HasWildCard(atom_binding.full_name)) and
                        (not (('*','*') in pair_coeffs_defined))):
                        raise InputError('---------------------------------------------------------------------\n'+
                                         ' Syntax error: Missing pair coeff.\n\n'+
                                         ' No pair coeffs for the \"'+atom_binding.full_name+'\" atom type have been\n'+
                                         'defined, but a reference to that atom type was discovered\n'+
                                         'near '+ErrorLeader(atom_binding.refs[0].srcloc.infile,
                                                             atom_binding.refs[0].srcloc.lineno)+'. Check this file and\n'
                                         'also check your \"pair_coeff\" commands or your \"Data Pair Coeffs" section.\n\n'+
                                         g_no_check_msg)
        #else:
        #    raise InputError('Error: No atom types (@atom) have been defined.\n')
        sys.stderr.write(g_program_name+': -- No errors detected. --\n')
        exit(0)
    except (ValueError, InputError) as err:
        # All checker failures funnel through InputError; print and fail.
        sys.stderr.write('\n'+str(err)+'\n')
        sys.exit(1)
|
SGenheden/lammps
|
tools/moltemplate/src/lttree_check.py
|
Python
|
gpl-2.0
| 133,762
|
[
"LAMMPS"
] |
280ab7443fcdcd5528d31e8f198bbf3cc53fc2309cc9f457308d2667e30e94bc
|
#!/usr/bin/env python
"""
Save polynomial basis on reference elements or on a mesh for visualization into
a given output directory.
"""
from __future__ import absolute_import
import sys
from six.moves import range
sys.path.append('.')
import os
from argparse import ArgumentParser
import numpy as nm
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import get_print_info, ensure_path
from sfepy.discrete import FieldVariable, Variables, PolySpace
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.discrete.fem.geometry_element import GeometryElement
from sfepy.discrete.fem.linearizer import create_output
from sfepy.discrete.fem.fields_base import create_expression_output
# Help strings for the command-line options defined in main()'s
# ArgumentParser, keyed by each option's destination name.
helps = {
    'basis' :
    'name of the FE basis [default: %(default)s]',
    'derivative' :
    'save d-th derivative of FE basis, can be 0 or 1 [default: %(default)s]',
    'max_order' :
    'maximum order of polynomials [default: %(default)s]',
    'geometry' :
    'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
    ' [default: %(default)s]',
    'mesh' :
    ('name of the mesh file - alternative to --geometry '
     '[default: %(default)s]'),
    'permutations' :
    'list of geometry element permutations for each element, e.g. 0,1 is a'
    ' single permutation for two elements, 0,1,0,2,1,0 are three permutations'
    ' for two elements. Special value "all" can be used to save all possible'
    ' permutations for given reference element. Works only with --mesh option'
    ' [default: %(default)s]',
    'dofs' :
    'if given, save only the DOFs specified as a comma-separated list'
    ' [default: %(default)s]',
    'lin_options' :
    'linearizer options [default: %(default)s]',
    'plot_dofs' :
    'plot local and global DOF numberings, with --mesh option',
}
def get_dofs(dofs, n_total):
    """Parse a DOF selection.

    Parameters
    ----------
    dofs : str or None
        Comma-separated DOF indices, or None to select all DOFs.
    n_total : int
        The total number of DOFs; used only when `dofs` is None.

    Returns
    -------
    list of int
        The selected DOF indices.
    """
    if dofs is None:
        return list(range(n_total))
    return [int(token) for token in dofs.split(',')]
def save_basis_on_mesh(mesh, options, output_dir, lin,
                       permutations=None, suffix=''):
    """Save each selected basis DOF on `mesh` as a VTK file in `output_dir`.

    Parameters
    ----------
    mesh : Mesh
        The mesh on which the field/basis is defined.
    options : Namespace
        Parsed command-line options (uses max_order, basis, plot_dofs,
        dofs, derivative).
    output_dir : str
        Output directory for the 'dof_*.vtk' files.
    lin : Struct
        Linearization options (min_level, max_level, eps).
    permutations : array, optional
        Per-cell connectivity permutation indices; when given, the mesh
        connectivity is rewritten accordingly on a copy of the mesh.
    suffix : str, optional
        Suffix appended to output file names (used to tag permutations).
    """
    if permutations is not None:
        # Work on a copy and rewrite the cell connectivity in place
        # according to the requested per-cell permutations of the
        # reference element connectivity.
        mesh = mesh.copy()
        gel = GeometryElement(mesh.descs[0])
        perms = gel.get_conn_permutations()[permutations]
        conn = mesh.cmesh.get_cell_conn()
        n_el, n_ep = conn.num, gel.n_vertex
        # Flat offsets of each cell's first entry in conn.indices.
        offsets = nm.arange(n_el) * n_ep
        conn.indices[:] = conn.indices.take((perms + offsets[:, None]).ravel())
    domain = FEDomain('domain', mesh)
    omega = domain.create_region('Omega', 'all')
    field = Field.from_args('f', nm.float64, shape=1, region=omega,
                            approx_order=options.max_order,
                            poly_space_base=options.basis)
    var = FieldVariable('u', 'unknown', field)
    if options.plot_dofs:
        # Optional interactive plot of local/global DOF numberings.
        import sfepy.postprocess.plot_dofs as pd
        import sfepy.postprocess.plot_cmesh as pc
        ax = pc.plot_wireframe(None, mesh.cmesh)
        ax = pd.plot_global_dofs(ax, field.get_coor(), field.econn)
        ax = pd.plot_local_dofs(ax, field.get_coor(), field.econn)
        if options.dofs is not None:
            ax = pd.plot_nodes(ax, field.get_coor(), field.econn,
                               field.poly_space.nodes,
                               get_dofs(options.dofs, var.n_dof))
        pd.plt.show()
    output('dofs: %d' % var.n_dof)
    vec = nm.empty(var.n_dof, dtype=var.dtype)
    n_digit, _format = get_print_info(var.n_dof, fill='0')
    name_template = os.path.join(output_dir,
                                 'dof_%s%s.vtk' % (_format, suffix))
    for ip in get_dofs(options.dofs, var.n_dof):
        output('dof %d...' % ip)
        # Unit DOF vector selecting the ip-th basis function.
        vec.fill(0.0)
        vec[ip] = 1.0
        var.set_data(vec)
        if options.derivative == 0:
            out = var.create_output(vec, linearization=lin)
        else:
            # First derivative: evaluate the gradient in quadrature points.
            out = create_expression_output('ev_grad.ie.Elements(u)',
                                           'u', 'f', {'f' : field}, None,
                                           Variables([var]),
                                           mode='qp', verbose=False,
                                           min_level=lin.min_level,
                                           max_level=lin.max_level,
                                           eps=lin.eps)
        name = name_template % ip
        ensure_path(name)
        out['u'].mesh.write(name, out=out)
        output('...done (%s)' % name)
def main():
    """Parse command-line options and save FE basis functions as VTK files.

    Two modes of operation:

    - without ``--mesh``: sample the basis on the reference element given
      by ``--geometry`` and write one 'bf_*.vtk' file per selected DOF;
    - with ``--mesh``: save the basis on the given mesh (one file per DOF),
      optionally once per connectivity permutation (``--permutations``).
    """
    parser = ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version', version='%(prog)s')
    parser.add_argument('-b', '--basis', metavar='name',
                        action='store', dest='basis',
                        default='lagrange', help=helps['basis'])
    parser.add_argument('-d', '--derivative', metavar='d', type=int,
                        action='store', dest='derivative',
                        default=0, help=helps['derivative'])
    parser.add_argument('-n', '--max-order', metavar='order', type=int,
                        action='store', dest='max_order',
                        default=2, help=helps['max_order'])
    parser.add_argument('-g', '--geometry', metavar='name',
                        action='store', dest='geometry',
                        default='2_4', help=helps['geometry'])
    parser.add_argument('-m', '--mesh', metavar='mesh',
                        action='store', dest='mesh',
                        default=None, help=helps['mesh'])
    parser.add_argument('--permutations', metavar='permutations',
                        action='store', dest='permutations',
                        default=None, help=helps['permutations'])
    parser.add_argument('--dofs', metavar='dofs',
                        action='store', dest='dofs',
                        default=None, help=helps['dofs'])
    parser.add_argument('-l', '--lin-options', metavar='options',
                        action='store', dest='lin_options',
                        default='min_level=2,max_level=5,eps=1e-3',
                        help=helps['lin_options'])
    parser.add_argument('--plot-dofs',
                        action='store_true', dest='plot_dofs',
                        default=False, help=helps['plot_dofs'])
    parser.add_argument('output_dir')
    options = parser.parse_args()
    output_dir = options.output_dir
    output('polynomial space:', options.basis)
    output('max. order:', options.max_order)
    # Default linearization settings, overridden below by --lin-options.
    lin = Struct(kind='adaptive', min_level=2, max_level=5, eps=1e-3)
    for opt in options.lin_options.split(','):
        key, val = opt.split('=')
        # NOTE(review): eval() on a command-line value - acceptable for a
        # local visualization script, but never expose to untrusted input.
        setattr(lin, key, eval(val))
    if options.mesh is None:
        # Reference-element mode: sample the basis on a single element.
        # The geometry string is '<dim>_<n_vertices>', e.g. '2_4'.
        dim, n_ep = int(options.geometry[0]), int(options.geometry[2])
        output('reference element geometry:')
        output(' dimension: %d, vertices: %d' % (dim, n_ep))
        gel = GeometryElement(options.geometry)
        # gps: linear (geometry) polynomial space; ps: the plotted basis.
        gps = PolySpace.any_from_args(None, gel, 1,
                                      base=options.basis)
        ps = PolySpace.any_from_args(None, gel, options.max_order,
                                     base=options.basis)
        n_digit, _format = get_print_info(ps.n_nod, fill='0')
        name_template = os.path.join(output_dir, 'bf_%s.vtk' % _format)
        for ip in get_dofs(options.dofs, ps.n_nod):
            output('shape function %d...' % ip)
            def eval_dofs(iels, rx):
                # Evaluate the ip-th basis function (or its gradient) in
                # the reference coordinates rx.
                if options.derivative == 0:
                    bf = ps.eval_base(rx).squeeze()
                    rvals = bf[None, :, ip:ip+1]
                else:
                    bfg = ps.eval_base(rx, diff=True)
                    rvals = bfg[None, ..., ip]
                return rvals
            def eval_coors(iels, rx):
                # Map reference coordinates to physical coordinates using
                # the linear geometry interpolation.
                bf = gps.eval_base(rx).squeeze()
                coors = nm.dot(bf, gel.coors)[None, ...]
                return coors
            # Adaptively linearize the (possibly high-order) function.
            (level, coors, conn,
             vdofs, mat_ids) = create_output(eval_dofs, eval_coors, 1,
                                             ps, min_level=lin.min_level,
                                             max_level=lin.max_level,
                                             eps=lin.eps)
            out = {
                'bf' : Struct(name='output_data',
                              mode='vertex', data=vdofs,
                              var_name='bf', dofs=None)
            }
            mesh = Mesh.from_data('bf_mesh', coors, None, [conn], [mat_ids],
                                  [options.geometry])
            name = name_template % ip
            ensure_path(name)
            mesh.write(name, out=out)
            output('...done (%s)' % name)
    else:
        # Mesh mode: save the basis on the given mesh.
        mesh = Mesh.from_file(options.mesh)
        output('mesh geometry:')
        output(' dimension: %d, vertices: %d, elements: %d'
               % (mesh.dim, mesh.n_nod, mesh.n_el))
        if options.permutations:
            if options.permutations == 'all':
                # Enumerate the full Cartesian product of all per-cell
                # connectivity permutations of the reference element.
                from sfepy.linalg import cycle
                gel = GeometryElement(mesh.descs[0])
                n_perms = gel.get_conn_permutations().shape[0]
                all_permutations = [ii for ii in cycle(mesh.n_el * [n_perms])]
            else:
                all_permutations = [int(ii)
                                    for ii in options.permutations.split(',')]
                all_permutations = nm.array(all_permutations)
                # NOTE: 'np' here is just an int count, not the numpy module
                # (numpy is imported as 'nm' in this file).
                np = len(all_permutations)
                all_permutations.shape = (np // mesh.n_el, mesh.n_el)
            output('using connectivity permutations:\n', all_permutations)
        else:
            # Single pass with the mesh's own connectivity.
            all_permutations = [None]
        for ip, permutations in enumerate(all_permutations):
            if permutations is None:
                suffix = ''
            else:
                suffix = '_' + '_'.join('%d' % ii for ii in permutations)
            save_basis_on_mesh(mesh, options, output_dir, lin, permutations,
                               suffix)
if __name__ == '__main__':
main()
|
vlukes/sfepy
|
script/save_basis.py
|
Python
|
bsd-3-clause
| 10,029
|
[
"VTK"
] |
286d683e764b4ea846b6c897f7aae6a75723cf1d52495c1f1f7c3a241aede96d
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``skip_hinted_login_dialog`` flag to all provider configs.

    The same BooleanField (with identical help text) is added to the LTI,
    OAuth2 and SAML provider configuration models; the help text is defined
    once below so the three operations cannot drift apart.
    """

    # Shared help text for the new field on all three provider models.
    _SKIP_DIALOG_HELP = ('If this option is enabled, users that visit a "TPA hinted" URL for this provider (e.g. a URL ending with `?tpa_hint=[provider_name]`) will be forwarded directly to the login URL of the provider instead of being first prompted with a login dialog.')

    dependencies = [
        ('third_party_auth', '0009_auto_20170415_1144'),
    ]

    operations = [
        migrations.AddField(
            model_name='ltiproviderconfig',
            name='skip_hinted_login_dialog',
            field=models.BooleanField(default=False, help_text=_SKIP_DIALOG_HELP),
        ),
        migrations.AddField(
            model_name='oauth2providerconfig',
            name='skip_hinted_login_dialog',
            field=models.BooleanField(default=False, help_text=_SKIP_DIALOG_HELP),
        ),
        migrations.AddField(
            model_name='samlproviderconfig',
            name='skip_hinted_login_dialog',
            field=models.BooleanField(default=False, help_text=_SKIP_DIALOG_HELP),
        ),
    ]
|
eduNEXT/edunext-platform
|
common/djangoapps/third_party_auth/migrations/0010_add_skip_hinted_login_dialog_field.py
|
Python
|
agpl-3.0
| 1,526
|
[
"VisIt"
] |
68413ca10327e98fd81b75a6414e00b6514936ea74236c1f12f1530f5fbfe0c2
|
import matplotlib.pyplot as plt
import numpy as np
import time
from matplotlib.path import Path
pi=np.pi
# "dirac_sheet.py" is an object class that defines a 2D discretized sheet for calculating solutions to the
# time-domain solutions of the Dirac equation. It implements a staggered time and staggered space discretization
# approach outlined in Journal of Computational Physics 289(2015) 169-180. The discretized space is constructed of
# checkerboard "u" and "v" lattice. The checkerboard is formed by explicitly defining a "u1","u2","v1", and "v2"
# grid that forms a unit cell on a square grid. Within a unit cell, "u1" is the upper-right, "u2" is the lower
# left, "v1" is the upper left, and #v2 is the lower right.
#
# The default sheet is a square of NgridxNgrid unit cells with periodic boundary conditions. Further boundary
# conditions can be set by three matrices: No_prop_mat, Absorb_mat, and Drive_mat. No_prop_mat defines regions
# over which no propagation occurs (similar to defining the edge of the 2D material). Absorb_mat defines regions
# where the wave is "lossy". This is primarily intended to introduce absorptive boundary conditions. Drive_mat
# defines a region from which the electron wave is sourced.
#
# The properties of the sourced electrons are defined by the set_p method. p=2*pi/lambda is the injected electron
# wave-vector magnitude, and theta is the wave-vector direction. The code only computes plane-wave injection with
# the expectation that the user can sum several solutions to localize the injection.
#
# For boundary conditions, the u-lattice is set to zero in the "No_prop" region, and the v-lattice is free. The drive
# wave is implemented only on the v-lattice. In this sense, the "u" lattice is the master, and the "v" the slave.
class dirac_sheet:
    """A 2D discretized sheet for time-domain solutions of the Dirac equation.

    Implements the staggered-time, staggered-space discretization of
    J. Comput. Phys. 289 (2015) 169-180 on a checkerboard of "u" and "v"
    sub-lattices.  The checkerboard is built from four explicit grids --
    u1, u2, v1, v2 -- forming a unit cell on a square grid: within a unit
    cell u1 is upper-right, u2 lower-left, v1 upper-left, v2 lower-right.

    The default sheet is Ngrid x Ngrid unit cells with periodic boundary
    conditions (np.roll wrap-around).  Further boundary conditions are set
    by three (2*Ngrid_y x 2*Ngrid_x) masks:

    - No_prop_mat: regions with no propagation (edges of the material);
    - Absorb_mat: lossy regions (absorptive boundary conditions);
    - Drive_mat: region where the electron plane wave is sourced.

    The u-lattice is the master (zeroed in no-prop regions, driven
    directly); the v-lattice is the slave.

    Fix vs. original: ``dtype=np.bool`` was replaced by ``dtype=bool``;
    the ``np.bool`` alias was removed in NumPy 1.24 and raised
    AttributeError there.  Behavior is otherwise unchanged.
    """

    def __init__(self, m, Ngrid, D_t, D_x, X_offset, Y_offset):
        self.t = 0.0                # simulation time
        self.Drive_coupling = 0.3   # coupling strength of drive plane-wave
        self.Abs_rate = 0.99        # exponential decay rate in absorptive regions
        self.m = m                  # effective mass; in graphene = 0
        if not isinstance(Ngrid, tuple):
            # Square sheet: (2Ngrid)x(2Ngrid) sub-lattice points in total.
            self.Ngrid_x = Ngrid
            self.Ngrid_y = Ngrid
        else:
            self.Ngrid_x = Ngrid[0]
            self.Ngrid_y = Ngrid[1]
        self.D_t = D_t              # discrete time step
        self.D_x = D_x              # spatial discretization
        self.D_y = D_x
        self.X_offset = X_offset
        self.Y_offset = Y_offset
        self.p0 = 2*np.pi/100.0     # drive wave-vector magnitude (default)
        self.theta = 0              # drive wave-vector direction
        # Selects the full update including mass/potential terms in
        # v_step/u_step; the caller must set it True after making m or
        # V1..V4 nonzero.
        self.massorV = False
        self.px = self.p0*np.cos(self.theta)
        self.py = self.p0*np.sin(self.theta)
        self.y, self.x = np.mgrid[slice((0-round(self.Ngrid_y/2)),(self.Ngrid_y-round(self.Ngrid_y/2)),1),
                        slice((0-round(self.Ngrid_x/2)),(self.Ngrid_x-round(self.Ngrid_x/2)),1)]
        # X,Y coordinate matrices for each sub-lattice point; the 'half'
        # grids are shifted by half a lattice spacing.
        self.x, self.y = self.x*self.D_x+self.X_offset, self.y*self.D_y+self.Y_offset
        self.xhalf, self.yhalf = self.x-self.D_x/2.0, self.y-self.D_y/2.0
        # Boundary-condition masks over the full (2Ngrid_y x 2Ngrid_x) sheet.
        self.No_prop_mat = np.zeros((self.x.shape[0]*2, self.x.shape[1]*2))
        self.Absorb_mat = np.zeros((self.x.shape[0]*2, self.x.shape[1]*2))
        self.Drive_mat = np.zeros((self.x.shape[0]*2, self.x.shape[1]*2))
        # Legacy; should be equivalent to theta.
        self.phi_p = np.arctan2(self.py, self.px)
        # out/out2 are temporaries for the wave-equation update, so the
        # wave matrices can be manipulated without overwriting themselves.
        self.out = np.zeros(self.x.shape, dtype=np.complex64)
        self.out2 = np.zeros(self.x.shape, dtype=np.complex64)
        # The u1, u2, v1, v2 sub-lattices.
        self.u1 = np.zeros(self.x.shape, dtype=np.complex64)
        self.u2 = np.zeros(self.x.shape, dtype=np.complex64)
        self.v1 = np.zeros(self.x.shape, dtype=np.complex64)
        self.v2 = np.zeros(self.x.shape, dtype=np.complex64)
        # Drive plane-wave spatial patterns on each sub-lattice.
        self.u10 = np.exp(1j*(self.px*self.x+self.py*self.y))
        self.u20 = np.exp(1j*(self.px*self.xhalf+self.py*self.yhalf))
        self.v10 = np.exp(1j*(self.px*self.xhalf+self.py*self.y))
        self.v20 = np.exp(1j*(self.px*self.x+self.py*self.yhalf))
        self.N_zero = self.x[1, np.abs(self.x[1, :]) == np.min(np.abs(self.x[1, :]))]
        # Logical sub-lattice masks for the boundary conditions (the four
        # interleaved slices pick out u1/u2/v1/v2 positions).
        self.rDu1 = self.Drive_mat[1::2, 1::2] != 0.0
        self.rDu2 = self.Drive_mat[0::2, 0::2] != 0.0
        self.rDv1 = self.Drive_mat[1::2, 0::2] != 0.0
        self.rDv2 = self.Drive_mat[0::2, 1::2] != 0.0
        self.rAu1 = self.Absorb_mat[1::2, 1::2] != 0.0
        self.rAu2 = self.Absorb_mat[0::2, 0::2] != 0.0
        self.rAv1 = self.Absorb_mat[1::2, 0::2] != 0.0
        self.rAv2 = self.Absorb_mat[0::2, 1::2] != 0.0
        self.rNPu1 = self.No_prop_mat[1::2, 1::2] != 0.0
        self.rNPu2 = self.No_prop_mat[0::2, 0::2] != 0.0
        self.rNPv1 = self.No_prop_mat[1::2, 0::2] != 0.0
        self.rNPv2 = self.No_prop_mat[0::2, 1::2] != 0.0
        # V1-V4 are electrostatic scalar potentials for the v1, v2, u1 and
        # u2 lattices respectively.
        self.V1 = np.zeros(self.x.shape)
        self.V2 = np.zeros(self.x.shape)
        self.V3 = np.zeros(self.x.shape)
        self.V4 = np.zeros(self.x.shape)
        # Precompute sum/difference of mass and potential to reduce
        # computation overhead in the update equations.
        self.mminusV1 = self.m-self.V1
        self.mminusV2 = self.m-self.V2
        self.mplusV3 = self.m+self.V3
        self.mplusV4 = self.m+self.V4

    def set_p(self, p0, theta):
        """Set the injection wave-vector: p0 is the magnitude, theta the
        direction; recomputes the drive plane-wave patterns."""
        self.p0 = p0
        self.theta = theta
        self.px = self.p0*np.cos(self.theta)
        self.py = self.p0*np.sin(self.theta)
        self.phi_p = np.arctan2(self.py, self.px)
        self.u10 = np.exp(1j*(self.px*self.x+self.py*self.y))
        self.u20 = np.exp(1j*(self.px*self.xhalf+self.py*self.yhalf))
        # NOTE(review): unlike __init__, v10/v20 here include the extra
        # +phi_p phase - confirm this asymmetry is intended.
        self.v10 = np.exp(1j*(self.px*self.xhalf+self.py*self.y+self.phi_p))
        self.v20 = np.exp(1j*(self.px*self.x+self.py*self.yhalf+self.phi_p))

    def get_pos_mat(self):
        """Return the stitched (2Ngrid_y x 2Ngrid_x) X and Y coordinate
        matrices; useful for defining boundary-condition masks."""
        Xtot_out = np.zeros((self.x.shape[0]*2, self.x.shape[1]*2))
        Ytot_out = np.zeros((self.x.shape[0]*2, self.x.shape[1]*2))
        Xtot_out[::2, ::2] = self.xhalf
        Xtot_out[1::2, ::2] = self.xhalf
        Xtot_out[::2, 1::2] = self.x
        Xtot_out[1::2, 1::2] = self.x
        Ytot_out[::2, ::2] = self.yhalf
        Ytot_out[1::2, ::2] = self.y
        Ytot_out[::2, 1::2] = self.yhalf
        Ytot_out[1::2, 1::2] = self.y
        return Xtot_out, Ytot_out

    def set_No_prop_mat(self, No_prop_mat):
        """Import No_prop_mat and build the logical matrices that define
        the non-propagating regions and their edges."""
        self.No_prop_mat = No_prop_mat != 0.0
        # Propagating cells whose neighbor in the given direction is no-prop.
        self.Nprop_up = ~self.No_prop_mat & np.roll(self.No_prop_mat, -1, axis=0)
        self.Nprop_down = ~self.No_prop_mat & np.roll(self.No_prop_mat, 1, axis=0)
        self.Nprop_right = ~self.No_prop_mat & np.roll(self.No_prop_mat, -1, axis=1)
        self.Nprop_left = ~self.No_prop_mat & np.roll(self.No_prop_mat, 1, axis=1)
        temp = np.zeros(self.No_prop_mat.shape, dtype=np.uint8)
        for i in range(-1, 2, 2):
            for j in range(-1, 2, 2):
                temp += ~(np.roll(np.roll(self.No_prop_mat, i, axis=0), j, axis=1))
        # Border of the no-prop region.
        self.Nprop_edge = (self.No_prop_mat)*temp
        self.rNPu1 = self.No_prop_mat[1::2, 1::2]
        self.rNPu2 = self.No_prop_mat[0::2, 0::2]
        self.rNPv1 = self.No_prop_mat[1::2, 0::2]
        self.rNPv2 = self.No_prop_mat[0::2, 1::2]
        self.ru1_edge = self.Nprop_edge[1::2, 1::2]
        self.ru2_edge = self.Nprop_edge[0::2, 0::2]
        self.rv1_edge = self.Nprop_edge[1::2, 0::2]
        self.rv2_edge = self.Nprop_edge[0::2, 1::2]
        self.ru1_edge0 = self.ru1_edge > 0
        self.ru2_edge0 = self.ru2_edge > 0
        self.rv1_edge0 = self.rv1_edge > 0
        self.rv2_edge0 = self.rv2_edge > 0
        # Encode pixel-by-pixel the existence/absence of nearest neighbors
        # (axis 2 order: up, right, down, left).
        # dtype=bool: np.bool was removed in NumPy 1.24.
        self.ru1_edge = np.zeros((self.rNPu1.shape[0], (self.rNPu1.shape[1]), 4), dtype=bool)
        self.ru1_edge[:, :, 0] = self.Nprop_up[1::2, 1::2]
        self.ru1_edge[:, :, 1] = self.Nprop_right[1::2, 1::2]
        self.ru1_edge[:, :, 2] = self.Nprop_down[1::2, 1::2]
        self.ru1_edge[:, :, 3] = self.Nprop_left[1::2, 1::2]
        self.ru1_edgeX = self.ru1_edge[:, :, 1] | self.ru1_edge[:, :, 3]
        self.ru1_edgeY = self.ru1_edge[:, :, 0] | self.ru1_edge[:, :, 2]
        self.ru2_edge = np.zeros((self.rNPu2.shape[0], (self.rNPu2.shape[1]), 4), dtype=bool)
        self.ru2_edge[:, :, 0] = self.Nprop_up[0::2, 0::2]
        self.ru2_edge[:, :, 1] = self.Nprop_right[0::2, 0::2]
        self.ru2_edge[:, :, 2] = self.Nprop_down[0::2, 0::2]
        self.ru2_edge[:, :, 3] = self.Nprop_left[0::2, 0::2]
        self.ru2_edgeX = self.ru2_edge[:, :, 1] | self.ru2_edge[:, :, 3]
        self.ru2_edgeY = self.ru2_edge[:, :, 0] | self.ru2_edge[:, :, 2]
        self.rv1_edge = np.zeros((self.rNPv1.shape[0], (self.rNPv1.shape[1]), 4), dtype=bool)
        self.rv1_edge[:, :, 0] = self.Nprop_up[1::2, 0::2]
        self.rv1_edge[:, :, 1] = self.Nprop_right[1::2, 0::2]
        self.rv1_edge[:, :, 2] = self.Nprop_down[1::2, 0::2]
        self.rv1_edge[:, :, 3] = self.Nprop_left[1::2, 0::2]
        self.rv1_edgeX = self.rv1_edge[:, :, 1] | self.rv1_edge[:, :, 3]
        self.rv1_edgeY = self.rv1_edge[:, :, 0] | self.rv1_edge[:, :, 2]
        self.rv2_edge = np.zeros((self.rNPv2.shape[0], (self.rNPv2.shape[1]), 4), dtype=bool)
        self.rv2_edge[:, :, 0] = self.Nprop_up[0::2, 1::2]
        self.rv2_edge[:, :, 1] = self.Nprop_right[0::2, 1::2]
        self.rv2_edge[:, :, 2] = self.Nprop_down[0::2, 1::2]
        self.rv2_edge[:, :, 3] = self.Nprop_left[0::2, 1::2]
        self.rv2_edgeX = self.rv2_edge[:, :, 1] | self.rv2_edge[:, :, 3]
        self.rv2_edgeY = self.rv2_edge[:, :, 0] | self.rv2_edge[:, :, 2]

    def set_Absorb_mat(self, Absorb_mat):
        """Import Absorb_mat and build per-sub-lattice absorption masks."""
        self.Absorb_mat = Absorb_mat
        self.rAu1 = self.Absorb_mat[1::2, 1::2] != 0.0
        self.rAu2 = self.Absorb_mat[0::2, 0::2] != 0.0
        self.rAv1 = self.Absorb_mat[1::2, 0::2] != 0.0
        self.rAv2 = self.Absorb_mat[0::2, 1::2] != 0.0
        self.rAu1_flat = np.where(self.rAu1)
        self.rAu2_flat = np.where(self.rAu2)
        self.rAv1_flat = np.where(self.rAv1)
        self.rAv2_flat = np.where(self.rAv2)

    def set_Drive_mat(self, Drive_mat):
        """Import Drive_mat and build per-sub-lattice drive masks."""
        self.Drive_mat = Drive_mat
        self.rDu1 = self.Drive_mat[1::2, 1::2] != 0.0
        self.rDu2 = self.Drive_mat[0::2, 0::2] != 0.0
        self.rDv1 = self.Drive_mat[1::2, 0::2] != 0.0
        self.rDv2 = self.Drive_mat[0::2, 1::2] != 0.0
        self.rDu1_flat = np.where(self.rDu1)
        self.rDu2_flat = np.where(self.rDu2)
        self.rDv1_flat = np.where(self.rDv1)
        self.rDv2_flat = np.where(self.rDv2)

    def v_step(self):
        """Step the v-lattice by D_t/2 relative to a fixed u-lattice."""
        self.t += self.D_t/2.0
        # Introduce the drive wave on the u-lattice.
        self.u1[self.rDu1] += self.Drive_coupling*self.u10[self.rDu1]*np.exp(-1j*self.p0*self.t)
        self.u2[self.rDu2] += self.Drive_coupling*self.u20[self.rDu2]*np.exp(-1j*self.p0*self.t)
        # Apply absorption.
        self.u1[self.rAu1] *= self.Abs_rate
        self.u2[self.rAu2] *= self.Abs_rate
        # Wave equation (massorV selects the variant with mass/potential).
        if self.massorV:
            self.out=(1/(2*1j+self.D_t*self.mminusV1))*((2*1j-self.D_t*self.mminusV1)*self.v1-(2*1j*self.D_t/self.D_x)*(self.u1-np.roll(self.u1,1,axis=1))+(2*self.D_t/self.D_y)*(np.roll(self.u2,-1,axis=0)-self.u2))
            self.out2=(1/(2*1j+self.D_t*self.mminusV2))*((2*1j-self.D_t*self.mminusV2)*self.v2-(2*1j*self.D_t/self.D_x)*(np.roll(self.u2,-1,axis=1)-self.u2)+(2*self.D_t/self.D_y)*(self.u1-np.roll(self.u1,1,axis=0)))
        else:
            self.out=(1/(2*1j))*((2*1j)*self.v1-(2*1j*self.D_t/self.D_x)*(self.u1-np.roll(self.u1,1,axis=1))+(2*self.D_t/self.D_y)*(np.roll(self.u2,-1,axis=0)-self.u2))
            self.out2=(1/(2*1j))*((2*1j)*self.v2-(2*1j*self.D_t/self.D_x)*(np.roll(self.u2,-1,axis=1)-self.u2)+(2*self.D_t/self.D_y)*(self.u1-np.roll(self.u1,1,axis=0)))
        self.v1 = self.out
        self.v2 = self.out2

    def u_step(self):
        """Step the u-lattice by D_t/2 relative to a fixed v-lattice."""
        self.t += self.D_t/2.0
        # Apply absorption.
        self.v1[self.rAv1] *= self.Abs_rate
        self.v2[self.rAv2] *= self.Abs_rate
        # Wave equation; (~rNP*) zeroes the u-lattice in no-prop regions.
        if self.massorV:
            self.out=(~self.rNPu1)*(1/(2*1j-self.D_t*self.mplusV3))*((2*1j+self.D_t*self.mplusV3)*self.u1-(2*1j*self.D_t/self.D_x)*(np.roll(self.v1,-1,axis=1)-self.v1)-(2*self.D_t/self.D_y)*(np.roll(self.v2,-1,axis=0)-self.v2))
            self.out2=(~self.rNPu2)*(1/(2*1j-self.D_t*self.mplusV4))*((2*1j+self.D_t*self.mplusV4)*self.u2-(2*1j*self.D_t/self.D_x)*(self.v2-np.roll(self.v2,1,axis=1))-(2*self.D_t/self.D_y)*(self.v1-np.roll(self.v1,1,axis=0)))
        else:
            self.out=(~self.rNPu1)*(1/(2*1j))*((2*1j)*self.u1-(2*1j*self.D_t/self.D_x)*(np.roll(self.v1,-1,axis=1)-self.v1)-(2*self.D_t/self.D_y)*(np.roll(self.v2,-1,axis=0)-self.v2))
            self.out2=(~self.rNPu2)*(1/(2*1j))*((2*1j)*self.u2-(2*1j*self.D_t/self.D_x)*(self.v2-np.roll(self.v2,1,axis=1))-(2*self.D_t/self.D_y)*(self.v1-np.roll(self.v1,1,axis=0)))
        self.u1 = self.out
        self.u2 = self.out2

    def time_step(self):
        """Take a full time step D_t as two staggered half steps."""
        self.v_step()
        self.u_step()
|
arthurbarnard/Dirac_simulation
|
dirac_sheet.py
|
Python
|
gpl-3.0
| 12,611
|
[
"DIRAC"
] |
5289ed6196fee181bf7369c87eb00c6478b99bc0fc6c031b721efe913d610b54
|
import numpy as np
from mayavi import mlab
from BDQuaternions import Conventions, EulerAngles
from BDSpace.Coordinates import Cartesian
import BDSpaceVis as Visual
# Create two Cartesian coordinate systems using the Bunge Euler-angle
# convention.
convention = Conventions().get_convention('Bunge')
# With default arguments the basis coincides with the 'Absolute' (mayavi)
# coordinate system.
CS_1 = Cartesian(origin=np.array([0, 0, 0]), euler_angles_convention=convention)
CS_2 = Cartesian(origin=np.array([0, 0, 0]), euler_angles_convention=convention)
step_prec = 1.0  # precession step per frame, in degrees
step_rot = 1.0  # rotation step per frame, in degrees
direction_prec = 1
direction_rot = 1
Phi = 30  # tilt in degrees
CS_1.euler_angles = EulerAngles(CS_1.euler_angles.euler_angles + np.array([0, np.deg2rad(Phi), 0]), convention)  # tilt the CS
# The Visual module draws the coordinate system bases in the mayavi scene.
fig = mlab.figure('CS demo', bgcolor=(0, 0, 0))  # Create the mayavi figure
arrows_2, labels_2 = Visual.draw_coordinate_system_axes(fig, CS_2, scale=2, draw_labels=True)
cs_box_1, arrows_1, labels_1 = Visual.draw_coordinate_system_box(fig, CS_1, draw_labels=True)
@mlab.show
@mlab.animate(delay=10)
def anim():
    # Endless animation: each frame increments the first (precession) and
    # third (rotation) Euler angles of the tilted CS and redraws its box.
    while 1:
        delta_eulers = np.array([direction_prec * np.deg2rad(step_prec), 0, direction_rot * np.deg2rad(step_rot)])
        CS_1.euler_angles = EulerAngles(CS_1.euler_angles.euler_angles + delta_eulers, convention)
        Visual.update_coordinate_system_box(CS_1, cs_box_1, arrows_1, labels_1)
        yield
anim()
|
bond-anton/Space_visualization
|
demo/03_euler_angles_precession.py
|
Python
|
apache-2.0
| 1,473
|
[
"Mayavi"
] |
a391af9cb6d4351e253b83c32bef09f4661534bd6d86e38eb04587f8014aa2a6
|
#!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
def GetRGBColor(colorName):
    '''
    Return the red, green and blue components of a named color
    as a list of three doubles.
    '''
    rgb = [0.0] * 3  # defaults to black if the name is unknown
    vtk.vtkNamedColors().GetColorRGB(colorName, rgb)
    return rgb
# Create a constrained Delaunay triangulation (i.e., edges and polygons
# defined).  Generate the input points and constrained edges/polygons.
#
# (x, y, z) coordinates of the 40 input points: points 0-11 form the outer
# boundary polygon, points 12-39 trace the inner constraint polygon.
point_coords = [
    (1, 4, 0), (3, 4, 0), (7, 4, 0), (11, 4, 0), (13, 4, 0),
    (13, 8, 0), (13, 12, 0), (10, 12, 0), (7, 12, 0), (4, 12, 0),
    (1, 12, 0), (1, 8, 0), (3.5, 5, 0), (4.5, 5, 0), (5.5, 8, 0),
    (6.5, 8, 0), (6.5, 5, 0), (7.5, 5, 0), (7.5, 8, 0), (9, 8, 0),
    (9, 5, 0), (10, 5, 0), (10, 7, 0), (11, 5, 0), (12, 5, 0),
    (10.5, 8, 0), (12, 11, 0), (11, 11, 0), (10, 9, 0), (10, 11, 0),
    (9, 11, 0), (9, 9, 0), (7.5, 9, 0), (7.5, 11, 0), (6.5, 11, 0),
    (6.5, 9, 0), (5, 9, 0), (4, 6, 0), (3, 9, 0), (2, 9, 0),
]
points = vtk.vtkPoints()
for point_id, (x, y, z) in enumerate(point_coords):
    points.InsertPoint(point_id, x, y, z)

polys = vtk.vtkCellArray()
# Outer boundary polygon: points 0..11 in order.
polys.InsertNextCell(12)
for point_id in range(12):
    polys.InsertCellPoint(point_id)
# Inner constraint polygon: points 39 down to 12 (reverse orientation).
polys.InsertNextCell(28)
for point_id in range(39, 11, -1):
    polys.InsertCellPoint(point_id)

polyData = vtk.vtkPolyData()
polyData.SetPoints(points)
polyData.SetPolys(polys)
# Triangulate the points; the same poly data serves as the constraint
# source (edges/polygons that must appear in the triangulation).
#
del1 = vtk.vtkDelaunay2D()
del1.SetInputData(polyData)
del1.SetSourceData(polyData)
mapMesh = vtk.vtkPolyDataMapper()
mapMesh.SetInputConnection(del1.GetOutputPort())
meshActor = vtk.vtkActor()
meshActor.SetMapper(mapMesh)
# Render tubes around the mesh edges.
extract = vtk.vtkExtractEdges()
extract.SetInputConnection(del1.GetOutputPort())
tubes = vtk.vtkTubeFilter()
tubes.SetInputConnection(extract.GetOutputPort())
tubes.SetRadius(0.1)
tubes.SetNumberOfSides(6)
mapEdges = vtk.vtkPolyDataMapper()
mapEdges.SetInputConnection(tubes.GetOutputPort())
edgeActor = vtk.vtkActor()
edgeActor.SetMapper(mapEdges)
edgeActor.GetProperty().SetColor(GetRGBColor('peacock'))
edgeActor.GetProperty().SetSpecularColor(1, 1, 1)
edgeActor.GetProperty().SetSpecular(0.3)
edgeActor.GetProperty().SetSpecularPower(20)
edgeActor.GetProperty().SetAmbient(0.2)
edgeActor.GetProperty().SetDiffuse(0.8)
# Create graphics objects:
# the rendering window, renderer, and interactive renderer.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer; set the background and size.
ren1.AddActor(meshActor)
ren1.AddActor(edgeActor)
ren1.ResetCamera()
ren1.SetBackground(0, 0, 0)
renWin.SetSize(450, 300)
# Render the image.
#
ren1.GetActiveCamera().Zoom(2)
iren.Initialize()
#iren.Start()
|
hlzz/dotfiles
|
graphics/VTK-7.0.0/Filters/Core/Testing/Python/constrainedDelaunay.py
|
Python
|
bsd-3-clause
| 4,603
|
[
"VTK"
] |
296496da2c7ecfc67fa591175632ddc511b12f8c843afde6d2d65c576bcbdcc1
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
This module provides classes for plotting Pourbaix objects.
"""
import six
from six.moves import map
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Jan 26, 2012"
import numpy as np
import re
import collections
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
from pymatgen.analysis.pourbaix.maker import PREFAC
from pymatgen.analysis.pourbaix.entry import MultiEntry
from pymatgen.phasediagram.plotter import uniquelines
from pymatgen.util.string_utils import latexify
from pymatgen.util.plotting_utils import get_publication_quality_plot
from pymatgen.util.coord_utils import in_coord_list
class PourbaixPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: A PhaseDiagram object.
show_unstable: Whether unstable phases will be plotted as well as
red crosses. Defaults to False.
"""
    def __init__(self, pourbaixdiagram, show_unstable=False):
        # The Pourbaix diagram whose facets/entries feed all plots below.
        self._pd = pourbaixdiagram
        # Unique facet edges (pairs of qhull point indices) of the hull.
        self.lines = uniquelines(self._pd.facets)
        # When True, plot_planes() draws planes for all entries, not just
        # the stable ones.
        self.show_unstable = show_unstable
@property
def pourbaix_hull_plot_data(self):
"""
Pourbaix diagram convex hull data.
Returns:
(lines, stable_entries, unstable_entries)
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
facetlines = self.lines
lines = list()
stable_entries = dict()
for line in facetlines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
x = [data[line[0]][0], data[line[1]][0]]
y = [data[line[0]][1], data[line[1]][1]]
z = [data[line[0]][2], data[line[1]][2]]
coord = [x, y, z]
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
allentries = pd.all_entries
alldata = np.array(pd.qhull_data)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(len(allentries)):
entry = allentries[i]
if entry not in stable:
x = [alldata[i][0], alldata[i][0]]
y = [alldata[i][1], alldata[i][1]]
z = [alldata[i][2], alldata[i][2]]
coord = [x, y, z]
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def show(self, label_stable=True, label_unstable=False, filename=""):
"""
Draws the convex hull diagram using Matplotlib and show it.
"""
plt = self._get_plot(label_stable=label_stable,
label_unstable=label_unstable)
if filename == "":
plt.show()
else:
plt.savefig(filename, bbox_inches=0)
    def _get_plot(self, label_stable=True, label_unstable=False):
        """
        Plot convex hull of Pourbaix Diagram entries.

        Builds a 3D axes with one line per hull edge, numbers the stable
        (and optionally unstable) nodes, and lists the number-to-formula
        legend as figure text.  Returns the pyplot module for further use
        by the caller (show/savefig).
        """
        import matplotlib.pyplot as plt
        import mpl_toolkits.mplot3d.axes3d as p3
        from matplotlib.font_manager import FontProperties
        fig = plt.figure()
        ax = p3.Axes3D(fig)
        font = FontProperties()
        font.set_weight("bold")
        font.set_size(14)
        (lines, labels, unstable) = self.pourbaix_hull_plot_data
        # Running label number; nodes are annotated with this number and
        # the number->formula mapping is collected in newlabels.
        count = 1
        newlabels = list()
        for x, y, z in lines:
            ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                    markerfacecolor="r", markersize=10)
        for coords in sorted(labels.keys()):
            entry = labels[coords]
            # print_name is defined elsewhere on this class (not in view).
            label = self.print_name(entry)
            if label_stable:
                ax.text(coords[0], coords[1], coords[2], str(count))
                newlabels.append("{} : {}".format(
                    count, latexify_ion(latexify(label))))
                count += 1
        if label_unstable:
            # Unstable entries are drawn as green point markers.
            for entry in unstable.keys():
                label = self.print_name(entry)
                coords = unstable[entry]
                ax.plot([coords[0], coords[0]], [coords[1], coords[1]],
                        [coords[2], coords[2]], "bo", markerfacecolor="g",
                        markersize=10)
                ax.text(coords[0], coords[1], coords[2], str(count))
                newlabels.append("{} : {}".format(
                    count, latexify_ion(latexify(label))))
                count += 1
        plt.figtext(0.01, 0.01, "\n".join(newlabels))
        plt.xlabel("pH")
        plt.ylabel("V")
        return plt
    def plot_planes(self):
        """
        Plot the free energy planes as a function of pH and V.

        Each entry contributes one plane g(pH, V) = PREFAC*npH*pH +
        nPhi*V + g0, drawn over a fixed pH range of [-10, 28] and a
        potential range of [-3, 3] V. Shows the figure interactively.
        """
        # Either all entries or only the stable ones, depending on the
        # show_unstable flag set elsewhere on this object.
        if self.show_unstable:
            entries = self._pd._all_entries
        else:
            entries = self._pd.stable_entries
        num_plots = len(entries)
        import matplotlib.pyplot as plt
        colormap = plt.cm.gist_ncar
        # NOTE(review): despite the name, `fig` is a 3D Axes object
        # (figure().gca(projection='3d')), not a Figure.
        fig = plt.figure().gca(projection='3d')
        # One distinct color per entry, sampled from the colormap.
        color_array = [colormap(i) for i in np.linspace(0, 0.9, num_plots)]
        labels = []
        color_index = -1
        for entry in entries:
            # Plane normal from the entry's pH and potential coefficients;
            # d is the standard free energy offset.
            normal = np.array([-PREFAC * entry.npH, -entry.nPhi, +1])
            d = entry.g0
            color_index += 1
            pH, V = np.meshgrid(np.linspace(-10, 28, 100),
                                np.linspace(-3, 3, 100))
            # Solve normal . (pH, V, g) = d for g.
            g = (-normal[0] * pH - normal[1] * V + d) / normal[2]
            lbl = latexify_ion(
                latexify(entry._entry.composition.reduced_formula))
            labels.append(lbl)
            fig.plot_surface(pH, V, g, color=color_array[color_index],
                             label=lbl)
        # NOTE(review): matplotlib legends for plot_surface may not render
        # without proxy artists — verify on the target matplotlib version.
        plt.legend(labels)
        plt.xlabel("pH")
        plt.ylabel("E (V)")
        plt.show()
    def plot_chempot_range_map(self, limits=None, title="", filename=""):
        """Alias kept for API compatibility; delegates to plot_pourbaix()."""
        self.plot_pourbaix(limits, title, filename)
def plot_pourbaix(self, limits=None, title="", filename="", label_domains=True):
plt = self.get_pourbaix_plot(limits=limits, title=title, label_domains=label_domains)
if filename == "":
plt.show()
else:
f = plt.gcf()
f.set_size_inches((11.5, 9))
plt.tight_layout(pad=1.09)
    def pourbaix_plot_data(self, limits=None):
        """
        Get data required to plot Pourbaix diagram.

        Args:
            limits: 2D list containing limits of the Pourbaix diagram
                of the form [[xlo, xhi], [ylo, yhi]]

        Returns:
            stable_entries, unstable_entries
            stable_entries: dict of lines. The keys are Pourbaix Entries, and
            lines are in the form of a list
            unstable_entries: list of unstable entries
        """
        # Side effects: caches the analyzer and the chemical-potential
        # ranges on self for use by the other plotting methods.
        analyzer = PourbaixAnalyzer(self._pd)
        self._analyzer = analyzer
        if limits:
            analyzer.chempot_limits = limits
        chempot_ranges = analyzer.get_chempot_range_map(limits)
        self.chempot_ranges = chempot_ranges
        stable_entries_list = collections.defaultdict(list)
        for entry in chempot_ranges:
            for line in chempot_ranges[entry]:
                # Each boundary segment is stored as [[x0, x1], [y0, y1]].
                x = [line.coords[0][0], line.coords[1][0]]
                y = [line.coords[0][1], line.coords[1][1]]
                coords = [x, y]
                stable_entries_list[entry].append(coords)
        unstable_entries_list = [entry for entry in self._pd.all_entries
                                 if entry not in self._pd.stable_entries]
        return stable_entries_list, unstable_entries_list
def get_center(self, lines):
"""
Returns coordinates of center of a domain. Useful
for labeling a Pourbaix plot.
Args:
lines:
Lines corresponding to a domain
limits:
Limits of Pourbaix diagram
Returns:
center_x, center_y:
x,y coordinate of center of domain. If domain lies
outside limits, center will lie on the boundary.
"""
center_x = 0.0
center_y = 0.0
coords = []
count_center = 0.0
for line in lines:
for coord in np.array(line).T:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
cx = coord[0]
cy = coord[1]
center_x += cx
center_y += cy
count_center += 1.0
if count_center == 0.0:
count_center = 1.0
center_x /= count_center
center_y /= count_center
return center_x, center_y
def get_pourbaix_plot(self, limits=None, title="", label_domains=True):
"""
Plot Pourbaix diagram.
Args:
limits: 2D list containing limits of the Pourbaix diagram
of the form [[xlo, xhi], [ylo, yhi]]
Returns:
plt:
matplotlib plot object
"""
# plt = get_publication_quality_plot(24, 14.4)
plt = get_publication_quality_plot(16)
(stable, unstable) = self.pourbaix_plot_data(limits)
if limits:
xlim = limits[0]
ylim = limits[1]
else:
xlim = self._analyzer.chempot_limits[0]
ylim = self._analyzer.chempot_limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
for entry, lines in stable.items():
center_x = 0.0
center_y = 0.0
coords = []
count_center = 0.0
for line in lines:
(x, y) = line
plt.plot(x, y, "k-", linewidth=lw)
for coord in np.array(line).T:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
cx = coord[0]
cy = coord[1]
center_x += cx
center_y += cy
count_center += 1.0
if count_center == 0.0:
count_center = 1.0
center_x /= count_center
center_y /= count_center
if ((center_x <= xlim[0]) | (center_x >= xlim[1]) |
(center_y <= ylim[0]) | (center_y >= ylim[1])):
continue
xy = (center_x, center_y)
if label_domains:
plt.annotate(self.print_name(entry), xy, fontsize=20, color="b")
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
def print_name(self, entry):
"""
Print entry name if single, else print multientry
"""
str_name = ""
if isinstance(entry, MultiEntry):
if len(entry.entrylist) > 2:
return str(self._pd.qhull_entries.index(entry))
for e in entry.entrylist:
str_name += latexify_ion(latexify(e.name)) + " + "
str_name = str_name[:-3]
return str_name
else:
return latexify_ion(latexify(entry.name))
def legend(self, label_unstable=False, legend_file=""):
if self._pd._multielement:
unprocessed_entries = self._pd.unprocessed_entries
set_of_entries = set()
list_of_entries = {}
for entry in self._pd.stable_entries:
index_ent = self._pd.qhull_entries.index(entry)
str_ename = ""
for e in entry.entrylist:
str_ename += e.name + " + "
for ent in unprocessed_entries:
if ent.name == e.name:
indx = unprocessed_entries.index(ent)
set_of_entries.add(indx)
continue
str_ename = str_ename[:-3]
list_of_entries[index_ent] = str_ename
if label_unstable:
for entry in [entry for entry in self._pd.all_entries
if entry not in self._pd.stable_entries]:
for e in entry.entrylist:
indx = unprocessed_entries.index(e)
set_of_entries.add(indx)
str_labels = " Species: \n"
if legend_file:
f = open(legend_file, 'w')
for i in list_of_entries.keys():
str_labels += str(i) + " : " + list_of_entries[i] + "\n"
f.write(str_labels)
f.close()
return str_labels
def write_image(self, plt, stream, image_format="svg"):
"""
Writes the phase diagram to an image in a stream.
Args:
plt:
matplotlib plot
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
"""
f = plt.gcf()
f.set_size_inches((12, 10))
plt.tight_layout(pad=1.09)
plt.savefig(stream, format=image_format)
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
if entry not in self._analyzer.pourbaix_domain_vertices.keys():
return []
return self._analyzer.pourbaix_domain_vertices[entry]
def get_pourbaix_plot_colorfill_by_element(self, limits=None, title="",
label_domains=True, element=None):
"""
Color domains by element
"""
from matplotlib.patches import Polygon
entry_dict_of_multientries = collections.defaultdict(list)
plt = get_publication_quality_plot(16)
optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
'#FF8080', '#DCDCDC', '#800000', '#FF8000']
optim_font_color = ['#FFFFA0', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
'#007F7F', '#232323', '#7FFFFF', '#007FFF']
hatch = ['/', '\\', '|', '-', '+', 'o', '*']
(stable, unstable) = self.pourbaix_plot_data(limits)
num_of_overlaps = {key: 0 for key in stable.keys()}
for entry in stable:
if isinstance(entry, MultiEntry):
for e in entry.entrylist:
if element in e.composition.elements:
entry_dict_of_multientries[e.name].append(entry)
num_of_overlaps[entry] += 1
else:
entry_dict_of_multientries[entry.name].append(entry)
if limits:
xlim = limits[0]
ylim = limits[1]
else:
xlim = self._analyzer.chempot_limits[0]
ylim = self._analyzer.chempot_limits[1]
h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
[xlim[1], -xlim[1] * PREFAC]])
o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
[xlim[1], -xlim[1] * PREFAC + 1.23]])
neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
ax = plt.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
from pymatgen import Composition, Element
from pymatgen.core.ion import Ion
def len_elts(entry):
if "(s)" in entry:
comp = Composition(entry[:-3])
else:
comp = Ion.from_formula(entry)
return len([el for el in comp.elements if el not in
[Element("H"), Element("O")]])
sorted_entry = entry_dict_of_multientries.keys()
sorted_entry.sort(key=len_elts)
i = -1
label_chr = map(chr, list(range(65, 91)))
for entry in sorted_entry:
color_indx = 0
x_coord = 0.0
y_coord = 0.0
npts = 0
i += 1
for e in entry_dict_of_multientries[entry]:
hc = 0
fc = 0
bc = 0
xy = self.domain_vertices(e)
c = self.get_center(stable[e])
x_coord += c[0]
y_coord += c[1]
npts += 1
color_indx = i
if "(s)" in entry:
comp = Composition(entry[:-3])
else:
comp = Ion.from_formula(entry)
if len([el for el in comp.elements if el not in
[Element("H"), Element("O")]]) == 1:
if color_indx >= len(optim_colors):
color_indx = color_indx -\
int(color_indx / len(optim_colors)) * len(optim_colors)
patch = Polygon(xy, facecolor=optim_colors[color_indx],
closed=True, lw=3.0, fill=True)
bc = optim_colors[color_indx]
else:
if color_indx >= len(hatch):
color_indx = color_indx - int(color_indx / len(hatch)) * len(hatch)
patch = Polygon(xy, hatch=hatch[color_indx], closed=True, lw=3.0, fill=False)
hc = hatch[color_indx]
ax.add_patch(patch)
xy_center = (x_coord / npts, y_coord / npts)
if label_domains:
if color_indx >= len(optim_colors):
color_indx = color_indx -\
int(color_indx / len(optim_colors)) * len(optim_colors)
fc = optim_font_color[color_indx]
if bc and not hc:
bbox = dict(boxstyle="round", fc=fc)
if hc and not bc:
bc = 'k'
fc = 'w'
bbox = dict(boxstyle="round", hatch=hc, fill=False)
if bc and hc:
bbox = dict(boxstyle="round", hatch=hc, fc=fc)
# bbox.set_path_effects([PathEffects.withSimplePatchShadow()])
plt.annotate(latexify_ion(latexify(entry)), xy_center,
color=bc, fontsize=30, bbox=bbox)
# plt.annotate(label_chr[i], xy_center,
# color=bc, fontsize=30, bbox=bbox)
lw = 3
plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
plt.xlabel("pH")
plt.ylabel("E (V)")
plt.title(title, fontsize=20, fontweight='bold')
return plt
    def get_pourbaix_mark_passive(self, limits=None, title="", label_domains=True, passive_entry=None):
        """
        Plot the Pourbaix diagram marking passivating domains.

        Solid domains containing O or H are colored as passivation
        candidates (mark 1); domains of the solid form of passive_entry
        (or, if available, of the majority element of the diagram's
        composition) get mark 2. All other domains are left unfilled.

        Args:
            limits: 2D list [[xlo, xhi], [ylo, yhi]] bounding the diagram.
            title: title placed on the plot.
            label_domains: whether to annotate each domain.
            passive_entry: name of the species to mark as passive; may be
                overridden by the diagram's dominant element (see below).

        Returns:
            matplotlib plot object
        """
        from matplotlib.patches import Polygon
        from pymatgen import Element
        from itertools import chain
        import operator
        plt = get_publication_quality_plot(16)
        optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                        '#FF8080', '#DCDCDC', '#800000', '#FF8000']
        optim_font_colors = ['#FFC000', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                             '#007F7F', '#232323', '#7FFFFF', '#007FFF']
        (stable, unstable) = self.pourbaix_plot_data(limits)
        # mark_passive: 0 = plain, 1 = passivation candidate, 2 = passive.
        mark_passive = {key: 0 for key in stable.keys()}
        if self._pd._elt_comp:
            # Override passive_entry with the element of largest fraction
            # in the diagram's composition.
            # NOTE(review): uses six.iteritems — six must be imported at
            # the top of this file (outside this chunk).
            maxval = max(six.iteritems(self._pd._elt_comp), key=operator.itemgetter(1))[1]
            key = [k for k, v in self._pd._elt_comp.items() if v == maxval]
            passive_entry = key[0]

        def list_elts(entry):
            # All elements appearing in the entry (flattened over the
            # members of a MultiEntry).
            elts_list = set()
            if isinstance(entry, MultiEntry):
                for el in chain.from_iterable([[el for el in e.composition.elements]
                                               for e in entry.entrylist]):
                    elts_list.add(el)
            else:
                elts_list = entry.composition.elements
            return elts_list

        for entry in stable:
            if passive_entry + str("(s)") in entry.name:
                mark_passive[entry] = 2
                continue
            if "(s)" not in entry.name:
                # Aqueous species cannot passivate.
                continue
            elif len(set([Element("O"), Element("H")]).intersection(set(list_elts(entry)))) > 0:
                mark_passive[entry] = 1
        if limits:
            xlim = limits[0]
            ylim = limits[1]
        else:
            xlim = self._analyzer.chempot_limits[0]
            ylim = self._analyzer.chempot_limits[1]
        # Standard Pourbaix guide lines (see get_pourbaix_plot).
        h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                               [xlim[1], -xlim[1] * PREFAC]])
        o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                               [xlim[1], -xlim[1] * PREFAC + 1.23]])
        neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
        V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
        ax = plt.gca()
        ax.set_xlim(xlim)
        ax.set_ylim(ylim)
        for e in stable.keys():
            xy = self.domain_vertices(e)
            c = self.get_center(stable[e])
            if mark_passive[e] == 1:
                color = optim_colors[0]
                fontcolor = optim_font_colors[0]
                colorfill = True
            elif mark_passive[e] == 2:
                color = optim_colors[1]
                fontcolor = optim_font_colors[1]
                colorfill = True
            else:
                color = "w"
                colorfill = False
                fontcolor = "k"
            patch = Polygon(xy, facecolor=color, closed=True, lw=3.0, fill=colorfill)
            ax.add_patch(patch)
            if label_domains:
                plt.annotate(self.print_name(e), c, color=fontcolor, fontsize=20)
        lw = 3
        plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
        plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
        plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
        plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
        plt.xlabel("pH")
        plt.ylabel("E (V)")
        plt.title(title, fontsize=20, fontweight='bold')
        return plt
def latexify_ion(formula):
return re.sub(r"()\[([^)]*)\]", r"\1$^{\2}$", formula)
|
sonium0/pymatgen
|
pymatgen/analysis/pourbaix/plotter.py
|
Python
|
mit
| 24,142
|
[
"pymatgen"
] |
43472bcd65dac1448988b34ccc25e8930541096bd82504a9f9af30e55607a8f4
|
# Orca
#
# Copyright (C) 2013-2014 Igalia, S.L.
#
# Author: Joanmarie Diggs <jdiggs@igalia.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2013-2014 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import orca.orca as orca
import orca.orca_state as orca_state
import orca.scripts.default as default
from .script_utilities import Utilities
class Script(default.Script):
    """GAIL/GTK toolkit script working around toolkit accessibility bugs.

    Most overrides here compensate for widgets that emit only deprecated
    focus: events (or none at all) instead of the expected
    state-changed:focused events.
    """

    def __init__(self, app):
        default.Script.__init__(self, app)

    def getUtilities(self):
        """Return the GAIL-specific script utilities."""
        return Utilities(self)

    def onActiveDescendantChanged(self, event):
        """Callback for object:active-descendant-changed accessibility events."""
        role = event.source.getRole()
        try:
            focusedRole = orca_state.locusOfFocus.getRole()
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit. locusOfFocus may be None or a
        # defunct accessible, so any ordinary failure just falls through
        # to the default handler.
        except Exception:
            pass
        else:
            # This is very likely typeahead search and not a real focus change.
            tableRoles = [pyatspi.ROLE_TABLE, pyatspi.ROLE_TREE_TABLE]
            if focusedRole == pyatspi.ROLE_TEXT and role in tableRoles:
                orca.setLocusOfFocus(event, event.source, False)
        default.Script.onActiveDescendantChanged(self, event)

    def onFocus(self, event):
        """Callback for focus: accessibility events."""
        # NOTE: This event type is deprecated and Orca should no longer use it.
        # This callback remains just to handle bugs in applications and toolkits
        # during the remainder of the unstable (3.11) development cycle.
        role = event.source.getRole()
        # https://bugzilla.gnome.org/show_bug.cgi?id=711397
        if role == pyatspi.ROLE_COMBO_BOX:
            orca.setLocusOfFocus(event, event.source)
            return
        # The above issue also seems to happen with spin buttons.
        if role == pyatspi.ROLE_SPIN_BUTTON:
            orca.setLocusOfFocus(event, event.source)
            return
        # https://bugzilla.gnome.org/show_bug.cgi?id=720987
        if role == pyatspi.ROLE_TABLE_COLUMN_HEADER:
            orca.setLocusOfFocus(event, event.source)
            return
        # https://bugzilla.gnome.org/show_bug.cgi?id=720989
        if role == pyatspi.ROLE_MENU == event.source.parent.getRole():
            orca.setLocusOfFocus(event, event.source)
            return
        # Unfiled. But this happens when you are in gtk-demo's application demo,
        # get into a menu and then press Escape. The text widget emits a focus:
        # event, but not a state-changed:focused event.
        #
        # A similar issue can be seen when a text widget starts out having
        # focus, such as in the old gnome-screensaver dialog.
        if role in [pyatspi.ROLE_TEXT, pyatspi.ROLE_PASSWORD_TEXT]:
            orca.setLocusOfFocus(event, event.source)
            return
        # Unfiled. When a context menu first appears and an item is already
        # selected, we get a focus: event for that menu item, but there is
        # not a state-changed event for that item, nor a selection-changed
        # event for the menu.
        menuItems = [pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM]
        if role in menuItems:
            if orca_state.locusOfFocus \
               and orca_state.locusOfFocus.parent != event.source.parent:
                orca.setLocusOfFocus(event, event.source)
                return
        # Unfiled, but in at least some dialogs, the first time a push
        # button gains focus, we only get a focus: event for it.
        # Seems to happen for checkboxes too. This is why we can't have
        # nice things.
        if role in [pyatspi.ROLE_PUSH_BUTTON, pyatspi.ROLE_CHECK_BOX]:
            orca.setLocusOfFocus(event, event.source)
            return
        # Unfiled, but yet another case of only getting a focus: event when
        # a widget appears in a parent container and is already focused.
        if role == pyatspi.ROLE_TABLE:
            obj = event.source
            selectedChildren = self.utilities.selectedChildren(obj)
            if selectedChildren:
                obj = selectedChildren[0]
            orca.setLocusOfFocus(event, obj)
            return

    def onTextSelectionChanged(self, event):
        """Callback for object:text-selection-changed accessibility events."""
        obj = event.source
        # Ignore selection changes in objects that do not have focus.
        if not self.utilities.isSameObject(obj, orca_state.locusOfFocus):
            return
        default.Script.onTextSelectionChanged(self, event)
|
chrys87/orca-beep
|
src/orca/scripts/toolkits/GAIL/script.py
|
Python
|
lgpl-2.1
| 5,302
|
[
"ORCA"
] |
fc74a08f12daee81d78b2c4df43ff0d4f63446043bb058ab1b6d65fd168fcbdd
|
#
# Copyright 2014, 2018 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import numpy as np
from scipy.interpolate import interp1d
import ase.io
from ase.io.netcdftrajectory import NetCDFTrajectory
from ase.atoms import Atoms
from ase.md import VelocityVerlet
from ase.optimize.fire import FIRE
from matscipy.fracture_mechanics.idealbrittlesolid import (IdealBrittleSolid,
triangular_lattice_slab,
find_crack_tip,
set_initial_velocities,
set_constraints,
extend_strip)
from matscipy.fracture_mechanics.crack import (thin_strip_displacement_y,
ConstantStrainRate)
from matscipy.numerical import numerical_forces
sys.path.insert(0, '.')
import params
# Sanity check: analytic forces of the IdealBrittleSolid pair potential
# must agree with numerical (finite-difference) forces over a range of
# dimer separations spanning the cutoff region.
calc = IdealBrittleSolid(rc=params.rc, k=params.k, a=params.a, beta=params.beta)
# Separations from just inside equilibrium to slightly past the cutoff.
x_dimer = np.linspace(params.a-(params.rc-params.a),
                      params.a+1.1*(params.rc-params.a),51)
dimers = [Atoms('Si2', [(0, 0, 0), (x, 0, 0)],
                cell=[10., 10., 10.], pbc=True) for x in x_dimer]
calc.set_reference_crystal(dimers[0])
e_dimer = []
f_dimer = []
f_num = []
for d in dimers:
    d.set_calculator(calc)
    e_dimer.append(d.get_potential_energy())
    f_dimer.append(d.get_forces())
    f_num.append(numerical_forces(d))
e_dimer = np.array(e_dimer)
f_dimer = np.array(f_dimer)
f_num = np.array(f_num)
# Abort the run if analytic and numerical forces disagree.
assert abs(f_dimer - f_num).max() < 0.1
# Build the reference slab and compute the Griffith surface energy and
# the corresponding critical (Griffith) strain.
crystal = triangular_lattice_slab(params.a, 3*params.N, params.N)
calc.set_reference_crystal(crystal)
crystal.set_calculator(calc)
e0 = crystal.get_potential_energy()
l = crystal.cell[0,0]
h = crystal.cell[1,1]
# BUG FIX: the original used Python 2 `print` statements here, which are
# syntax errors under Python 3 — while the rest of this script already
# uses print() calls. Normalized to the function form (same output).
print('l=', l, 'h=', h)

# compute surface (Griffith) energy: separate the slab halves beyond the
# interaction cutoff and take the energy difference per unit length.
b = crystal.copy()
b.set_calculator(calc)
shift = calc.parameters['rc']*2
y = crystal.positions[:, 1]
b.positions[y > h/2, 1] += shift
b.cell[1, 1] += shift
e1 = b.get_potential_energy()
E_G = (e1 - e0)/l
print('Griffith energy', E_G)

# compute Griffith strain: ramp uniaxial strain until the stored elastic
# energy per unit length matches E_G, then invert by interpolation.
eps = 0.0   # initial strain is zero
eps_max = 2/np.sqrt(3)*(params.rc-params.a)*np.sqrt(params.N-1)/h # Griffith strain assuming harmonic energy
deps = eps_max/100. # strain increment
e_over_l = 0.0 # initial energy per unit length is zero
energy = []
strain = []
while e_over_l < E_G:
    c = crystal.copy()
    c.set_calculator(calc)
    c.positions[:, 1] *= (1.0 + eps)
    c.cell[1,1] *= (1.0 + eps)
    e_over_l = c.get_potential_energy()/l
    energy.append(e_over_l)
    strain.append(eps)
    eps += deps
energy = np.array(energy)
eps_of_e = interp1d(energy, strain, kind='linear')
eps_G = eps_of_e(E_G)
print('Griffith strain', eps_G)
# Set up the thin-strip fracture geometry and run the dynamics.
c = crystal.copy()
c.info['E_G'] = E_G
c.info['eps_G'] = eps_G

# open up the cell along x and y by introducing some vaccum
orig_cell_width = c.cell[0, 0]
orig_cell_height = c.cell[1, 1]
c.center(params.vacuum, axis=0)
c.center(params.vacuum, axis=1)

# centre the slab on the origin
c.positions[:, 0] -= c.positions[:, 0].mean()
c.positions[:, 1] -= c.positions[:, 1].mean()
c.info['cell_origin'] = [-c.cell[0,0]/2, -c.cell[1,1]/2, 0.0]
ase.io.write('crack_1.xyz', c, format='extxyz')

width = (c.positions[:, 0].max() -
         c.positions[:, 0].min())
height = (c.positions[:, 1].max() -
          c.positions[:, 1].min())
c.info['OrigHeight'] = height

print(('Made slab with %d atoms, original width and height: %.1f x %.1f A^2' %
       (len(c), width, height)))

top = c.positions[:, 1].max()
bottom = c.positions[:, 1].min()
left = c.positions[:, 0].min()
right = c.positions[:, 0].max()

# Seed crack covers the left 30% of the strip; the displacement field
# ramps up over strain_ramp_length beyond the seed tip.
crack_seed_length = 0.3*width
strain_ramp_length = 5.0*params.a
delta_strain = params.strain_rate*params.dt

# fix top and bottom rows, and setup Stokes damping mask
# initial use constant strain
set_constraints(c, params.a)

# apply initial displacment field
c.positions[:, 1] += thin_strip_displacement_y(
    c.positions[:, 0],
    c.positions[:, 1],
    params.delta*eps_G,
    left + crack_seed_length,
    left + crack_seed_length +
    strain_ramp_length)

print('Applied initial load: delta=%.2f strain=%.4f' %
      (params.delta, params.delta*eps_G))

ase.io.write('crack_2.xyz', c, format='extxyz')

c.set_calculator(calc)

# relax initial structure
#opt = FIRE(c)
#opt.run(fmax=1e-3)

ase.io.write('crack_3.xyz', c, format='extxyz')

dyn = VelocityVerlet(c, params.dt, logfile=None)
set_initial_velocities(dyn.atoms)

crack_pos = []
# Write every 10th frame and track the crack tip at the same interval.
traj = NetCDFTrajectory('traj.nc', 'w', c)
dyn.attach(traj.write, 10, dyn.atoms, arrays=['stokes', 'momenta'])
dyn.attach(find_crack_tip, 10, dyn.atoms,
           dt=params.dt*10, store=True, results=crack_pos)

# run for 2000 time steps to reach steady state at initial load
for i in range(20):
    dyn.run(100)
    if extend_strip(dyn.atoms, params.a, params.N, params.M, params.vacuum):
        set_constraints(dyn.atoms, params.a)

# start decreasing strain
#set_constraints(dyn.atoms, params.a, delta_strain=delta_strain)
strain_atoms = ConstantStrainRate(dyn.atoms.info['OrigHeight'],
                                  delta_strain)
dyn.attach(strain_atoms.apply_strain, 1, dyn.atoms)

for i in range(1000):
    dyn.run(100)
    if extend_strip(dyn.atoms, params.a, params.N, params.M, params.vacuum):
        set_constraints(dyn.atoms, params.a)

traj.close()

# Times of the sampled crack positions (every 10 steps).
time = 10.0*dyn.dt*np.arange(dyn.get_number_of_steps()/10)
np.savetxt('crackpos.dat', np.c_[time, crack_pos])
|
libAtoms/matscipy
|
scripts/fracture_mechanics/run_ideal_brittle_solid.py
|
Python
|
lgpl-2.1
| 6,486
|
[
"ASE",
"CRYSTAL",
"Matscipy"
] |
77dfaf298b340c9eec76184b23c02ea268c80572a708bdc83d8fa6bef22a9b5d
|
from lib_spm import *
from scipy.stats import norm
#out_dir = os.path.join('/data42s/comparat/firefly/v1_1_0/figures', 'mass-redshift-presentation')
out_dir = os.path.join(os.environ['HOME'], 'wwwDir', 'firefly')
def plotDIFF(imf_ref, imf_1, imf_2, m_bins = n.arange(-10., 10., 0.1)):
    """Plot normalized differences between SPM fits of three IMF variants.

    For the eBOSS sample, compares stellar mass, light-weighted age and
    light-weighted metallicity of `imf_1` and `imf_2` against `imf_ref`,
    each difference normalized by the combined 1-sigma errors, and saves
    one histogram figure per quantity to `out_dir`.

    Assumes `boss` (a FITS-like table), `n` (numpy) and `p` (pyplot-like
    module) are provided by the star import from lib_spm — TODO confirm.
    Column names are built as imf prefix + quantity suffix.
    """
    print('eboss')
    stellar_mass = imf_ref+'stellar_mass'
    age = imf_ref+'age_lightW'
    metal = imf_ref+'metallicity_lightW'
    # selections: reliable redshift, sane error bounds, and mass errors
    # below 0.2 / 0.4 dex respectively.
    redshift_reliable_boss = (boss['CLASS_NOQSO'] == "GALAXY") & ( boss['Z_ERR_NOQSO'] > 0.0) & (boss['ZWARNING_NOQSO'] == 0) & (boss['Z_NOQSO']>0.001) & (boss['Z_NOQSO'] > boss['Z_ERR_NOQSO'] ) # (boss['SN_MEDIAN_ALL'] > 0.1 ) &
    error_reliable_boss = (boss[stellar_mass+'_up_1sig'] > boss[stellar_mass+'_low_1sig'] ) & (boss[stellar_mass+'_up_1sig'] > 0. ) & ( boss[stellar_mass+'_low_1sig'] > 0. ) & (boss[stellar_mass+'_up_1sig'] < 1e14 ) & ( boss[stellar_mass+'_low_1sig'] < 1e14 )
    mass_reliable_boss_02 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.2 )
    mass_reliable_boss_04 = (boss[stellar_mass] > 1e6 ) & ( boss[stellar_mass] < 1e14 ) & ((n.log10(boss[stellar_mass+'_up_1sig']) - n.log10(boss[stellar_mass+'_low_1sig']))/2. < 0.4)
    ok_boss_02 = (error_reliable_boss) & (mass_reliable_boss_02) & (redshift_reliable_boss)
    ok_boss_04 = (error_reliable_boss) & (mass_reliable_boss_04) & (redshift_reliable_boss)
    # defines quantities for the reference IMF (0.4 dex selection)
    A_04_ref = boss[age][ok_boss_04]
    M_04_ref = boss[stellar_mass][ok_boss_04]
    Z_04_ref = boss[metal][ok_boss_04]
    # defines errors (symmetrized 1-sigma half-widths)
    eM_04_ref = (boss[stellar_mass+'_up_1sig'][ok_boss_04]-boss[stellar_mass+'_low_1sig'][ok_boss_04])/2.
    eA_04_ref = (boss[age+'_up_1sig'][ok_boss_04]-boss[age+'_low_1sig'][ok_boss_04])/2.
    eZ_04_ref = (boss[metal+'_up_1sig'][ok_boss_04]-boss[metal+'_low_1sig'][ok_boss_04])/2.
    # quantity to compare to (first comparison IMF)
    stellar_mass = imf_1+'stellar_mass'
    age = imf_1+'age_lightW'
    metal = imf_1+'metallicity_lightW'
    A_04_1 = boss[age][ok_boss_04]
    M_04_1 = boss[stellar_mass][ok_boss_04]
    Z_04_1 = boss[metal][ok_boss_04]
    eM_04_1 = (boss[stellar_mass+'_up_1sig'][ok_boss_04]-boss[stellar_mass+'_low_1sig'][ok_boss_04])/2.
    eA_04_1 = (boss[age+'_up_1sig'][ok_boss_04]-boss[age+'_low_1sig'][ok_boss_04])/2.
    eZ_04_1 = (boss[metal+'_up_1sig'][ok_boss_04]-boss[metal+'_low_1sig'][ok_boss_04])/2.
    # second comparison IMF
    stellar_mass = imf_2+'stellar_mass'
    age = imf_2+'age_lightW'
    metal = imf_2+'metallicity_lightW'
    A_04_2 = boss[age][ok_boss_04]
    M_04_2 = boss[stellar_mass][ok_boss_04]
    Z_04_2 = boss[metal][ok_boss_04]
    eM_04_2 = (boss[stellar_mass+'_up_1sig'][ok_boss_04]-boss[stellar_mass+'_low_1sig'][ok_boss_04])/2.
    eA_04_2 = (boss[age+'_up_1sig'][ok_boss_04]-boss[age+'_low_1sig'][ok_boss_04])/2.
    eZ_04_2 = (boss[metal+'_up_1sig'][ok_boss_04]-boss[metal+'_low_1sig'][ok_boss_04])/2.
    # normalized comparison ratio: mass, against N(0,1)
    delta_m1 = (M_04_1-M_04_ref)*(eM_04_ref**2.+eM_04_1**2.)**(-0.5)
    delta_m2 = (M_04_2-M_04_ref)*(eM_04_ref**2.+eM_04_2**2.)**(-0.5)
    # figure
    # NOTE(review): `normed=True` is removed in recent matplotlib
    # (use density=True) — verify against the pinned version.
    p.figure(2, (6.5, 3.5))
    p.axes([0.12,0.18,0.8,0.73])
    p.hist(delta_m1, bins=m_bins, histtype='step', label=imf_1.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    p.hist(delta_m2, bins=m_bins, histtype='step', label=imf_2.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    p.plot(m_bins, norm.pdf(m_bins, loc=0, scale=1), label='N(0,1)', ls='dashed')
    p.ylabel('normed distribution')
    p.xlabel(r'$(M_1-M_{ref})/\sqrt{\sigma^2_{M_1}+\sigma^2_{M_{ref}}}$')
    p.title('eBOSS '+imf_ref.split('_')[0])
    p.legend(loc=2, frameon = False, fontsize=11)
    p.xlim((-7.5, 7.5))
    p.grid()
    p.savefig(os.path.join(out_dir, "delta_M_distribution_eboss"+imf_ref.split('_')[0]+".png" ))
    p.clf()
    # normalized comparison ratio: age (infinities from zero errors masked)
    delta_m1 = (A_04_1-A_04_ref)*(eA_04_ref**2.+eA_04_1**2.)**(-0.5)
    delta_m2 = (A_04_2-A_04_ref)*(eA_04_ref**2.+eA_04_2**2.)**(-0.5)
    # figure
    p.figure(2, (6.5, 3.5))
    p.axes([0.12,0.18,0.8,0.73])
    bad = (delta_m1==-n.inf)|(delta_m1==n.inf)
    p.hist(delta_m1[bad==False], bins=m_bins, histtype='step', label=imf_1.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    bad = (delta_m2==-n.inf)|(delta_m2==n.inf)
    p.hist(delta_m2[bad==False], bins=m_bins, histtype='step', label=imf_2.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    p.plot(m_bins, norm.pdf(m_bins, loc=0, scale=1), label='N(0,1)', ls='dashed')
    p.ylabel('normed distribution')
    p.xlabel(r'$(age_1-age_{ref})/\sqrt{\sigma^2_{age_1}+\sigma^2_{age_{ref}}}$')
    #p.yscale('log')
    p.title('eBOSS '+imf_ref.split('_')[0])
    p.legend(loc=2, frameon = False, fontsize=11)
    p.xlim((-7.5, 7.5))
    p.grid()
    p.savefig(os.path.join(out_dir, "delta_A_distribution_eboss"+imf_ref.split('_')[0]+".png" ))
    p.clf()
    # normalized comparison ratio: metallicity (also masks NaNs)
    delta_m1 = (Z_04_1-Z_04_ref)*(eZ_04_ref**2.+eZ_04_1**2.)**(-0.5)
    delta_m2 = (Z_04_2-Z_04_ref)*(eZ_04_ref**2.+eZ_04_2**2.)**(-0.5)
    # figure
    p.figure(2, (6.5, 3.5))
    p.axes([0.12,0.18,0.8,0.73])
    bad = (delta_m1==-n.inf)|(delta_m1==n.inf)|n.isnan(delta_m1)
    p.hist(delta_m1[bad==False], bins=m_bins, histtype='step', label=imf_1.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    bad = (delta_m2==-n.inf)|(delta_m2==n.inf)|n.isnan(delta_m2)
    p.hist(delta_m2[bad==False], bins=m_bins, histtype='step', label=imf_2.split('_')[1]+"-"+imf_ref.split('_')[1] , normed=True )
    p.plot(m_bins, norm.pdf(m_bins, loc=0, scale=1), label='N(0,1)', ls='dashed')
    p.ylabel('normed distribution')
    p.xlabel(r'$(Z_1-Z_{ref})/\sqrt{\sigma^2_{Z_1}+\sigma^2_{Z_{ref}}}$')
    #p.yscale('log')
    p.title('eBOSS '+imf_ref.split('_')[0])
    p.legend(loc=2, frameon = False, fontsize=11)
    p.xlim((-7.5, 7.5))
    p.grid()
    p.savefig(os.path.join(out_dir, "delta_Z_distribution_eboss"+imf_ref.split('_')[0]+".png" ))
    p.clf()
# Run the comparison for each model family: within each triple of `imfs`
# entries, the first is the reference IMF and the next two are compared
# against it. (`imfs` comes from lib_spm's star import — TODO confirm
# its ordering.)
plotDIFF( imf_ref = imfs[0], imf_1 = imfs[1], imf_2 = imfs[2], m_bins = n.arange(-10., 10., 0.1))
plotDIFF( imf_ref = imfs[3], imf_1 = imfs[4], imf_2 = imfs[5], m_bins = n.arange(-10., 10., 0.1))
plotDIFF( imf_ref = imfs[6], imf_1 = imfs[7], imf_2 = imfs[8], m_bins = n.arange(-10., 10., 0.1))
|
JohanComparat/pySU
|
spm/bin_SMF/plot_differences_age.py
|
Python
|
cc0-1.0
| 6,094
|
[
"Firefly",
"Galaxy"
] |
f0abc5d41ea1a3c4c95b423ebb9aa9de6ac369b7d7bf124de9fbdda4b1e64271
|
#!/usr/bin/env python
from __future__ import division
import unittest
import numpy as np
import warnings
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import PointGroup, SpaceGroup
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "4/10/14"
class PointGroupTest(unittest.TestCase):
    """Unit tests for crystallographic point groups."""

    def test_order(self):
        # Each symbol maps to its expected number of symmetry operations.
        expected = {"mmm": 8, "432": 24, "-6m2": 12}
        for symbol, n_ops in expected.items():
            group = PointGroup(symbol)
            self.assertEqual(n_ops, len(group.symmetry_ops))

    def test_get_orbit(self):
        group = PointGroup("mmm")
        # General positions give the full orbit; special positions are
        # reduced.
        cases = [([0.1, 0.1, 0.1], 8),
                 ([0, 0, 0.1], 2),
                 ([1.2, 1.2, 1], 8)]
        for point, size in cases:
            self.assertEqual(len(group.get_orbit(point)), size)

    def test_is_sub_super_group(self):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            pgmmm = PointGroup("mmm")
            pgmm2 = PointGroup("mm2")
            pg222 = PointGroup("222")
            pg4 = PointGroup("4")
            self.assertTrue(pgmmm.is_supergroup(pgmm2))
            self.assertTrue(pgmm2.is_subgroup(pgmmm))
            self.assertTrue(pgmmm.is_supergroup(pg222))
            self.assertFalse(pgmmm.is_supergroup(pg4))
            pgm3m = PointGroup("m-3m")
            pg6mmm = PointGroup("6/mmm")
            pg3m = PointGroup("-3m")
            # TODO: Fix the test below.
            # self.assertTrue(pg3m.is_subgroup(pgm3m))
            self.assertTrue(pg3m.is_subgroup(pg6mmm))
            self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
    """Tests for SpaceGroup: symbol parsing, symmetry operations, settings,
    lattice compatibility, and sub/supergroup relationships."""

    def test_abbrev_symbols(self):
        sg = SpaceGroup("P2/c")
        self.assertEqual(sg.int_number, 13)
        sg = SpaceGroup("R-3mH")
        self.assertEqual(sg.int_number, 166)

    def test_attr(self):
        sg = SpaceGroup("Fm-3m")
        self.assertEqual(sg.full_symbol, "F4/m-32/m")
        self.assertEqual(sg.point_group, "m-3m")

    def test_full_symbols(self):
        sg = SpaceGroup("P2/m2/m2/m")
        self.assertEqual(sg.symbol, "Pmmm")

    def test_order_symm_ops(self):
        # Every tabulated space group must expose exactly `order` operations.
        for name in SpaceGroup.SG_SYMBOLS:
            sg = SpaceGroup(name)
            self.assertEqual(len(sg.symmetry_ops), sg.order)

    def test_get_settings(self):
        self.assertEqual({'Fm-3m(a-1/4,b-1/4,c-1/4)', 'Fm-3m'},
                         SpaceGroup.get_settings("Fm-3m"))
        self.assertEqual({'Pmmn', 'Pmnm:1', 'Pnmm:2', 'Pmnm:2', 'Pnmm',
                          'Pnmm:1', 'Pmmn:1', 'Pmnm', 'Pmmn:2'},
                         SpaceGroup.get_settings("Pmmn"))
        self.assertEqual({'Pnmb', 'Pman', 'Pncm', 'Pmna', 'Pcnm', 'Pbmn'},
                         SpaceGroup.get_settings("Pmna"))

    def test_crystal_system(self):
        sg = SpaceGroup("R-3c")
        self.assertEqual(sg.crystal_system, "trigonal")
        sg = SpaceGroup("R-3cH")
        self.assertEqual(sg.crystal_system, "trigonal")

    def test_get_orbit(self):
        sg = SpaceGroup("Fm-3m")
        # A random fractional point; its orbit can never exceed the group order.
        p = np.random.randint(0, 100 + 1, size=(3,)) / 100
        self.assertLessEqual(len(sg.get_orbit(p)), sg.order)

    def test_is_compatible(self):
        cubic = Lattice.cubic(1)
        hexagonal = Lattice.hexagonal(1, 2)
        rhom = Lattice.rhombohedral(3, 80)
        tet = Lattice.tetragonal(1, 2)
        ortho = Lattice.orthorhombic(1, 2, 3)
        sg = SpaceGroup("Fm-3m")
        self.assertTrue(sg.is_compatible(cubic))
        self.assertFalse(sg.is_compatible(hexagonal))
        sg = SpaceGroup("R-3m:H")
        self.assertFalse(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(hexagonal))
        sg = SpaceGroup("R-3m:R")
        self.assertTrue(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(rhom))
        self.assertFalse(sg.is_compatible(hexagonal))
        sg = SpaceGroup("Pnma")
        self.assertTrue(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(tet))
        self.assertTrue(sg.is_compatible(ortho))
        self.assertFalse(sg.is_compatible(rhom))
        self.assertFalse(sg.is_compatible(hexagonal))
        sg = SpaceGroup("P12/c1")
        self.assertTrue(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(tet))
        self.assertTrue(sg.is_compatible(ortho))
        self.assertFalse(sg.is_compatible(rhom))
        self.assertFalse(sg.is_compatible(hexagonal))
        sg = SpaceGroup("P-1")
        # Triclinic is compatible with every lattice.
        self.assertTrue(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(tet))
        self.assertTrue(sg.is_compatible(ortho))
        self.assertTrue(sg.is_compatible(rhom))
        self.assertTrue(sg.is_compatible(hexagonal))
        sg = SpaceGroup("Pmmn:2")
        self.assertTrue(sg.is_compatible(cubic))
        self.assertTrue(sg.is_compatible(tet))
        self.assertTrue(sg.is_compatible(ortho))
        self.assertFalse(sg.is_compatible(rhom))
        self.assertFalse(sg.is_compatible(hexagonal))

    def test_symmops(self):
        sg = SpaceGroup("Pnma")
        op = SymmOp.from_rotation_and_translation([[1, 0, 0], [0, -1, 0],
                                                   [0, 0, -1]], [0.5, 0.5, 0.5])
        self.assertIn(op, sg.symmetry_ops)

    def test_other_settings(self):
        sg = SpaceGroup("Pbnm")
        self.assertEqual(sg.int_number, 62)
        self.assertEqual(sg.order, 8)
        self.assertRaises(ValueError, SpaceGroup, "hello")

    def test_subgroup_supergroup(self):
        # NOTE: warnings.catch_warnings() yields None unless record=True, so
        # the original "as w" binding was a misleading no-op and is dropped.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertTrue(SpaceGroup('Pma2').is_subgroup(SpaceGroup('Pccm')))
            self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(
                SpaceGroup.from_int_number(230)))
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
tallakahath/pymatgen
|
pymatgen/symmetry/tests/test_groups.py
|
Python
|
mit
| 6,002
|
[
"pymatgen"
] |
ca0d5c1901692e0e7febab0add29a3222ad9d0e6fb1e5aa569e493d02323767b
|
# -*- coding: utf-8 -*-
#
## we set up default information.
## first, easy to maintain lists which can eventually be moved to
## files.
from typing import Collection, List, Mapping
from .abstractLang import AbstractLanguage
class Language(AbstractLanguage):
    """Dutch (nl) locale defaults for Gourmet.

    Pure data: translated field values, default ingredient/shopping-category
    triples, unit names and synonyms, unit-conversion factors and food
    densities used by the recipe manager for the Dutch locale.
    """

    # TRANSLATOR WARNING: DO NOT TRANSLATE THE FIELD NAMES: ONLY THE VALUES!!!
    CREDITS = "Gregory"

    # only translate the items in the list [..] (and feel free to create categories
    # that make sense for your locale -- no need to translate these ones). DO NOT
    # translate 'cuisine','rating','source' or 'category'
    fields = {'cuisine': ['Amerikaans', 'Italiaans', 'Mexicaans',
                          'Zuid-westers', 'Aziatisch/Thais', 'Aziatisch/Vietnamees',
                          'Aziatisch/Chinees', 'Aziatisch/Japans', ],
              'rating': ['Excellent', 'Zeer goed', 'Goed', 'Matig', 'Zwak'],
              'source': ['Kookboek'],
              'category': ['Dessert', 'Voorgerecht', 'Salade', 'Soep',
                           'Ontbijt'],
              }

    # In English, there are a heck of a lot of synonyms. This is a list
    # for those synonyms. ["preferred word","alternate word","alternate word"]
    # If there are none of these that you can think of in your language, just
    # set this to:
    # SYNONYMS=[]
    SYNONYMS: Collection[List[str]] = []

    # a dictionary key=ambiguous word, value=list of possible non-ambiguous terms
    AMBIGUOUS: Mapping[str, List[str]] = {}

    # triplicates ITEM, KEY, SHOPPING CATEGORY
    # These will be defaults. They should include whatever foods might be
    # standard for your locale, with whatever sensible default categories
    # you can think of (again, thinking of your locale, not simply translating
    # what I've done).
    INGREDIENT_DATA = [("alfalfa spruiten", "alfalfa spruiten", "land- en tuinbouw producten"),
                       ("anijs", "anijs", "land- en tuinbouw producten"),
                       ("artisjok", "artisjok", "land- en tuinbouw producten"),
                       ("arugula", "arugula", "land- en tuinbouw producten"),
                       ("asperge", "asperge", "land- en tuinbouw producten"),
                       ("aubergine", "aubergine", "land- en tuinbouw producten"),
                       ("avocado", "avocado", "land- en tuinbouw producten"),
                       ("groene bonen", "groene bonen", "land- en tuinbouw producten"),
                       ("azukibonen", "azukibonen", "land- en tuinbouw producten"),
                       ("boonspruiten", "boonspruiten", "land- en tuinbouw producten"),
                       ("zwarte bonen", "zwarte bonen", "land- en tuinbouw producten"),
                       ("black-eyed peas", "black-eyed peas", "land- en tuinbouw producten"),
                       ("borlotti bonen", "borlotti bonen", "land- en tuinbouw producten"),
                       ("broad bonen", "broad bonen", "land- en tuinbouw producten"),
                       ("kikkererwten, garbanzos", "kikkererwten, garbanzos", "land- en tuinbouw producten"),
                       ("nierbonen", "nierbonen", "land- en tuinbouw producten"),
                       ("linzen", "linzen", "land- en tuinbouw producten"),
                       ("limabonen of boterbonen", "limabonen of boterbonen", "land- en tuinbouw producten"),
                       ("mung bonen", "mung bonen", "land- en tuinbouw producten"),
                       ("navy bonen", "navy bonen", "land- en tuinbouw producten"),
                       # BUGFIX: item previously misspelled "Spaase"; its own key
                       # already used the correct Dutch spelling "Spaanse".
                       ("pronkbonen of Spaanse bonen", "pronkbonen of Spaanse bonen", "land- en tuinbouw producten"),
                       ("sojabonen", "sojabonen", "land- en tuinbouw producten"),
                       ("erwten", "erwten", "land- en tuinbouw producten"),
                       ("snap peas", "snap peas", "land- en tuinbouw producten"),
                       ("bok choy", "bok choy", "land- en tuinbouw producten"),
                       ("broodfruit", "broodfruit", "land- en tuinbouw producten"),
                       ("groene bloemkool", "groene bloemkool", "land- en tuinbouw producten"),
                       ("broccoli", "broccoli", "land- en tuinbouw producten"),
                       ("spruitjes", "spruitjes", "land- en tuinbouw producten"),
                       ("kool", "kool", "land- en tuinbouw producten"),
                       ("bloemkool", "bloemkool", "land- en tuinbouw producten"),
                       ("selder", "selder", "land- en tuinbouw producten"),
                       ("maïssalade", "maïssalade", "land- en tuinbouw producten"),
                       ("witloof", "witloof", "land- en tuinbouw producten"),
                       ("kropsla", "kropsla", "land- en tuinbouw producten"),
                       ("maïs", "maïs", "land- en tuinbouw producten"),
                       ("champignons", "champignons", "land- en tuinbouw producten"),
                       ("netels", "netels", "land- en tuinbouw producten"),
                       ("bieslook", "bieslook", "land- en tuinbouw producten"),
                       ("look", "look", "land- en tuinbouw producten"),
                       ("prei", "prei", "land- en tuinbouw producten"),
                       ("ajuin", "ajuin", "land- en tuinbouw producten"),
                       ("sjalot", "sjalot", "land- en tuinbouw producten"),
                       ("peterselie", "peterselie", "land- en tuinbouw producten"),
                       ("peper", "peper", "land- en tuinbouw producten"),
                       ("zwarte peper", "zwarte peper", "land- en tuinbouw producten"),
                       ("witte peper", "witte peper", "land- en tuinbouw producten"),
                       ("chili peper", "chili peper", "land- en tuinbouw producten"),
                       ("jalapeño peper", "peper, jalapeño", "land- en tuinbouw producten"),
                       ("habanero peper", "peper, habanero", "land- en tuinbouw producten"),
                       ("rabarber", "rabarber", "land- en tuinbouw producten"),
                       ("biet", "biet", "land- en tuinbouw producten"),
                       ("wortel", "wortel", "land- en tuinbouw producten"),
                       ("maniok", "maniok", "land- en tuinbouw producten"),
                       ("ginger", "ginger", "land- en tuinbouw producten"),
                       ("radijs", "radijs", "land- en tuinbouw producten"),
                       ("wasabi", "wasabi", "land- en tuinbouw producten"),
                       ("witte radijs", "witte radijs", "land- en tuinbouw producten"),
                       ("spinazie", "spinazie", "land- en tuinbouw producten"),
                       ("komkommer", "komkommer", "land- en tuinbouw producten"),
                       ("pompoen", "pompoen", "land- en tuinbouw producten"),
                       ("spaghetti squash", "squash, spaghetti", "land- en tuinbouw producten"),
                       ("tomaat", "tomaat", "land- en tuinbouw producten"),
                       ("aardappel", "aardappel", "land- en tuinbouw producten"),
                       ("zoete aardappel", "zoete aardappel", "land- en tuinbouw producten"),
                       ("waterkers", "waterkers", "land- en tuinbouw producten"),
                       ("appel", "appel", "land- en tuinbouw producten"),
                       ("juneberry", "juneberry", "land- en tuinbouw producten"),
                       ("granaatappel", "granaatappel", "land- en tuinbouw producten"),
                       ("abricot", "abricot", "land- en tuinbouw producten"),
                       ("kers", "kers", "land- en tuinbouw producten"),
                       ("perzik", "perzik", "land- en tuinbouw producten"),
                       ("nectarine", "nectarine", "land- en tuinbouw producten"),
                       ("braambes", "braambes", "land- en tuinbouw producten"),
                       ("framboos", "framboos", "land- en tuinbouw producten"),
                       ("bergbraambes", "bergbraambes", "land- en tuinbouw producten"),
                       ("beredruif", "beredruif", "land- en tuinbouw producten"),
                       ("bosbes", "bosbes", "land- en tuinbouw producten"),
                       ("Amerikaanse veenbes", "Amerikaanse veenbes", "land- en tuinbouw producten"),
                       ("lingonberry", "lingonberry", "land- en tuinbouw producten"),
                       ("berberis", "berberis", "land- en tuinbouw producten"),
                       ("rode bes", "bes, rood", "land- en tuinbouw producten"),
                       ("zwarte bes", "bes, zwart", "land- en tuinbouw producten"),
                       ("witte bes", "bes, wit", "land- en tuinbouw producten"),
                       ("vlierbes", "vlierbes", "land- en tuinbouw producten"),
                       ("kruisbes", "kruisbes", "land- en tuinbouw producten"),
                       ("overzees-wegedoorn", "overzees-wegedoorn", "land- en tuinbouw producten"),
                       ("moerbeiboom", "moerbeiboom", "land- en tuinbouw producten"),
                       ("kiwi", "kiwi", "land- en tuinbouw producten"),
                       ("papaja", "papaja", "land- en tuinbouw producten"),
                       ("peer", "peer", "land- en tuinbouw producten"),
                       ("kantaloep", "kantaloep", "land- en tuinbouw producten"),
                       ("watermeloen", "watermeloen", "land- en tuinbouw producten"),
                       ("aardbei", "aardbei", "land- en tuinbouw producten"),
                       ("vijg", "vijg", "land- en tuinbouw producten"),
                       ("druif", "druif", "land- en tuinbouw producten"),
                       ("pompelmoes", "pompelmoes", "land- en tuinbouw producten"),
                       ("citroen", "citroen", "land- en tuinbouw producten"),
                       ("limoen", "limoen", "land- en tuinbouw producten"),
                       ("mandarijn", "mandarijn", "land- en tuinbouw producten"),
                       ("clementine", "clementine", "land- en tuinbouw producten"),
                       ("sinaasappel", "sinaasappel", "land- en tuinbouw producten"),
                       ("lychee", "lychee", "land- en tuinbouw producten"),
                       ("passievrucht", "passievrucht", "land- en tuinbouw producten"),
                       ("banaan", "banaan", "land- en tuinbouw producten"),
                       ("ster fruit", "ster fruit", "land- en tuinbouw producten"),
                       ("kokosnoot", "kokosnoot", "land- en tuinbouw producten"),
                       ("durian", "durian", "land- en tuinbouw producten"),
                       ("mango", "mango", "land- en tuinbouw producten"),
                       ("ananas", "ananas", "land- en tuinbouw producten"),
                       ("ansjovis", "ansjovis", "vis & zeevruchten"),
                       ("baars", "baars", "vis & zeevruchten"),
                       ("blowfish", "blowfish", "vis & zeevruchten"),
                       ("meerval", "meerval", "vis & zeevruchten"),
                       ("kabeljouw", "kabeljouw", "vis & zeevruchten"),
                       ("paling", "paling", "vis & zeevruchten"),
                       ("bot", "bot", "vis & zeevruchten"),
                       ("schelvis", "schelvis", "vis & zeevruchten"),
                       ("heilbot", "heilbot", "vis & zeevruchten"),
                       ("zeebaars", "zeebaars", "vis & zeevruchten"),
                       ("snoek", "snoek", "vis & zeevruchten"),
                       ("sardien", "sardien", "vis & zeevruchten"),
                       ("zalm", "zalm", "vis & zeevruchten"),
                       # NOTE(review): duplicate of the "zeebaars" entry above;
                       # harmless for lookups, kept for data fidelity.
                       ("zeebaars", "zeebaars", "vis & zeevruchten"),
                       ("haai", "haai", "vis & zeevruchten"),
                       ("snapper", "snapper", "vis & zeevruchten"),
                       ("schorpioenvis", "schorpioenvis", "vis & zeevruchten"),
                       ("tong", "tong", "vis & zeevruchten"),
                       ("steur", "steur", "vis & zeevruchten"),
                       ("zwaardvis", "zwaardvis", "vis & zeevruchten"),
                       ("tegelvis", "tegelvis", "vis & zeevruchten"),
                       ("forel", "forel", "vis & zeevruchten"),
                       ("tonijn", "tonijn", "vis & zeevruchten"),
                       ("witing", "witing", "vis & zeevruchten"),
                       ("kuiten", "kuiten", "vis & zeevruchten"),
                       ("kaviaar", "kaviaar", "vis & zeevruchten"),
                       ("zalmkuiten", "zalmkuiten", "vis & zeevruchten"),
                       ("krab", "krab", "vis & zeevruchten"),
                       ("rivierkreeften", "rivierkreeften", "vis & zeevruchten"),
                       ("kreeft", "kreeft", "vis & zeevruchten"),
                       ("garnaal", "garnaal", "vis & zeevruchten"),
                       ("scampi", "scampi", "vis & zeevruchten"),
                       ("tweekleppig schelpdier", "tweekleppig schelpdier", "vis & zeevruchten"),
                       ("mossel", "mossel", "vis & zeevruchten"),
                       ("octopus", "octopus", "vis & zeevruchten"),
                       ("oester", "oester", "vis & zeevruchten"),
                       ("slak", "slak", "vis & zeevruchten"),
                       ("inktvis", "inktvis", "vis & zeevruchten"),
                       ("kammossel", "kammossel", "vis & zeevruchten"),
                       ("spek", "spek", "vlees"),
                       ("chorizo", "chorizo", "vlees"),
                       ("fuet", "fuet", "vlees"),
                       ("salami", "salami", "vlees"),
                       ("ham", "ham", "vlees"),
                       ("mutton", "mutton", "vlees"),
                       ("lam", "lam", "vlees"),
                       ("kalf", "kalf", "vlees"),
                       ("steak", "steak", "vlees"),
                       ("hamburger", "hamburger", "vlees"),
                       ("roast beef", "roast beef", "vlees"),
                       ("konijn", "konijn", "vlees"),
                       ("struisvogel", "struisvogel", "vlees"),
                       ("muskusrat", "muskusrat", "vlees"),
                       ("waterkonijn", "waterkonijn", "vlees"),
                       ("kip", "kip", "vlees"),
                       ("kalkoen", "kalkoen", "vlees"),
                       ("eend", "eend", "vlees"),
                       ("gans", "gans", "vlees"),
                       ## my old list
                       ('plantaardige bouillon', 'bouillon, plantaardig', 'soepen & sausen'),
                       ('basilicum', 'basilicum', 'land- en tuinbouw producten',),
                       ('lichtbruine suiker', 'suiker, lichtbruin', 'bakken',),
                       ('azijn', 'azijn', 'wijnen & oliën',),
                       ('okkernoot', 'okkernoot', 'bakken',),
                       ('korianderzaadjes', 'korianderzaadjes', 'kruiden',),
                       ('couscous', 'couscous', 'pasta\'s',),
                       ('rijst', 'rijst', 'pasta\'s',),
                       ('olijfolie', 'olie, olijf', 'wijnen & oliën',),
                       ('vanille extract', 'vanille extract', 'bakken',),
                       ('rode aardappel', 'aardappel, rood', 'land- en tuinbouw producten',),
                       ('currypoeder', 'currypoeder', 'kruiden',),
                       ('gedroogde garnaal', 'garnaal, gedroogd', 'internationaal',),
                       ('dijon mosterd', 'mosterd, dijon', 'condiments',),
                       ('maïsolie', 'olie, maïs', 'wijnen & oliën',),
                       ('feta kaas', 'kaas, feta', 'zuivelproducten',),
                       ('kerstomaat', 'tomaat, kers', 'land- en tuinbouw producten',),
                       ('spaghetti', 'spaghetti', 'pasta\'s',),
                       ('witte ajuin', 'ajuin, wit', 'land- en tuinbouw producten',),
                       ('yoghurt', 'yoghurt', 'zuivelproducten',),
                       ('croutons', 'croutons', 'brood',),
                       ('chili poeder', 'chili poeder', 'kruiden',),
                       ('krulsla', 'sla, gekruld', 'land- en tuinbouw producten',),
                       ('rijstazijn', 'azijn, rijst', 'internationaal',),
                       ('pasta', 'pasta', 'pasta\'s',),
                       ('zure room', 'zure room', 'zuivelproducten',),
                       ('sinaasappelsap', 'sinaasappelsap', 'land- en tuinbouw producten',),
                       ('spinazie', 'spinazie', 'land- en tuinbouw producten',),
                       ('plantaardige olie', 'olie, plantaardig', 'wijnen & oliën',),
                       ('pindakaas', 'pindakaas', 'brood',),
                       ('ei', 'ei', 'zuivelproducten',),
                       ('limoen', 'limoen', 'land- en tuinbouw producten',),
                       ('olijven', 'olijven', 'land- en tuinbouw producten',),
                       ('boter', 'boter', 'zuivelproducten',),
                       ('cheddar kaas', 'kaas, cheddar', 'zuivelproducten',),
                       ('mozzarella kaas', 'kaas, mozzarella', 'zuivelproducten',),
                       ('witte suiker', 'suiker, wit', 'bakken',),
                       ('fresh kaas white goat', 'kaas, fresh white goat', 'zuivelproducten',),
                       ('geel maïsmeel', 'maïsmeel, geel', 'bakken',),
                       ('paprika', 'paprika', 'kruiden',),
                       ('ster anijs', 'ster anijs', 'kruiden',),
                       ('bruine suiker', 'suiker, bruin', 'bakken',),
                       ('honing', 'honing', 'bakken',),
                       ('citroensap', 'citroensap', 'land- en tuinbouw producten',),
                       ('rode ajuin', 'ajuin, rood', 'land- en tuinbouw producten',),
                       ('melk', 'melk', 'zuivelproducten',),
                       ('bakpoeder', 'bakpoeder', 'bakken',),
                       ('tomatensap', 'tomatensap', 'soepen & sausen',),
                       ('mosterdzaadjes', 'zaadjes, mosterd', 'kruiden',),
                       ('parmesan kaas', 'kaas, parmesan', 'pasta\'s',),
                       ('mayonnaise', 'mayonnaise', 'brood',),
                       ('gerookte Gouda kaas', 'kaas, gerookte Gouda', 'zuivelproducten',),
                       ('kokosmelk', 'kokosmelk', 'internationaal',),
                       ('bloem', 'bloem', 'bakken',),
                       ('salsa', 'salsa', 'internationaal',),
                       ('broccoli', 'broccoli', 'land- en tuinbouw producten',),
                       ('witte wijn', 'wijn, wit', 'wijnen & oliën',),
                       ('rode wijn', 'wijn, rood', 'wijnen & oliën',),
                       ('bruin bier', 'bier, bruin', 'wijnen & oliën',),
                       ('blond bier', 'bier, blond', 'wijnen & oliën',),
                       ('witbier', 'bier, wit', 'wijnen & oliën',),
                       ('zwarte gemalen peper', 'zwarte peper, gemalen', 'kruiden',),
                       ('witte gemalen peper', 'witte peper, gemalen', 'kruiden',),
                       ('sojasaus', 'sojasaus', 'internationaal',),
                       ('sesamzaadjes', 'sesamzaadjes', 'internationaal',),
                       ('zout', 'zout', 'bakken',),
                       ('kurkuma', 'kurkuma', 'kruiden',),
                       ('kipfilet', 'kip, filet', 'vlees',),
                       ('hele kip', 'kip, heel', 'vlees',),
                       ('kippenboutje', 'kip, bouten', 'vlees',),
                       ('rundsvlees', 'rundsvlees', 'vlees',),
                       ('gehakt', 'gehakt', 'vlees',),
                       ('varkensvlees', 'varkensvlees', 'vlees',),
                       ]

    # THESE ARE STANDARD UNIT CONVERSIONS. You can simply translate unit names where
    # you know them. Eliminate entries that are untranslatable or don't exist in your
    # locale. And please add any additional units that you know of.
    # Mapping (from_unit, to_unit) -> multiplicative factor.
    CONVERTER_TABLE = {
        ("l", "ml"): 1000,
        ("l", "cl"): 100,
        ("l", "dl"): 10,
        ("kg", "g"): 1000,
        ("g", "mg"): 1000
    }

    # DENSITIES of common foods (g/ml). This allows us to convert between mass and volume.
    DENSITY_TABLE = {
        "water": 1,
        "sap, druif": 1.03,
        "plantaardige bouillon": 1,
        "bouillon, plantaardig": 1,
        "bouillon, kip": 1,
        "melk": 1.029,
        "melk, vol": 1.029,
        "melk, mager": 1.033,
        "melk, 2%": 1.031,
        "melk, 1%": 1.03,
        "kokosmelk": 0.875,
        "karnemelk": 1.03,
        "zware room": 0.994,
        "lichte room": 1.012,
        "half en half": 1.025,
        "honing": 1.420,
        "suiker, wit": 1.550,
        "zout": 2.165,
        "boter": 0.911,
        "olie, plantaardig": 0.88,
        "olie, olijf": 0.88,
        "olie, maïs": 0.88,
        "olie, sesam": 0.88,
        "bloem, alle doeleinden": 0.6,
        "bloem, whole wheat": 0.6,
        "maïszetmeel": 0.6,
        "suiker, bloem": 0.6,
        "suiker, zoetigheden": 0.6
    }

    # Stand unit names and alternate unit names that might appear.
    # For example: "c." is our standard for cup. "cup","c." or "cups" might appear
    # in a recipe we are importing.
    # Each item of this list looks like this:
    #
    # ["standard", ["alternate1","alternate2","alternate3",...]]
    UNITS = [("bucket", ["bucket", "buckets", "bckt."]),
             ("grains", ["grain", "grains"]),
             ("dram", ["dram", "drams"]),
             ("drop", ["drop"]),
             ("bos", ["bossen", "bosje"]),
             ("theelp.", ["theelepel", "theelepels", "theelepeltje", "theelepeltjes", "theelpl.", "theelpl"]),
             ("eetlp.", ["eetlepel", "eetlepels", "eetlpl.", "eetlpl"]),
             ("lb.", ["pond", "lb", "lb.", "lbs.", "ponden"]),
             ("k.", ["kop", "kopje", "kp.", "koppen"]),
             ("qt.", ["quart", "qt.", "quarts"]),
             ("pt.", ["pint", "pt.", "pints"]),
             ("ml", ["mililiter", "ml.", "mlit.", "mililiters"]),
             ("cl", ["centiliter", "cl.", "clit.", "centiliters"]),
             ("dl", ["deciliter", "dl.", "dlit.", "deciliters"]),
             ("l", ["liter", "l.", "lit.", "liters"]),
             ("g", ["gram", "g.", "gr", "gr.", "grammen"]),
             ("mg", ["miligram", "mg.", "mg", "miligrammen"]),
             ("kg", ["kilogram", "kg.", "kg", "kilogrammen"]),
             ]

    # The following sets up unit groups. Users will be able to turn
    # these on or off (American users, for example, would likely turn
    # off metric units, since we don't use them).
    METRIC_RANGE = (1, 999)
    UNIT_GROUPS = {
        'metric mass': [('mg', METRIC_RANGE),
                        ('g', METRIC_RANGE),
                        ('kg', (1, None))],
        'metric volume': [('ml', METRIC_RANGE),
                          ('cl', (1, 99)),
                          ('dl', (1, 9)),
                          ('l', (1, None)), ],
        'imperial weight': [('grains', (0, 27)),
                            ('dram', (0.5, 15)),
                            ('oz.', (0.25, 32)),
                            ('lb.', (0.25, None)),
                            ],
        'imperial volume': [('drop', (0, 3)),
                            ('tsp.', (0.125, 3)),
                            ('tbs.', (1, 4)),
                            ('k.', (0.25, 6)),
                            ('pt.', (1, 1)),
                            ('qt.', (1, 3)),
                            ('gallon', (1, None)),
                            ('peck', (1, 2)),
                            ('bucket', (1, 2)),
                            ('bushel', (1, None))]
    }

    # The units here need to correspond to the standard unit names defined
    # above in UNITS
    CROSS_UNIT_TABLE = {
        ## This if for units that require an additional
        ## bit of information -- i.e. to convert between
        ## volume and mass you need the density of an
        ## item. In these cases, the additional factor
        ## will be provided as an 'item' that is then looked
        ## up in the dictionary referenced here (i.e. the density_table)
        ## currently, 'density' is the only keyword used
        ("pt.", "lb."): ('density', 1),
        ("tbs.", "oz."): ('density', 0.5),
        ("k.", "oz."): ('density', 8),
        ("pt.", "oz."): ('density', 16),
        ("ml", "g"): ('density', 1)}

    # The units here need to correspond to the standard unit names defined
    # in UNITS
    VOL_TO_MASS_TABLE = {
        ("pt.", "lb."): 1,
        ("tbs.", "oz."): 0.5,
        ("c.", "oz."): 8,
        ("pt.", "oz."): 16,
        ("ml", "g"): 1,
        ("ml", "mg"): 1000,
        ("ml", "kg"): 0.001,
        ("cl", "kg"): 0.01,
        ("cl", "g"): 10,
        ("dl", "kg"): 0.1,
        ("dl", "g"): 100,
        ("l", "kg"): 1}

    # These functions are rather important! Our goal is simply to
    # facilitate look ups -- if the user types in "tomatoes", we want to
    # find "tomato." Note that the strings these functions produce will
    # _never_ be shown to the user, so it's fine to generate nonsense
    # words as well as correct answers -- our goal is to generate a list
    # of possible hits rather than to get the plural/singular form "right".
    @staticmethod
    def guess_singulars(s):
        """Return candidate singular forms of *s* (none implemented for Dutch)."""
        return []

    @staticmethod
    def guess_plurals(s):
        """This is a very lame attempt at Dutch grammar!

        Obviously this isn't close to a good plural generator, but I
        thought it might make an occasional match, so what the hell.
        """
        return [s + "en", s + "s"]

    # Words to skip entirely during ingredient keyword matching.
    IGNORE: Collection[str] = []
|
kirienko/gourmet
|
src/gourmet/defaults/defaults_nl.py
|
Python
|
gpl-2.0
| 25,199
|
[
"Octopus"
] |
dc03a825f4102be070f57918b210c13ccf704ba23eed83f06c04ac366cb17c55
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import print_function
import h2o
import sys
sys.path.insert(1,"../../../") # allow us to run this standalone
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
from tests import pyunit_utils
def stackedensemble_guassian_test():
    """This test checks the following (for gaussian regression):
    1) That H2OStackedEnsembleEstimator executes w/o errors on a 3-model manually constructed ensemble.
    2) That .predict() works on a stack.
    3) That .model_performance() works on a stack.
    4) That the training and test performance is better on ensemble vs the base learners.
    5) That the validation_frame arg on H2OStackedEnsembleEstimator works correctly.
    """
    # Column types for the prostate data set; categorical columns are "enum".
    col_types = ["numeric", "numeric", "numeric", "enum", "enum", "numeric", "numeric", "numeric", "numeric"]
    dat = h2o.upload_file(path=pyunit_utils.locate("smalldata/extdata/prostate.csv"),
                          destination_frame="prostate_hex",
                          col_types= col_types)
    # 80/20 train/test split with a fixed seed for reproducibility.
    train, test = dat.split_frame(ratios=[.8], seed=1)
    print(train.summary())
    # Identify predictors and response
    x = ["CAPSULE","GLEASON","RACE","DPROS","DCAPS","PSA","VOL"]
    y = "AGE"
    # set number of folds
    nfolds = 5
    # train and cross-validate a GBM
    # NOTE: fold_assignment="Modulo" + keep_cross_validation_predictions=True
    # are required so the stacked ensemble can reuse the CV holdout predictions.
    my_gbm = H2OGradientBoostingEstimator(distribution="gaussian",
                                          max_depth=3,
                                          learn_rate=0.2,
                                          nfolds=nfolds,
                                          fold_assignment="Modulo",
                                          keep_cross_validation_predictions=True,
                                          seed=1)
    my_gbm.train(x=x, y=y, training_frame=train)
    # evaluate the performance
    perf_gbm_train = my_gbm.model_performance(train=True)
    perf_gbm_test = my_gbm.model_performance(test_data=test)
    print("GBM training performance: ")
    print(perf_gbm_train)
    print("GBM test performance: ")
    print(perf_gbm_test)
    # train and cross-validate a RF
    my_rf = H2ORandomForestEstimator(ntrees=30,
                                     nfolds=nfolds,
                                     fold_assignment="Modulo",
                                     keep_cross_validation_predictions=True,
                                     seed=1)
    my_rf.train(x=x, y=y, training_frame=train)
    # evaluate performance
    perf_rf_train = my_rf.model_performance(train=True)
    perf_rf_test = my_rf.model_performance(test_data=test)
    print("RF training performance: ")
    print(perf_rf_train)
    print("RF test performance: ")
    print(perf_rf_test)
    # Train and cross-validate an extremely-randomized RF
    # (histogram_type="Random" turns the RF into an extremely-randomized forest).
    my_xrf = H2ORandomForestEstimator(ntrees=50,
                                      nfolds=nfolds,
                                      histogram_type="Random",
                                      fold_assignment="Modulo",
                                      keep_cross_validation_predictions=True,
                                      seed=1)
    my_xrf.train(x=x, y=y, training_frame=train)
    # evaluate performance
    perf_xrf_train = my_xrf.model_performance(train=True)
    perf_xrf_test = my_xrf.model_performance(test_data=test)
    print("XRF training performance: ")
    print(perf_xrf_train)
    print("XRF test performance: ")
    print(perf_xrf_test)
    # Train a stacked ensemble using the GBM and GLM above
    stack = H2OStackedEnsembleEstimator(model_id="my_ensemble_guassian",
                                        base_models=[my_gbm.model_id, my_rf.model_id, my_xrf.model_id])
    stack.train(x=x, y=y, training_frame=train, validation_frame=test)  # also test that validation_frame is working
    # Check that prediction works
    pred = stack.predict(test_data= test)
    assert pred.nrow == test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(test.nrow)
    assert pred.ncol == 1, "expected " + str(pred.ncol) + " to be equal to 1 but it was equal to " + str(pred.ncol)
    # Does predict() have ugly side effects?
    pred = stack.predict(test_data= test)
    assert pred.nrow == test.nrow, "expected " + str(pred.nrow) + " to be equal to " + str(test.nrow)
    assert pred.ncol == 1, "expected " + str(pred.ncol) + " to be equal to 1 but it was equal to " + str(pred.ncol)
    # Evaluate ensemble performance
    perf_stack_train = stack.model_performance()
    perf_stack_test = stack.model_performance(test_data=test)
    # Does performance() have ugly side effects?
    perf_stack_train = stack.model_performance()
    perf_stack_test = stack.model_performance(test_data=test)
    # Training RMSE for each base learner
    baselearner_best_rmse_train = min(perf_gbm_train.rmse(), perf_rf_train.rmse(), perf_xrf_train.rmse())
    stack_rmse_train = perf_stack_train.rmse()
    print("Best Base-learner Training RMSE: {0}".format(baselearner_best_rmse_train))
    print("Ensemble Training RMSE: {0}".format(stack_rmse_train))
    #assert stack_rmse_train < baselearner_best_rmse_train, "expected stack_rmse_train would be less than " \
    #                                                     " found it wasn't baselearner_best_rmse_train"
    # Check that stack perf is better (smaller) than the best (smaller) base learner perf:
    # Test RMSE for each base learner
    baselearner_best_rmse_test = min(perf_gbm_test.rmse(), perf_rf_test.rmse(), perf_xrf_test.rmse())
    stack_rmse_test = perf_stack_test.rmse()
    print("Best Base-learner Test RMSE: {0}".format(baselearner_best_rmse_test))
    print("Ensemble Test RMSE: {0}".format(stack_rmse_test))
    assert stack_rmse_test < baselearner_best_rmse_test, "expected stack_rmse_test would be less than " \
                                                         " baselearner_best_rmse_test, found it wasn't " \
                                                         "baselearner_best_rmse_test = "+ \
                                                         str(baselearner_best_rmse_test) + ",stack_rmse_test " \
                                                         " = "+ str(stack_rmse_test)
    # Check that passing `test` as a validation_frame produces the same metric as stack.model_performance(test)
    # since the metrics object is not exactly the same, we can just test that RSME is the same
    perf_stack_validation_frame = stack.model_performance(valid=True)
    assert stack_rmse_test == perf_stack_validation_frame.rmse(), "expected stack_rmse_test to be the same as " \
                                                                  "perf_stack_validation_frame.rmse() found they were not " \
                                                                  "perf_stack_validation_frame.rmse() = " + \
                                                                  str(perf_stack_validation_frame.rmse()) + \
                                                                  "stack_rmse_test was " + str(stack_rmse_test)
if __name__ == "__main__":
pyunit_utils.standalone_test(stackedensemble_guassian_test)
else:
stackedensemble_guassian_test()
|
spennihana/h2o-3
|
h2o-py/tests/testdir_algos/stackedensemble/pyunit_stackedensemble_gaussian.py
|
Python
|
apache-2.0
| 7,384
|
[
"Gaussian"
] |
c4c752d86a627ef2f90d4eeef59d225fcb38ea1c1214bf42151b6b522b9f2505
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Philippe LAWRENCE
#
# This file is part of pyBar.
# pyBar is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyBar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyBar; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
try:
import pygtk
except:
print "Librairie pygtk indisponible"
sys.exit(0)
try:
pygtk.require('2.0')
except:
print "Nécessite pygtk2.0"
sys.exit(0)
# revoir sys.platform dans Const !!!!
import gtk
#print gtk.pygtk_version
#print gtk.gtk_version
import pango
import cairo
import gio # debug py2exe windows
import classEditor
import classDrawing
import classRdm
import classLigneInflu
import classDialog
import Const
import classProfilManager
import classPrefs
import classCMenu
import gobject
import threading
import copy
import os
#import pickle
import function
import file_tools
#from time import sleep
import xml.etree.ElementTree as ET
# Module-level initialisation: user directory, Windows log redirection,
# GTK threading and package metadata.
file_tools.set_user_dir()
if Const.SYS == "win32":
    # On Windows, redirect stdout/stderr to log files (no console available
    # under py2exe).
    path = os.path.join(Const.PATH, "stdout.log")
    sys.stdout = open(path, "w")
    path = os.path.join(Const.PATH, "stderr.log")
    sys.stderr = open(path, "w")
gobject.threads_init()
__version__ = Const.VERSION
__author__ = Const.AUTHOR
__date__ = "2008-06-01"
__file__ = "pyBar" # redefined for py2exe, until something better is found
print "pyBar%s Copyright (C) 2007 %s\nThis program comes with ABSOLUTELY NO WARRANTY\nThis is free software, and you are welcome to redistribute it under certain conditions." % (__version__, __author__)
class CombiButton(gtk.CheckButton):
    """Check button used to toggle a load case or combination on/off."""
    def __init__(self, label):
        gtk.CheckButton.__init__(self, label)
        #self.n_type = n_type
def About():
    """Display the modal 'About pyBar' dialog, then destroy it."""
    dlg = gtk.AboutDialog()
    dlg.set_icon_from_file("glade/logo.png")
    dlg.set_name("pyBar")
    dlg.set_version(Const.VERSION)
    dlg.set_authors([Const.AUTHOR])
    dlg.set_website(Const.SITE_URL)
    dlg.set_comments("pyBar est un logiciel libre de calcul de structures planes, basé sur la méthode des déplacements, écrit en Python et pyGTK")
    dlg.set_license("Vous pouvez modifier et redistribuer ce programme\nsous les conditions énoncées\npar la licence GNU GPL (version 2 ou ultérieure).\nUne copie de la licence GPL\nest dans le fichier « COPYING » fourni avec pyBar.\nAucune garantie n'est fournie pour l'utilisation de ce programme.")
    # Run modally; the return value is not needed.
    dlg.run()
    dlg.destroy()
class CombiBox(gtk.VBox):
    """Vertical box listing one check button per load case and combination."""
    # TODO: revisit main_win, study (translated from French: "revoir main_win, study")
    def __init__(self, *args, **kwargs):
        gtk.VBox.__init__(self, *args, **kwargs)
        self.set_name("combi")
    def fill_box(self, study, main_win):
        """Populate the box from study.rdm: load cases first, then combinations.

        Signal handler ids are collected in self.handler_list so callers can
        block/unblock them; buttons in error are greyed out.
        """
        self.handler_list = []
        rdm = study.rdm
        try:
            status = rdm.status
        except AttributeError: # for EmptyRdm
            status = -1
        if status == -1:
            # Nothing computed yet: leave the box empty.
            return
        Cases = rdm.Cases
        n_cases = len(Cases)
        try:
            ErrorCases = rdm.char_error
        except AttributeError:
            ErrorCases = []
        CombiCoef = rdm.CombiCoef
        combis = function.sortedDictKeys(CombiCoef)
        n_combi = len(combis)
        # build the list of load cases
        label = gtk.Label("Cas de charge:")
        label.set_alignment(0.2, 0.7)
        label.set_size_request(-1, 30)
        self.pack_start(label, False, False, 0)
        for i, val in enumerate(Cases):
            button = CombiButton(val)
            button.set_size_request(-1, 25)
            id = button.connect("clicked", main_win.event_combi_button, i)
            self.handler_list.append(id)
            button.set_name(str(i))
            if val in ErrorCases:
                # Case could not be computed: show it disabled.
                button.set_sensitive(False)
            self.pack_start(button, False, False, 0)
        # build the list of combinations
        if not n_combi == 0:
            label = gtk.Label("Combinaisons:")
            label.set_alignment(0.2, 0.7)
            label.set_size_request(-1, 30)
            self.pack_start(label, False, False, 0)
            for i, val in enumerate(combis):
                button = CombiButton(val)
                n = i + n_cases
                button.set_size_request(-1, 25)
                # combination numbering (original French note said "negative
                # starting from -1"; here n = i + n_cases is used — NOTE(review):
                # the old comment may be stale)
                id = button.connect("clicked", main_win.event_combi_button, n)
                self.handler_list.append(id)
                button.set_name(str(n))
                self.pack_start(button, False, False, 0)
        self.show_all()
#####################################################################
#
# CLASSE PRINCIPALE
#
#####################################################################
class MainWindow(object):
    def __init__(self):
        """Build the main window from the Glade file and show the home page."""
        # home page initialisation
        builder = self.builder = gtk.Builder()
        builder.add_from_file("glade/main.glade")
        # XXX drop the signal mapping, as done in Editor
        builder.connect_signals(self)
        self.window = builder.get_object("window1")
        self.main_box = builder.get_object("main_box")
        self._ini_first_page()
        self.window.show() # after the resize
        self._handler_id = {}   # signal handler ids, keyed by widget role
        self._tabs = []         # classDrawing.Tab instances, one per notebook page
        self.studies = {}       # open studies, keyed by study id
        self.message = classDialog.Message()
        self.is_press = False # left-mouse-button pressed flag (or press coords)
        self.key_press = False # Control_L key pressed flag
    # TODO: remove in main.glade
    def on_state_event(self, widget, event):
        """Window-state event (minimise/restore).

        Currently disabled: the early return below deliberately skips the
        minimise/restore detection code, which is kept for reference.
        """
        #print event.type
        return
        if event.changed_mask & gtk.gdk.WINDOW_STATE_ICONIFIED:
            if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
                print 'Window was minimized!'
            else:
                print 'Window was unminimized!'
    def on_w1_configure(self, widget, event):
        """Handle main-window resize events (currently a no-op)."""
        #print "Main::_configure_event"
        pass
        #print widget.window.get_state()
        #function.debug_get_props(widget)
def on_w1_destroy(self, widget, event=None):
"""Closing main window - Save user preferences"""
self.new_version = False
menu_button = self.builder.get_object("menu_cas")
display_combi = menu_button.get_active() == True and 'on' or 'off'
w, h = self.window.get_size()
self.UP.save_w1_config(w, h, display_combi, self.options)
if hasattr(self, "editor"):
changes = self.editor.get_modified_studies()
must_save = self._get_record_id(changes)
if must_save is None:
# must return True to prevent window closing
return True
ed_data = self.editor.data_editors
for id in must_save:
self._set_name(id)
ed_study = ed_data[id]
if not ed_study.path is None:
self.editor.save_study(ed_study)
self.UP.save_w2_config(self.editor._w, self.editor._h)
# sauvegarde des préférences des études
studies = self.studies
for study in studies.values():
self.save_drawing_prefs(study)
gtk.main_quit()
def _ini_first_page(self):
"""Dessine la page de lancement de l'application
Read size User preferences"""
self.new_version = None
#self._set_user_dir()
self.UP = classPrefs.UserPrefs()
menu_button = self.builder.get_object("menu_cas")
tag = self.UP.get_w1_box()
menu_button.set_active(tag)
menu_button.connect('activate', self._manage_combi_window)
sizes = self.UP.get_w1_size()
if sizes is None:
height = gtk.gdk.screen_height()
if height > 880:
self.window.resize(700, 700)
else:
w, h = sizes
self.window.resize(w, h)
image = gtk.Image()
image.set_from_file("glade/home.png")
image.show()
self.main_box.pack_start(image, True, True, 0)
# new version search
opt = self.UP.get_version()
if opt == 0:
try:
gobject.timeout_add(1000, self._get_info_version, opt) # destroy if callback return False
except:
pass
else:
self.UP.save_version(opt-1)
# options d'affichage (à déplacer?)
self.options = self.UP.get_w1_options()
# -----------------------------------------------------------
#
# Méthodes relatives au notebook des drawings
#
# -----------------------------------------------------------
    def _ini_application(self):
        """Drawings notebook initialisation - button setup.

        Replaces the home image with the notebook plus the bottom
        message line.  Returns the drawings notebook.
        """
        # remove the home-page image
        self.main_box.remove(self.main_box.get_children()[0])
        # drawings notebook init
        book = gtk.Notebook()
        book.set_scrollable(True)
        #book.set_show_tabs(False)
        # temporary, waiting for gtk version 2.20: the last notebook
        # page's label is a "+" button that opens a new tab
        b = gtk.Button()
        b.connect('clicked', self.on_new_tab)
        function.add_icon_to_button(b, gtk.STOCK_ADD)
        page = gtk.HBox()
        book.append_page(page, b)
        self.book = book
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        # bottom line for messages
        hbox = gtk.HBox()
        hbox.set_property('border_width', 4)
        hbox.set_size_request(-1, 55)
        self.message.ini_message(hbox) # avoids a problem with the singleton
        self.bottom_info_box = hbox
        self.main_box.pack_start(book, True, True, 0)
        self.main_box.pack_start(hbox, False, True, 0)
        self.main_box.show_all()
        return book
    def _ini_drawing_page(self, position):
        """Initialise a drawing tab at *position* and wire its area events.

        Creates the notebook on first use, then connects the drawing
        area's mouse/keyboard/expose signals to this window's handlers.
        """
        #print "Main::_ini_drawing_page"
        book = self.main_box.get_children()[0]
        # first call: the home page is still displayed, build the notebook
        if not isinstance(book, gtk.Notebook):
            book = self._ini_application()
            self._set_buttons_ini()
        # disconnect switch_page while inserting to avoid spurious events
        book.disconnect(self._handler_id["book"])
        tab = self._add_book_page(book, position)
        # scrolling arrows for notebook
        n_pages = book.get_n_pages()
        #if n_pages >= 2:
        #    book.set_show_tabs(True)
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        area = tab.area
        area.set_flags(gtk.CAN_FOCUS)
        area.grab_focus()
        area.connect("size-allocate", tab.configure_event)
        area.connect("expose-event", tab.expose_event)
        tab.handler_area = area.connect("motion-notify-event", self.motion_notify_event)
        area.connect("leave-notify-event", self.leave_notify_event)
        area.connect("button-press-event", self.button_press_event)
        area.connect("button-release-event", self._button_release_event)
        area.connect("key-press-event", self._key_press_event)
        area.connect("key-release-event", self._key_release_event)
    def _add_book_page(self, book, position):
        """Add a page to the drawings notebook.

        Builds a Tab, an HPaned holding the drawing area (and optionally
        the combination panel), plus a tab header with a close button.
        Returns the new Tab, which also becomes self.active_tab.
        """
        #print "Main::_add_book_page"
        tab = classDrawing.Tab(self)
        self.active_tab = tab
        vbox = gtk.VBox(False, 0)
        # initial divider position leaves ~180px for the side panel
        w = self.window.allocation.width-180
        hpaned = gtk.HPaned()
        hpaned.set_position(w)
        hpaned.add1(tab.sw)
        menu_button = self.builder.get_object("menu_cas")
        if menu_button.get_active() == True:
            sw = self._make_combi_box(tab)
            hpaned.pack2(sw, False)
            #hpaned.add2(sw)
        else:
            tab.right_menu = None
        hpaned.show()
        vbox.pack_start(hpaned, True, True, 0)
        vbox.show()
        #eventbox = gtk.EventBox()
        tab_box = gtk.HBox(False, 2)
        tab_label = gtk.Label() # TODO: manage according to available width
        tab_label.set_padding(4, 0)
        tab.title = tab_label
        close_b = gtk.Button()
        close_b.connect('clicked', self._on_remove_page, book, vbox)
        function.add_icon_to_button(close_b, gtk.STOCK_CLOSE)
        tab_box.pack_start(tab_label, False)
        tab_box.pack_start(close_b, False)
        tab_box.show_all()
        #eventbox.add(tab_box)
        self._tabs.insert(position, tab)
        book.insert_page(vbox, tab_box, position)
        # to test: works for the widgets but obviously not for the Tabs
        #book.set_tab_reorderable(vbox, True)
        book.set_current_page(position)
        return tab
    def _remove_page(self, book, n_page):
        """Remove notebook page *n_page* and drop studies no longer shown.

        Note: the notebook is not refreshed before the following page
        change.  Studies (and their editor buffers) are only deleted
        when no other page still displays them.
        """
        studies = self.studies
        tabs = self._tabs
        closed_tab = tabs[n_page]
        opened_studies = [] # studies still open on another page
        for tab in tabs:
            if tab is closed_tab:
                continue
            drawings = tab.drawings
            for drawing in drawings.values():
                id_study = drawing.id_study
                if not id_study in opened_studies:
                    opened_studies.append(id_study)
        # forget studies only displayed by the closed tab
        drawings = closed_tab.drawings
        for drawing in drawings.values():
            id_study = drawing.id_study
            if id_study in opened_studies:
                continue
            try:
                del (studies[id_study])
            except KeyError:
                continue
        # drop the matching editor buffers as well
        if hasattr(self, "editor"):
            ed_data = self.editor.data_editors
            for id in ed_data.keys():
                if id in opened_studies:
                    continue
                del(self.editor.data_editors[id])
        del(self._tabs[n_page])
        frame = book.get_nth_page(n_page)
        # disconnect the page-change handler to avoid a wrong page number
        book.disconnect(self._handler_id["book"])
        book.remove(frame)
        self._handler_id['book'] = book.connect("switch_page", self.on_switch_page)
        n_page = book.get_current_page()
        n_pages = book.get_n_pages()
        # never leave the "+"-button page (always last) selected
        if n_page == n_pages-1:
            book.set_current_page(n_page-1)
    def _on_remove_page(self, button, book, frame):
        """Close-button handler for a notebook page.

        Refuses to close the last real page, offers to save modified
        studies shown on the page, then delegates to _remove_page.
        """
        n_page = book.page_num(frame)
        n_pages = self.book.get_n_pages()
        # 2 pages == one drawing page + the "+"-button page: keep it
        if n_pages == 2:
            return
        tab = self._tabs[n_page]
        if hasattr(self, "editor"):
            for drawing in tab.drawings.values():
                try:
                    ed_data = self.editor.data_editors[drawing.id_study]
                except KeyError:
                    continue
                if ed_data.is_changed:
                    if file_tools.exit_as_ok_func2("Enregistrer le fichier '%s'?" % ed_data.name):
                        if ed_data.path is None:
                            # study never saved: ask for a destination first
                            path = file_tools.recursive_file_select(self.UP.get_default_path())
                            if not path is None:
                                ed_data.path = path
                        self.editor.save_study(ed_data)
        self._remove_page(book, n_page)
    def on_switch_page(self, widget=None, page=None, n=0):
        """Handle notebook page changes.

        Blocks switching to the last page (the "+" button), updates the
        active tab, the editor, titles, buttons and the message line.
        """
        #print 'Main::on_switch_page', n
        book = self.book
        n_pages = book.get_n_pages()
        # last page is the "new tab" button, not a real drawing page
        if n == n_pages-1:
            book.stop_emission("switch-page")
            return
        self.active_tab = tab = self._tabs[n]
        drawing = tab.active_drawing
        if drawing is None:
            rdm_status = 0
            errors = []
        else:
            id_study = drawing.id_study
            study = self.studies[id_study]
            rdm_status = study.rdm.status
            errors = study.rdm.errors
        # update the editor (drop our reference if its window was closed)
        if hasattr(self, "editor"):
            if self.editor.w2.window is None:
                del (self.editor)
            else:
                self._update_editor()
        self._set_buttons_rdm(rdm_status)
        self._update_titles()
        self._show_message(errors, False)
# -----------------------------------------------------------
#
# Méthodes relatives aux évènements
#
# -----------------------------------------------------------
def _key_release_event(self, widget, event):
tab = self.active_tab
key = gtk.gdk.keyval_name (event.keyval)
if key == 'Control_L':
self.key_press = False
event = gtk.gdk.Event(gtk.gdk.MOTION_NOTIFY)
tab.area.emit("motion-notify-event", event)
# attention si la fenetre de pybar n'a pas le focus, les événements clavier ne sont pas interceptés alors que les évènements souris le sont.
    def _key_press_event(self, widget, event):
        """Keyboard handler for the drawing area.

        Control_L arms multi-select, Escape cancels the current
        selection and repaints, Return confirms a drawing selection,
        Delete hides the selected value label.
        """
        key = gtk.gdk.keyval_name (event.keyval)
        tab = self.active_tab
        is_selected = tab.is_selected
        if key == 'Control_L':
            self.key_press = True
        elif key == 'Escape':
            # drop the selection and repaint the whole structure
            tab.is_selected = False
            tab.remove_tools_box()
            tab.area.window.set_cursor(None)
            tab.new_surface(tab.area_w, tab.area_h)
            tab.paint_all_struct(tab.cr, None, 1.)
            tab.area.queue_draw()
        elif key == 'Return':
            if not is_selected:
                return
            selected = is_selected[0]
            if selected == 'draw':
                drawing = is_selected[1]
                tab.active_drawing = drawing
                tab.do_new_drawing(False)
                self._update_combi_box()
        elif key == 'Delete':
            if not is_selected:
                return
            selected = is_selected[0]
            if selected == 'value':
                drawing, n_case, legend = is_selected[1:]
                self.on_hide_value(None, drawing, n_case, legend)
def _button_release_event(self, widget, event):
self.active_tab.finish_dnd(event, self.is_press)
self.is_press = False
def motion_notify_event(self, area, event):
self.active_tab.motion_notify_action(area, event, self.is_press)
# mettre une info ici
def leave_notify_event(self, layout, event):
"""événement : le curseur quitte la zone du layout"""
if not self.is_press is False:
return
tab = self.active_tab
tab.new_surface(tab.area_w, tab.area_h)
tab.paint_all_struct(tab.cr, None, 1.)
layout.queue_draw() # emit ne marche pas ici
    def button_press_event(self, widget, event):
        """Mouse-button dispatcher for the drawing area.

        Left click selects/acts on the hovered object, right click opens
        the matching context menu, double click opens the matching
        editor.  tab.is_selected is a tuple whose first element names
        the hovered object kind ('entry', 'curve', 'draw', 'info',
        'value', 'node', 'bar') and whose remaining elements depend on
        that kind; it is False when nothing is hovered.
        """
        #print "button_press_event"
        tab = self.active_tab
        try:
            obj_selected = tab.is_selected
        except AttributeError:
            obj_selected = False
        if event.type == gtk.gdk.BUTTON_PRESS:
            watch = gtk.gdk.Cursor(gtk.gdk.FLEUR)
            if event.button == 1:
                # remember the press position for drag handling
                self.is_press = (event.x, event.y)
                tab.motion = (0, 0) # temporary, until something better
                if obj_selected is False:
                    return
                drawing = obj_selected[1]
                status = drawing.status
                if obj_selected[0] == 'entry':
                    # close the inline entry box and select its drawing
                    entry = obj_selected[2]
                    destroy_ev = gtk.gdk.Event(gtk.gdk.DESTROY)
                    entry.emit("event", destroy_ev)
                    tab.remove_entry_box()
                    tab.remove_tools_box()
                    tab.is_selected = ('draw', drawing)
                    #notify = gtk.gdk.Event(gtk.gdk.MOTION_NOTIFY)
                    #tab.layout_motion_event(tab.area, notify) # emit does not work here
                    return
                if obj_selected[0] == 'curve':
                    self._select_curve(drawing, obj_selected[2])
                    return
                elif obj_selected[0] == 'draw':
                    tab.area.window.set_cursor(watch)
                    self._select_drawing(obj_selected[1])
                    return
                elif obj_selected[0] == 'info':
                    tab.area.window.set_cursor(watch)
                    return
                elif obj_selected[0] == 'value':
                    tab.area.window.set_cursor(watch)
                    return
                elif obj_selected[0] == 'node':
                    content = tab.get_message()
                    self.message.set_message(content)
                    return
                elif obj_selected[0] == 'bar':
                    content = tab.get_message()
                    self.message.set_message(content)
                    return
            elif event.button == 3:
                # right click: open the context menu for the hovered object
                #self.is_press = (event.x, event.y)
                x, y = event.x, event.y
                if obj_selected is False:
                    self._create_menu5(event, x, y)
                    return
                drawing = obj_selected[1]
                if obj_selected[0] == 'value':
                    self._create_menu7(event, obj_selected[1], obj_selected[2], obj_selected[3])
                    return
                if obj_selected[0] == 'curve':
                    self._create_menu6(event, obj_selected[1], obj_selected[2], obj_selected[4])
                    return
                if obj_selected[0] == 'node':
                    widget.window.set_cursor(None)
                    node = obj_selected[2]
                    # add the node menus here
                    self._create_menu3(event, drawing, node)
                    return
                if obj_selected[0] == 'bar':
                    widget.window.set_cursor(None)
                    self._create_menu2(event, obj_selected[2])
                    return
                self._create_menu1(event, obj_selected[1])
                return
        elif event.type == gtk.gdk._2BUTTON_PRESS:
            if obj_selected is False:
                return
            drawing = obj_selected[1]
            status = drawing.status
            if obj_selected[0] == 'node':
                return
                # add the node menus here
            if obj_selected[0] == 'bar':
                self.on_bar_select(None, barre=obj_selected[2])
                return
            if obj_selected[0] == 'curve':
                self._select_curve(drawing, obj_selected[2])
                return
            if obj_selected[0] == 'info':
                # only the title info object is editable by double click
                if not drawing.title_id == obj_selected[2]:
                    return
                self._on_edit_title(drawing, obj_selected[2])
                return
            if obj_selected[0] == 'value':
                self._on_edit_value(drawing, obj_selected[2], obj_selected[3])
                return
    # double click
def on_delete_value(self, widget, drawing, n_curve, legend):
"""Supprime une valeur sur une courbe"""
drawing.delete_value(n_curve, legend)
drawing.s_case = n_curve
drawing.del_patterns()
tab = self.active_tab
tab.del_surface()
tab.configure_event(tab.area)
tab.area.queue_draw()
def on_hide_value(self, widget, drawing, n_curve, legend):
"""Cache une valeur sur une courbe"""
drawing.set_hide_value(n_curve, legend)
drawing.s_case = n_curve
drawing.del_patterns()
tab = self.active_tab
tab.del_surface()
tab.configure_event(tab.area)
tab.area.queue_draw()
    def on_set_anchor(self, widget, drawing, n_curve, obj):
        """Anchor a value label on the drawing.

        Records the anchored position in drawing.user_values, nested as
        user_values[status][n_curve][bar][pos] -> {0: (dx, dy, hidden)}.
        """
        user_values = drawing.user_values
        tab = self.active_tab
        is_selected = tab.is_selected
        barre = is_selected[3]
        # lazily create the nested dicts down to this bar
        if not drawing.status in user_values:
            user_values[drawing.status] = {}
        values = user_values[drawing.status]
        if not n_curve in values:
            values[n_curve] = {}
        if not barre in values[n_curve]:
            values[n_curve][barre] = {}
        value = values[n_curve][barre]
        pos = obj.is_selected[2]
        if pos is None:
            # compute the curvilinear abscissa on the bar from the hit point
            id_study = drawing.id_study
            study = self.studies[id_study]
            rdm = study.rdm
            arc = rdm.struct.Curves[barre]
            pos = arc.get_curve_abs(obj.is_selected[1], obj.is_selected[0], rdm.struct.Lengths)
            #pos = arc.pos[obj.is_selected[1]]
        value[pos] = {0: (0, 0, False)} # dx, dy, hidden
        drawing.del_patterns()
        tab.del_surface()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
    def on_display_value(self, widget, drawing, n_curve):
        """Toggle display of the values on curve *n_curve* and redraw."""
        if widget.get_active():
            drawing.s_values.append(n_curve)
            drawing.restore_values(n_curve)
        else:
            # temporary trick: restore the selected curve's values first
            if drawing.s_curve == n_curve:
                drawing.restore_values(n_curve)
            try:
                drawing.s_values.remove(n_curve)
            except ValueError:
                pass
        self._do_new_drawing()
def on_display_char(self, widget, drawing, n_curve, curve=None):
"""Ouvre un dessin du chargement"""
tab = self.active_tab
#drawing = tab.active_drawing
drawing.s_case = n_curve
tab.add_char_drawing(drawing)
def on_select_curve(self, widget, drawing, n_curve, obj):
"""Sélectionne une courbe sur un dessin depuis un menu"""
self._select_curve(drawing, n_curve)
    def _select_curve(self, drawing, n_curve):
        """Select a curve on a drawing and refresh the display.

        For influence-line drawings (status 8) only the message line is
        updated; otherwise the drawing (and any child loading drawing)
        is repainted for the new current case.
        """
        #print "_select_curve", drawing.id
        tab = self.active_tab
        tab.active_drawing = drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        if drawing.status == 8:
            drawing.s_influ = n_curve
            content = drawing.get_influ_message(study, n_curve)
            self.message.set_message(content)
            return
        drawing.s_curve = n_curve
        drawing.del_patterns()
        tab.del_surface()
        # refresh the loading child drawing if it exists
        #char_drawing = drawing.char_drawing
        key = drawing.get_char_drawing()
        if not key is None:
            child = drawing.childs[key]
            child.s_case = n_curve
            child.del_patterns()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
        content = tab.get_char_message(rdm, n_curve)
        self.message.set_message(content)
def _on_edit_value(self, drawing, n_case, legend):
"""Modification de la position en x (sur la barre) de la légende"""
tab = self.active_tab
tab.on_show_value_box(drawing, n_case, legend)
def _on_edit_title(self, drawing, info_id):
"""Action de modification du titre d'un dessin"""
#print "_on_edit_title", info_id
tab = self.active_tab
tab.on_show_title_box(drawing)
def on_select_drawing(self, widget, drawing):
"""Sélectionne le diagramme"""
#print "on_select_drawing"
self._select_drawing(drawing)
    def _select_drawing(self, drawing):
        """Make *drawing* active, repaint it and refresh all UI state."""
        tab = self.active_tab
        prec_drawing = tab.active_drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        # loading child drawings are not selectable: just show the message
        if drawing.get_is_char_drawing():
            tab.do_new_drawing(False)
            content = tab.get_char_message(rdm, drawing.s_case)
            self.message.set_message(content)
            return
        tab.active_drawing = drawing
        tab.do_new_drawing(False)
        self._fill_right_menu()
        self._update_combi_box()
        # update the editor only if the active drawing actually changed
        if hasattr(self, "editor") and not (prec_drawing is drawing):
            self._update_editor()
        self._update_titles()
        self._show_message(rdm.errors, False)
        self._set_buttons_rdm(rdm.status)
    def on_select_bars(self, widget, drawing):
        """Open the bar-selection dialog and replace drawing.s_influ_bars.

        Used for influence lines: the selected subset of bars is stored
        on the drawing, then the menus and drawing are refreshed.
        """
        #tab = self.active_tab
        #drawing = tab.active_drawing
        id_study = drawing.id_study
        study = self.studies[id_study]
        rdm = study.rdm
        bars = rdm.struct.GetBarsNames()
        # sort bars? XXX
        #bars.sort()
        try:
            s_influ_bars = drawing.s_influ_bars
        except AttributeError:
            s_influ_bars = []
        bars = file_tools.open_dialog_bars(bars, s_influ_bars)
        # dialog cancelled or empty selection: keep the previous state
        if bars is False or bars == []:
            return
        drawing.s_influ_bars = bars
        self._fill_right_menu()
        self._do_new_drawing()
def on_node_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Node'] = widget.get_active()
self._do_new_drawing()
def on_barre_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Barre'] = widget.get_active()
self._do_new_drawing()
def on_axis_display(self, widget, drawing):
"""Relance un affichage en fonction de l'état de l'option"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.options['Axis'] = widget.get_active()
self._do_new_drawing()
def on_title_display(self, widget, drawing):
"""Affichage du titre du dessin"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.set_title_visibility(widget.get_active())
drawing.options['Title'] = widget.get_active()
self._do_new_drawing()
def on_series_display(self, widget, drawing):
"""Affiche les légendes des courbes"""
tab = self.active_tab
tab.active_drawing = drawing
drawing.set_series_visibility(widget.get_active())
drawing.options['Series'] = widget.get_active()
self._do_new_drawing()
    def on_synchronise(self, widget, drawing):
        """Toggle case synchronisation between *drawing* and its parent.

        When enabled the drawing shares the parent's case selection
        (same list object); when disabled it keeps an independent copy.
        """
        drawing.options['Sync'] = widget.get_active()
        if widget.get_active():
            drawing.s_cases = drawing.parent.s_cases
            drawing.s_case = drawing.parent.s_case
        else:
            drawing.s_cases = copy.copy(drawing.parent.s_cases)
        self._do_new_drawing()
        self._fill_right_menu()
        self._update_combi_box()
def on_add_sigma_drawing(self, widget, drawing):
"""Ajoute un dessin des contraintes normales"""
tab = self.active_tab
id_study = drawing.id_study
study = self.studies[id_study]
tab.add_sigma_drawing(drawing, study)
def on_add_drawing(self, widget, drawing):
"""Ajoute un diagramme à partir du diagramme sélectionné"""
#print "on_add_drawing"
tab = self.active_tab
id_study = drawing.id_study
study = self.studies[id_study]
tab.add_drawing(drawing, study)
self._fill_right_menu()
self._update_combi_box()
    def save_drawing_prefs(self, study):
        """Save the drawing preferences of *study* into its XML file.

        Replaces any existing <draw id="prefs"> element with a fresh one
        built from the parent drawings of the active tab, then rewrites
        the study file in place (no-op when the study has no path).
        """
        #print "save_drawing_prefs"
        id_study = study.id
        tab = self.active_tab
        rdm = study.rdm
        if isinstance(rdm, classRdm.EmptyRdm):
            return
        xml = rdm.struct.XML
        prefs = xml.getiterator('draw')
        root = xml.getroot()
        # drop the previous prefs node before writing the new one
        if len(prefs) == 1:
            node = prefs[0]
            root.remove(node)
        drawing_pref = ET.SubElement(root, "draw", {"id": "prefs"})
        for drawing in tab.drawings.values():
            if not drawing.id_study == id_study:
                continue
            if not drawing.get_is_parent():
                continue
            node1 = drawing.get_xml_prefs(drawing_pref)
            for key in drawing.childs:
                d = drawing.childs[key]
                node2 = d.get_xml_prefs(node1)
        path = study.path
        if path is None:
            return
        function.indent(root)
        #print ET.tostring(root)
        #return
        try:
            xml.write(path, encoding="UTF-8", xml_declaration=True)
        except IOError:
            print "Ecriture impossible dans %s" % path
    def on_save_drawings(self, widget, drawing):
        """Save the study's state (graphs and preferences) and close it.

        Persists the drawing preferences, removes every drawing of the
        study from the tab, drops its editor buffer and refreshes all
        dependent UI state.
        """
        tab = self.active_tab
        id_study = drawing.id_study
        study = self.studies[id_study]
        self.save_drawing_prefs(study)
        tab.remove_drawings_by_study(drawing)
        self._fill_right_menu()
        self._update_combi_box()
        if hasattr(self, "editor"):
            try:
                del(self.editor.data_editors[id_study])
            except KeyError:
                pass
            self._update_editor()
        self._update_titles()
        drawing = tab.active_drawing
        if drawing is None:
            # empty tab: fall back to the "loading" button state
            status = 2
        else:
            status = study.rdm.status
        self._set_buttons_rdm(status)
    def on_del_drawing(self, widget, drawing):
        """Delete the selected diagram.

        Parent drawings also trigger a preferences save before removal;
        then the menus, editor buffer, titles and buttons are refreshed.
        """
        #print "on_del_drawing", len(self.studies), drawing.id_study
        tab = self.active_tab
        id_study = drawing.id_study
        study = self.studies[id_study]
        if drawing.get_is_parent():
            self.save_drawing_prefs(study)
        tab.remove_drawing(drawing)
        self._fill_right_menu()
        self._update_combi_box()
        if hasattr(self, "editor"):
            try:
                del(self.editor.data_editors[id_study])
            except KeyError:
                pass
            self._update_editor()
        self._update_titles()
        drawing = tab.active_drawing
        if drawing is None:
            # empty tab: fall back to the "loading" button state
            status = 2
        else:
            status = study.rdm.status
        self._set_buttons_rdm(status)
def on_bar_select(self, widget, barre):
tab = self.active_tab
drawing = tab.active_drawing
id_study = drawing.id_study
study = self.studies[id_study]
li = drawing.get_bar_drawings()
for key in li:
child = drawing.childs[key]
child.draw_new_bar(tab, study.rdm.struct, barre.name)
drawing.s_bar = barre.name
def on_del_influ(self, widget, drawing, n, curve):
"""Efface une courbe de ligne d'influence donnée par n"""
try:
del(drawing.user_values[drawing.status][n])
except KeyError:
pass
del(drawing.influ_list[n])
drawing.s_influ = None
self._do_new_drawing()
    def open_node_dialog(self, widget, node):
        """Right click on a node - placeholder: to finish or to remove."""
        pass
# --------------------------------------------------
#
# Menus contextuels
#
# --------------------------------------------------
def _create_menu2(self, event, barre):
"""Crée et affiche le menu contextuel survol barre"""
tab = self.active_tab
drawing = tab.active_drawing
id_study = drawing.id_study
study = self.studies[id_study]
rdm = study.rdm
menu_cont = classCMenu.CMenu(self)
menu_cont.menu2(barre, drawing, rdm)
popup_menu = menu_cont.uimanager.get_widget('/popup')
popup_menu.popup(None, None, None, event.button, event.time)
tab.sw.window.set_cursor(None)
def _create_menu1(self, event, drawing):
"""Crée et affiche le menu contextuel pour le survol zone drawing
- Afficher noeuds
- Afficher barres
- Afficher options drawing (sélectionner, supprimer, ajouter)
- Afficher les cas et combinaisons si nécessaire"""
study = self.studies[drawing.id_study]
rdm = study.rdm
menu_cont = classCMenu.CMenu(self)
menu_cont.get_menu1(drawing, rdm)
popup_menu = menu_cont.uimanager.get_widget('/popup')
popup_menu.popup(None, None, None, event.button, event.time)
# désactivé, ne pas effacer
def _create_menu3(self, event, drawing, node):
"""Menu contextuel survol des noeuds"""
menu_cont = classCMenu.CMenu(self)
menu_cont.get_menu3(node, drawing)
popup_menu = menu_cont.uimanager.get_widget('/popup')
if not popup_menu == None:
popup_menu.popup(None, None, None, event.button, event.time)
    def _create_menu4(self, event, chart):
        """Hover context menu for charts (not implemented yet)."""
        pass
def _create_menu5(self, event, x, y):
"""Menu contextuel survol zone vide"""
menu_cont = classCMenu.CMenu(self)
menu_cont.get_menu5((x, y))
popup_menu = menu_cont.uimanager.get_widget('/popup')
if not popup_menu == None:
popup_menu.popup(None, None, None, event.button, event.time)
def _create_menu6(self, event, drawing, n_curve, curve):
"""Menu contextuel survol courbe"""
#print "menu6", n_curve, curve
status = drawing.status
menu_cont = classCMenu.CMenu(self)
if status == 8:
menu_cont.get_menu4(drawing, n_curve, curve)
else:
menu_cont.get_menu6(drawing, n_curve, curve)
popup_menu = menu_cont.uimanager.get_widget('/popup')
if not popup_menu == None:
popup_menu.popup(None, None, None, event.button, event.time)
# voir utilité des args
def _create_menu7(self, event, drawing, n_curve, legend):
"""Menu contextuel survol valeur courbe"""
menu_cont = classCMenu.CMenu(self)
menu_cont.menu7(drawing, n_curve, legend)
popup_menu = menu_cont.uimanager.get_widget('/popup')
if not popup_menu == None:
popup_menu.popup(None, None, None, event.button, event.time)
# -----------------------------------------------------------
#
# Méthodes relatives au menu des combinaisons et cas
#
# -----------------------------------------------------------
def _click_close_combi(self, widget):
"""Gère l'évènement de fermeture de la boite des combinaisons"""
#print 'Main::_click_close_combi'
menu_button = self.builder.get_object("menu_cas")
menu_button.set_active(False)
    def _close_combi(self):
        """Close the combination panel on every drawing page."""
        book = self.book
        n_pages = book.get_n_pages()
        for i in range(n_pages-1): # last tab = "+" button
            page = book.get_nth_page(i)
            hbox = page.get_children()[0]
            child = hbox.get_children()
            # fewer than 2 children means no combi panel attached
            if not len(child) == 2:
                break
            sw = child[1]
            hbox.remove(sw)
            tab = self._tabs[i]
            tab.right_menu = None
    def _open_combi(self, widget=None):
        """Open the combination panel on every drawing page."""
        #print 'Main::_open_combi'
        book = self.book
        n_pages = book.get_n_pages()
        for i in range(n_pages-1): # last tab holds the "+" button
            book_page = book.get_nth_page(i)
            paned = book_page.get_children()[0]
            tab = self._tabs[i]
            sw = self._make_combi_box(tab)
            paned.add2(sw)
            self._fill_right_menu(i)
        self._update_combi_box()
def _manage_combi_window(self, widget):
"""Gère l'évènement d'ouverture ou de fermeture de la fenetre des combi"""
#print "Main::_manage_combi_window"
if not widget.get_active():
self._close_combi()
else:
self._open_combi()
    def _make_combi_box(self, tab):
        """Create the combination panel (scrolled window, close button,
        content box) for *tab*.

        Stores the content box in tab.right_menu and returns the
        scrolled window to pack into the tab's paned widget.
        """
        #print "_make_combi_box"
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        pbox = gtk.VBox(False, 0)
        # close button
        align = gtk.Alignment(1, 1, 0, 0)
        image = gtk.Image()
        image.set_from_stock(gtk.STOCK_CLOSE, gtk.ICON_SIZE_MENU)
        button = gtk.Button()
        button.set_relief(gtk.RELIEF_NONE)
        button.connect('clicked', self._click_close_combi)
        button.add(image)
        align.add(button)
        pbox.pack_start(align, False, False, 2)
        # combinations and cases go in this box
        sw.add_with_viewport(pbox)
        sw.show_all()
        tab.right_menu = pbox
        return sw
def _fill_right_menu(self, n=None):
"""Supprime et crée un nouveau contenu dans la boite des menu de droite"""
#print "_fill_right_menu"
if n is None:
tab = self.active_tab
else:
tab = self._tabs[n]
box = tab.right_menu
if box is None:
return
drawing = tab.active_drawing
if drawing is None:
self._fill_combi_menu(tab, box)
elif drawing.status == 8:
self._fill_influ_menu(tab, box)
else:
self._fill_combi_menu(tab, box)
    def _fill_combi_menu(self, tab, box):
        """Replace the contents of the combination box with fresh buttons."""
        #print "_fill_combi_menu"
        drawing = tab.active_drawing
        childs = box.get_children()
        try:
            # child 0 is the close button; child 1 is the previous content
            child = childs[1]
            box.remove(child)
        except IndexError:
            pass
        if drawing is None:
            return
        pbox = CombiBox(False, 0)
        box.pack_start(pbox, False, False, 0)
        study = self.studies[drawing.id_study]
        pbox.fill_box(study, self)
    # rename
    def _update_combi_box(self):
        """Set the sensitivity and the active state of the case and
        combination buttons according to the drawing status and the
        errors encountered.

        Signals are blocked around set_active so the programmatic state
        change does not retrigger event_combi_button.
        """
        #print "_update_combi_box"
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        box = tab.right_menu
        if box is None:
            return
        if drawing.status == 8:
            box.set_sensitive(True)
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        view = drawing.get_combi_view(rdm, self._get_has_textview())
        if view is None:
            box.set_sensitive(False)
            return
        box.set_sensitive(True)
        # child 0 is the close button, child 1 the CombiBox
        pbox = box.get_children()[1]
        buttons = pbox.get_children()
        i = 0
        for button in buttons:
            if not isinstance(button, CombiButton):
                continue
            ind = int(button.get_name())
            button.handler_block(pbox.handler_list[ind])
            etat = view[i]  # (active, sensitive) for this case/combination
            button.set_active(etat[0])
            button.set_sensitive(etat[1])
            button.handler_unblock(pbox.handler_list[ind])
            i += 1
    def _event_combi_radio(self, widget):
        """Radio-like behaviour of the combination buttons.

        Activating one button deactivates all the others (signals
        blocked) and returns True; trying to deactivate the currently
        active button re-activates it and returns False.
        """
        #print "_event_combi_radio"
        tab = self.active_tab
        box = tab.right_menu.get_children()[1]
        drawing = tab.active_drawing
        if widget.get_active():
            buttons = box.get_children()
            for button in buttons:
                if not isinstance(button, CombiButton):
                    continue
                ind = int(button.name)
                if widget is button:
                    drawing.s_case = ind
                    drawing.s_curve = ind
                    continue
                # switch the others off without retriggering the handler
                button.handler_block(box.handler_list[ind])
                button.set_active(False)
                button.handler_unblock(box.handler_list[ind])
            return True
        # re-activate: one button must always stay selected
        ind = int(widget.name)
        widget.handler_block(box.handler_list[ind])
        widget.set_active(True)
        widget.handler_unblock(box.handler_list[ind])
        return False
    def _event_combi_check(self, widget, n_case):
        """Checkbox-like behaviour of the combination buttons.

        Maintains drawing.s_cases (sorted list of checked cases) and
        drawing.s_curve (last-touched or first remaining case), then
        updates the message line.
        """
        #print "_event_combi_check", n_case
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        s_cases = drawing.s_cases
        if widget.get_active():
            if not n_case in s_cases:
                s_cases.append(n_case)
            drawing.s_curve = n_case
        else:
            if n_case in s_cases:
                s_cases.remove(n_case)
            try:
                drawing.s_curve = s_cases[0]
            except IndexError:
                # no case left checked
                drawing.s_curve = None
        s_cases.sort()
        content = tab.get_char_message(rdm, drawing.s_curve)
        self.message.set_message(content)
def event_menu_button(self, widget, drawing, n_case):
"""Evènement sur un bouton à cocher de combinaisons depuis le menu contextuel"""
#tab = self.active_tab
#drawing = tab.active_drawing
status = drawing.status
if status in [0, 2, 3]:
drawing.s_case = n_case
else:
self._event_combi_check(widget, n_case)
self._do_new_drawing()
    def event_combi_button(self, widget, n_case):
        """Handle a click on a case/combination button of the side panel.

        Text views and single-case drawings use radio behaviour, other
        drawings use checkbox behaviour; the display (drawing or text
        view) and button sensitivities are then refreshed.
        """
        tab = self.active_tab
        drawing = tab.active_drawing
        status = drawing.status
        study = self.studies[drawing.id_study]
        sw = tab.sw
        if self._get_has_textview() == True:
            is_drawing = False
        else:
            is_drawing = True
        # radio mode
        if not is_drawing or status in [0, 2, 3]:
            if not self._event_combi_radio(widget):
                return
        # checkbutton mode
        else:
            self._event_combi_check(widget, n_case)
        if is_drawing:
            self._do_new_drawing()
        else:
            textview = sw.get_child().get_children()[0]
            self._print_message(textview)
        # update button sensitivities
        self._set_buttons_rdm(study.rdm.status)
# -----------------------------------------------------------
#
# Méthodes relatives à la mise à jour des boutons et titre
#
# -----------------------------------------------------------
def _update_titles(self):
"""Affichage du titre de la zone de dessin"""
#print "Main::update_titles"
book = self.book
w1 = self.window
tab = self.active_tab
tab_label = tab.title
drawing = tab.active_drawing
if drawing is None:
w1.set_title("pyBar")
tab_label.set_text("(Vide)")
tab_label.set_tooltip_text("")
return
status = drawing.status
study = self.studies[drawing.id_study]
name = study.name
path = study.path
tab_label.set_text(name)
if not path is None:
tab_label.set_tooltip_text(path)
titre = "%s - " % name
if status == 0:
titre += "Noeuds"
elif status == 1:
titre += "Barres"
elif status == 2:
titre += "Chargement"
elif status == 3:
titre += "Réaction d'appuis"
elif status == 4:
titre += "Effort normal"
elif status == 5:
titre += "Effort tranchant"
elif status == 6:
titre += "Moment fléchissant"
elif status == 7:
titre += "Déformée"
elif status == 8:
titre += "Ligne d'influence"
w1.set_title("pyBar - %s" % titre)
def _set_buttons_ini(self):
"""Activation des boutons après la page d'accueil"""
items = [
"menu_save",
"menu_save_as",
"menu_save_copy",
"menu_export",
"menu_reload",
"menu_cas",
"button_export",
"button_zoom_best",
"button_zoom_more",
"button_zoom_less",
"button_chart_less",
"button_chart_more",
]
# activation des boutons
for item in items:
widget = self.builder.get_object(item)
widget.set_sensitive(True)
def _set_buttons_rdm(self, rdm_status):
"""Fonction qui sert à modifier la sensibilité des boutons
en fonction de l'état de l'objet rdm"""
#print "_set_buttons_rdm", rdm_status
items1 = ["button_ddl",
"menu_ddl",
"button_eq",
"menu_eq",
"button_barre",
"menu_barre",
"menu_char",
"button_char",
"menu_degree",
"menu_reac",
"button_reac",
"menu_n",
"button_n",
"menu_v",
"button_v",
"menu_m",
"button_m",
"menu_defo",
"button_defo",
"menu_influ",
"button_influ",
]
items2 = ["button_build",
"menu_build"
]
items3 = ["button_editor",
"button_error",
"menu_editor",
"menu_error",
]
# activation des boutons
if rdm_status == 2:
status = True
else:
status = False
for item in items1:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
if rdm_status == -1:
status = False
else:
status = True
for item in items2:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
if rdm_status == -1:
status = False
else:
status = True
for item in items3:
widget = self.builder.get_object(item)
widget.set_sensitive(status)
# -----------------------------------------------------------
#
# Méthodes relatives aux actions sur des boutons
#
# -----------------------------------------------------------
    def on_zoom_more(self, widget):
        """Enlarge the drawing area by one zoom step."""
        tab = self.active_tab
        if not tab.status == 0:
            # a text view is displayed: zooming does not apply
            return
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        drawing.set_zoom("+")
        drawing.set_scale(study.rdm.struct)
        tab.get_layout_size({drawing.id: drawing})
        # drop cached patterns/surface so the next draw uses the new scale
        drawing.del_patterns()
        tab.del_surface()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
    def on_zoom_100(self, widget):
        """Fit the drawing to the visible scrolled-window area ("best zoom")."""
        tab = self.active_tab
        if not tab.status == 0:
            return
        drawing = tab.active_drawing
        if drawing is None:
            return
        status = drawing.status  # NOTE(review): unused local
        study = self.studies[drawing.id_study]
        w, h = drawing.width, drawing.height
        m = Const.AREA_MARGIN_MIN
        sw = tab.sw
        # available space inside the scrolled window, minus margins
        sw_w = float(sw.get_hadjustment().page_size) - 2*m
        sw_h = float(sw.get_vadjustment().page_size) - 2*m
        if w == 0 and h == 0:
            return
        if w == 0:
            coef = sw_h/h
        elif h == 0:
            coef = sw_w/w
        else:
            # keep aspect ratio: use the more constraining direction
            coef = min(sw_w/w, sw_h/h)
        drawing.zoom_best(coef, study.rdm.struct)
        tab.get_layout_size(tab.drawings)
        drawing.del_patterns()
        tab.del_surface()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
    def on_zoom_less(self, widget):
        """Shrink the drawing area by one zoom step."""
        tab = self.active_tab
        if not tab.status == 0:
            return
        drawing = tab.active_drawing
        if drawing is None:
            return
        status = drawing.status  # NOTE(review): unused local
        study = self.studies[drawing.id_study]
        drawing.set_zoom("-")
        drawing.set_scale(study.rdm.struct)
        area_w, area_h = tab.area.size_request()  # NOTE(review): result unused
        drawing.del_patterns()
        tab.del_surface()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
def on_chart_zoom_more(self, widget):
tab = self.active_tab
if not tab.status == 0:
return
self._set_chart_zoom(None, 'more')
def on_chart_zoom_less(self, widget):
tab = self.active_tab
if not tab.status == 0:
return
self._set_chart_zoom(None, 'less')
def _set_chart_zoom(self, widget=None, tag='more'):
"""Augmente ou diminue la valeur du zoom du graphe"""
tab = self.active_tab
drawing = tab.active_drawing
if drawing is None:
return
status = drawing.status
#zoom = drawing.chart_zoom
if tag == "more":
zoom = 1.2
else:
zoom = 1 / 1.2
if status in drawing.chart_zoom:
#try:
drawing.chart_zoom[status] *= zoom
else:
drawing.chart_zoom[status] = zoom
# return
self._do_new_drawing()
    def on_open_file(self, widget=None, x=None, y=None):
        """Open an existing study, creating the first drawing page if needed."""
        try:
            book = self.book
        except AttributeError:
            # no notebook yet: build the initial drawing page first
            self._ini_drawing_page(0)
        self._open_study()
        self._update_titles()
        if hasattr(self, "editor"):
            self._update_editor()
    def on_new_tab(self, widget):
        """Open a new notebook tab."""
        try:
            book = self.book
            pos = book.get_n_pages() - 1
        except AttributeError:
            # no notebook yet: this will be the first page
            pos = 0
        self._ini_drawing_page(pos)
        if hasattr(self, "editor"):
            self._update_editor()
        self._update_titles()
def _write_save_file(self, file):
"""Ecriture du fichier de sauvegarde"""
#print "_write_save_file"
tab = self.active_tab
study = self.studies[tab.active_drawing.id_study]
content = study.rdm.struct.RawReadFile()
try:
f = open(file, 'w')
f.write(content)
f.close()
except IOError as e:
content = ("%s" % e, 0) # formatage obligatoire
classDialog.Message().set_message(content)
except:
content = ("Enregistrement impossible", 0)
classDialog.Message().set_message(content)
    def _open_study(self):
        """Open a study file into the active tab via a file-chooser dialog."""
        def ConvertDXF2XML(path):  # test helper (DXF conversion placeholder)
            return path[-3:]+"dat"
        book = self.book
        page = book.get_current_page()
        tab = self.active_tab
        path = self.UP.get_default_path()
        path = file_tools.file_selection(path)
        if not path:
            return False
        if os.path.splitext(path)[1].lower() == '.dxf':
            # DXF import is disabled; conversion would return the .dat path
            #path = ConvertDXF2XML(path)
            if path is None:
                return False
        self.UP.save_default_path(os.path.dirname(path))
        if self._file_is_closed(path):
            study, drawings = tab.add_study(path, self.options)
            if drawings == []:
                # nothing could be drawn: show parser errors and stop
                self._show_message(study.rdm.errors)
                return
            tab.del_surface()
            tab.configure_event(tab.area)
            tab.area.queue_draw()
            rdm = study.rdm
            self._fill_right_menu()
            self._update_combi_box()
            rdm_status = rdm.status
            self._set_buttons_rdm(rdm_status)
            if not rdm_status in [-1, 0]:
                # keep a backup copy next to the original file
                self._write_save_file('%s.dat~' % path[:-4])
            self._show_message(rdm.errors)
        else:
            # study already open: delegate handling to file_tools
            file_tools.open_as_ok_func(path)
def on_save(self, widget=None):
"""Evènement d'enregistrement d'une étude modifiée"""
#print "Main::_on_save"
if not hasattr(self, 'editor'):
content = ("Etude déjà enregistrée ou vide", 2)
classDialog.Message().set_message(content)
return
if not hasattr(self.editor, 'w2'):
content = ("Etude déjà enregistrée", 2)
classDialog.Message().set_message(content)
return
win = self.editor.w2.window
if win is None:
content = ("Etude déjà enregistrée", 2)
classDialog.Message().set_message(content)
return
self.update_from_editor()
    def on_save_as(self, widget):
        """Save the study under a new path and switch the study to that path."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        if isinstance(rdm, classRdm.EmptyRdm):
            # unsaved new study: the plain save path handles this case
            self.on_save()
            return
        content = rdm.struct.RawReadFile()
        path = file_tools.file_save(self.UP.get_default_path())
        if not path:
            return
        self.UP.save_default_path(os.path.dirname(path))
        if not file_tools.save_as_ok_func(path):
            return
        try:
            f = open(path, 'w')
            f.write(content)
            f.close()
        except IOError as e:
            content = ("%s" % e, 0)  # required message format
            classDialog.Message().set_message(content)
            return
        name = os.path.basename(path)
        if self._file_is_closed(path):
            rdm.struct.RenameObject(path)
            study.path = path
            study.name = name
            drawing.set_status(1)
            self._do_new_drawing()
        else:
            content = ("Enregistrement impossible: étude déjà ouverte", 0)
            classDialog.Message().set_message(content)
        # refresh the editor window, titles and combination box
        if hasattr(self, "editor"):
            self._update_editor()
        self._update_titles()
        self._update_combi_box()
    def on_save_copy(self, widget):
        """Save a copy of the study without switching to the new file."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        if isinstance(rdm, classRdm.EmptyRdm):
            content = ("Impossible de copier une étude vide", 1)
            classDialog.Message().set_message(content)
            return
        content = rdm.struct.RawReadFile()
        path = file_tools.file_save(self.UP.get_default_path())
        if not path:
            return
        self.UP.save_default_path(os.path.dirname(path))
        if not file_tools.save_as_ok_func(path):
            return
        try:
            f = open(path, 'w')
            f.write(content)
            f.close()
        except IOError as e:
            content = ("%s" % e, 0)  # required message format
            classDialog.Message().set_message(content)
    def on_reload(self, widget):
        """Reload the active study from its file on disk."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        if isinstance(rdm, classRdm.EmptyRdm):
            # nothing on disk yet for an empty study
            return
        structure = classRdm.StructureFile(study.path)
        if structure.status == -1:
            # file deleted or unreadable
            content = ("Une erreur est survenue durant le chargement", 0)
            classDialog.Message().set_message(content)
            return
        study.rdm = classRdm.R_Structure(structure)
        self._do_new_drawing()
        if hasattr(self, "editor"):
            self._update_editor()
        self._update_titles()
        self._fill_right_menu()
        self._update_combi_box()
    def on_new_file(self, widget):
        """Create a new (empty) study in the active tab."""
        try:
            book = self.book
        except AttributeError:
            self._ini_drawing_page(0)
        # defer so the drawing area has time to settle before the study is added
        gobject.idle_add(self.on_new_study)
    def on_new_study(self, widget=None, x=None, y=None):
        """Add an empty study to the current page and open the data editor."""
        book = self.book
        current_page = book.get_current_page()
        tab = self._tabs[current_page]
        study, drawing = tab.add_empty_study(self.options, x, y)
        if hasattr(tab, "surface"):
            tab.del_surface()
            tab.configure_event(tab.area)
        else:
            # no surface yet: trigger the initial configure event manually
            event = gtk.gdk.Event(gtk.gdk.CONFIGURE)
            tab.area.emit("configure-event", event)
        tab.area.queue_draw()
        name = study.name
        if hasattr(self, "editor"):
            self._update_editor()
        else:
            self.editor = classEditor.Editor(study, self)
            self.editor.w2.connect("delete-event", self._destroy_editor)
        self._update_titles()
        self._set_buttons_rdm(0)
        self._fill_right_menu()
        self._update_combi_box()
def _file_is_closed(self, path):
"""Vérifie si une étude de chemin path est déjà ouverte"""
#print "Main::_file_is_closed"
#for tab in self._tabs:
for study in self.studies.values():
if study.path == path:
return False
return True
    def on_open_editor(self, widget):
        """Open (or raise) the data editor for the active study."""
        if hasattr(self, 'editor') and not self.editor.w2 is None:
            # editor already open: bring its window to the front
            self.editor.w2.present()
        else:
            book = self.book
            n_pages = book.get_n_pages()  # NOTE(review): unused local
            current_page = book.get_current_page()  # NOTE(review): unused local
            tab = self.active_tab
            drawing = tab.active_drawing
            if drawing is None:
                return
            study = self.studies[drawing.id_study]
            self.editor = classEditor.Editor(study, self)
            self.editor.w2.connect("delete-event", self._destroy_editor)
            #self.editor.record_button.connect("clicked", self.update_from_editor)
    def on_edit_eq(self, widget):
        """Show the curve equations in a text view (tab status 3)."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        status = drawing.status
        # equations only exist for the results views (4-8)
        if not status in [4, 5, 6, 7, 8]:
            return
        if status == 8 and drawing.s_influ is None:
            # influence-line view with nothing selected
            return
        if not drawing.status == 8:
            self._textview_commun()
        self.active_tab.status = 3
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
    def on_edit_error(self, widget):
        """Show the error/warning messages in a text view (tab status 2)."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        self._textview_commun()
        self.active_tab.status = 2
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
    def on_edit_ddl(self, widget):
        """Show the numerical results in a text view (tab status 1)."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        self.active_tab.status = 1
        self._textview_commun()
        self._clear_sw_content()
        textview = self._add_textview()
        self._print_message(textview)
        self._update_combi_box()
def _textview_commun(self):
tab = self.active_tab
box = tab.right_menu
if box is None:
return
pbox = box.get_children()[1]
if pbox.get_name() == 'influ':
self._fill_combi_menu(tab, box)
    def _export_jpg(self, file, reso):
        """Export the current view as a JPEG file.

        file: destination path; reso: JPEG quality passed to pixbuf.save.
        """
        tab = self.active_tab
        area = tab.area
        width = tab.area_w
        height = tab.area_h
        pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, width, height)
        colormap = gtk.gdk.colormap_get_system()
        rect = gtk.gdk.Rectangle(0, 0, width, height)
        # force a fresh render of the whole area before grabbing its pixels
        tab.expose_event(area, rect)
        try:
            drawable = area.bin_window
        except AttributeError:
            # area not realized yet: nothing to capture
            return
        pixbuf.get_from_drawable(drawable, colormap, 0, 0, 0, 0, width, height)
        pixbuf.save(file, "jpeg", {"quality": str(reso)})
    def _export_svg(self, file):
        """Export the current view as an SVG file."""
        self.active_tab.draw_svg_file(file)
    def _export_png(self, file):
        """Export the current view as a PNG file (docstring previously said SVG)."""
        self.active_tab.draw_png_file(file)
def on_export(self, widget):
"""Effectue une sauvegarde de l'écran au format jpg ou svg"""
tab = self.active_tab
if not tab.status == 0:
return
drawing = tab.active_drawing
try:
status = drawing.status
except AttributeError:
status = -1
if status == -1:
return
data = file_tools.file_export(self.UP.get_default_path())
if data is None:
return
file = data[0]
format = data[1]
if not file_tools.save_as_ok_func(file):
return
watch = gtk.gdk.Cursor(gtk.gdk.WATCH)
self.window.window.set_cursor(watch)
if format == 'JPEG':
reso = file_tools.open_dialog_resol()
if reso == False:
return
self._export_jpg(file, reso)
if format == 'PNG':
self._export_png(file)
elif format == 'SVG':
self._export_svg(file)
self.window.window.set_cursor(None)
    def on_about(self, widget):
        """Show the About dialog."""
        About()
def _get_info_version(self, value):
"""Vérifie la dernière version et lance Dialog - Return False"""
#print "_get_info_version", self.new_version, value
if self.new_version is None:
return True
if self.new_version is False:
return False
self._open_dialog_version(self.new_version)
return False
    def _open_dialog_version(self, last):
        """Open the modal dialog announcing that version *last* is available."""
        dialog = gtk.Dialog("Nouvelle version",
                    None,
                    gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
                    (gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        dialog.set_icon_from_file("glade/logo.png")
        text = "La version %s de pyBar est disponible." % last
        button = gtk.LinkButton(Const.DOWNLOAD_URL, text)
        # TODO: the link button does not work on Windows
        button.set_relief(gtk.RELIEF_NONE)
        button.connect('clicked', self._dialog_destroy)
        button.set_border_width(20)
        vbox = dialog.vbox
        vbox.add(button)
        button = gtk.CheckButton("Me le rappeler plus tard")
        button.connect('clicked', self._set_version_pref)
        vbox.add(button)
        vbox.show_all()
        result = dialog.run()
        dialog.destroy()
def _set_version_pref(self, widget):
"""Enregistre la préférence pour la recherche de la nouvelle version"""
if widget.get_active():
self.UP.save_version(10)
else:
self.UP.save_version(0)
def _dialog_destroy(self, widget):
"""Fermeture du Dialog de la nouvelle version"""
widget.get_parent().get_parent().destroy()
def on_open_help(self, widget):
import webbrowser
try:
webbrowser.open(Const.HELP_URL)
except:
classDialog.Message().set_message("Erreur avec le navigateur", 0)
    def on_edit_degree(self, widget):
        """Show the degree of static indeterminacy in a message dialog."""
        tab = self.active_tab
        drawing = tab.active_drawing
        if drawing is None:
            return
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        try:
            deg = str(rdm.struct.CalculDegreH())
            state = 2
        except AttributeError:
            # structure not available (e.g. empty study)
            deg = 'Une erreur est survenue'
            state = 0
        content = ("Degré d'hyperstaticité de la structure: %s" % deg, state)
        classDialog.Message().set_message(content)
def _show_message(self, content, dialog=True):
#print "_show_message", content
errors = [i[0] for i in content if i[1] == 0]
warnings = [i[0] for i in content if i[1] == 1]
if errors:
self.message.set_message((errors[0], 0))
if dialog:
classDialog.Dialog(errors)
elif warnings:
self.message.set_message(('', 1))
else:
self.message.set_message(None)
# -----------------------------------------------------------
#
# Méthodes relatives au dessin
#
# -----------------------------------------------------------
    def _do_new_drawing(self):
        """Redraw the active area without recomputing the rdm instance."""
        tab = self.active_tab
        sw = tab.sw
        if isinstance(sw.get_child().get_children()[0], gtk.TextView):
            # a text view is displayed: swap the drawing widget back in first
            self._clear_sw_content()
            self._add_drawing_widget()
        tab.do_new_drawing(True)
    def update_drawing(self, case_page=None):
        """Refresh a status-0 drawing from the data editor.

        case_page: load-case number to select, or None to rebuild the whole
        structure from the editor's XML.
        """
        if self.editor.data_editor.need_drawing == False:
            return
        tab = self.active_tab
        drawing = tab.active_drawing
        if not drawing.parent is None:
            # child drawings delegate to their parent
            drawing = drawing.parent
        if not drawing.status == 0:
            return
        study = self.studies[drawing.id_study]
        if case_page is None:
            if not self.editor.xml_status == -1:
                self.editor.data_editor.set_xml_structure()
            # rebuild an empty rdm around the editor's current XML tree
            study.rdm = classRdm.EmptyRdm(self.editor.data_editor.XML, self.editor.data_editor.name)
            self._fill_right_menu()
            self._update_combi_box()
            self._set_buttons_rdm(study.rdm.status)
            self.editor.data_editor.need_drawing = False
        else:
            drawing.s_case = case_page
            self._fill_right_menu()
            self._update_combi_box()
        tab.do_new_drawing2(study, drawing)
        self.message.set_message(("Enregistrer l'étude pour continuer", 1))
def on_dynamic_expose(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
study = self.studies[drawing.id_study]
rdm = study.rdm
try:
n_cases = rdm.n_cases
except AttributeError:
n_cases = 1
if drawing.s_case > n_cases-1:
drawing.s_case = 0
if hasattr(self, "editor"):
ed_data = self.editor.data_editors[drawing.id_study]
drawing.status = 0
if ed_data.is_changed:
self.update_drawing()
else:
self._expose_commun(0)
self._update_combi_box()
self._update_titles()
else:
self._expose_commun(0)
self._update_combi_box()
self._update_titles()
def on_bar_expose(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(1)
self._update_combi_box()
self._update_titles()
def on_char_expose(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(2)
self._update_combi_box()
self._update_titles()
def on_expose_reac(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(3)
self._update_combi_box()
self._update_titles()
def on_expose_n(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(4)
self._update_combi_box()
self._update_titles()
def on_expose_v(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(5)
self._update_combi_box()
self._update_titles()
def on_expose_m(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(6)
self._update_combi_box()
self._update_titles()
def on_expose_defo(self, widget):
drawing = self.active_tab.active_drawing
if drawing is None:
return
self._expose_commun(7)
self._update_combi_box()
self._update_titles()
    def _expose_commun(self, new_status):
        """Shared logic of all expose handlers: switch the active drawing
        (and its synchronized relatives) to *new_status* and redraw."""
        tab = self.active_tab
        tab.status = 0
        drawing = tab.active_drawing
        old_status = drawing.status
        drawing.set_status(new_status)
        if old_status == 8:
            # leaving the influence-line view: rebuild the side menu
            self._fill_right_menu()
        if not new_status in [4, 5, 6, 7]:
            # outside the results views, drop the auxiliary chart drawing if any
            key = drawing.get_char_drawing()
            if not key is None:
                char_drawing = tab.drawings[key]
                del(drawing.childs[key])
                char_drawing.mapping.remove_map(key)
                del(tab.drawings[key])
        # propagate the status change to synchronized bar drawings
        li = drawing.get_bar_drawings()
        for key in li:
            child = drawing.childs[key]
            sync = child.options['Sync']
            if sync:
                child.del_patterns()
                child.set_status(new_status)
        if not drawing.get_is_parent():
            # a child drawing is active: keep its synchronized parent in step
            parent = drawing.parent
            sync = drawing.options['Sync']
            if sync:
                parent.del_patterns()
                parent.set_status(new_status)
        drawing.del_patterns()
        tab.del_surface()
        layout = tab.area
        sw = tab.sw
        if isinstance(sw.get_child().get_children()[0], gtk.TextView):
            # a text view was displayed: restore the drawing widget
            self._clear_sw_content()
            self._add_drawing_widget()
        else:
            tab.configure_event(layout)
        tab.area.queue_draw()
# -----------------------------------------------------------
#
# Méthodes relatives au basculement de mode (graphe/info)
#
# -----------------------------------------------------------
# pas très utile
def _get_has_textview(self):
"""Return True if screen is textview"""
tab = self.active_tab
if tab.status == 0:
return False
return True
    def _add_drawing_widget(self, tab=None):
        """Put the drawing area back into the scrolled window (tab status 0)."""
        if tab is None:
            tab = self.active_tab
        tab.status = 0
        sw = tab.sw
        area = tab.area
        sw.add_with_viewport(area)  # triggers two configure events
        #sw.add(area)
    def _add_textview(self):
        """Create a text view inside the scrolled window and return it."""
        tab = self.active_tab
        sw = tab.sw
        textview = gtk.TextView()
        textview.show()
        sw.add_with_viewport(textview)
        #sw.add(textview)
        return textview
    # TODO: rename
    def _clear_sw_content(self, tab=None):
        """Remove the scrolled window's current child (layout or text view)."""
        if tab is None:
            tab = self.active_tab
        sw = tab.sw
        viewport = sw.get_children()[0]
        child = sw.get_child().get_children()[0]
        viewport.remove(child)
    def _do_buffer_error(self, textview):
        """Build and attach a formatted buffer listing the study's messages.

        Each message gets a severity icon (stop/warning/info/apply) inserted
        at a child anchor after the buffer is attached to *textview*.
        """
        textbuffer = gtk.TextBuffer()
        end_iter = textbuffer.get_end_iter()
        h1 = textbuffer.create_tag("h1", weight = pango.WEIGHT_BOLD,
                    size_points = 12.0, foreground = "purple")
        h2 = textbuffer.create_tag("h2", weight = pango.WEIGHT_BOLD,
                    size_points = 11.0)
        p = textbuffer.create_tag("p", weight = pango.WEIGHT_NORMAL,
                    size_points = 9.0)
        # severity code -> stock icon
        id_image = {0 : gtk.STOCK_STOP, 1 : gtk.STOCK_DIALOG_WARNING, 2 : gtk.STOCK_INFO, 3 : gtk.STOCK_APPLY}
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        try:
            errors = rdm.errors
        except AttributeError:
            errors = None
        text = "Messages pour l'étude \"%s\"\n" % study.name
        textbuffer.insert_with_tags(end_iter, text, h1)
        # li_anchor always ends up with at least one element
        li_anchor = []
        if errors is None:
            anchor = textbuffer.create_child_anchor(end_iter)
            li_anchor.append((anchor, 3))
            text = " Veuillez enregistrer l'étude en cours.\n"
            textbuffer.insert_with_tags(end_iter, text, p)
        elif len(errors) == 0:
            anchor = textbuffer.create_child_anchor(end_iter)
            li_anchor.append((anchor, 3))
            text = " Aucune erreur a été détectée pendant la lecture des données.\n"
            textbuffer.insert_with_tags(end_iter, text, p)
        else:
            for elem in errors:
                code = elem[1]
                text = elem[0]
                anchor = textbuffer.create_child_anchor(end_iter)
                li_anchor.append((anchor, code))
                text = ' %s' % text
                textbuffer.insert_with_tags(end_iter, '%s\n' % text, p)
        textview.set_buffer(textbuffer)
        # insert the severity icons at their anchors (buffer must be attached first)
        for elem in li_anchor:
            code = elem[1]
            image = gtk.Image()
            image.set_from_stock(id_image[code], gtk.ICON_SIZE_MENU)
            image.show()
            textview.add_child_at_anchor(image, elem[0])
        #textview.scroll_to_iter(end_iter, 0)  # did not work
def _do_buffer_eq(self):
textbuffer = gtk.TextBuffer()
#pixbuf = gtk.gdk.pixbuf_new_from_xpm_data(book_closed_xpm)
tab = self.active_tab
drawing = tab.active_drawing
status = drawing.status
if status == 8:
self.fill_buffer1(textbuffer, drawing)
else:
self.fill_buffer2(textbuffer, drawing)
return textbuffer
    def fill_buffer1(self, textbuffer, drawing):
        """Fill *textbuffer* with the influence-line equations of the
        selected result (shear, moment, deflection or support reaction)."""
        h1 = textbuffer.create_tag("h1", weight=pango.WEIGHT_BOLD, size_points=12.0, foreground="purple")
        h2 = textbuffer.create_tag("h2", weight=pango.WEIGHT_BOLD, size_points=11.0)
        end_iter = textbuffer.get_end_iter()
        study = self.studies[drawing.id_study]
        rdm = study.influ_rdm
        struct = rdm.struct
        units = struct.units
        factor_F = units['F']
        factor_L = units['L']
        unit_F = study.get_unit_name('F')
        unit_L = study.get_unit_name('L')
        if drawing.s_influ is None:
            # nothing selected: leave the buffer empty
            return
        obj = drawing.influ_list[drawing.s_influ]
        status = obj.status
        u = obj.u
        elem = obj.elem
        # unit kind used for the equation values (force, moment or length)
        if status == 1 or status == 4:
            type = "F"
        elif status == 2:
            type = "M"
        elif status == 3:
            type = "L"
        texts = {1: "Effort tranchant", 2: "Moment fléchissant", 3: "Déformée", 4: "Réaction d'appui"}
        text = "Equations des courbes d'influence :\n%s\n" % texts[status]
        textbuffer.insert_with_tags(end_iter, text, h1)
        if status == 4:
            text = "Noeud : %s\n" % elem
        else:
            text = "Barre : %s, position x=%s\n" % (elem, u)
        textbuffer.insert(end_iter, text)
        try:
            bars = drawing.s_influ_bars
        except AttributeError:
            # no explicit selection: use every bar of the structure
            bars = rdm.struct.Barres
        for barre in bars:
            data = rdm.InfluBarre(barre, elem, u, status, True)
            text = "\tBarre = %s\n" % barre
            textbuffer.insert_with_tags(end_iter, text, h2)
            text2 = ''
            xprec = 0.
            for tu in data:
                x, coefs = tu[0], tu[1]
                x /= factor_L
                text2 += "x compris entre %s et %s %s\n" % (xprec, x, unit_L)
                text2 += self.set_equation_string(coefs, factor_L, factor_F, unit_L, unit_F, type)
                xprec = x
            textbuffer.insert(end_iter, text2)
def fill_buffer2(self, textbuffer, drawing):
"""Remplit le buffer pour les sollicitations ou déformée"""
h1 = textbuffer.create_tag("h1", weight=pango.WEIGHT_BOLD, size_points=12.0, foreground="purple")
h2 = textbuffer.create_tag("h2", weight=pango.WEIGHT_BOLD, size_points=11.0)
end_iter = textbuffer.get_end_iter()
study = self.studies[drawing.id_study]
status = drawing.status
rdm = study.rdm
print rdm.GetCombiMax("M")
struct = rdm.struct
units = struct.units
factor_F = units['F']
factor_L = units['L']
unit_F = study.get_unit_name('F')
unit_L = study.get_unit_name('L')
if status == 4 or status == 5:
type = "F"
elif status == 6:
type = "M"
elif status == 7:
type = "L"
n_case = drawing.s_curve
Char = rdm.GetCharByNumber(n_case)
name = rdm.GetCharNameByNumber(n_case)
text = "Equations des courbes pour \"%s\"\n" % name
textbuffer.insert_with_tags(end_iter, text, h1)
texts = {4: "Effort normal", 5: "Effort tranchant", 6: "Moment fléchissant", 7: "Déformée"}
text = texts[status]
textbuffer.insert(end_iter, "(%s)\n" % text)
for barre in rdm.struct.Barres:
text = "\tBarre = %s\n" % barre
textbuffer.insert_with_tags(end_iter, text, h2)
data = rdm.GetDataEq(barre, Char, status)
if data == []:
text = "\tpas d'équation disponible\n"
textbuffer.insert(end_iter, text)
continue
text2 = ''
xprec = 0.
for tu in data:
x, coefs = tu[0], tu[1]
x /= factor_L
text2 += "x compris entre %s et %s %s\n" % (xprec, x, unit_L)
text2 += self.set_equation_string(coefs, factor_L, factor_F, unit_L, unit_F, type)
text2 += "Salut c'est moi!"
xprec = x
textbuffer.insert(end_iter, text2)
# XXX suppression des 0 à faire
def set_equation_string(self, coefs, factor_L, factor_F, name_L, name_F, type):
"""Met en forme l'équation donnée par les coefficients"""
n_coefs = len(coefs)
li = []
if type == 'F':
name = name_F
conv = 1./factor_F
elif type == 'M':
name = "%s.%s" % (name_F, name_L)
conv = 1./factor_F/factor_L
elif type == 'L':
name = name_L
conv = 1./factor_L
for c in reversed(coefs):
c *= conv
conv *= factor_L
li.append(c)
li.reverse()
coefs = li
text = ""
if n_coefs == 2:
a, b = coefs
text += "y(%s)=%s*x+%s\n" % (name, a, b)
elif n_coefs == 4:
a, b, c, d = coefs
if a == 0.:
text += "y(%s)=%s*x^2+%s*x+%s\n" % (name, b, c, d)
else:
text += "y(%s)=%s*x^3+%s*x^2+%s*x+%s\n" % (name, a, b, c, d)
elif len(coefs) == 5:
a, b, c, d, e = coefs
if a == 0.:
text += "y(%s)=%s*x^3+%s*x^2+%s*x+%s\n" % (name, b, c, d, e)
else:
text += "y(%s)=%s*x^4+%s*x^3+%s*x^2 +%s*x+%s\n" % (name, a, b, c, d, e)
elif len(coefs) == 6:
a, b, c, d, e, f = coefs
if a == 0.:
text += "y(%s)=%s*x^4+%s*x^3+%s*x^2 +%s*x+%s\n" % (name, b, c, d, e, f)
else:
text += "y(%s)=%s*x^5+%s*x^4+%s*x^3+%s*x^2+%s*x+%s\n" % (name, a, b, c, d, e, f)
else:
print 'debug in do_buffer_eq',len(tu)
text = text.replace('+-', '-')
return text
    def _do_buffer_resu(self):
        """Build a formatted buffer with the main numerical results
        (degrees of freedom, member-end forces, support reactions) for the
        selected load case, and return it."""
        textbuffer = gtk.TextBuffer()
        end_iter = textbuffer.get_end_iter()
        h1 = textbuffer.create_tag("h1", weight=pango.WEIGHT_BOLD, size_points=12.0, foreground="purple")
        h2 = textbuffer.create_tag("h2", weight=pango.WEIGHT_BOLD, size_points=11.0)
        h3 = textbuffer.create_tag("h3", weight=pango.WEIGHT_BOLD, size_points=10.0)
        tab = self.active_tab
        drawing = tab.active_drawing
        study = self.studies[drawing.id_study]
        rdm = study.rdm
        struct = rdm.struct
        units = struct.units
        RotuleElast = struct.RotuleElast
        case = drawing.s_case
        if case is None:
            # no single case selected: fall back to the first checked one
            try:
                case = drawing.s_cases[0]
            except IndexError:
                case = drawing.get_first_case(rdm)
        if case is None:
            textbuffer.insert_with_tags(end_iter, "Aucune valeur disponible", h1)
            return textbuffer
        Char = rdm.GetCharByNumber(case)
        factor_F = units['F']
        factor_L = units['L']
        unit_F = study.get_unit_name('F')
        unit_L = study.get_unit_name('L')
        text = "Principales valeurs numériques\npour le chargement \"%s\"\n" % Char.name
        textbuffer.insert_with_tags(end_iter, text, h1)
        text = "Valeurs des degrés de liberté\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        w_relax = Char.GetBarreRotation()
        texts = ['u', 'v', 'w']
        if struct.n_ddl == 0:
            text = '\tAucun degré de liberté non nul\n'
            textbuffer.insert(end_iter, text)
        for node in struct.Nodes:
            ddls = Char.ddlValue[node]
            text = '\tNoeud %s\n' % node
            textbuffer.insert_with_tags(end_iter, text, h3)
            for i, ddl in enumerate(ddls):
                if i == 0 or i == 1:
                    # translations: convert to the display length unit
                    name = texts[i]
                    unit = unit_L
                    ddl /= factor_L
                    textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
                elif i == 2:
                    # rotation (rad), with special cases for elastic hinges
                    # and released bar ends
                    name = texts[2]
                    unit = 'rad'
                    if node in RotuleElast:
                        barre = RotuleElast[node][0]
                        textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
                        textbuffer.insert(end_iter, '\t\tw=%s %s (%s)\n' % (ddls[3], unit, barre))
                    elif node in w_relax:
                        for barre, w in w_relax[node].items():
                            textbuffer.insert(end_iter, '\t\tw=%s %s (%s)\n' % (w, unit, barre))
                    else:
                        textbuffer.insert(end_iter, '\t\t%s=%s %s\n' % (name, ddl, unit))
        text = "Sollicitations aux extrémités des barres\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        di = Char.GetSollicitationBarre(rdm.conv)
        texts = ['N', 'V', 'M']
        for barre, nodes in di.items():
            text = '\tBarre %s\n' % barre
            textbuffer.insert_with_tags(end_iter, text, h3)
            for node, forces in nodes.items():
                text = '\t\tNoeud %s\n' % node
                textbuffer.insert(end_iter, text)
                for i, force in enumerate(forces):
                    if force == 0:
                        # skip null components
                        continue
                    force /= factor_F
                    name = texts[i]
                    if i == 2:
                        # bending moment: force unit times length unit
                        force /= factor_L
                        textbuffer.insert(end_iter, '\t\t\t%s=%s %s.%s\n' % (name, force, unit_F, unit_L))
                    else:
                        textbuffer.insert(end_iter, '\t\t\t%s=%s %s\n' % (name, force, unit_F))
        text = "Calcul des réactions d'appui\n"
        textbuffer.insert_with_tags(end_iter, text, h2)
        try:
            di = Char.Reactions
        except AttributeError:
            # reactions not cached: compute them from the combination
            di = Char.GetCombiReac()
        for node, forces in di.items():
            text = '\t\tNoeud %s\n' % node
            textbuffer.insert(end_iter, text)
            for name, force in forces.items():
                force /= factor_F
                if name == 'Mz':
                    force /= factor_L
                    textbuffer.insert(end_iter, '\t\t\t%s=%s %s.%s\n' % (name, force, unit_F, unit_L))
                else:
                    textbuffer.insert(end_iter, '\t\t\t%s=%s %s\n' % (name, force, unit_F))
        return textbuffer
    def _print_message(self, textview):
        """Fill *textview* according to the active tab status.

        status 1: numerical results; status 2: error messages;
        status 3: curve equations.
        """
        status = self.active_tab.status
        textview.set_left_margin(10)
        textview.set_pixels_above_lines(10)
        textbuffer = gtk.TextBuffer()
        if status == 1:
            textbuffer = self._do_buffer_resu()
            textview.set_buffer(textbuffer)
        elif status == 2:
            # _do_buffer_error attaches the buffer itself (child anchors)
            self._do_buffer_error(textview)
        elif status == 3:
            textbuffer = self._do_buffer_eq()
            textview.set_buffer(textbuffer)
# -----------------------------------------------------------
#
# Méthodes en relation avec les charges roulantes
#
# -----------------------------------------------------------
def on_expose_move(self, widget):
    """Switch the active drawing to the rolling-load view (status 9) and redraw."""
    tab = self.active_tab
    drawing = tab.active_drawing
    if drawing is None:
        return
    #drawing.status = 9
    drawing.set_status(9)
    id_study = drawing.id_study
    # NOTE(review): `study` is computed but never used here — confirm intent.
    study = self.studies[id_study]
    self._do_new_drawing()
# -----------------------------------------------------------
#
# Méthodes en relation avec les lignes d'influence
#
# -----------------------------------------------------------
def on_expose_influ(self, widget):
    """Switch the active drawing to the influence-line view (status 8).

    If the current drawing is a per-bar child drawing, climb back to its
    parent first, then rebuild the right-hand menu and redraw.
    """
    #print "on_expose_influ"
    tab = self.active_tab
    drawing = tab.active_drawing
    if drawing is None:
        return
    if drawing.get_is_bar_drawing():
        # Influence lines are shown on the parent structure drawing.
        drawing = tab.active_drawing = drawing.parent
    drawing.set_status(8)
    id_study = drawing.id_study
    # NOTE(review): `study` is computed but never used here — confirm intent.
    study = self.studies[id_study]
    self._fill_right_menu()
    self._do_new_drawing()
    self._update_combi_box()
    self._update_titles()
def _fill_influ_menu(self, tab, box):
    """Create the right-hand menu for influence lines.

    Replaces the second child of *box* (if any) with a fresh
    LigneInfluBox built from the drawing's current selection state.
    """
    #print "_fill_influ_menu"
    childs = box.get_children()
    try:
        # Drop the previous influence menu, keeping the first child.
        child = childs[1]
        box.remove(child)
    except IndexError:
        pass
    drawing = tab.active_drawing
    id_study = drawing.id_study
    study = self.studies[id_study]
    rdm = study.rdm
    struct = rdm.struct
    barres = struct.UserBars
    if len(barres) == 0 and len(struct.SuperBars) == 0:
        # User-facing message kept in French (application language).
        self.message.set_message(("Les lignes d'influence ne fonctionnent que sur des barres rectilignes", 0))
    try:
        obj = drawing.influ_list[drawing.s_influ]
    except (KeyError, AttributeError, TypeError):
        obj = None
    # Lazily initialise the selection attributes on older drawings.
    try:
        drawing.s_influ
    except AttributeError:
        drawing.s_influ = None
    try:
        bars = drawing.s_influ_bars
    except AttributeError:
        bars = []
    tab.influ_menu = classLigneInflu.LigneInfluBox(self, study, obj, bars)
    tab.right_menu.pack_start(tab.influ_menu.get_box())
def area_expose_influ(self, widget, reset=True):
    """Launch the influence-line computation.

    Reads the parameters from the dialog menu, stores them in the
    drawing's ``influ_list`` under a fresh id, and triggers a redraw.
    When *reset* is true, all previously computed curves are discarded.
    """
    #print "area_expose_influ"
    tab = self.active_tab
    drawing = tab.active_drawing
    if drawing.get_is_bar_drawing():
        drawing = tab.active_drawing = drawing.parent
    if reset:
        drawing.influ_list = {}
    params = tab.influ_menu.get_data()
    if params is None:
        self.message.set_message(("Choisir un élément", 1))
        return
    influ_list = drawing.influ_list
    # Find the smallest free integer key (NOTE: `id` shadows the builtin).
    id = 0
    while True:
        if not id in influ_list:
            break
        id += 1
    Obj = classDrawing.InfluParams(id)
    Obj.add(params)
    influ_list[Obj.id] = Obj
    drawing.s_influ = Obj.id
    self._do_new_drawing()
    self._update_combi_box()
    self._update_titles()
    self.message.set_message(None)  # TODO: show a more informative message
def on_del_influs(self, widget):
    """Delete all influence-line curves of the active drawing and redraw."""
    tab = self.active_tab
    drawing = tab.active_drawing
    drawing.influ_list = {}
    try:
        # Drop cached user values for the current view, if present.
        del(drawing.user_values[drawing.status])
    except (KeyError, AttributeError):
        pass
    drawing.s_influ = None
    self._do_new_drawing()
# -----------------------------------------------------------
#
# Méthodes en relation avec l'éditeur de données
#
# -----------------------------------------------------------
def _set_name(self, id_study):
    """Give the study a file name/path if it does not already have one.

    Opens a file-selection dialog when the study has no path yet.
    Returns True when the study ends up with a path, False if the
    user cancelled the dialog.
    """
    study = self.studies[id_study]
    path = study.path
    if not path is None:
        return True
    path = file_tools.recursive_file_select(self.UP.get_default_path())
    if path is None:
        return False
    # Propagate the chosen path/name to both the editor and the study.
    ed_data = self.editor.data_editors[id_study]
    ed_data.path = path
    name = os.path.basename(path)
    ed_data.name = name
    study.name = name
    study.path = path
    return True
def update_from_editor(self, widget=None):
    """Handle a save event coming from the data editor.

    Recomputes the study's RDM instance, synchronises every drawing
    that belongs to the study (removing child drawings whose bars no
    longer exist), then refreshes the widgets according to the new
    computation status.
    """
    #print "Main::update_from_editor"
    tab = self.active_tab
    drawing = tab.active_drawing
    drawings = tab.drawings
    id_study = drawing.id_study
    ed_data = self.editor.data_editors[id_study]
    study = self.studies[id_study]
    book = self.book
    status = drawing.status
    old_path = study.path
    if not self._set_name(id_study):
        # User cancelled the save-as dialog: abort the update.
        return
    if old_path is None:  # update the drawing title with the new name
        drawing.mapping.infos[drawing.id][drawing.title_id].text = study.name
    resize = False
    if ed_data.size_changed:
        resize = True
    if hasattr(study, "influ_rdm"):
        # Influence-line cache is invalid after an edit.
        del(study.influ_rdm)
    self._save_rdm_instance(id_study)
    rdm = study.rdm  # must be read after _save_rdm_instance
    p_drawings = tab.get_parent_drawings()
    Barres = rdm.struct.GetBars()
    reset = False  # whether to delete all child drawings
    del_drawings = []
    if len(Barres) == 0:
        reset = True
    for d in p_drawings:
        if not d.id_study == id_study:
            continue
        d.update_s_data(rdm, Barres)
        if reset:
            for child in d.childs:
                del(tab.drawings[child.id])
            d.childs = []
            continue
        childs = d.childs
        for key in childs:
            child = d.childs[key]
            resu = child.update_s_data(rdm, Barres)
            if resu is False:
                # Bar no longer exists: schedule child drawing removal.
                del_drawings.append(child.id)
            #del(d.childs[key])
    for key in del_drawings:
        d = tab.drawings[key]
        tab.remove_drawing(d)
    status_prec = drawing.status
    self.editor.update_editor_title()
    # NOTE(review): `set_is_changed` is accessed but not called — if it is
    # a method this line is a no-op; confirm whether `()` is missing.
    self.editor.set_is_changed
    rdm_status = rdm.status
    if not rdm_status == 2:
        # Computation not successful: fall back to structure view.
        drawing.status = 0
        if tab.status == 1:
            tab.status = 2
    if tab.status == 0:
        drawings = tab.drawings
        for id, drawing1 in drawings.items():
            if not drawing1.id_study == id_study:
                continue
            if resize:
                drawing1.set_scale(rdm.struct)
            drawing1.del_patterns()
        self._fill_right_menu()
        tab.del_surface()
        tab.configure_event(tab.area)
        tab.area.queue_draw()
    else:  # status == 1 or 2: show a text view instead of the canvas
        viewport = tab.sw.get_child()
        viewport.remove(viewport.get_children()[0])
        textview = gtk.TextView()
        self._print_message(textview)
        textview.show()
        viewport.add(textview)
        self._fill_right_menu()
    self._update_combi_box()
    self._update_titles()
    self._show_message(rdm.errors, False)
    # Update the toolbar buttons for the new computation status.
    self._set_buttons_rdm(rdm_status)
def _save_rdm_instance(self, id_study):
    """Recompute the study's RDM instance after editor modifications.

    Writes the editor content to disk first, then rebuilds the
    structure from the editor's XML representation.
    """
    # file writing
    data_editor = self.editor.data_editors[id_study]
    self.editor.save_study(data_editor)
    study = self.studies[id_study]
    path = data_editor.path
    xml = data_editor.get_xml()
    structure = classRdm.Structure(xml, path)
    study.rdm = classRdm.R_Structure(structure)
def _restore_rdm_instance(self, id_study):
    """Recompute the study's RDM instance from a fresh read of its file,
    discarding unsaved editor modifications."""
    data_editor = self.editor.data_editors[id_study]
    study = self.studies[id_study]
    path = data_editor.path
    if path is None:  # XXX the study is not removed from the drawing
        return
    structure = classRdm.StructureFile(path)
    study.rdm = classRdm.R_Structure(structure)
def _update_editor(self):
    """Synchronise the editor window when the active study changes.

    Destroys the editor when no drawing and no edited study remain,
    disables it when nothing is editable, otherwise opens the page
    for the active study.
    """
    #print "_update_editor"
    if self.editor.w2.window is None:
        # Editor window already destroyed.
        return
    tab = self.active_tab
    drawing = tab.active_drawing
    ed_data = self.editor.data_editors
    #print len(self.studies), len(ed_data)
    if drawing is None:
        if len(ed_data) == 0:
            self.editor.w2.destroy()
            del (self.editor)
            return
        self.editor.w2.set_sensitive(False)
        return
    id_study = drawing.id_study
    study = self.studies[id_study]
    try:
        status = drawing.status
    except AttributeError:
        status = 0
    if status == -1:
        self.editor.w2.set_sensitive(False)
    else:
        self.editor.w2.set_sensitive(True)
        self.editor.new_page_editor(study)
    #assert len(self.studies) == len(ed_data)
def _get_record_id(self, changes):
    """Ask the user which modified studies must be saved.

    Returns the list of study ids to save, or None when the user
    cancelled. Answer codes from ``exit_as_ok_func``:
    -1 = cancel, 1 = save this one, 2 = save this one and all the rest.
    """
    ed_data = self.editor.data_editors
    must_save = []
    action = 0
    for i, id in enumerate(changes):
        study = ed_data[id]
        action = file_tools.exit_as_ok_func(study.name)
        if action == -1:
            return None
        elif action == 1:
            must_save.append(id)
        elif action == 2:
            # "Yes to all": save every remaining study and stop asking.
            must_save.extend(changes[i:])
            return must_save
    return must_save
def _destroy_editor(self, widget, event):
    """Handle actions when the editor window is closed.

    Prompts for unsaved studies; saves or restores each modified study,
    refreshes drawings of the active tab synchronously, then schedules
    background updates of the other tabs.  Returning True keeps the
    window alive (user cancelled).
    """
    tab = self.active_tab
    studies = self.studies
    if not tab.status == 0:
        # Tab currently shows a text view: switch back to the canvas.
        self._clear_sw_content()
        self._add_drawing_widget()
        tab.status = 0
    # Update the studies shown in the active page.
    changes = self.editor.get_modified_studies()
    must_save = self._get_record_id(changes)  # studies that must be saved
    if must_save is None:
        return True  # keep True: cancel window destruction
    else:
        self.editor.w2.destroy()
    for id in changes:
        if id in must_save:
            self._set_name(id)
            self._save_rdm_instance(id)
        else:
            self._restore_rdm_instance(id)
    for id in tab.drawings:  # refresh drawings of the active tab
        drawing = tab.drawings[id]
        if drawing.id_study in changes:
            struct = studies[drawing.id_study].rdm.struct
            drawing.set_scale(struct)
            drawing.del_patterns()
    self._fill_right_menu()
    if not tab.active_drawing is None:
        study = studies[tab.active_drawing.id_study]
        self._set_buttons_rdm(study.rdm.status)
    tab.del_surface()
    tab.configure_event(tab.area)
    tab.area.queue_draw()
    self._update_titles()
    self._update_combi_box()
    #self._show_message(study.rdm.errors, False)
    # Defer the refresh of hidden tabs to idle time.
    gobject.idle_add(self._bg_from_editor_update, changes)
def _bg_from_editor_update(self, changes):
    """Update the drawings of the non-visible tabs in the background
    after editor changes (the active tab was already refreshed)."""
    studies = self.studies
    for tab in self._tabs:
        if tab is self.active_tab:
            continue  # already done
        if not tab.status == 0:
            self._clear_sw_content(tab)
            self._add_drawing_widget(tab)
            tab.status = 0
        for id in tab.drawings:
            drawing = tab.drawings[id]
            if drawing.id_study in changes:
                struct = studies[drawing.id_study].rdm.struct
                drawing.set_scale(struct)
                drawing.del_patterns()
        tab.del_surface()
        tab.configure_event(tab.area)
    del (self.editor)  # remove if using gobject
# -----------------------------------------------------------
#
# Méthodes en relation la fenetre Library
#
# -----------------------------------------------------------
def on_open_lib(self, widget):
    """Open the profile library window from the main window."""
    lib = classProfilManager.ProfilManager()
    lib.window.connect("delete_event", self._close_library, lib)
def _close_library(self, widget, event, lib):
    """Close the profile library window and re-enable the editor's
    library button if the editor is open."""
    #print "Main::_close_library"
    lib.destroy()
    lib.window = None
    del(lib)
    if hasattr(self, 'editor'):
        self.editor._active_selection_button(False)
        if hasattr(self.editor, 'profil_manager'):
            self.editor.profil_manager.button.set_sensitive(True)
            del(self.editor.profil_manager)
# ------------ tools ---------------------
def print_rdm_status(self, rdm):
    """Print the status of the Rdm classes (debugging helper)."""
    print "Structure::status=", rdm.struct.status
    print "R_Structure::status=", rdm.status
    if rdm.status == -1: return
    for i in rdm.Chars:
        Char = rdm.Chars[i]
        print "Case Name=%s Status lecture=%s Status Inv=%s" % (Char.name, Char.status, Char.r_status)
class MyThread(threading.Thread):
    """Background thread that checks whether a newer pyBar version is
    available online, storing the result on the main window object."""
    def __init__(self, main):
        super(MyThread, self).__init__()
        # Main window; receives the check result in `new_version`.
        self.main = main

    def run(self):
        self.main.new_version = self._get_next_version()

    def _get_next_version(self):
        """Check the latest version online.

        Returns the newer version number as a float, or False when the
        check fails or no newer version exists.
        """
        import urllib2
        try:
            sock = urllib2.urlopen(Const.VERSION_URL)
        except (IOError, EOFError):
            # Offline or server unreachable: silently report "no update".
            return False
        version = sock.read()
        sock.close()
        try:
            next = float(version.strip())
        except ValueError:
            return False
        if next > float(Const.VERSION):
            return next
        return False
# Application entry point: start the GUI and the background version check.
if __name__ == "__main__":
    try:
        MyApp = MainWindow()
        t = MyThread(MyApp)
        t.start()
        gtk.main()
    except KeyboardInterrupt:
        sys.exit(0)
|
wood-galaxy/pyBarEC
|
pyBar.py
|
Python
|
gpl-2.0
| 93,060
|
[
"FLEUR"
] |
e0ebe0fc3fcf2f2a60f1f97c6c1b092d399398f567e349be35fed12fc7acceff
|
"""
.. module:: components
:platform: Unix
:synopsis: module containing definitions of component objects in pysm.
.. moduleauthor: Ben Thorne <ben.thorne@physics.ox.ac.uk>
"""
from __future__ import absolute_import, print_function
import numpy as np
import healpy as hp
import os, sys, time
import scipy.constants as constants
from scipy.interpolate import interp1d, RectBivariateSpline
from scipy.misc import factorial, comb
from .common import read_key, convert_units, FloatOrArray, invert_safe, B, read_map
from .nominal import template
def _synchrotron_attribute(private_name, label):
    """Build a read-only property for a required Synchrotron attribute.

    The private (name-mangled) attributes are set by ``read_key`` in
    ``Synchrotron.__init__``.  Accessing an attribute that was never
    configured prints an explanatory message naming *label* and exits,
    matching the behaviour of the original hand-written properties.
    """
    def getter(self):
        try:
            return getattr(self, private_name)
        except AttributeError:
            print("Synchrotron attribute '%s' not set." % label)
            sys.exit(1)
    return property(getter)


class Synchrotron(object):
    """Class defining attributes and scaling laws of the synchrotron
    component, instantiated with a configuration dictionary containing
    the required parameters of the synchrotron models. The key
    item pairs are then assigned as attributes.

    The current possible attributes are:

    - `Model` : SED used, power law or curved power law.
    - `A_I` : intensity template used -- numpy.ndarray or float.
    - `A_Q` : Q template used -- numpy.ndarray or float.
    - `A_U` : U template used -- numpy.ndarray or float.
    - `Nu_0_I` : reference frequency of I template -- float.
    - `Nu_0_P` : reference frequency of Q and U template -- float.
    - `Spectral_Index` : spectral index used in power law and curved power law -- numpy.ndarray or float.
    - `Spectral_Curvature` -- numpy.ndarray or float.
    - `Nu_Curve` -- pivot frequency of curvature.
    """
    def __init__(self, config):
        # Each configuration key becomes a private attribute, exposed
        # read-only through the properties below.
        for k in config.keys():
            read_key(self, k, config)
        return

    # Required configuration attributes.  The private names are the
    # class-mangled forms the original `self.__x` accessors used.
    # (Bug fix: the original A_U accessor printed the literal string
    # "%s attribute 'A_U' not set." without filling the placeholder.)
    Model = _synchrotron_attribute('_Synchrotron__model', 'Model')
    A_I = _synchrotron_attribute('_Synchrotron__A_I', 'A_I')
    A_Q = _synchrotron_attribute('_Synchrotron__A_Q', 'A_Q')
    A_U = _synchrotron_attribute('_Synchrotron__A_U', 'A_U')
    Nu_0_I = _synchrotron_attribute('_Synchrotron__nu_0_I', 'Nu_0_I')
    Nu_0_P = _synchrotron_attribute('_Synchrotron__nu_0_P', 'Nu_0_P')
    Spectral_Index = _synchrotron_attribute('_Synchrotron__spectral_index', 'Spectral_Index')
    Spectral_Curvature = _synchrotron_attribute('_Synchrotron__spectral_curvature', 'Spectral_Curvature')
    Nu_Curve = _synchrotron_attribute('_Synchrotron__nu_curve', 'Nu_Curve')

    def signal(self):
        """Function to return the selected SED.

        :return: function -- selected model SED.
        """
        return getattr(self, self.Model)()

    def power_law(self):
        """Returns synchrotron (T, Q, U) maps as a function of observation
        frequency, nu.

        This is the simplest model, using only a power law spectral
        dependence. The map of the spectral index may be a constant
        or spatially varying.

        :return: power law model -- function
        """
        @FloatOrArray
        def model(nu, **kwargs):
            """Power law scaling model.

            :param nu: frequency at which to calculate the map.
            :type nu: float.
            :return: power law scaled maps, shape (3, Npix) -- numpy.ndarray shape
            """
            # `power_law` here is the module-level scaling helper,
            # not this method (resolved in the global namespace).
            scaling_I = power_law(nu, self.Nu_0_I, self.Spectral_Index)
            scaling_P = power_law(nu, self.Nu_0_P, self.Spectral_Index)
            return np.array([self.A_I * scaling_I, self.A_Q * scaling_P, self.A_U * scaling_P])
        return model

    def curved_power_law(self):
        """Returns synchrotron (T, Q, U) maps as a function of observation
        frequency, nu.

        This model allows for curvature of the power law SED. The
        spectral curvature may be a constant, or a map.

        :return: power law model -- function
        """
        @FloatOrArray
        def model(nu, **kwargs):
            """Curved power law scaling model.

            :param nu: frequency at which to calculate the map.
            :type nu: float.
            :return: power law scaled maps, shape (3, Npix) -- numpy.ndarray shape
            """
            # The curvature enters as an additive correction to the
            # spectral index, pivoting at Nu_Curve.
            curvature_term = np.log(power_law(nu, self.Nu_Curve, self.Spectral_Curvature))
            scaling_I = power_law(nu, self.Nu_0_I, self.Spectral_Index + curvature_term)
            scaling_P = power_law(nu, self.Nu_0_P, self.Spectral_Index + curvature_term)
            return np.array([self.A_I * scaling_I, self.A_Q * scaling_P, self.A_U * scaling_P])
        return model
def _dust_attribute(private_name, label, exit_on_missing=True):
    """Build a read-only property for a required Dust attribute.

    The private (name-mangled) attributes are set by ``read_key`` in
    ``Dust.__init__``.  Accessing an attribute that was never configured
    prints a message naming *label*; when *exit_on_missing* is true the
    process then terminates, matching the original hand-written
    properties (``pixel_indices`` only warned and returned None).
    """
    def getter(self):
        try:
            return getattr(self, private_name)
        except AttributeError:
            print("Dust attribute '%s' not set." % label)
            if exit_on_missing:
                sys.exit(1)
    return property(getter)


class Dust(object):
    """Class defining attributes and scaling laws of the dust
    component, instantiated with a configuration dictionary containing
    the required parameters of the dust models. The key
    item pairs are then assigned as attributes.

    The current possible attributes are:

    - `Model` : SED used, modified black body, Hensley and Draine 2017.
    - `A_I` : intensity template used -- numpy.ndarray, float.
    - `A_Q` : Q template used -- numpy.ndarray, float.
    - `A_U` : U template used -- numpy.ndarray, float.
    - `Nu_0_I` : reference frequency of I template -- float.
    - `Nu_0_P` : reference frequency of Q and U template -- float.
    - `Spectral_Index` : spectral index used in power law and curved power law -- numpy.ndarray, float.
    - `Temp` : temperature template used in the modified black body scaling -- numpy.ndarray, float.
    - `Draw_Uval` : boolean, whether or not to draw a random realisation of Uval using Planck temperature and dust data.
    - `Draw_Uval_Seed` : seed for random realisations of the dust temperature and spectral index used to compute Uval if Draw_Uval = True.
    - `Uval` : logarithm of the radiation field strength. Required by Hensley Draine 2017 if draw_Uval=False.
    - `F_fe` : mass fraction of silicon grains with iron inclusions relative to total silicon grains.
    - `Fcar` : mass fraction of carbonaceous grains relative to silicate grains. Required by Hensley and Draine model.
    - `Add_Decorrelation` : add stochastic frequency decorrelation to the SED -- bool.
    - `Corr_Len` : correlation length to use in decorrelation model -- float.
    """
    def __init__(self, config, mpi_comm=None):
        # Each configuration key becomes a private attribute, exposed
        # read-only through the properties below.
        for k in config.keys():
            read_key(self, k, config)
        self.mpi_comm = mpi_comm

    # Required configuration attributes; the private names are the
    # class-mangled forms the original `self.__x` accessors used.
    Model = _dust_attribute('_Dust__model', 'Model')
    A_I = _dust_attribute('_Dust__A_I', 'A_I')
    A_Q = _dust_attribute('_Dust__A_Q', 'A_Q')
    A_U = _dust_attribute('_Dust__A_U', 'A_U')
    Nu_0_I = _dust_attribute('_Dust__nu_0_I', 'Nu_0_I')
    Nu_0_P = _dust_attribute('_Dust__nu_0_P', 'Nu_0_P')
    Spectral_Index = _dust_attribute('_Dust__spectral_index', 'Spectral_Index')
    Temp = _dust_attribute('_Dust__temp', 'Temp')
    Fcar = _dust_attribute('_Dust__fcar', 'Fcar')
    F_fe = _dust_attribute('_Dust__f_fe', 'F_fe')
    Corr_Len = _dust_attribute('_Dust__corr_len', 'Corr_Len')
    Draw_Uval = _dust_attribute('_Dust__draw_uval', 'Draw_Uval')
    Draw_Uval_Seed = _dust_attribute('_Dust__draw_uval_seed', 'Draw_Uval_Seed')
    Add_Decorrelation = _dust_attribute('_Dust__add_decorrelation', 'Add_Decorrelation')
    # pixel_indices historically warned without exiting.
    pixel_indices = _dust_attribute('_Dust__pixel_indices', 'pixel_indices', exit_on_missing=False)
    nside = _dust_attribute('_Dust__nside', 'nside')

    @property
    def Uval(self):
        """Logarithm of the radiation field strength.

        Unlike the other attributes Uval is settable, because
        `hensley_draine_2017` may assign a drawn realisation.
        """
        try:
            return self.__uval
        except AttributeError:
            print("Dust attribute 'Uval' not set.")
            sys.exit(1)

    @Uval.setter
    def Uval(self, value):
        self.__uval = value

    def signal(self, **kwargs):
        """Function to return the selected SED.

        :return: function -- selected scaling model.
        """
        return getattr(self, self.Model)(**kwargs)

    def modified_black_body(self, mpi_comm=None):
        """Returns dust (T, Q, U) maps as a function of frequency, nu.

        This is the simplest model, assuming a modified black body SED
        which is the same in temperature and polarisation.
        Note that the spectral index map is expected to be the index
        beta_d such that:

        I_nu = (nu/nu_0)^beta_d B(nu, T)/B(nu_0, T),

        in flux units. Therefore beta_d ~ 1.54.

        :return: function -- model (T, Q, U) maps.
        """
        @Add_Decorrelation(self)
        @FloatOrArray
        def model(nu, **kwargs):
            """Black body model.

            :param nu: frequency at which to evaluate model.
            :type nu: float.
            :return: modified black body scaling of maps, shape (3, Npix).
            """
            # NOTE(review): the "- 2" presumably converts the flux-unit
            # index to brightness-temperature units -- confirm.
            scaling_I = power_law(nu, self.Nu_0_I, self.Spectral_Index - 2) * black_body(nu, self.Nu_0_I, self.Temp)
            scaling_P = power_law(nu, self.Nu_0_P, self.Spectral_Index - 2) * black_body(nu, self.Nu_0_P, self.Temp)
            # Sanity check: the scaling must match the pixelisation.
            expected_length = hp.nside2npix(self.nside) if self.pixel_indices is None else len(self.pixel_indices)
            assert len(scaling_I) == expected_length, "{} scaling different from expected {}".format(len(scaling_I), expected_length)
            return np.array([self.A_I * scaling_I, self.A_Q * scaling_P, self.A_U * scaling_P])
        return model

    @staticmethod
    def draw_uval(seed, nside, mpi_comm=None):
        """Draw a random realisation of log10(U) from Planck commander data.

        Uses the Planck MBB temperature data to draw realisations of the
        temperature and spectral index from a normal distribution with
        mean equal to the maximum likelihood commander value and standard
        deviation equal to the commander std.
        """
        T_mean = read_map(template("COM_CompMap_dust-commander_0256_R2.00.fits"), 256, field = 3, mpi_comm=mpi_comm, verbose = False)
        T_std = read_map(template("COM_CompMap_dust-commander_0256_R2.00.fits"), 256, field = 5, mpi_comm=mpi_comm, verbose = False)
        beta_mean = read_map(template("COM_CompMap_dust-commander_0256_R2.00.fits"), 256, field = 6, mpi_comm=mpi_comm, verbose = False)
        beta_std = read_map(template("COM_CompMap_dust-commander_0256_R2.00.fits"), 256, field = 8, mpi_comm=mpi_comm, verbose = False)
        # Draw the realisations.
        np.random.seed(seed)
        T = T_mean + np.random.randn(len(T_mean)) * T_std
        beta = beta_mean + np.random.randn(len(beta_mean)) * beta_std
        # Use a modified Stefan-Boltzmann law to relate radiation field
        # strength to temperature and spectral index.  The interpolated
        # data is only valid for -3 < uval < 5, so clip the generated
        # values (they are nowhere near these limits, but it is good to
        # note this for the future), then ud_grade to the target nside.
        uval_map = hp.ud_grade(np.clip((4. + beta) * np.log10(T / np.mean(T)), -3., 5.), nside_out = nside)
        return uval_map

    @staticmethod
    def read_hd_data(mpi_comm=None):
        """Read the precomputed HD17 grain emission tables.

        The tables give dust emission in the infrared as a function of
        the radiation field strength U for a given grain composition and
        grain size distribution.  Under MPI only rank 0 reads the files;
        the data is then broadcast to the other ranks.
        """
        if (mpi_comm is not None and mpi_comm.rank==0) or (mpi_comm is None):
            data = dict()
            # Silicon grains with no iron inclusions.
            data["sil"] = np.genfromtxt(template("sil_fe00_2.0.dat"))
            # Silicon grains with 5% iron inclusions.
            data["silfe"] = np.genfromtxt(template("sil_fe05_2.0.dat"))
            # Carbonaceous grains.
            data["car"] = np.genfromtxt(template("car_1.0.dat"))
        elif mpi_comm is not None and mpi_comm.rank>0:
            data = None
        if mpi_comm is not None:
            data = mpi_comm.bcast(data, root=0)
        # Wavelength grid and the set of field strengths over which
        # these values were tabulated.
        wav = data["sil"][:, 0]
        uvec = np.arange(-3., 5.01, 0.1)
        return data["sil"], data["silfe"], data["car"], wav, uvec

    def hensley_draine_2017(self, *args, **kwargs):
        """Returns dust (T, Q, U) maps as a function of observing frequency in GHz, nu. Uses the Hensley and Draine 2017 model.

        This is based on a microphysical model of dust grains, taking into account the strength of the local radiation field, U,
        the grain compositions (carbonaceous, and silicate with varying degrees of iron abundance) and solving for the
        full temperature distribution with grain size.

        *Model Parameters*

        - log U (uval): Radiation field intensity parameter, sets grain temperatures. Must be between -3 and 5. U is the radiation field energy density relative to the MMP83 radiation field. So uval = -0.5 corresponds to a radiation field 10^-0.5 times as intense as the standard interstellar radiation field.
        - fcar: Mass fraction of carbonaceous grains relative to silicate grains.
        - f_fe: Fraction of silicate grains with iron inclusions relative to silicate grains.

        Model is calibrated such that fcar = 1 and f_fe = 0 reproduce the Planck
        FIR dust SED. fcar = f_fe >> 1 will also do so but with different
        frequency-dependence of the polarized dust emission. In general,
        fcar =~ 1 + fsilfe is expected, meaning that: 1-f_fe + f_fe = f_car.
        So in the current implementation f_car should stay ~1.

        :return: function -- model (T, Q, U) maps.
        """
        data_sil, data_silfe, data_car, wav, uvec = self.read_hd_data(mpi_comm=self.mpi_comm)
        # Interpolate the pre-computed emissivity solutions for each
        # grain composition to functions of (U, wavelength).
        sil_i = RectBivariateSpline(uvec, wav, (data_sil[:, 3 : 84] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        car_i = RectBivariateSpline(uvec, wav, (data_car[:, 3 : 84] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        silfe_i = RectBivariateSpline(uvec, wav, (data_silfe[:, 3 : 84] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        sil_p = RectBivariateSpline(uvec, wav, (data_sil[:, 84 : 165] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        car_p = RectBivariateSpline(uvec, wav, (data_car[:, 84 : 165] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        silfe_p = RectBivariateSpline(uvec, wav, (data_silfe[:, 84 : 165] * (wav[:, np.newaxis] * 1.e-6 / constants.c) * 1.e23).T) # to Jy/sr/H
        # Draw a random realisation of uval if requested.  (The original
        # code had an unreachable `else` branch warning about an unset
        # Draw_Uval: the Draw_Uval property itself already exits when the
        # attribute is missing, so a plain `if` is equivalent.)
        if self.Draw_Uval:
            self.Uval = self.draw_uval(self.Draw_Uval_Seed, self.nside, mpi_comm=self.mpi_comm)

        @FloatOrArray
        def model(nu, **kwargs):
            """Model of Hensley and Draine 2017.

            :param nu: frequency in GHz at which to evaluate the model.
            :type nu: float.
            :return: maps produced using Hensley and Draine 2017 SED.
            """
            if ('use_bandpass' in kwargs) and (kwargs['use_bandpass']):
                return np.zeros((3, len(self.A_I)))
            # Interpolation is done in wavelength and PySM uses nu in GHz,
            # so convert from frequency in GHz to wavelength in microns for
            # both the evaluation frequencies and reference frequencies.
            nu_to_lambda = lambda x: 1.e-3 * constants.c / x
            # The HD model interpolates in units of Jy/sr; convert to
            # uK_RJ to match the other component scalings.
            eval_HD17_I = lambda nu, nu_0: convert_units("Jysr", "uK_RJ", nu) / convert_units("Jysr", "uK_RJ", nu_0) *(
                (1. - self.F_fe) * sil_i.ev(self.Uval, nu_to_lambda(nu))
                + self.Fcar * car_i.ev(self.Uval, nu_to_lambda(nu))
                + self.F_fe * silfe_i.ev(self.Uval, nu_to_lambda(nu)) ) / (
                    (1. - self.F_fe) * sil_i.ev(self.Uval, nu_to_lambda(nu_0))
                    + self.Fcar * car_i.ev(self.Uval, nu_to_lambda(nu_0))
                    + self.F_fe * silfe_i.ev(self.Uval, nu_to_lambda(nu_0))
                )
            eval_HD17_P = lambda nu, nu_0: convert_units("Jysr", "uK_RJ", nu) / convert_units("Jysr", "uK_RJ", nu_0) *(
                (1. - self.F_fe) * sil_p.ev(self.Uval, nu_to_lambda(nu))
                + self.Fcar * car_p.ev(self.Uval, nu_to_lambda(nu))
                + self.F_fe * silfe_p.ev(self.Uval, nu_to_lambda(nu)) ) / (
                    (1. - self.F_fe) * sil_p.ev(self.Uval, nu_to_lambda(nu_0))
                    + self.Fcar * car_p.ev(self.Uval, nu_to_lambda(nu_0))
                    + self.F_fe * silfe_p.ev(self.Uval, nu_to_lambda(nu_0))
                )
            # The interpolation above is only valid for nu > 10 GHz
            # (nu_break is the lowest frequency in the HD17 tables), so
            # below it we fall back to a Rayleigh-Jeans extrapolation with
            # a constant spectral index of 1.54; the dust signal there is
            # negligible in any case.
            nu_break = 10.
            if (nu <= nu_break):
                RJ_factor = (nu / nu_break) ** 1.54
                scaling_I = RJ_factor * eval_HD17_I(nu_break, self.Nu_0_I)
                scaling_P = RJ_factor * eval_HD17_P(nu_break, self.Nu_0_P)
            else:
                # Scale from the reference frequencies to nu.
                scaling_I = eval_HD17_I(nu, self.Nu_0_I)
                scaling_P = eval_HD17_P(nu, self.Nu_0_P)
            try:
                scaling_I = hp.ud_grade(scaling_I, nside_out = self.nside)
                scaling_P = hp.ud_grade(scaling_P, nside_out = self.nside)
                if not self.pixel_indices is None:
                    scaling_I = scaling_I[self.pixel_indices]
                    scaling_P = scaling_P[self.pixel_indices]
            except IndexError:
                pass
            return np.array([scaling_I * self.A_I, scaling_P * self.A_Q, scaling_P * self.A_U])
        return model
class AME(object):
    """Class defining attributes and scaling laws of the AME (anomalous
    microwave emission) component, instantiated with a configuration
    dictionary containing the required parameters of the AME models.
    The key item pairs are then assigned as attributes.

    The current possible attributes are:

    - `Model` : SED used, power law or curved power law.
    - `A_I` : intensity template used -- numpy.ndarray or float.
    - `Nu_0_I` : reference frequency of I template -- float.
    - `Nu_0_P` : reference frequency of Q and U template -- float.
    - `Emissivity` : numerically computed emissivity used to scale AME. In the nominal models this is produced using the SpDust2 code (Ali-Haimoud 2008) -- numpy.ndarray
    - `Nu_Peak_0` : parameter required by SpDust2 -- float
    - `Nu_Peak` : parameter required by SpDust2 -- float, numpy.ndarray
    - `Pol_Frac` : polarisation fraction used in polarised AME model.
    - `Angle_Q` : Q template from which to calculate polarisation angle for AME.
    - `Angle_U` : U template from which to calculate polarisation angle for AME.
    """
    def __init__(self, config):
        # Each configuration key becomes a private attribute, exposed
        # read-only through the properties below.
        for k in config.keys():
            read_key(self, k, config)
        return
@property
def A_I(self):
try:
return self.__A_I
except AttributeError:
print("AME attribute 'A_I' not set.")
sys.ext(1)
    # Required configuration attributes: each property returns the value
    # set by read_key in __init__, or prints an error and exits if the
    # configuration never provided it.
    @property
    def Emissivity(self):
        """Numerically computed emissivity table (SpDust2)."""
        try:
            return self.__emissivity
        except AttributeError:
            print("AME attribute 'Emissivity' not set.")
            sys.exit(1)

    @property
    def Model(self):
        """Name of the SED method to use (e.g. 'spdust', 'spdust_pol')."""
        try:
            return self.__model
        except AttributeError:
            print("AME attribute 'Model' not set.")
            sys.exit(1)

    @property
    def Nu_0_I(self):
        """Reference frequency of the intensity template."""
        try:
            return self.__nu_0_I
        except AttributeError:
            print("AME attribute 'Nu_0_I' not set.")
            sys.exit(1)

    @property
    def Nu_Peak(self):
        """Peak frequency parameter required by SpDust2."""
        try:
            return self.__nu_peak
        except AttributeError:
            print("AME attribute 'Nu_Peak' not set.")
            sys.exit(1)

    @property
    def Nu_Peak_0(self):
        """Reference peak frequency parameter required by SpDust2."""
        try:
            return self.__nu_peak_0
        except AttributeError:
            print("AME attribute 'Nu_Peak_0' not set.")
            sys.exit(1)

    @property
    def Angle_Q(self):
        """Q template from which the polarisation angle is computed."""
        try:
            return self.__angle_q
        except AttributeError:
            print("AME attribute 'Angle_Q' not set.")
            sys.exit(1)

    @property
    def Angle_U(self):
        """U template from which the polarisation angle is computed."""
        try:
            return self.__angle_u
        except AttributeError:
            print("AME attribute 'Angle_U' not set.")
            sys.exit(1)

    @property
    def Pol_Frac(self):
        """Polarisation fraction used in the polarised AME model."""
        try:
            return self.__pol_frac
        except AttributeError:
            print("AME attribute 'Pol_Frac' not set.")
            sys.exit(1)
def signal(self):
"""Function to return the selected SED.
:return: function -- selected model SED.
"""
return getattr(self, self.Model)()
def spdust_scaling(self, nu):
"""Returns AME SED at frequency in GHz, nu.
Implementation of the SpDust2 code of (Ali-Haimoud et al 2012), evaluated for a
Cold Neutral Medium.
:param nu: frequency at which to calculate SED.
:type nu: float.
:return: spdust SED - float.
"""
J = interp1d(self.Emissivity[0], self.Emissivity[1], bounds_error = False, fill_value = 0)
arg1 = nu * self.Nu_Peak_0 / self.Nu_Peak
arg2 = self.Nu_0_I * self.Nu_Peak_0 / self.Nu_Peak
scaling = ((self.Nu_0_I / nu) ** 2) * (J(arg1) / J(arg2))
return scaling
def spdust(self):
    """Return a function giving unpolarised AME (T, Q, U) maps at nu.

    :return: function -- AME spdust2 scaling as a function of frequency.
    """
    @FloatOrArray
    def model(nu, **kwargs):
        """Spdust2 unpolarised model: scaled intensity, Q = U = 0.

        :param nu: frequency in GHz at which to calculate the AME maps.
        :type nu: float.
        :return: AME maps at frequency nu, shape (3, Npix) -- numpy.ndarray.
        """
        intensity = self.spdust_scaling(nu) * self.A_I
        blank = np.zeros_like(self.A_I)
        return np.array([intensity, blank, blank])
    return model
def spdust_pol(self):
    """Return polarised AME (T, Q, U) maps as a function of frequency.

    Polarised version of :meth:`pysm.components.spdust` in which the Q
    and U templates are derived from the polarisation angle of the input
    Angle_Q / Angle_U templates together with the given Pol_Frac.
    The frequency scaling is the same as spdust(self).

    :return: function -- polarised spdust2 model as a function of frequency.
    """
    @FloatOrArray
    def model(nu, **kwargs):
        """Input dust Q and U templates are used so the polarisation
        angle stays consistent after up/down-grading resolution
        (downgrading angle templates differs from downgrading Q and U
        maps and then computing the angle).

        :param nu: frequency in GHz at which to evaluate the model.
        :type nu: float.
        :return: numpy.ndarray -- polarised AME maps, shape (3, Npix).
        """
        angle = np.arctan2(self.Angle_U, self.Angle_Q)
        q_template = self.A_I * self.Pol_Frac * np.cos(angle)
        u_template = self.A_I * self.Pol_Frac * np.sin(angle)
        return self.spdust_scaling(nu) * np.array([self.A_I, q_template, u_template])
    return model
class Freefree(object):
    """Free-free component of the sky model.

    Instantiated with a configuration dictionary whose key/item pairs
    are assigned as attributes (via read_key). Possible attributes:

    - `Model` : SED used; for free-free only power law is available.
    - `A_I` : intensity template used -- numpy.ndarray or float.
    - `Nu_0_I` : reference frequency of I template -- float.
    - `Spectral_Index` : spectral index used in power law and curved power law -- numpy.ndarray or float.
    """

    def __init__(self, config):
        for key in config.keys():
            read_key(self, key, config)

    def _required(self, attr_name, public_name):
        """Return the stored (name-mangled) attribute or exit with an error."""
        try:
            return getattr(self, attr_name)
        except AttributeError:
            print("Freefree attribute '%s' not set." % public_name)
            sys.exit(1)

    @property
    def Model(self):
        return self._required('_Freefree__model', 'Model')

    @property
    def A_I(self):
        return self._required('_Freefree__A_I', 'A_I')

    @property
    def Nu_0_I(self):
        return self._required('_Freefree__nu_0_I', 'Nu_0_I')

    @property
    def Spectral_Index(self):
        return self._required('_Freefree__spectral_index', 'Spectral_Index')

    def signal(self):
        """Return the SED function selected by ``Model``.

        :return: function -- selected scaling model.
        """
        return getattr(self, self.Model)()

    def power_law(self):
        """Power-law scaled free-free (T, Q, U) maps.

        The simplest model, using only a power law spectral dependence;
        the spectral index map may be constant or spatially varying.

        :return: function -- power law model.
        """
        @FloatOrArray
        def model(nu, **kwargs):
            """Scale the intensity template to frequency nu (GHz);
            free-free is unpolarised, so Q = U = 0.

            :param nu: frequency at which to calculate the map.
            :type nu: float.
            :return: numpy.ndarray -- power law scaled maps, shape (3, Npix).
            """
            scale = power_law(nu, self.Nu_0_I, self.Spectral_Index)
            blank = np.zeros_like(self.A_I)
            return np.array([self.A_I * scale, blank, blank])
        return model
class CMB(object):
    """Class defining attributes and scaling laws of the CMB
    component, instantiated with a configuration dictionary containing
    the required parameters of the CMB models. The key
    item pairs are then assigned as attributes.
    The current possible attributes are:

    - `Model` : SED law, e.g. taylens.
    - `A_I` : intensity template used -- numpy.ndarray or float.
    - `A_Q` : Q template used -- numpy.ndarray or float.
    - `A_U` : U template used -- numpy.ndarray or float.
    - `cmb_specs` : input unlensed cls in CAMB format -- numpy.ndarray
    - `delensing_ells` : delensing fraction as a function of ell -- numpy.ndarray
    - `nside` : nside at which to generate CMB.
    - `cmb_seed` : random seed for CMB generation.
    - `cmb_specs_lensed` : input lensed cls in CAMB format -- numpy.ndarray
    """
    def __init__(self, config):
        # Each configuration key/value pair becomes an attribute
        # (read_key stores the name-mangled values the properties read).
        for k in config.keys():
            read_key(self, k, config)
        return

    # Required-attribute accessors: print a message and terminate when
    # the configuration never set the attribute.
    @property
    def Model(self):
        try:
            return self.__model
        except AttributeError:
            print("CMB attribute 'Model' not set.")
            sys.exit(1)

    @property
    def CMB_Specs(self):
        try:
            return self.__cmb_specs
        except AttributeError:
            print("CMB attribute 'CMB_Specs' not set.")
            sys.exit(1)

    @property
    def CMB_Specs_Lensed(self):
        try:
            return self.__cmb_specs_lensed
        except AttributeError:
            print("CMB attribute 'CMB_Specs_Lensed' not set.")
            sys.exit(1)

    @property
    def Delens(self):
        try:
            return self.__delens
        except AttributeError:
            print("CMB attribute 'Delens' not set.")
            sys.exit(1)

    @property
    def Delensing_Ells(self):
        try:
            return self.__delensing_ells
        except AttributeError:
            print("CMB attribute 'Delensing_Ells' not set.")
            sys.exit(1)

    @property
    def Nside(self):
        try:
            return self.__nside
        except AttributeError:
            print("CMB attribute 'Nside' not set.")
            sys.exit(1)

    @property
    def CMB_Seed(self):
        try:
            return self.__cmb_seed
        except AttributeError:
            print("CMB attribute 'CMB_Seed' not set.")
            sys.exit(1)

    # NOTE(review): unlike the accessors above, the four accessors below
    # fall through to an implicit `return None` after printing.
    # `pixel_indices` is relied upon to possibly be None (see the model
    # closures below); confirm whether the same soft failure is intended
    # for A_I / A_Q / A_U (used only by pre_computed()).
    @property
    def A_I(self):
        try:
            return self.__A_I
        except AttributeError:
            print("CMB attribute 'A_I' not set.")

    @property
    def A_Q(self):
        try:
            return self.__A_Q
        except AttributeError:
            print("CMB attribute 'A_Q' not set.")

    @property
    def A_U(self):
        try:
            return self.__A_U
        except AttributeError:
            print("CMB attribute 'A_U' not set.")

    @property
    def pixel_indices(self):
        try:
            return self.__pixel_indices
        except AttributeError:
            print("CMB attribute 'pixel_indices' not set.")

    def signal(self):
        """Function to return the selected SED.

        :return: function -- selected model SED.
        """
        return getattr(self, self.Model)()

    def taylens(self):
        """Returns CMB (T, Q, U) maps as a function of observing frequency, nu.
        This code is extracted from the taylens code (reference).

        :return: function -- CMB maps.
        """
        synlmax = 8 * self.Nside  # this used to be user-defined.
        data = self.CMB_Specs
        lmax_cl = len(data[0]) + 1
        l = np.arange(int(lmax_cl + 1))
        synlmax = min(synlmax, l[-1])
        # Reading input spectra in CAMB format. CAMB outputs l(l+1)/2pi hence the corrections.
        cl_tebp_arr = np.zeros([10, lmax_cl + 1])
        cl_tebp_arr[0, 2:] = 2 * np.pi * data[1] / (l[2:] * (l[2:] + 1))  # TT
        cl_tebp_arr[1, 2:] = 2 * np.pi * data[2] / (l[2:] * (l[2:] + 1))  # EE
        cl_tebp_arr[2, 2:] = 2 * np.pi * data[3] / (l[2:] * (l[2:] + 1))  # BB
        cl_tebp_arr[4, 2:] = 2 * np.pi * data[4] / (l[2:] * (l[2:] + 1))  # TE
        cl_tebp_arr[5, :] = np.zeros(lmax_cl + 1)  # EB
        cl_tebp_arr[7, :] = np.zeros(lmax_cl + 1)  # TB
        # When delensing, the lensing potential (and its cross-spectra)
        # are suppressed by the delensing fraction per ell.
        if self.Delens:
            cl_tebp_arr[3, 2:] = 2 * np.pi * data[5] * self.Delensing_Ells[1] / (l[2:] * (l[2:] + 1)) ** 2  # PP
            cl_tebp_arr[6, :] = np.zeros(lmax_cl + 1)  # BP
            cl_tebp_arr[8, 2:] = 2 * np.pi * data[7] * np.sqrt(self.Delensing_Ells[1]) / (l[2:] * (l[2:] + 1)) ** 1.5  # EP
            cl_tebp_arr[9, 2:] = 2 * np.pi * data[6] * np.sqrt(self.Delensing_Ells[1]) / (l[2:] * (l[2:] + 1)) ** 1.5  # TP
        else:
            cl_tebp_arr[3, 2:] = 2 * np.pi * data[5] / (l[2:] * (l[2:] + 1)) ** 2  # PP
            cl_tebp_arr[6, :] = np.zeros(lmax_cl + 1)  # BP
            cl_tebp_arr[8, 2:] = 2 * np.pi * data[7] / (l[2:] * (l[2:] + 1)) ** 1.5  # EP
            cl_tebp_arr[9, 2:] = 2 * np.pi * data[6] / (l[2:] * (l[2:] + 1)) ** 1.5  # TP
        # Coordinates of healpix pixel centers
        ipos = np.array(hp.pix2ang(self.Nside, np.arange(12 * (self.Nside ** 2))))
        # Simulate a CMB and lensing field
        cmb, aphi = simulate_tebp_correlated(cl_tebp_arr, self.Nside, synlmax, self.CMB_Seed)
        if cmb.ndim == 1:
            cmb = np.reshape(cmb, [1, cmb.size])
        # Compute the offset positions
        phi, phi_dtheta, phi_dphi = hp.alm2map_der1(aphi, self.Nside, lmax = synlmax)
        del aphi
        opos, rot = offset_pos(ipos, phi_dtheta, phi_dphi, pol=True, geodesic=False)  # geodesic used to be user defined.
        del phi, phi_dtheta, phi_dphi
        # Interpolate maps one at a time
        maps = []
        for comp in cmb:
            # taylor_interpol_iter yields each cumulative order; only the
            # final (highest-order) map is kept.
            for m in taylor_interpol_iter(comp, opos, 3, verbose=False, lmax=None):  # lmax here needs to be fixed. order of taylor expansion is fixed to 3.
                pass
            maps.append(m)
        del opos, cmb
        # Save the map computed, for future reference.
        rm = apply_rotation(maps, rot)

        @FloatOrArray
        def model(nu, **kwargs):
            # Convert from CMB to Rayleigh-Jeans units at frequency nu;
            # optionally restrict to a subset of pixels.
            cmb_map = np.array(rm) * convert_units("uK_CMB", "uK_RJ", nu)
            if self.pixel_indices is None:
                return cmb_map
            else:
                return cmb_map[:, self.pixel_indices]
        return model

    def synfast(self):
        """Function for the calculation of lensed CMB maps directly from
        lensed Cls using healpix's synfast routine.
        """
        # get the spectra. These are in CAMB format, we discard the last
        # three corresponding to dd, dt, de, respectively.
        ell, tt, ee, bb, te, _, _, _ = self.CMB_Specs
        lmax_cl = len(ell) + 1
        ell = np.arange(lmax_cl + 1)
        # in CAMB format so we must divide by the scaling factor
        factor = ell * (ell + 1.) / 2. / np.pi
        cl_teb = np.zeros((6, lmax_cl + 1))
        cl_teb[0, 2:] = tt / factor[2:]
        cl_teb[1, 2:] = ee / factor[2:]
        cl_teb[2, 2:] = bb / factor[2:]
        cl_teb[3, 2:] = te / factor[2:]
        cl_teb[4, 2:] = 0.
        cl_teb[5, 2:] = 0.
        np.random.seed(self.CMB_Seed)
        T, Q, U = hp.synfast(cl_teb, self.Nside, pol=True, new=True, verbose=False)

        @FloatOrArray
        def model(nu, **kwargs):
            cmb_map = np.array([T, Q, U]) * convert_units("uK_CMB", "uK_RJ", nu)
            if self.pixel_indices is None:
                return cmb_map
            else:
                return cmb_map[:, self.pixel_indices]
        return model

    def pre_computed(self):
        """Returns a CMB (T, Q, U) maps as a function of observing frequency, nu.
        This function takes a pre-computed map of the CMB and scales
        it to some new frequency.
        """
        @FloatOrArray
        def model(nu, **kwargs):
            return np.array([self.A_I, self.A_Q, self.A_U]) * convert_units("uK_CMB", "uK_RJ", nu)
        return model
def power_law(nu, nu_0, b):
    """Calculate scaling factor for a power-law SED.

    Returns the factor that scales a map at reference frequency nu_0 to
    frequency nu under a power law of index b.

    :param nu: frequency being scaled to.
    :type nu: float.
    :param nu_0: reference frequency of power law.
    :type nu_0: float.
    :param b: spectral index by which to scale.
    :type b: float.
    """
    ratio = nu / nu_0
    return ratio ** b
def black_body(nu, nu_0, T):
    """Calculate scaling factor for a black-body SED.

    Factor to scale a black-body emitter of temperature T from
    reference frequency nu_0 to frequency nu.

    :param nu: frequency being scaled to.
    :type nu: float.
    :param nu_0: reference frequency of power law.
    :type nu_0: float.
    :param T: temperature of black body function used to scale.
    :type T: float.
    :return: float -- black body at temperature T scaling from frequency nu_0 to nu.
    """
    emission = B(nu, T)
    reference = B(nu_0, T)
    return emission / reference
def get_decorrelation_matrices(freqs, freq_ref, corrlen):
    """Function to compute the mean and covariance for the decorrelation

    :param freqs: frequencies at which to calculate covariance structure.
    :type freqs: numpy.array.
    :param freq_ref: reference frequency for constrained map.
    :type freq_ref: float.
    :param corrlen: correlation length of imposed Gaussian decorrelation.
    :return: numpy.ndarray(len(freqs), len(freqs)), numpy.ndarray(len(freqs)) -- the output covariance and mean.
    """
    if corrlen <= 0:
        # Zero/negative correlation length: perfectly correlated, no scatter.
        rho_mean = np.ones([len(freqs), 1])
        rho_covar = np.zeros([len(freqs), len(freqs)])
    else:
        # Ensure the reference frequency is on the grid; remember whether
        # it had to be inserted so output shapes can be restored below.
        added_freq = False
        freqtot = np.array([f for f in freqs])
        if not (freq_ref in freqtot):
            freqtot = np.insert(freqtot, 0, freq_ref)
            added_freq = True
        indref = np.where(freqtot == freq_ref)[0][0]
        # Gaussian correlation in log-frequency with length `corrlen`.
        corrmatrix = np.exp(-0.5 * ((np.log(freqtot[:, None]) - np.log(freqtot[None, :])) / corrlen) ** 2)
        rho_inv = invert_safe(corrmatrix)
        # Condition the unconstrained frequencies on the reference one.
        rho_uu = np.delete(np.delete(rho_inv, indref, axis = 0), indref, axis = 1)
        rho_uu = invert_safe(rho_uu)
        rho_inv_cu = rho_inv[:, indref]
        rho_inv_cu = np.transpose(np.array([np.delete(rho_inv_cu, indref)]))
        # Matrix square root of the conditional covariance; negative
        # eigenvalues from numerical noise are clipped to zero.
        rho_uu_w, rho_uu_v = np.linalg.eigh(rho_uu)
        rho_covar = np.dot(rho_uu_v, np.dot(np.diag(np.sqrt(np.maximum(rho_uu_w, np.zeros_like(rho_uu_w)))), np.transpose(rho_uu_v)))
        rho_mean = -np.dot(rho_uu, rho_inv_cu)
        if not added_freq:
            # freq_ref was one of the requested frequencies: re-insert a
            # deterministic (zero covariance, unit mean) row/column for it.
            rho_covar_new = np.zeros([len(freqtot), len(freqtot)])
            rho_mean_new = np.ones([len(freqtot), 1])
            rho_covar_new[:indref, :indref] = rho_covar[:indref, :indref]
            rho_covar_new[indref + 1:, :indref] = rho_covar[indref:, :indref]
            rho_covar_new[:indref, indref + 1:] = rho_covar[:indref, indref:]
            rho_covar_new[indref + 1:, indref + 1:] = rho_covar[indref:, indref:]
            rho_covar = rho_covar_new
            rho_mean_new[:indref, :] = rho_mean[:indref, :]
            rho_mean_new[indref + 1:, :] = rho_mean[indref:, :]
            rho_mean = rho_mean_new
    return rho_covar, rho_mean
def Add_Decorrelation(Component):
    """Function to calculate a wrapper for some model(nu) function to add
    decorrelation.

    :param Component: instance of one of the classes in :mod:`pysm.component`
    :type Component: class
    :return: function - decorator used to add stochastic decorrelation to an emission model.

    Required parameters:

    - Component.Add_Decorrelation: bool - True = add decorrelation. False = do not.
    - Component.Corr_Len: float - correlation length defined in accompanying paper.

    Example use:

    .. code-block::

        class Synchrotron(object):
            def curved_power_law(self):
                @Add_Decorrelation(self)
                def model(nu):
                    return np.array([T, Q, U])
                return model
    """
    if Component.Add_Decorrelation:
        def decorrelation(model):
            """This is the actual decorrelation decorator that will be implemented
            once the add_decorrelation function is evaluated.
            """
            def wrapper(nu, **kwargs):
                try:
                    N_freqs = len(nu)
                except TypeError:  # nu is a single value
                    N_freqs = 1
                    nu = np.array([nu])
                # Decorrelation statistics for intensity and polarisation
                # (I and P use their own reference frequencies).
                rho_cov_I, rho_m_I = get_decorrelation_matrices(nu, Component.Nu_0_I, Component.Corr_Len)
                rho_cov_P, rho_m_P = get_decorrelation_matrices(nu, Component.Nu_0_P, Component.Corr_Len)
                # Draw one correlated Gaussian realisation per frequency.
                extra_I = np.dot(rho_cov_I, np.random.randn(N_freqs))
                extra_P = np.dot(rho_cov_P, np.random.randn(N_freqs))
                decorr = np.zeros((N_freqs, 3))
                decorr[:, 0, None] = rho_m_I + extra_I[:, None]
                decorr[:, 1, None] = rho_m_P + extra_P[:, None]
                decorr[:, 2, None] = rho_m_P + extra_P[:, None]
                # Multiply the underlying model maps by the decorrelation
                # factors, broadcasting over pixels.
                decorrelated = decorr[..., None] * model(nu, **kwargs)
                if N_freqs == 1:
                    return decorrelated[0]
                else:
                    return decorrelated
            return wrapper
        return decorrelation
    else:
        """If decorrelation not required do nothing with the decorator."""
        def decorrelation(model):
            def wrapper(nu, **kwargs):
                return model(nu, **kwargs)
            return wrapper
        return decorrelation
"""The following code is edited from the taylens code: Naess,
S. K. and Louis, T. 2013 'Lensing simulations by Taylor expansion -
not so inefficient after all' Journal of Cosmology and Astroparticle
Physics September 2013. Available at:
https://github.com/amaurea/taylens
"""
def simulate_tebp_correlated(cl_tebp_arr, nside, lmax, seed):
    """This generates correlated T,E,B and Phi maps

    :param cl_tebp_arr: stacked TEBP (cross-)spectra as accepted by hp.synalm.
    :param nside: healpix nside of the output maps.
    :param lmax: maximum multipole for the alm synthesis.
    :param seed: numpy random seed, for reproducibility.
    :return: (cmb, aphi) -- the (T, Q, U) map array and the lensing
        potential alms.
    """
    np.random.seed(seed)
    alms = hp.synalm(cl_tebp_arr, lmax = lmax, new = True)
    # Last alm set is the lensing potential; the rest are T/E/B.
    aphi = alms[-1]
    acmb = alms[0 : -1]
    # Set to zero above map resolution to avoid aliasing
    beam_cut = np.ones(3 * nside)
    for ac in acmb:
        hp.almxfl(ac, beam_cut, inplace = True)
    cmb = np.array(hp.alm2map(acmb, nside, pol = True, verbose = False))
    return cmb, aphi
def taylor_interpol_iter(m, pos, order=3, verbose=False, lmax=None):
    """Given a healpix map m[npix], and a set of positions
    pos[{theta,phi},...], evaluate the values at those positions using
    harmonic Taylor interpolation to the given order (3 by
    default). Successively yields values for each cumulative order up
    to the specified one. If verbose is specified, it will print
    progress information to stderr.
    """
    nside = hp.npix2nside(m.size)
    if lmax is None:
        lmax = 3 * nside
    # Find the healpix pixel centers closest to pos,
    # and our deviation from these pixel centers.
    ipos = hp.ang2pix(nside, pos[0], pos[1])
    pos0 = np.array(hp.pix2ang(nside, ipos))
    dpos = pos[:2] - pos0
    # Take wrapping into account
    bad = dpos[1] > np.pi
    dpos[1, bad] = dpos[1, bad] - 2 * np.pi
    bad = dpos[1] < -np.pi
    dpos[1, bad] = dpos[1, bad] + 2 * np.pi
    # Since healpix' dphi actually returns dphi/sintheta, we choose
    # to expand in terms of dphi*sintheta instead.
    dpos[1] *= np.sin(pos0[0])
    del pos0
    # We will now Taylor expand our healpix field to
    # get approximations for the values at our chosen
    # locations. The structure of this section is
    # somewhat complicated by the fact that alm2map_der1 returns
    # two different derivatives at the same time.
    derivs = [[m]]
    res = m[ipos]
    yield res
    for o in range(1, order + 1):
        # Compute our derivatives
        derivs2 = [None for i in range(o+1)]
        used = [False for i in range(o+1)]
        # Loop through previous level in steps of two (except last)
        if verbose: tprint("order %d" % o)
        for i in range(o):
            # Each alm2map_der1 provides two derivatives, so avoid
            # doing double work.
            if i < o-1 and i % 2 == 1:
                continue
            a = hp.map2alm(derivs[i], use_weights = True, lmax = lmax, iter = 0)
            derivs[i] = None
            dtheta, dphi = hp.alm2map_der1(a, nside, lmax = lmax)[-2:]
            derivs2[i : i + 2] = [dtheta, dphi]
            del a, dtheta, dphi
            # Use these to compute the next level
            for j in range(i, min(i + 2, o + 1)):
                if used[j]:
                    continue
                # Taylor coefficient 1/((o-j)! j!).
                # NOTE(review): comb/factorial are presumably imported from
                # scipy elsewhere in this module -- confirm; on Python 3
                # `/` is true division, matching the intended coefficient.
                N = comb(o, j) / factorial(o)
                res += N * derivs2[j][ipos] * dpos[0]**(o-j) * dpos[1]**j
                used[j] = True
                # If we are at the last order, we don't need to waste memory
                # storing the derivatives any more
                if o == order: derivs2[j] = None
        derivs = derivs2
        yield res
"""The following functions are support routines for reading input
data and preparing it for being lensed. Most of them are only needed
to take care of tiny, curvature-related effects that can be safely
ignored.
"""
def readspec(fname):
    """Read a power spectrum with columns [l,comp1,comp2,....] into a 2d
    array indexed by l. Entries with missing data are filled with
    0.

    :param fname: path to a whitespace-separated spectrum file.
    :return: numpy.ndarray of shape (ncomp, max(l) + 1).
    """
    tmp = np.loadtxt(fname).T
    l, tmp = tmp[0], tmp[1:]
    # Bug fix: np.loadtxt yields floats, and a float array dimension is
    # rejected by numpy -- cast the maximum multipole to int for the shape.
    res = np.zeros((len(tmp), int(np.max(l)) + 1))
    res[:, np.array(l, dtype=int)] = tmp
    return res
def offset_pos(ipos, dtheta, dphi, pol=False, geodesic=False):
    """Offsets positions ipos on the sphere by a unit length step along
    the gradient dtheta, dphi/sintheta, taking the curvature of the
    sphere into account. If pol is passed, also computes the cos and
    sin of the angle by which (Q,U) must be rotated to take into
    account the change in local coordinate system.

    If geodesic is passed, a quick and dirty, but quite accurate,
    approximation is used.

    Uses the memory of 2 maps (4 if pol) (plus that of the input
    maps).
    """
    opos = np.zeros(ipos.shape)
    if pol and not geodesic:
        orot = np.zeros(ipos.shape)
    else:
        orot = None
    if not geodesic:
        # Loop over chunks in order to conserve memory
        step = 0x10000
        for i in range(0, ipos.shape[1], step):
            small_opos, small_orot = offset_pos_helper(ipos[:, i:i+step], dtheta[i:i+step], dphi[i:i+step], pol)
            opos[:, i:i+step] = small_opos
            if pol: orot[:, i : i + step] = small_orot
    else:
        # Flat-sky style approximation: step directly in (theta, phi).
        opos[0] = ipos[0] + dtheta
        opos[1] = ipos[1] + dphi / np.sin(ipos[0])
        opos = fixang(opos)
    return opos, orot
def offset_pos_helper(ipos, dtheta, dphi, pol):
    """Exact spherical offset for one chunk of positions.

    Moves each position a distance |grad| along the gradient direction
    using spherical-triangle relations, and (when pol is set) returns the
    [cos(2psi), sin(2psi)] rotation of the local polarisation frame.
    NOTE(review): formulas taken over verbatim from taylens -- verify
    against the taylens reference before modifying.
    """
    grad = np.array((dtheta, dphi))
    dtheta, dphi = None, None
    # Step length and unit direction.
    d = np.sum(grad ** 2, 0) ** 0.5
    grad /= d
    cosd, sind = np.cos(d), np.sin(d)
    cost, sint = np.cos(ipos[0]), np.sin(ipos[0])
    # Spherical law of cosines for the new colatitude.
    ocost = cosd * cost - sind * sint * grad[0]
    osint = (1 - ocost ** 2) ** 0.5
    ophi = ipos[1] + np.arcsin(sind * grad[1] / osint)
    if not pol:
        return np.array([np.arccos(ocost), ophi]), None
    # Polarisation frame rotation via double-angle expressions.
    A = grad[1] / (sind * cost / sint + grad[0] * cosd)
    nom1 = grad[0] + grad[1] * A
    denom = 1 + A ** 2
    cosgam = 2 * nom1 ** 2 / denom - 1
    singam = 2 * nom1 * (grad[1] - grad[0] * A) / denom
    return np.array([np.arccos(ocost), ophi]), np.array([cosgam, singam])
def fixang(pos):
    """Handle pole wraparound.

    Reflects colatitudes that fell below 0 or above pi back into range,
    shifting the corresponding longitudes by pi.
    """
    out = np.array(pos)
    below = np.where(out[0] < 0)
    out[0, below] = -out[0, below]
    out[1, below] = out[1, below] + np.pi
    above = np.where(out[0] > np.pi)
    out[0, above] = 2 * np.pi - out[0, above]
    out[1, above] = out[1, above] + np.pi
    return out
def apply_rotation(m, rot):
    """Update Q,U components in polarized map by applying the rotation
    rot, represented as [cos2psi,sin2psi] per pixel. Rot is one of the
    outputs from offset_pos.

    :param m: map array; Q and U are rotated only when it has >= 3 components.
    :param rot: per-pixel [cos2psi, sin2psi], or None for no rotation.
    :return: the rotated map (the input is returned unchanged when no
        rotation applies).
    """
    if len(m) < 3:
        return m
    if rot is None:
        return m
    m = np.asarray(m)
    res = m.copy()
    res[1] = rot[0] * m[1] - rot[1] * m[2]
    res[2] = rot[1] * m[1] + rot[0] * m[2]
    # Bug fix: previously returned the unrotated input `m`, silently
    # discarding the computed rotation.
    return res
# Set up progress prints
t0 = None  # lazily initialised by tprint() on its first call


def silent(msg):
    """No-op progress printer (drop-in alternative to tprint)."""
    pass
def tprint(msg):
    """Print msg to stderr, prefixed by seconds elapsed since first call.

    NOTE(review): relies on the module-level `t0` above and on the `time`
    module being imported elsewhere in this file -- confirm.
    """
    global t0
    if t0 is None:
        t0 = time.time()
    print("%8.2f %s" % (time.time() - t0, msg), file=sys.stderr)
|
bthorne93/PySM_public
|
pysm/components.py
|
Python
|
mit
| 51,029
|
[
"Gaussian"
] |
b94c987b6bdfb44cda6c68d2435e4bd2e3cf8149a4ed16d088479f8831ef6478
|
import logging
import os
import shutil
import sys
from string import Template
from galaxy.util import unicodify
from galaxy import eggs
eggs.require( 'MarkupSafe' )
import markupsafe
log = logging.getLogger( __name__ )
CHUNK_SIZE = 2**20 # 1Mb
INSTALLATION_LOG = 'INSTALLATION.log'
# Set no activity timeout to 20 minutes.
NO_OUTPUT_TIMEOUT = 1200.0
# Maximum raw size (bytes) for which a diff is computed/displayed.
MAXDIFFSIZE = 8000
# Maximum string size translated for safe browser display (see size_string).
MAX_DISPLAY_SIZE = 32768
# Dockerfile template for building a Galaxy image with selected tool shed
# repositories installed; ${selected_repositories} is filled with one
# SELECTED_REPOSITORIES_TEMPLATE line per repository.
DOCKER_IMAGE_TEMPLATE = '''
# Galaxy Docker image
FROM bgruening/galaxy-stable
MAINTAINER Bjoern A. Gruning, bjoern.gruening@gmail.com
RUN sed -i 's|brand.*|brand = deepTools|g' ~/galaxy-central/universe_wsgi.ini
WORKDIR /galaxy-central
${selected_repositories}
# Mark one folder as imported from the host.
VOLUME ["/export/"]
# Expose port 80 to the host
EXPOSE :80
# Autostart script that is invoked during container start
CMD ["/usr/bin/startup"]
'''
SELECTED_REPOSITORIES_TEMPLATE = '''
RUN service postgresql start && service apache2 start && ./run.sh --daemon && sleep 120 && python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:8080 --url ${tool_shed_url} -o ${repository_owner} --name ${repository_name} --tool-deps --repository-deps --panel-section-name 'Docker'
'''
def evaluate_template( text, install_environment ):
    """
    Substitute variables defined in XML blocks from dependencies file. The value of the received
    repository_install_dir is the root installation directory of the repository that contains the
    tool dependency. The value of the received install_dir is the root installation directory of
    the tool_dependency.
    """
    substitutions = get_env_var_values( install_environment )
    return Template( text ).safe_substitute( substitutions )
def get_env_var_values( install_environment ):
    """
    Return a dictionary of values, some of which enable substitution of reserved words for the values.
    The received install_environment object has 2 important attributes for reserved word substitution:
    install_environment.tool_shed_repository_install_dir is the root installation directory of the repository
    that contains the tool dependency being installed, and install_environment.install_dir is the root
    installation directory of the tool dependency.
    """
    return {
        'REPOSITORY_INSTALL_DIR': install_environment.tool_shed_repository_install_dir,
        'INSTALL_DIR': install_environment.install_dir,
        'system_install': install_environment.install_dir,
        # If the Python interpreter is 64bit then we can safely assume that the underlying system is also 64bit.
        '__is64bit__': sys.maxsize > 2**32,
    }
def get_file_type_str( changeset_revision, file_type ):
    """Return the archive file name for a changeset revision given its
    file_type ('zip', 'bz2' or 'gz'); unknown types yield ''."""
    suffixes = { 'zip': '.zip', 'bz2': '.tar.bz2', 'gz': '.tar.gz' }
    suffix = suffixes.get( file_type )
    if suffix is None:
        return ''
    return '%s%s' % ( changeset_revision, suffix )
def move_file( current_dir, source, destination, rename_to=None ):
    """Move `source` (relative to current_dir) into the `destination`
    directory, creating it if necessary and optionally renaming the file."""
    source_path = os.path.abspath( os.path.join( current_dir, source ) )
    destination_directory = os.path.join( destination )
    if rename_to is None:
        target_name = os.path.basename( source_path )
    else:
        target_name = rename_to
    if not os.path.exists( destination_directory ):
        os.makedirs( destination_directory )
    shutil.move( source_path, os.path.join( destination_directory, target_name ) )
def remove_dir( dir ):
    """Attempt to remove a directory from disk (best effort).

    Failures (permissions, concurrent removal, ...) are logged and
    otherwise ignored, preserving the original best-effort contract;
    the previous bare `except: pass` hid all diagnostics.
    """
    if dir and os.path.exists( dir ):
        try:
            shutil.rmtree( dir )
        except Exception:
            # Best-effort cleanup: never propagate removal failures.
            log.exception( "Unable to remove directory %s", dir )
def size_string( raw_text, size=MAX_DISPLAY_SIZE ):
    """Return a subset of a string (up to MAX_DISPLAY_SIZE) translated to a safe string for display in a browser."""
    if raw_text and len( raw_text ) >= size:
        # NOTE(review): `util` is not imported in this module's visible
        # imports (only `unicodify` is imported from galaxy.util), so this
        # line would raise NameError whenever truncation triggers --
        # confirm the intended `from galaxy import util` import.
        large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( size )
        raw_text = '%s%s' % ( raw_text[ 0:size ], large_str )
    return raw_text or ''
def stringify( list ):
    """Join the given items with commas; empty or None input yields ''.

    (The parameter name shadows the builtin `list`; it is kept for
    backward compatibility with keyword callers.)
    """
    return ','.join( list ) if list else ''
def strip_path( fpath ):
    """Attempt to strip the path from a file name.

    Falsy input ('' or None) is returned unchanged. Any error while
    splitting (e.g. a non-path argument) falls back to returning the
    input as-is, mirroring the original permissive behaviour while
    avoiding the previous bare `except`.
    """
    if not fpath:
        return fpath
    try:
        return os.path.basename( fpath )
    except ( TypeError, AttributeError, ValueError ):
        return fpath
def to_html_string( text ):
    """Translates the characters in text to an html string

    NOTE(review): Python 2-only syntax (`except ..., e` and `unicode`);
    this module targets Python 2. The two space-replacement lines below
    appear to have lost their HTML entities (likely '&nbsp;') somewhere
    in transit -- as written they are no-ops; confirm against upstream.
    """
    if text:
        try:
            text = unicodify( text )
        except UnicodeDecodeError, e:
            return "Error decoding string: %s" % str( e )
        # Escape markup, then preserve line breaks and runs of spaces.
        text = unicode( markupsafe.escape( text ) )
        text = text.replace( '\n', '<br/>' )
        text = text.replace( ' ', ' ' )
        text = text.replace( ' ', ' ' )
    return text
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/tool_shed/util/basic_util.py
|
Python
|
gpl-3.0
| 5,198
|
[
"Galaxy"
] |
324fb61b55cf6e16e7b410565ec65161fe172d3adbeac0743ac91a6fad5e7efe
|
# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2011, 2013-2014 Google, Inc.
# Copyright (c) 2013-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Steven Myint <hg@stevenmyint.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Checks for various exception related errors."""
import inspect
import sys
import six
from six.moves import builtins
import astroid
from pylint import checkers
from pylint.checkers import utils
from pylint import interfaces
def _builtin_exceptions():
    """Return the names of all builtin exception classes."""
    exception_types = [
        value for _, value in inspect.getmembers(six.moves.builtins)
        if isinstance(value, type) and issubclass(value, BaseException)
    ]
    return {etype.__name__ for etype in exception_types}
def _annotated_unpack_infer(stmt, context=None):
    """
    Recursively generate nodes inferred by the given statement.
    If the inferred value is a list or a tuple, recurse on the elements.
    Returns an iterator which yields tuples in the format
    ('original node', 'infered node').
    """
    # For tuple/list literals (e.g. `except (A, B):`) yield each element
    # with its own inference so callers can report the precise sub-node.
    if isinstance(stmt, (astroid.List, astroid.Tuple)):
        for elt in stmt.elts:
            inferred = utils.safe_infer(elt)
            if inferred and inferred is not astroid.YES:
                yield elt, inferred
        return
    # Otherwise yield every successful (non-ambiguous) inference of the
    # statement itself.
    for infered in stmt.infer(context):
        if infered is astroid.YES:
            continue
        yield stmt, infered
# Python 3 changes raise/except semantics (exception chaining, no string
# or tuple raising), so several checks below are version-gated.
PY3K = sys.version_info >= (3, 0)
OVERGENERAL_EXCEPTIONS = ('Exception',)
BUILTINS_NAME = builtins.__name__

# Message catalogue: id -> (template, symbolic-name, description[, options]).
MSGS = {
    'E0701': ('Bad except clauses order (%s)',
              'bad-except-order',
              'Used when except clauses are not in the correct order (from the '
              'more specific to the more generic). If you don\'t fix the order, '
              'some exceptions may not be caught by the most specific handler.'),
    'E0702': ('Raising %s while only classes or instances are allowed',
              'raising-bad-type',
              'Used when something which is neither a class, an instance or a \
string is raised (i.e. a `TypeError` will be raised).'),
    'E0703': ('Exception context set to something which is not an '
              'exception, nor None',
              'bad-exception-context',
              'Used when using the syntax "raise ... from ...", '
              'where the exception context is not an exception, '
              'nor None.',
              {'minversion': (3, 0)}),
    'E0704': ('The raise statement is not inside an except clause',
              'misplaced-bare-raise',
              'Used when a bare raise is not used inside an except clause. '
              'This generates an error, since there are no active exceptions '
              'to be reraised. An exception to this rule is represented by '
              'a bare raise inside a finally clause, which might work, as long '
              'as an exception is raised inside the try block, but it is '
              'nevertheless a code smell that must not be relied upon.'),
    'E0710': ('Raising a new style class which doesn\'t inherit from BaseException',
              'raising-non-exception',
              'Used when a new style class which doesn\'t inherit from \
BaseException is raised.'),
    'E0711': ('NotImplemented raised - should raise NotImplementedError',
              'notimplemented-raised',
              'Used when NotImplemented is raised instead of \
NotImplementedError'),
    'E0712': ('Catching an exception which doesn\'t inherit from Exception: %s',
              'catching-non-exception',
              'Used when a class which doesn\'t inherit from \
Exception is used as an exception in an except clause.'),
    'W0702': ('No exception type(s) specified',
              'bare-except',
              'Used when an except clause doesn\'t specify exceptions type to \
catch.'),
    'W0703': ('Catching too general exception %s',
              'broad-except',
              'Used when an except catches a too general exception, \
possibly burying unrelated errors.'),
    'W0705': ('Catching previously caught exception type %s',
              'duplicate-except',
              'Used when an except catches a type that was already caught by '
              'a previous handler.'),
    'W0710': ('Exception doesn\'t inherit from standard "Exception" class',
              'nonstandard-exception',
              'Used when a custom exception class is raised but doesn\'t \
inherit from the builtin "Exception" class.',
              {'maxversion': (3, 0)}),
    'W0711': ('Exception to catch is the result of a binary "%s" operation',
              'binary-op-exception',
              'Used when the exception to catch is of the form \
"except A or B:". If intending to catch multiple, \
rewrite as "except (A, B):"'),
    }
class BaseVisitor(object):
    """Base class for visitors defined in this module."""

    def __init__(self, checker, node):
        self._checker = checker
        self._node = node

    def visit(self, node):
        """Dispatch to visit_<classname> when defined, else visit_default."""
        handler_name = 'visit_' + node.__class__.__name__.lower()
        handler = getattr(self, handler_name, None)
        if handler is None:
            self.visit_default(node)
        else:
            handler(node)

    def visit_default(self, node):  # pylint: disable=unused-argument
        """Default implementation for all the nodes."""
class ExceptionRaiseRefVisitor(BaseVisitor):
    """Visit references (anything that is not an AST leaf)."""

    def visit_name(self, name):
        # `raise NotImplemented` is nearly always a typo for
        # NotImplementedError.
        if name.name == 'NotImplemented':
            self._checker.add_message('notimplemented-raised',
                                      node=self._node)

    def visit_call(self, call):
        # For `raise SomeName(...)`, inspect the callee name itself.
        if isinstance(call.func, astroid.Name):
            self.visit_name(call.func)
class ExceptionRaiseLeafVisitor(BaseVisitor):
    """Visitor for handling leaf kinds of a raise value."""

    def visit_const(self, const):
        # Only non-string constants are flagged here.
        if not isinstance(const.value, str):
            # raising-string will be emitted from python3 porting checker.
            self._checker.add_message('raising-bad-type', node=self._node,
                                      args=const.value.__class__.__name__)

    def visit_instance(self, instance):
        # Check the instance's class instead of the instance itself.
        # pylint: disable=protected-access
        cls = instance._proxied
        self.visit_classdef(cls)

    # Exception instances have a particular class type
    visit_exceptioninstance = visit_instance

    def visit_classdef(self, cls):
        # Only flag classes whose full ancestry is known; new-style classes
        # get raising-non-exception, old-style get nonstandard-exception.
        if (not utils.inherit_from_std_ex(cls) and
                utils.has_known_bases(cls)):
            if cls.newstyle:
                self._checker.add_message('raising-non-exception', node=self._node)
            else:
                self._checker.add_message('nonstandard-exception', node=self._node)

    def visit_tuple(self, tuple_node):
        # Raising a tuple is invalid on Python 3 (and an empty tuple always).
        if PY3K or not tuple_node.elts:
            self._checker.add_message('raising-bad-type',
                                      node=self._node,
                                      args='tuple')
            return
        # On Python 2, using the following is not an error:
        # raise (ZeroDivisionError, None)
        # raise (ZeroDivisionError, )
        # What's left to do is to check that the first
        # argument is indeed an exception. Verifying the other arguments
        # is not the scope of this check.
        first = tuple_node.elts[0]
        inferred = utils.safe_infer(first)
        if not inferred or inferred is astroid.Uninferable:
            return
        if (isinstance(inferred, astroid.Instance)
                and inferred.__class__.__name__ != 'Instance'):
            # TODO: explain why
            self.visit_default(tuple_node)
        else:
            self.visit(inferred)

    def visit_default(self, node):
        # Anything else is not a valid raise operand.
        name = getattr(node, 'name', node.__class__.__name__)
        self._checker.add_message('raising-bad-type',
                                  node=self._node,
                                  args=name)
class ExceptionsChecker(checkers.BaseChecker):
    """Exception related checks."""
    __implements__ = interfaces.IAstroidChecker
    name = 'exceptions'
    msgs = MSGS
    priority = -4
    options = (('overgeneral-exceptions',
                {'default' : OVERGENERAL_EXCEPTIONS,
                 'type' : 'csv', 'metavar' : '<comma-separated class names>',
                 'help' : 'Exceptions that will emit a warning '
                          'when being caught. Defaults to "%s"' % (
                              ', '.join(OVERGENERAL_EXCEPTIONS),)}
               ),
              )
    def open(self):
        # Cache the builtin exception names once per checker run.
        self._builtin_exceptions = _builtin_exceptions()
        super(ExceptionsChecker, self).open()
    @utils.check_messages('nonstandard-exception', 'misplaced-bare-raise',
                          'raising-bad-type', 'raising-non-exception',
                          'notimplemented-raised', 'bad-exception-context')
    def visit_raise(self, node):
        # Entry point for every ``raise`` statement in the checked module:
        # dispatch the raised expression (and its inferred value) to the
        # visitors defined above.
        if node.exc is None:
            self._check_misplaced_bare_raise(node)
            return
        if PY3K and node.cause:
            self._check_bad_exception_context(node)
        expr = node.exc
        try:
            inferred_value = next(expr.infer())
        except astroid.InferenceError:
            inferred_value = None
        ExceptionRaiseRefVisitor(self, node).visit(expr)
        if inferred_value:
            ExceptionRaiseLeafVisitor(self, node).visit(inferred_value)
    def _check_misplaced_bare_raise(self, node):
        # A bare ``raise`` is only valid inside an ``except`` block (or in
        # ``__exit__``, which may legitimately re-raise).
        # Filter out if it's present in __exit__.
        scope = node.scope()
        if (isinstance(scope, astroid.FunctionDef)
                and scope.is_method()
                and scope.name == '__exit__'):
            return
        current = node
        # Stop when a new scope is generated or when the raise
        # statement is found inside a TryFinally.
        ignores = (astroid.ExceptHandler, astroid.FunctionDef, astroid.TryFinally)
        while current and not isinstance(current.parent, ignores):
            current = current.parent
        expected = (astroid.ExceptHandler,)
        if not current or not isinstance(current.parent, expected):
            self.add_message('misplaced-bare-raise', node=node)
    def _check_bad_exception_context(self, node):
        """Verify that the exception context is properly set.
        An exception context can be only `None` or an exception.
        """
        cause = utils.safe_infer(node.cause)
        if cause in (astroid.YES, None):
            # Uninferable, or no context at all: nothing to check.
            return
        if isinstance(cause, astroid.Const):
            # ``raise exc from <const>`` is only valid for None.
            if cause.value is not None:
                self.add_message('bad-exception-context',
                                 node=node)
        elif (not isinstance(cause, astroid.ClassDef) and
              not utils.inherit_from_std_ex(cause)):
            self.add_message('bad-exception-context',
                             node=node)
    def _check_catching_non_exception(self, handler, exc, part):
        # Warn when an ``except`` clause catches something that cannot
        # be an exception (a non-exception class or a non-class value).
        if isinstance(exc, astroid.Tuple):
            # Check if it is a tuple of exceptions.
            inferred = [utils.safe_infer(elt) for elt in exc.elts]
            if any(node is astroid.YES for node in inferred):
                # Don't emit if we don't know every component.
                return
            if all(node and utils.inherit_from_std_ex(node)
                   for node in inferred):
                return
        if not isinstance(exc, astroid.ClassDef):
            # Don't emit the warning if the infered stmt
            # is None, but the exception handler is something else,
            # maybe it was redefined.
            if (isinstance(exc, astroid.Const) and
                    exc.value is None):
                if ((isinstance(handler.type, astroid.Const) and
                     handler.type.value is None) or
                        handler.type.parent_of(exc)):
                    # If the exception handler catches None or
                    # the exception component, which is None, is
                    # defined by the entire exception handler, then
                    # emit a warning.
                    self.add_message('catching-non-exception',
                                     node=handler.type,
                                     args=(part.as_string(), ))
            else:
                self.add_message('catching-non-exception',
                                 node=handler.type,
                                 args=(part.as_string(), ))
            return
        if (not utils.inherit_from_std_ex(exc) and
                exc.name not in self._builtin_exceptions):
            if utils.has_known_bases(exc):
                self.add_message('catching-non-exception',
                                 node=handler.type,
                                 args=(exc.name, ))
    @utils.check_messages('bare-except', 'broad-except',
                          'binary-op-exception', 'bad-except-order',
                          'catching-non-exception', 'duplicate-except')
    def visit_tryexcept(self, node):
        """check for empty except"""
        exceptions_classes = []
        nb_handlers = len(node.handlers)
        for index, handler in enumerate(node.handlers):
            if handler.type is None:
                # Bare ``except:`` — acceptable only when it re-raises.
                if not utils.is_raising(handler.body):
                    self.add_message('bare-except', node=handler)
                # check if a "except:" is followed by some other
                # except
                if index < (nb_handlers - 1):
                    msg = 'empty except clause should always appear last'
                    self.add_message('bad-except-order', node=node, args=msg)
            elif isinstance(handler.type, astroid.BoolOp):
                # ``except A or B:`` — almost certainly meant a tuple.
                self.add_message('binary-op-exception',
                                 node=handler, args=handler.type.op)
            else:
                try:
                    excs = list(_annotated_unpack_infer(handler.type))
                except astroid.InferenceError:
                    continue
                for part, exc in excs:
                    if exc is astroid.YES:
                        continue
                    if (isinstance(exc, astroid.Instance)
                            and utils.inherit_from_std_ex(exc)):
                        # pylint: disable=protected-access
                        exc = exc._proxied
                    self._check_catching_non_exception(handler, exc, part)
                    if not isinstance(exc, astroid.ClassDef):
                        continue
                    exc_ancestors = [anc for anc in exc.ancestors()
                                     if isinstance(anc, astroid.ClassDef)]
                    # A handler for a base class placed before a handler
                    # for one of its subclasses makes the latter dead code.
                    for previous_exc in exceptions_classes:
                        if previous_exc in exc_ancestors:
                            msg = '%s is an ancestor class of %s' % (
                                previous_exc.name, exc.name)
                            self.add_message('bad-except-order',
                                             node=handler.type, args=msg)
                    if (exc.name in self.config.overgeneral_exceptions
                            and exc.root().name == utils.EXCEPTIONS_MODULE
                            and not utils.is_raising(handler.body)):
                        self.add_message('broad-except',
                                         args=exc.name, node=handler.type)
                    if exc in exceptions_classes:
                        self.add_message('duplicate-except',
                                         args=exc.name, node=handler.type)
                exceptions_classes += [exc for _, exc in excs]
def register(linter):
    """Auto-registration hook called by pylint to install this checker."""
    checker = ExceptionsChecker(linter)
    linter.register_checker(checker)
|
arju88nair/projectCulminate
|
venv/lib/python3.5/site-packages/pylint/checkers/exceptions.py
|
Python
|
apache-2.0
| 16,120
|
[
"VisIt"
] |
2bd1cfbefcaa5e2af8a9a3a0e9ca7a96fc3dbdf802876ee61ad710604eae7472
|
#!/usr/bin/env python
#
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Standard setup script.
"""
import glob
import os
import sys
from buildbot import version
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
def include(d, e):
    """Generate a pair of (directory, file-list) for installation.
    'd' -- A directory
    'e' -- A glob pattern"""
    matches = glob.glob('%s/%s' % (d, e))
    return (d, [path for path in matches if os.path.isfile(path)])
class install_data_twisted(install_data):
    """make sure data files are installed in package.
    this is evil.
    copied from Twisted/setup.py.
    """
    def finalize_options(self):
        # Install data files into the same tree as the python modules,
        # by defaulting install_dir to the 'install_lib' directory.
        self.set_undefined_options('install',
                                   ('install_lib', 'install_dir'),
                                   )
        install_data.finalize_options(self)
    def run(self):
        install_data.run(self)
        # ensure there's a buildbot/VERSION file
        fn = os.path.join(self.install_dir, 'buildbot', 'VERSION')
        # Use a context manager so the handle is closed (and the data
        # flushed) deterministically instead of relying on refcounting.
        with open(fn, 'w') as f:
            f.write(version)
        self.outfiles.append(fn)
class our_sdist(sdist):
    """sdist command that adds generated files to the release tree."""
    def make_release_tree(self, base_dir, files):
        sdist.make_release_tree(self, base_dir, files)
        # ensure there's a buildbot/VERSION file
        fn = os.path.join(base_dir, 'buildbot', 'VERSION')
        # Context managers guarantee the handles are closed and data is
        # flushed even if a later step raises.
        with open(fn, 'w') as f:
            f.write(version)
        # ensure that NEWS has a copy of the latest release notes, with the
        # proper version substituted
        src_fn = os.path.join('docs', 'relnotes/index.rst')
        with open(src_fn) as f:
            src = f.read()
        src = src.replace('|version|', version)
        dst_fn = os.path.join(base_dir, 'NEWS')
        with open(dst_fn, 'w') as f:
            f.write(src)
# Long, human-readable package description (shown e.g. on PyPI).
long_description = """
The BuildBot is a system to automate the compile/test cycle required by
most software projects to validate code changes. By automatically
rebuilding and testing the tree each time something has changed, build
problems are pinpointed quickly, before other developers are
inconvenienced by the failure. The guilty developer can be identified
and harassed without human intervention. By running the builds on a
variety of platforms, developers who do not have the facilities to test
their changes everywhere before checkin will at least know shortly
afterwards whether they have broken the build or not. Warning counts,
lint checks, image size, compile time, and other build parameters can
be tracked over time, are more visible, and are therefore easier to
improve.
"""
# Command-line entry points installed into the user's PATH.
scripts = ["bin/buildbot"]
# sdist is usually run on a non-Windows platform, but the buildslave.bat file
# still needs to get packaged.
if 'sdist' in sys.argv or sys.platform == 'win32':
    scripts.append("contrib/windows/buildbot.bat")
    scripts.append("contrib/windows/buildbot_service.py")
# Keyword arguments ultimately passed to distutils.core.setup() below.
setup_args = {
    'name': "buildbot",
    'version': version,
    'description': "BuildBot build automation system",
    'long_description': long_description,
    'author': "Brian Warner",
    'author_email': "warner-buildbot@lothar.com",
    'maintainer': "Dustin J. Mitchell",
    'maintainer_email': "dustin@v.igoro.us",
    'url': "http://buildbot.net/",
    'license': "GNU GPL",
    'classifiers': [
        'Development Status :: 5 - Production/Stable',
        'Environment :: No Input/Output (Daemon)',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Testing',
    ],
    'packages': ["buildbot",
                 "buildbot.status", "buildbot.status.web", "buildbot.status.web.hooks",
                 "buildbot.changes",
                 "buildbot.buildslave",
                 "buildbot.steps",
                 "buildbot.steps.package",
                 "buildbot.steps.package.deb",
                 "buildbot.steps.package.rpm",
                 "buildbot.steps.source",
                 "buildbot.process",
                 "buildbot.process.users",
                 "buildbot.clients",
                 "buildbot.monkeypatches",
                 "buildbot.schedulers",
                 "buildbot.scripts",
                 "buildbot.db",
                 "buildbot.db.types",
                 "buildbot.db.migrate.versions",
                 "buildbot.util",
                 "buildbot.test",
                 "buildbot.test.fake",
                 "buildbot.test.unit",
                 "buildbot.test.util",
                 "buildbot.test.regressions",
                 ],
    # Non-Python files shipped with the package; include() globs per-type
    # resources so new files are picked up automatically.
    'data_files': [
        ("buildbot", [
            "buildbot/buildbot.png",
        ]),
        ("buildbot/db/migrate", [
            "buildbot/db/migrate/migrate.cfg",
        ]),
        include("buildbot/db/migrate/versions", "*.py"),
        ("buildbot/clients", [
            "buildbot/clients/debug.glade",
        ]),
        ("buildbot/status/web/files", [
            "buildbot/status/web/files/default.css",
            "buildbot/status/web/files/bg_gradient.jpg",
            "buildbot/status/web/files/robots.txt",
            "buildbot/status/web/files/templates_readme.txt",
            "buildbot/status/web/files/favicon.ico",
        ]),
        include("buildbot/status/web/files", '*.png'),
        include("buildbot/status/web/templates", '*.html'),
        include("buildbot/status/web/templates", '*.xml'),
        ("buildbot/scripts", [
            "buildbot/scripts/sample.cfg",
            "buildbot/scripts/buildbot_tac.tmpl",
        ]),
    ],
    'scripts': scripts,
    'cmdclass': {'install_data': install_data_twisted,
                 'sdist': our_sdist},
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
    setup_args['zip_safe'] = False
# True when running on Python 2.6 or newer (including any Python 3).
py_26 = sys.version_info[0] > 2 or (sys.version_info[0] == 2 and sys.version_info[1] >= 6)
try:
    # If setuptools is installed, then we'll add setuptools-specific arguments
    # to the setup args.
    import setuptools # @UnusedImport
except ImportError:
    pass
else:
    # dependencies
    setup_args['install_requires'] = []
    if sys.version_info[:2] >= (2, 6):
        setup_args['install_requires'] += [
            'twisted >= 11.0.0',
            'Jinja2 >= 2.1',
        ]
    else:
        # Latest supported on Python 2.5 version of Twisted is 12.10, and
        # pip/easy_install currently can't select correct version of Twisted.
        # Twisted depends on zope.interface, which became incompatible with
        # Python 2.5 starting from 4.0.0 release.
        # Jinja2 dropped Python 2.5 support in 2.7 release.
        setup_args['install_requires'] += [
            'twisted >= 11.0.0, <= 12.1.0',
            'zope.interface < 4.0.0',
            'Jinja2 >= 2.1, < 2.7',
        ]
    setup_args['install_requires'] += [
        # sqlalchemy-0.8 betas show issues with sqlalchemy-0.7.2, so stick to 0.7.10
        'sqlalchemy >= 0.6, <= 0.7.10',
        # buildbot depends on sqlalchemy internals, and these are the tested
        # versions.
        'sqlalchemy-migrate ==0.6.1, ==0.7.0, ==0.7.1, ==0.7.2',
        'python-dateutil==1.5',
    ]
    setup_args['tests_require'] = [
        'mock',
    ]
    # Python-2.6 and up includes json
    if not py_26:
        setup_args['install_requires'].append('simplejson')
    # Python-2.6 and up includes a working sqlite (py25's is broken)
    if not py_26:
        setup_args['install_requires'].append('pysqlite')
# Escape hatch for distribution packagers who manage dependencies
# through their own packaging system.
if os.getenv('NO_INSTALL_REQS'):
    setup_args['install_requires'] = None
    setup_args['tests_require'] = None
setup(**setup_args)
# Local Variables:
# fill-column: 71
# End:
|
mitya57/debian-buildbot
|
setup.py
|
Python
|
gpl-2.0
| 8,524
|
[
"Brian"
] |
8adb0cae5384f176ede467425e51a4672e100fd17eccc110683b87f4b6a2ddd1
|
from __future__ import absolute_import, division, print_function
import contextlib
import itertools
import os.path
import pickle
import shutil
import sys
import tempfile
import unittest
import warnings
from io import BytesIO
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray import (
DataArray, Dataset, backends, open_dataarray, open_dataset, open_mfdataset,
save_mfdataset)
from xarray.backends.common import robust_getitem
from xarray.backends.netCDF4_ import _extract_nc4_variable_encoding
from xarray.backends.pydap_ import PydapDataStore
from xarray.core import indexing
from xarray.core.pycompat import (
PY2, ExitStack, basestring, dask_array_type, iteritems)
from xarray.tests import mock
from . import (
TestCase, assert_allclose, assert_array_equal, assert_equal,
assert_identical, has_dask, has_netCDF4, has_scipy, network, raises_regex,
requires_dask, requires_h5netcdf, requires_netCDF4, requires_pathlib,
requires_pydap, requires_pynio, requires_rasterio, requires_scipy,
requires_scipy_or_netCDF4, requires_zarr,
requires_cftime)
from .test_dataset import create_test_data
try:
import netCDF4 as nc4
except ImportError:
pass
try:
import dask.array as da
except ImportError:
pass
try:
from pathlib import Path
except ImportError:
try:
from pathlib2 import Path
except ImportError:
pass
ON_WINDOWS = sys.platform == 'win32'
def open_example_dataset(name, *args, **kwargs):
    """Open a dataset stored in this test package's ``data`` directory."""
    data_dir = os.path.dirname(__file__)
    return open_dataset(os.path.join(data_dir, 'data', name),
                        *args, **kwargs)
def create_masked_and_scaled_data():
    """Build a Dataset whose 'x' variable uses mask-and-scale encoding."""
    values = np.array([np.nan, np.nan, 10, 10.1, 10.2], dtype=np.float32)
    encoding = {'dtype': 'i2', '_FillValue': -1,
                'add_offset': 10, 'scale_factor': np.float32(0.1)}
    return Dataset({'x': ('t', values, {}, encoding)})
def create_encoded_masked_and_scaled_data():
    """Build the on-disk (encoded) form of create_masked_and_scaled_data()."""
    attrs = {'_FillValue': -1, 'add_offset': 10,
             'scale_factor': np.float32(0.1)}
    return Dataset({'x': ('t', [-1, -1, 0, 1, 2], attrs)})
def create_unsigned_masked_scaled_data():
    """Build a Dataset exercising the ``_Unsigned`` mask/scale convention."""
    values = np.array([10.0, 10.1, 22.7, 22.8, np.nan], dtype=np.float32)
    encoding = {'dtype': 'i1', '_FillValue': 255, '_Unsigned': 'true',
                'add_offset': 10, 'scale_factor': np.float32(0.1)}
    return Dataset({'x': ('t', values, {}, encoding)})
def create_encoded_unsigned_masked_scaled_data():
    """Build the encoded form of create_unsigned_masked_scaled_data().

    These are values as written to the file: the _FillValue will
    be represented in the signed form.
    """
    attrs = {'_FillValue': -1, '_Unsigned': 'true',
             'add_offset': 10, 'scale_factor': np.float32(0.1)}
    # Signed bytes corresponding to the unsigned values [0, 1, 127, 128, 255].
    signed_bytes = np.asarray([0, 1, 127, -128, -1], dtype='i1')
    return Dataset({'x': ('t', signed_bytes, attrs)})
def create_boolean_data():
    """Build a Dataset with a boolean variable for dtype round-trip tests."""
    bool_values = [True, False, False, True]
    return Dataset({'x': ('t', bool_values, {'units': '-'})})
class TestCommon(TestCase):
    """Tests for backend helpers that are shared by all backends."""
    def test_robust_getitem(self):
        # robust_getitem should retry a flaky __getitem__ until it succeeds.
        class UnreliableArrayFailure(Exception):
            pass
        class UnreliableArray(object):
            # Raises on the first ``failures`` accesses, then succeeds.
            def __init__(self, array, failures=1):
                self.array = array
                self.failures = failures
            def __getitem__(self, key):
                if self.failures > 0:
                    self.failures -= 1
                    raise UnreliableArrayFailure
                return self.array[key]
        array = UnreliableArray([0])
        with pytest.raises(UnreliableArrayFailure):
            array[0]
        self.assertEqual(array[0], 0)
        # A fresh failure counter would be exhausted by the retry loop.
        actual = robust_getitem(array, 0, catch=UnreliableArrayFailure,
                                initial_delay=0)
        self.assertEqual(actual, 0)
class NetCDF3Only(object):
    """Marker mixin: the deriving backend only supports netCDF3 dtypes
    (see check_dtypes_roundtripped, which downcasts int64 expectations)."""
    pass
class DatasetIOTestCases(object):
    """Backend-agnostic round-trip tests.

    Concrete backend test classes configure ``engine``/``file_format``
    (and may override ``save``/``open``/``create_store``); every test in
    this class only goes through those hooks.
    """
    autoclose = False
    engine = None
    file_format = None
    def create_store(self):
        # Hook: subclasses must return a context manager yielding a
        # writable backend data store.
        raise NotImplementedError
    @contextlib.contextmanager
    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
                  allow_cleanup_failure=False):
        # Save ``data`` to a temporary file and yield the re-opened dataset.
        # NOTE(review): the mutable ``{}`` defaults are never mutated here,
        # so sharing them across calls is benign.
        with create_tmp_file(
                allow_cleanup_failure=allow_cleanup_failure) as path:
            self.save(data, path, **save_kwargs)
            with self.open(path, **open_kwargs) as ds:
                yield ds
    @contextlib.contextmanager
    def roundtrip_append(self, data, save_kwargs={}, open_kwargs={},
                         allow_cleanup_failure=False):
        # Like roundtrip(), but writes one variable at a time, appending
        # after the initial write.
        with create_tmp_file(
                allow_cleanup_failure=allow_cleanup_failure) as path:
            for i, key in enumerate(data.variables):
                mode = 'a' if i > 0 else 'w'
                self.save(data[[key]], path, mode=mode, **save_kwargs)
            with self.open(path, **open_kwargs) as ds:
                yield ds
    # The save/open methods may be overwritten below
    def save(self, dataset, path, **kwargs):
        return dataset.to_netcdf(path, engine=self.engine,
                                 format=self.file_format, **kwargs)
    @contextlib.contextmanager
    def open(self, path, **kwargs):
        with open_dataset(path, engine=self.engine, autoclose=self.autoclose,
                          **kwargs) as ds:
            yield ds
    def test_zero_dimensional_variable(self):
        expected = create_test_data()
        expected['float_var'] = ([], 1.0e9, {'units': 'units of awesome'})
        expected['bytes_var'] = ([], b'foobar')
        expected['string_var'] = ([], u'foobar')
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_write_store(self):
        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            # we need to cf decode the store because it has time and
            # non-dimension coordinates
            with xr.decode_cf(store) as actual:
                assert_allclose(expected, actual)
    def check_dtypes_roundtripped(self, expected, actual):
        # Helper: dtypes must survive the round trip, modulo known
        # backend limitations.
        for k in expected.variables:
            expected_dtype = expected.variables[k].dtype
            if (isinstance(self, NetCDF3Only) and expected_dtype == 'int64'):
                # downcast
                expected_dtype = np.dtype('int32')
            actual_dtype = actual.variables[k].dtype
            # TODO: check expected behavior for string dtypes more carefully
            string_kinds = {'O', 'S', 'U'}
            assert (expected_dtype == actual_dtype or
                    (expected_dtype.kind in string_kinds and
                     actual_dtype.kind in string_kinds))
    def test_roundtrip_test_data(self):
        expected = create_test_data()
        with self.roundtrip(expected) as actual:
            self.check_dtypes_roundtripped(expected, actual)
            assert_identical(expected, actual)
    def test_load(self):
        expected = create_test_data()
        @contextlib.contextmanager
        def assert_loads(vars=None):
            if vars is None:
                vars = expected
            with self.roundtrip(expected) as actual:
                for k, v in actual.variables.items():
                    # IndexVariables are eagerly loaded into memory
                    self.assertEqual(v._in_memory, k in actual.dims)
                yield actual
                for k, v in actual.variables.items():
                    if k in vars:
                        self.assertTrue(v._in_memory)
                assert_identical(expected, actual)
        with pytest.raises(AssertionError):
            # make sure the contextmanager works!
            with assert_loads() as ds:
                pass
        with assert_loads() as ds:
            ds.load()
        with assert_loads(['var1', 'dim1', 'dim2']) as ds:
            ds['var1'].load()
        # verify we can read data even after closing the file
        with self.roundtrip(expected) as ds:
            actual = ds.load()
        assert_identical(expected, actual)
    def test_dataset_compute(self):
        expected = create_test_data()
        with self.roundtrip(expected) as actual:
            # Test Dataset.compute()
            for k, v in actual.variables.items():
                # IndexVariables are eagerly cached
                self.assertEqual(v._in_memory, k in actual.dims)
            computed = actual.compute()
            for k, v in actual.variables.items():
                self.assertEqual(v._in_memory, k in actual.dims)
            for v in computed.variables.values():
                self.assertTrue(v._in_memory)
            assert_identical(expected, actual)
            assert_identical(expected, computed)
    def test_pickle(self):
        expected = Dataset({'foo': ('x', [42])})
        with self.roundtrip(
                expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
            raw_pickle = pickle.dumps(roundtripped)
            # windows doesn't like opening the same file twice
            roundtripped.close()
            unpickled_ds = pickle.loads(raw_pickle)
            assert_identical(expected, unpickled_ds)
    def test_pickle_dataarray(self):
        expected = Dataset({'foo': ('x', [42])})
        with self.roundtrip(
                expected, allow_cleanup_failure=ON_WINDOWS) as roundtripped:
            unpickled_array = pickle.loads(pickle.dumps(roundtripped['foo']))
            assert_identical(expected['foo'], unpickled_array)
    def test_dataset_caching(self):
        expected = Dataset({'foo': ('x', [5, 6, 7])})
        with self.roundtrip(expected) as actual:
            assert isinstance(actual.foo.variable._data,
                              indexing.MemoryCachedArray)
            assert not actual.foo.variable._in_memory
            actual.foo.values  # cache
            assert actual.foo.variable._in_memory
        with self.roundtrip(expected, open_kwargs={'cache': False}) as actual:
            assert isinstance(actual.foo.variable._data,
                              indexing.CopyOnWriteArray)
            assert not actual.foo.variable._in_memory
            actual.foo.values  # no caching
            assert not actual.foo.variable._in_memory
    def test_roundtrip_None_variable(self):
        expected = Dataset({None: (('x', 'y'), [[0, 1], [2, 3]])})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_object_dtype(self):
        floats = np.array([0.0, 0.0, 1.0, 2.0, 3.0], dtype=object)
        floats_nans = np.array([np.nan, np.nan, 1.0, 2.0, 3.0], dtype=object)
        bytes_ = np.array([b'ab', b'cdef', b'g'], dtype=object)
        bytes_nans = np.array([b'ab', b'cdef', np.nan], dtype=object)
        strings = np.array([u'ab', u'cdef', u'g'], dtype=object)
        strings_nans = np.array([u'ab', u'cdef', np.nan], dtype=object)
        all_nans = np.array([np.nan, np.nan], dtype=object)
        original = Dataset({'floats': ('a', floats),
                            'floats_nans': ('a', floats_nans),
                            'bytes': ('b', bytes_),
                            'bytes_nans': ('b', bytes_nans),
                            'strings': ('b', strings),
                            'strings_nans': ('b', strings_nans),
                            'all_nans': ('c', all_nans),
                            'nan': ([], np.nan)})
        expected = original.copy(deep=True)
        with self.roundtrip(original) as actual:
            try:
                assert_identical(expected, actual)
            except AssertionError:
                # Most stores use '' for nans in strings, but some don't.
                # First try the ideal case (where the store returns exactly)
                # the original Dataset), then try a more realistic case.
                # This currently includes all netCDF files when encoding is not
                # explicitly set.
                # https://github.com/pydata/xarray/issues/1647
                expected['bytes_nans'][-1] = b''
                expected['strings_nans'][-1] = u''
                assert_identical(expected, actual)
    def test_roundtrip_string_data(self):
        expected = Dataset({'x': ('t', ['ab', 'cdef'])})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_string_encoded_characters(self):
        expected = Dataset({'x': ('t', [u'ab', u'cdef'])})
        expected['x'].encoding['dtype'] = 'S1'
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
            self.assertEqual(actual['x'].encoding['_Encoding'], 'utf-8')
        expected['x'].encoding['_Encoding'] = 'ascii'
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
            self.assertEqual(actual['x'].encoding['_Encoding'], 'ascii')
    def test_roundtrip_numpy_datetime_data(self):
        times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
        expected = Dataset({'t': ('t', times), 't0': times[0]})
        kwds = {'encoding': {'t0': {'units': 'days since 1950-01-01'}}}
        with self.roundtrip(expected, save_kwargs=kwds) as actual:
            assert_identical(expected, actual)
            assert actual.t0.encoding['units'] == 'days since 1950-01-01'
    @requires_cftime
    def test_roundtrip_cftime_datetime_data_enable_cftimeindex(self):
        from .test_coding_times import _all_cftime_date_types
        date_types = _all_cftime_date_types()
        for date_type in date_types.values():
            times = [date_type(1, 1, 1), date_type(1, 1, 2)]
            expected = Dataset({'t': ('t', times), 't0': times[0]})
            kwds = {'encoding': {'t0': {'units': 'days since 0001-01-01'}}}
            expected_decoded_t = np.array(times)
            expected_decoded_t0 = np.array([date_type(1, 1, 1)])
            expected_calendar = times[0].calendar
            with xr.set_options(enable_cftimeindex=True):
                with self.roundtrip(expected, save_kwargs=kwds) as actual:
                    # cftime dates only round-trip to within a second.
                    abs_diff = abs(actual.t.values - expected_decoded_t)
                    assert (abs_diff <= np.timedelta64(1, 's')).all()
                    assert (actual.t.encoding['units'] ==
                            'days since 0001-01-01 00:00:00.000000')
                    assert (actual.t.encoding['calendar'] ==
                            expected_calendar)
                    abs_diff = abs(actual.t0.values - expected_decoded_t0)
                    assert (abs_diff <= np.timedelta64(1, 's')).all()
                    assert (actual.t0.encoding['units'] ==
                            'days since 0001-01-01')
                    assert (actual.t.encoding['calendar'] ==
                            expected_calendar)
    def test_roundtrip_timedelta_data(self):
        time_deltas = pd.to_timedelta(['1h', '2h', 'NaT'])
        expected = Dataset({'td': ('td', time_deltas), 'td0': time_deltas[0]})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_float64_data(self):
        expected = Dataset({'x': ('y', np.array([1.0, 2.0, np.pi],
                                                dtype='float64'))})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_example_1_netcdf(self):
        expected = open_example_dataset('example_1.nc')
        with self.roundtrip(expected) as actual:
            # we allow the attributes to differ since that
            # will depend on the encoding used. For example,
            # without CF encoding 'actual' will end up with
            # a dtype attribute.
            assert_equal(expected, actual)
    def test_roundtrip_coordinates(self):
        original = Dataset({'foo': ('x', [0, 1])},
                           {'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)
    def test_roundtrip_global_coordinates(self):
        original = Dataset({'x': [2, 3], 'y': ('a', [42]), 'z': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)
    def test_roundtrip_coordinates_with_space(self):
        original = Dataset(coords={'x': 0, 'y z': 1})
        expected = Dataset({'y z': 1}, {'x': 0})
        # 'y z' is not a valid netCDF name, so it is demoted to a variable.
        with pytest.warns(xr.SerializationWarning):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)
    def test_roundtrip_boolean_dtype(self):
        original = create_boolean_data()
        self.assertEqual(original['x'].dtype, 'bool')
        with self.roundtrip(original) as actual:
            assert_identical(original, actual)
            self.assertEqual(actual['x'].dtype, 'bool')
    def test_orthogonal_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            indexers = {'dim1': [1, 2, 0], 'dim2': [3, 2, 0, 3],
                        'dim3': np.arange(5)}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            # make sure the array is not yet loaded into memory
            assert not actual['var1'].variable._in_memory
            assert_identical(expected, actual)
            # do it twice, to make sure we're switched from orthogonal -> numpy
            # when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
    def test_vectorized_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            indexers = {'dim1': DataArray([0, 2, 0], dims='a'),
                        'dim2': DataArray([0, 2, 3], dims='a')}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            # make sure the array is not yet loaded into memory
            assert not actual['var1'].variable._in_memory
            assert_identical(expected, actual.load())
            # do it twice, to make sure we're switched from
            # vectorized -> numpy when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
        def multiple_indexing(indexers):
            # make sure a sequence of lazy indexings certainly works.
            with self.roundtrip(in_memory) as on_disk:
                actual = on_disk['var3']
                expected = in_memory['var3']
                for ind in indexers:
                    actual = actual.isel(**ind)
                    expected = expected.isel(**ind)
                # make sure the array is not yet loaded into memory
                assert not actual.variable._in_memory
                assert_identical(expected, actual.load())
        # two-staged vectorized-indexing
        indexers = [
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b']),
             'dim3': DataArray([[0, 4], [1, 3], [2, 2]], dims=['a', 'b'])},
            {'a': DataArray([0, 1], dims=['c']),
             'b': DataArray([0, 1], dims=['c'])}
        ]
        multiple_indexing(indexers)
        # vectorized-slice mixed
        indexers = [
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b']),
             'dim3': slice(None, 10)}
        ]
        multiple_indexing(indexers)
        # vectorized-integer mixed
        indexers = [
            {'dim3': 0},
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b'])},
            {'a': slice(None, None, 2)}
        ]
        multiple_indexing(indexers)
        # vectorized-integer mixed
        indexers = [
            {'dim3': 0},
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b'])},
            {'a': 1, 'b': 0}
        ]
        multiple_indexing(indexers)
        # with negative step slice.
        indexers = [
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b']),
             'dim3': slice(-1, 1, -1)},
        ]
        multiple_indexing(indexers)
        # with negative step slice.
        indexers = [
            {'dim1': DataArray([[0, 7], [2, 6], [3, 5]], dims=['a', 'b']),
             'dim3': slice(-1, 1, -2)},
        ]
        multiple_indexing(indexers)
    def test_isel_dataarray(self):
        # Make sure isel works lazily. GH:issue:1688
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            expected = in_memory.isel(dim2=in_memory['dim2'] < 3)
            actual = on_disk.isel(dim2=on_disk['dim2'] < 3)
            assert_identical(expected, actual)
    def validate_array_type(self, ds):
        # Make sure that only NumpyIndexingAdapter stores a bare np.ndarray.
        def find_and_validate_array(obj):
            # recursively called function. obj: array or array wrapper.
            if hasattr(obj, 'array'):
                if isinstance(obj.array, indexing.ExplicitlyIndexed):
                    find_and_validate_array(obj.array)
                else:
                    if isinstance(obj.array, np.ndarray):
                        assert isinstance(obj, indexing.NumpyIndexingAdapter)
                    elif isinstance(obj.array, dask_array_type):
                        assert isinstance(obj, indexing.DaskIndexingAdapter)
                    elif isinstance(obj.array, pd.Index):
                        assert isinstance(obj, indexing.PandasIndexAdapter)
                    else:
                        raise TypeError('{} is wrapped by {}'.format(
                            type(obj.array), type(obj)))
        for k, v in ds.variables.items():
            find_and_validate_array(v._data)
    def test_array_type_after_indexing(self):
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            self.validate_array_type(on_disk)
            indexers = {'dim1': [1, 2, 0], 'dim2': [3, 2, 0, 3],
                        'dim3': np.arange(5)}
            expected = in_memory.isel(**indexers)
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
            self.validate_array_type(actual)
            # do it twice, to make sure we're switched from orthogonal -> numpy
            # when we cached the values
            actual = on_disk.isel(**indexers)
            assert_identical(expected, actual)
            self.validate_array_type(actual)
    def test_dropna(self):
        # regression test for GH:issue:1694
        a = np.random.randn(4, 3)
        a[1, 1] = np.NaN
        in_memory = xr.Dataset({'a': (('y', 'x'), a)},
                               coords={'y': np.arange(4), 'x': np.arange(3)})
        assert_identical(in_memory.dropna(dim='x'),
                         in_memory.isel(x=slice(None, None, 2)))
        with self.roundtrip(in_memory) as on_disk:
            self.validate_array_type(on_disk)
            expected = in_memory.dropna(dim='x')
            actual = on_disk.dropna(dim='x')
            assert_identical(expected, actual)
    def test_ondisk_after_print(self):
        """ Make sure print does not load file into memory """
        in_memory = create_test_data()
        with self.roundtrip(in_memory) as on_disk:
            repr(on_disk)
            assert not on_disk['var1']._in_memory
class CFEncodedDataTest(DatasetIOTestCases):
    """Roundtrip tests for backends applying CF encoding conventions.

    Covers fill values, mask-and-scale, the 'coordinates' attribute,
    endianness, the ``encoding`` keyword, and append-mode writes.
    Concrete backends plug in via the DatasetIOTestCases hooks
    (``roundtrip`` / ``save`` / ``open``).
    """
    def test_roundtrip_bytes_with_fill_value(self):
        values = np.array([b'ab', b'cdef', np.nan], dtype=object)
        encoding = {'_FillValue': b'X', 'dtype': 'S1'}
        original = Dataset({'x': ('t', values, {}, encoding)})
        expected = original.copy(deep=True)
        with self.roundtrip(original) as actual:
            assert_identical(expected, actual)
        # an empty bytes fill value should roundtrip the same way
        original = Dataset({'x': ('t', values, {}, {'_FillValue': b''})})
        with self.roundtrip(original) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_string_with_fill_value_nchar(self):
        values = np.array([u'ab', u'cdef', np.nan], dtype=object)
        expected = Dataset({'x': ('t', values)})
        encoding = {'dtype': 'S1', '_FillValue': b'X'}
        original = Dataset({'x': ('t', values, {}, encoding)})
        # Not supported yet.
        with pytest.raises(NotImplementedError):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)
    def test_unsigned_roundtrip_mask_and_scale(self):
        # dtypes must survive both the decoded and the raw (decode_cf=False)
        # roundtrips for unsigned mask-and-scale data
        decoded = create_unsigned_masked_scaled_data()
        encoded = create_encoded_unsigned_masked_scaled_data()
        with self.roundtrip(decoded) as actual:
            for k in decoded.variables:
                self.assertEqual(decoded.variables[k].dtype,
                                 actual.variables[k].dtype)
            assert_allclose(decoded, actual, decode_bytes=False)
        with self.roundtrip(decoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            for k in encoded.variables:
                self.assertEqual(encoded.variables[k].dtype,
                                 actual.variables[k].dtype)
            assert_allclose(encoded, actual, decode_bytes=False)
        with self.roundtrip(encoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            for k in encoded.variables:
                self.assertEqual(encoded.variables[k].dtype,
                                 actual.variables[k].dtype)
            assert_allclose(encoded, actual, decode_bytes=False)
        # make sure roundtrip encoding didn't change the
        # original dataset.
        assert_allclose(
            encoded, create_encoded_unsigned_masked_scaled_data())
        with self.roundtrip(encoded) as actual:
            for k in decoded.variables:
                self.assertEqual(decoded.variables[k].dtype,
                                 actual.variables[k].dtype)
            assert_allclose(decoded, actual, decode_bytes=False)
        with self.roundtrip(encoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            for k in encoded.variables:
                self.assertEqual(encoded.variables[k].dtype,
                                 actual.variables[k].dtype)
            assert_allclose(encoded, actual, decode_bytes=False)
    def test_roundtrip_mask_and_scale(self):
        decoded = create_masked_and_scaled_data()
        encoded = create_encoded_masked_and_scaled_data()
        with self.roundtrip(decoded) as actual:
            assert_allclose(decoded, actual, decode_bytes=False)
        with self.roundtrip(decoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            # TODO: this assumes that all roundtrips will first
            # encode.  Is that something we want to test for?
            assert_allclose(encoded, actual, decode_bytes=False)
        with self.roundtrip(encoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            assert_allclose(encoded, actual, decode_bytes=False)
        # make sure roundtrip encoding didn't change the
        # original dataset.
        assert_allclose(encoded,
                        create_encoded_masked_and_scaled_data(),
                        decode_bytes=False)
        with self.roundtrip(encoded) as actual:
            assert_allclose(decoded, actual, decode_bytes=False)
        with self.roundtrip(encoded,
                            open_kwargs=dict(decode_cf=False)) as actual:
            assert_allclose(encoded, actual, decode_bytes=False)
    def test_coordinates_encoding(self):
        def equals_latlon(obj):
            # the 'coordinates' attribute may list lat/lon in either order
            return obj == 'lat lon' or obj == 'lon lat'
        original = Dataset({'temp': ('x', [0, 1]), 'precip': ('x', [0, -1])},
                           {'lat': ('x', [2, 3]), 'lon': ('x', [4, 5])})
        with self.roundtrip(original) as actual:
            assert_identical(actual, original)
        with create_tmp_file() as tmp_file:
            original.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                self.assertTrue(equals_latlon(ds['temp'].attrs['coordinates']))
                self.assertTrue(
                    equals_latlon(ds['precip'].attrs['coordinates']))
                self.assertNotIn('coordinates', ds.attrs)
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
        modified = original.drop(['temp', 'precip'])
        with self.roundtrip(modified) as actual:
            assert_identical(actual, modified)
        with create_tmp_file() as tmp_file:
            modified.to_netcdf(tmp_file)
            with open_dataset(tmp_file, decode_coords=False) as ds:
                # with no data variables, 'coordinates' moves to the
                # global attributes
                self.assertTrue(equals_latlon(ds.attrs['coordinates']))
                self.assertNotIn('coordinates', ds['lat'].attrs)
                self.assertNotIn('coordinates', ds['lon'].attrs)
    def test_roundtrip_endian(self):
        ds = Dataset({'x': np.arange(3, 10, dtype='>i2'),
                      'y': np.arange(3, 20, dtype='<i4'),
                      'z': np.arange(3, 30, dtype='=i8'),
                      'w': ('x', np.arange(3, 10, dtype=np.float))})
        with self.roundtrip(ds) as actual:
            # technically these datasets are slightly different,
            # one hold mixed endian data (ds) the other should be
            # all big endian (actual). assertDatasetIdentical
            # should still pass though.
            assert_identical(ds, actual)
        if self.engine == 'netcdf4':
            ds['z'].encoding['endian'] = 'big'
            with pytest.raises(NotImplementedError):
                with self.roundtrip(ds) as actual:
                    pass
    def test_invalid_dataarray_names_raise(self):
        # expected (exception type, message fragment) pairs
        te = (TypeError, 'string or None')
        ve = (ValueError, 'string must be length 1 or')
        data = np.random.random((2, 2))
        da = xr.DataArray(data)
        for name, e in zip([0, (4, 5), True, ''], [te, te, te, ve]):
            ds = Dataset({name: da})
            with raises_regex(*e):
                with self.roundtrip(ds):
                    pass
    def test_encoding_kwarg(self):
        ds = Dataset({'x': ('y', np.arange(10.0))})
        kwargs = dict(encoding={'x': {'dtype': 'f4'}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.x.encoding['dtype'], 'f4')
        self.assertEqual(ds.x.encoding, {})
        kwargs = dict(encoding={'x': {'foo': 'bar'}})
        with raises_regex(ValueError, 'unexpected encoding'):
            with self.roundtrip(ds, save_kwargs=kwargs) as actual:
                pass
        kwargs = dict(encoding={'x': 'foo'})
        with raises_regex(ValueError, 'must be castable'):
            with self.roundtrip(ds, save_kwargs=kwargs) as actual:
                pass
        kwargs = dict(encoding={'invalid': {}})
        with pytest.raises(KeyError):
            with self.roundtrip(ds, save_kwargs=kwargs) as actual:
                pass
        ds = Dataset({'t': pd.date_range('2000-01-01', periods=3)})
        units = 'days since 1900-01-01'
        kwargs = dict(encoding={'t': {'units': units}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.t.encoding['units'], units)
            assert_identical(actual, ds)
    def test_default_fill_value(self):
        # Test default encoding for float:
        ds = Dataset({'x': ('y', np.arange(10.0))})
        kwargs = dict(encoding={'x': {'dtype': 'f4'}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.x.encoding['_FillValue'],
                             np.nan)
        self.assertEqual(ds.x.encoding, {})
        # Test default encoding for int:
        ds = Dataset({'x': ('y', np.arange(10.0))})
        kwargs = dict(encoding={'x': {'dtype': 'int16'}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertTrue('_FillValue' not in actual.x.encoding)
        self.assertEqual(ds.x.encoding, {})
        # Test default encoding for implicit int:
        ds = Dataset({'x': ('y', np.arange(10, dtype='int16'))})
        with self.roundtrip(ds) as actual:
            self.assertTrue('_FillValue' not in actual.x.encoding)
        self.assertEqual(ds.x.encoding, {})
    def test_explicitly_omit_fill_value(self):
        ds = Dataset({'x': ('y', [np.pi, -np.pi])})
        ds.x.encoding['_FillValue'] = None
        with self.roundtrip(ds) as actual:
            assert '_FillValue' not in actual.x.encoding
    def test_explicitly_omit_fill_value_via_encoding_kwarg(self):
        ds = Dataset({'x': ('y', [np.pi, -np.pi])})
        kwargs = dict(encoding={'x': {'_FillValue': None}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert '_FillValue' not in actual.x.encoding
        # NOTE(review): 'y' has no coordinate variable in this dataset, so
        # it is unclear how ds.y resolves here — possibly copied from the
        # *_in_coord variant below; confirm.
        self.assertEqual(ds.y.encoding, {})
    def test_explicitly_omit_fill_value_in_coord(self):
        ds = Dataset({'x': ('y', [np.pi, -np.pi])}, coords={'y': [0.0, 1.0]})
        ds.y.encoding['_FillValue'] = None
        with self.roundtrip(ds) as actual:
            assert '_FillValue' not in actual.y.encoding
    def test_explicitly_omit_fill_value_in_coord_via_encoding_kwarg(self):
        ds = Dataset({'x': ('y', [np.pi, -np.pi])}, coords={'y': [0.0, 1.0]})
        kwargs = dict(encoding={'y': {'_FillValue': None}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            assert '_FillValue' not in actual.y.encoding
        self.assertEqual(ds.y.encoding, {})
    def test_encoding_same_dtype(self):
        ds = Dataset({'x': ('y', np.arange(10.0, dtype='f4'))})
        kwargs = dict(encoding={'x': {'dtype': 'f4'}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.x.encoding['dtype'], 'f4')
        self.assertEqual(ds.x.encoding, {})
    def test_append_write(self):
        # regression for GH1215
        data = create_test_data()
        with self.roundtrip_append(data) as actual:
            assert_identical(data, actual)
    def test_append_overwrite_values(self):
        # regression for GH1215
        data = create_test_data()
        with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
            self.save(data, tmp_file, mode='w')
            data['var2'][:] = -999
            data['var9'] = data['var2'] * 3
            self.save(data[['var2', 'var9']], tmp_file, mode='a')
            with self.open(tmp_file) as actual:
                assert_identical(data, actual)
    def test_append_with_invalid_dim_raises(self):
        data = create_test_data()
        with create_tmp_file(allow_cleanup_failure=False) as tmp_file:
            self.save(data, tmp_file, mode='w')
            data['var9'] = data['var2'] * 3
            data = data.isel(dim1=slice(2, 6))  # modify one dimension
            with raises_regex(ValueError,
                              'Unable to update size for existing dimension'):
                self.save(data, tmp_file, mode='a')
    def test_multiindex_not_implemented(self):
        ds = (Dataset(coords={'y': ('x', [1, 2]), 'z': ('x', ['a', 'b'])})
              .set_index(x=['y', 'z']))
        with raises_regex(NotImplementedError, 'MultiIndex'):
            with self.roundtrip(ds):
                pass
_counter = itertools.count()
@contextlib.contextmanager
def create_tmp_file(suffix='.nc', allow_cleanup_failure=False):
    """Yield a unique path inside a fresh temporary directory.

    The directory (and anything written into it) is removed on exit.
    With ``allow_cleanup_failure=True``, removal errors are ignored,
    e.g. when the file is still held open on Windows.
    """
    tmp_dir = tempfile.mkdtemp()
    basename = 'temp-%s%s' % (next(_counter), suffix)
    try:
        yield os.path.join(tmp_dir, basename)
    finally:
        try:
            shutil.rmtree(tmp_dir)
        except OSError:
            if not allow_cleanup_failure:
                raise
@contextlib.contextmanager
def create_tmp_files(nfiles, suffix='.nc', allow_cleanup_failure=False):
    """Yield a list of *nfiles* unique temporary file paths.

    Each path comes from :func:`create_tmp_file`; all of them are cleaned
    up together when the context exits.
    """
    with ExitStack() as stack:
        # plain range(): no need to allocate a NumPy array just to count
        files = [stack.enter_context(create_tmp_file(suffix,
                                                     allow_cleanup_failure))
                 for _ in range(nfiles)]
        yield files
@requires_netCDF4
class BaseNetCDF4Test(CFEncodedDataTest):
    """Tests specific to netCDF4-based backends: groups, character arrays,
    encodings, mask-and-scale semantics, and variable-length strings."""
    engine = 'netcdf4'
    def test_open_group(self):
        # Create a netCDF file with a dataset stored within a group
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as rootgrp:
                foogrp = rootgrp.createGroup('foo')
                ds = foogrp
                ds.createDimension('time', size=10)
                x = np.arange(10)
                ds.createVariable('x', np.int32, dimensions=('time',))
                ds.variables['x'][:] = x
            expected = Dataset()
            expected['x'] = ('time', x)
            # check equivalent ways to specify group
            for group in 'foo', '/foo', 'foo/', '/foo/':
                with self.open(tmp_file, group=group) as actual:
                    assert_equal(actual['x'], expected['x'])
            # check that missing group raises appropriate exception
            with pytest.raises(IOError):
                open_dataset(tmp_file, group='bar')
            with raises_regex(ValueError, 'must be a string'):
                open_dataset(tmp_file, group=(1, 2, 3))
    def test_open_subgroup(self):
        # Create a netCDF file with a dataset stored within a group within a
        # group
        with create_tmp_file() as tmp_file:
            rootgrp = nc4.Dataset(tmp_file, 'w')
            foogrp = rootgrp.createGroup('foo')
            bargrp = foogrp.createGroup('bar')
            ds = bargrp
            ds.createDimension('time', size=10)
            x = np.arange(10)
            ds.createVariable('x', np.int32, dimensions=('time',))
            ds.variables['x'][:] = x
            rootgrp.close()
            expected = Dataset()
            expected['x'] = ('time', x)
            # check equivalent ways to specify group
            for group in 'foo/bar', '/foo/bar', 'foo/bar/', '/foo/bar/':
                with self.open(tmp_file, group=group) as actual:
                    assert_equal(actual['x'], expected['x'])
    def test_write_groups(self):
        # two datasets written to different groups of one file
        data1 = create_test_data()
        data2 = data1 * 2
        with create_tmp_file() as tmp_file:
            self.save(data1, tmp_file, group='data/1')
            self.save(data2, tmp_file, group='data/2', mode='a')
            with self.open(tmp_file, group='data/1') as actual1:
                assert_identical(data1, actual1)
            with self.open(tmp_file, group='data/2') as actual2:
                assert_identical(data2, actual2)
    def test_roundtrip_string_with_fill_value_vlen(self):
        values = np.array([u'ab', u'cdef', np.nan], dtype=object)
        expected = Dataset({'x': ('t', values)})
        # netCDF4-based backends don't support an explicit fillvalue
        # for variable length strings yet.
        # https://github.com/Unidata/netcdf4-python/issues/730
        # https://github.com/shoyer/h5netcdf/issues/37
        original = Dataset({'x': ('t', values, {}, {'_FillValue': u'XXX'})})
        with pytest.raises(NotImplementedError):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)
        original = Dataset({'x': ('t', values, {}, {'_FillValue': u''})})
        with pytest.raises(NotImplementedError):
            with self.roundtrip(original) as actual:
                assert_identical(expected, actual)
    def test_roundtrip_character_array(self):
        with create_tmp_file() as tmp_file:
            values = np.array([['a', 'b', 'c'], ['d', 'e', 'f']], dtype='S')
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('x', 2)
                nc.createDimension('string3', 3)
                v = nc.createVariable('x', np.dtype('S1'), ('x', 'string3'))
                v[:] = values
            values = np.array(['abc', 'def'], dtype='S')
            expected = Dataset({'x': ('x', values)})
            with open_dataset(tmp_file) as actual:
                assert_identical(expected, actual)
                # regression test for #157
                with self.roundtrip(actual) as roundtripped:
                    assert_identical(expected, roundtripped)
    def test_default_to_char_arrays(self):
        data = Dataset({'x': np.array(['foo', 'zzzz'], dtype='S')})
        with self.roundtrip(data) as actual:
            assert_identical(data, actual)
            self.assertEqual(actual['x'].dtype, np.dtype('S4'))
    def test_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as ds:
                ds.createDimension('time', size=10)
                ds.createVariable('time', np.int32, dimensions=('time',))
                units = 'days since 1999-01-01'
                ds.variables['time'].setncattr('units', units)
                ds.variables['time'][:] = np.arange(10) + 4
            expected = Dataset()
            time = pd.date_range('1999-01-05', periods=10)
            encoding = {'units': units, 'dtype': np.dtype('int32')}
            expected['time'] = ('time', time, {}, encoding)
            with open_dataset(tmp_file) as actual:
                assert_equal(actual['time'], expected['time'])
                # compare only encoding keys that we expect to be present
                actual_encoding = dict((k, v) for k, v in
                                       iteritems(actual['time'].encoding)
                                       if k in expected['time'].encoding)
                self.assertDictEqual(actual_encoding,
                                     expected['time'].encoding)
    def test_dump_encodings(self):
        # regression test for #709
        ds = Dataset({'x': ('y', np.arange(10.0))})
        kwargs = dict(encoding={'x': {'zlib': True}})
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertTrue(actual.x.encoding['zlib'])
    def test_dump_and_open_encodings(self):
        # Create a netCDF file with explicit time units
        # and make sure it makes it into the encodings
        # and survives a round trip
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as ds:
                ds.createDimension('time', size=10)
                ds.createVariable('time', np.int32, dimensions=('time',))
                units = 'days since 1999-01-01'
                ds.variables['time'].setncattr('units', units)
                ds.variables['time'][:] = np.arange(10) + 4
            with open_dataset(tmp_file) as xarray_dataset:
                with create_tmp_file() as tmp_file2:
                    xarray_dataset.to_netcdf(tmp_file2)
                    with nc4.Dataset(tmp_file2, 'r') as ds:
                        self.assertEqual(
                            ds.variables['time'].getncattr('units'), units)
                        assert_array_equal(
                            ds.variables['time'], np.arange(10) + 4)
    def test_compression_encoding(self):
        data = create_test_data()
        data['var2'].encoding.update({'zlib': True,
                                      'chunksizes': (5, 5),
                                      'fletcher32': True,
                                      'shuffle': True,
                                      'original_shape': data.var2.shape})
        with self.roundtrip(data) as actual:
            for k, v in iteritems(data['var2'].encoding):
                self.assertEqual(v, actual['var2'].encoding[k])
        # regression test for #156
        expected = data.isel(dim1=0)
        with self.roundtrip(expected) as actual:
            assert_equal(expected, actual)
    def test_encoding_chunksizes_unlimited(self):
        # regression test for GH1225
        ds = Dataset({'x': [1, 2, 3], 'y': ('x', [2, 3, 4])})
        ds.variables['x'].encoding = {
            'zlib': False,
            'shuffle': False,
            'complevel': 0,
            'fletcher32': False,
            'contiguous': False,
            'chunksizes': (2 ** 20,),
            'original_shape': (3,),
        }
        with self.roundtrip(ds) as actual:
            assert_equal(ds, actual)
    def test_mask_and_scale(self):
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('t', 5)
                nc.createVariable('x', 'int16', ('t',), fill_value=-1)
                v = nc.variables['x']
                v.set_auto_maskandscale(False)
                v.add_offset = 10
                v.scale_factor = 0.1
                v[:] = np.array([-1, -1, 0, 1, 2])
            # first make sure netCDF4 reads the masked and scaled data
            # correctly
            with nc4.Dataset(tmp_file, mode='r') as nc:
                expected = np.ma.array([-1, -1, 10, 10.1, 10.2],
                                       mask=[True, True, False, False, False])
                actual = nc.variables['x'][:]
                assert_array_equal(expected, actual)
            # now check xarray
            with open_dataset(tmp_file) as ds:
                expected = create_masked_and_scaled_data()
                assert_identical(expected, ds)
    def test_0dimensional_variable(self):
        # This fix verifies our work-around to this netCDF4-python bug:
        # https://github.com/Unidata/netcdf4-python/pull/220
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode='w') as nc:
                v = nc.createVariable('x', 'int16')
                v[...] = 123
            with open_dataset(tmp_file) as ds:
                expected = Dataset({'x': ((), 123)})
                assert_identical(expected, ds)
    def test_already_open_dataset(self):
        # passing an already-open nc4.Dataset to NetCDF4DataStore works
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, mode='w') as nc:
                v = nc.createVariable('x', 'int')
                v[...] = 42
            nc = nc4.Dataset(tmp_file, mode='r')
            with backends.NetCDF4DataStore(nc, autoclose=False) as store:
                with open_dataset(store) as ds:
                    expected = Dataset({'x': ((), 42)})
                    assert_identical(expected, ds)
    def test_variable_len_strings(self):
        with create_tmp_file() as tmp_file:
            values = np.array(['foo', 'bar', 'baz'], dtype=object)
            with nc4.Dataset(tmp_file, mode='w') as nc:
                nc.createDimension('x', 3)
                v = nc.createVariable('x', str, ('x',))
                v[:] = values
            expected = Dataset({'x': ('x', values)})
            for kwargs in [{}, {'decode_cf': True}]:
                with open_dataset(tmp_file, **kwargs) as actual:
                    assert_identical(expected, actual)
@requires_netCDF4
class NetCDF4DataTest(BaseNetCDF4Test, TestCase):
    """Concrete netCDF4 backend tests run against real temporary files."""
    autoclose = False
    @contextlib.contextmanager
    def create_store(self):
        # open a writable NetCDF4DataStore backed by a temp file
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(tmp_file, mode='w') as store:
                yield store
    def test_variable_order(self):
        # doesn't work with scipy or h5py :(
        ds = Dataset()
        ds['a'] = 1
        ds['z'] = 2
        ds['b'] = 3
        ds.coords['c'] = 4
        with self.roundtrip(ds) as actual:
            self.assertEqual(list(ds.variables), list(actual.variables))
    def test_unsorted_index_raises(self):
        # should be fixed in netcdf4 v1.2.1
        random_data = np.random.random(size=(4, 6))
        dim0 = [0, 1, 2, 3]
        dim1 = [0, 2, 1, 3, 5, 4]  # We will sort this in a later step
        da = xr.DataArray(data=random_data, dims=('dim0', 'dim1'),
                          coords={'dim0': dim0, 'dim1': dim1}, name='randovar')
        ds = da.to_dataset()
        with self.roundtrip(ds) as ondisk:
            inds = np.argsort(dim1)
            ds2 = ondisk.isel(dim1=inds)
            # Older versions of NetCDF4 raise an exception here, and if so we
            # want to ensure we improve (that is, replace) the error message
            try:
                ds2.randovar.values
            except IndexError as err:
                self.assertIn('first by calling .load', str(err))
    def test_88_character_filename_segmentation_fault(self):
        # should be fixed in netcdf4 v1.3.1
        with mock.patch('netCDF4.__version__', '1.2.4'):
            with warnings.catch_warnings():
                message = ('A segmentation fault may occur when the '
                           'file path has exactly 88 characters')
                warnings.filterwarnings('error', message)
                with pytest.raises(Warning):
                    # Need to construct 88 character filepath
                    xr.Dataset().to_netcdf('a' * (88 - len(os.getcwd()) - 1))
    def test_setncattr_string(self):
        # string and list-of-string attributes must survive a roundtrip
        list_of_strings = ['list', 'of', 'strings']
        one_element_list_of_strings = ['one element']
        one_string = 'one string'
        attrs = {'foo': list_of_strings,
                 'bar': one_element_list_of_strings,
                 'baz': one_string}
        ds = Dataset({'x': ('y', [1, 2, 3], attrs)},
                     attrs=attrs)
        with self.roundtrip(ds) as actual:
            for totest in [actual, actual['x']]:
                assert_array_equal(list_of_strings, totest.attrs['foo'])
                assert_array_equal(one_element_list_of_strings,
                                   totest.attrs['bar'])
                assert one_string == totest.attrs['baz']
class NetCDF4DataStoreAutocloseTrue(NetCDF4DataTest):
    # Re-run the netCDF4 test suite with autoclose=True (file handles
    # closed after each access).
    autoclose = True
@requires_netCDF4
@requires_dask
class NetCDF4ViaDaskDataTest(NetCDF4DataTest):
    """netCDF4 tests re-run with datasets chunked through dask."""
    @contextlib.contextmanager
    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
                  allow_cleanup_failure=False):
        # delegate to the plain netCDF4 roundtrip, then chunk the result
        with NetCDF4DataTest.roundtrip(
                self, data, save_kwargs, open_kwargs,
                allow_cleanup_failure) as ds:
            yield ds.chunk()
    def test_unsorted_index_raises(self):
        # Skip when using dask because dask rewrites indexers to getitem,
        # dask first pulls items by block.
        pass
    def test_dataset_caching(self):
        # caching behavior differs for dask
        pass
class NetCDF4ViaDaskDataTestAutocloseTrue(NetCDF4ViaDaskDataTest):
    # Re-run the dask-backed netCDF4 tests with autoclose=True.
    autoclose = True
@requires_zarr
class BaseZarrTest(CFEncodedDataTest):
    """CF-encoded roundtrip tests adapted to the zarr backend.

    Subclasses provide the actual zarr storage target via
    ``create_zarr_target`` (e.g. an in-memory dict or a directory store).
    """
    # zarr attribute key under which xarray hides dimension names
    DIMENSION_KEY = '_ARRAY_DIMENSIONS'
    @contextlib.contextmanager
    def create_store(self):
        with self.create_zarr_target() as store_target:
            yield backends.ZarrStore.open_group(store_target, mode='w')
    def save(self, dataset, store_target, **kwargs):
        return dataset.to_zarr(store=store_target, **kwargs)
    @contextlib.contextmanager
    def open(self, store_target, **kwargs):
        with xr.open_zarr(store_target, **kwargs) as ds:
            yield ds
    @contextlib.contextmanager
    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
                  allow_cleanup_failure=False):
        with self.create_zarr_target() as store_target:
            self.save(data, store_target, **save_kwargs)
            with self.open(store_target, **open_kwargs) as ds:
                yield ds
    @contextlib.contextmanager
    def roundtrip_append(self, data, save_kwargs={}, open_kwargs={},
                         allow_cleanup_failure=False):
        pytest.skip("zarr backend does not support appending")
    def test_auto_chunk(self):
        original = create_test_data().chunk()
        with self.roundtrip(
                original, open_kwargs={'auto_chunk': False}) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                self.assertEqual(v._in_memory, k in actual.dims)
                # there should be no chunks
                self.assertEqual(v.chunks, None)
        with self.roundtrip(
                original, open_kwargs={'auto_chunk': True}) as actual:
            for k, v in actual.variables.items():
                # only index variables should be in memory
                self.assertEqual(v._in_memory, k in actual.dims)
                # chunk size should be the same as original
                self.assertEqual(v.chunks, original[k].chunks)
    def test_chunk_encoding(self):
        # These datasets have no dask chunks. All chunking specified in
        # encoding
        data = create_test_data()
        chunks = (5, 5)
        data['var2'].encoding.update({'chunks': chunks})
        with self.roundtrip(data) as actual:
            self.assertEqual(chunks, actual['var2'].encoding['chunks'])
        # expect an error with non-integer chunks
        data['var2'].encoding.update({'chunks': (5, 4.5)})
        with pytest.raises(TypeError):
            with self.roundtrip(data) as actual:
                pass
    def test_chunk_encoding_with_dask(self):
        # These datasets DO have dask chunks. Need to check for various
        # interactions between dask and zarr chunks
        ds = xr.DataArray((np.arange(12)), dims='x', name='var1').to_dataset()
        # - no encoding specified -
        # zarr automatically gets chunk information from dask chunks
        ds_chunk4 = ds.chunk({'x': 4})
        with self.roundtrip(ds_chunk4) as actual:
            self.assertEqual((4,), actual['var1'].encoding['chunks'])
        # should fail if dask_chunks are irregular...
        ds_chunk_irreg = ds.chunk({'x': (5, 4, 3)})
        with pytest.raises(ValueError) as e_info:
            with self.roundtrip(ds_chunk_irreg) as actual:
                pass
        # make sure this error message is correct and not some other error
        assert e_info.match('chunks')
        # ... except if the last chunk is smaller than the first
        ds_chunk_irreg = ds.chunk({'x': (5, 5, 2)})
        with self.roundtrip(ds_chunk_irreg) as actual:
            self.assertEqual((5,), actual['var1'].encoding['chunks'])
        # - encoding specified -
        # specify compatible encodings
        for chunk_enc in 4, (4, ):
            ds_chunk4['var1'].encoding.update({'chunks': chunk_enc})
            with self.roundtrip(ds_chunk4) as actual:
                self.assertEqual((4,), actual['var1'].encoding['chunks'])
        # specify incompatible encoding
        ds_chunk4['var1'].encoding.update({'chunks': (5, 5)})
        with pytest.raises(ValueError) as e_info:
            with self.roundtrip(ds_chunk4) as actual:
                pass
        assert e_info.match('chunks')
        # TODO: remove this failure once syncronized overlapping writes are
        # supported by xarray
        ds_chunk4['var1'].encoding.update({'chunks': 5})
        with pytest.raises(NotImplementedError):
            with self.roundtrip(ds_chunk4) as actual:
                pass
    def test_hidden_zarr_keys(self):
        expected = create_test_data()
        with self.create_store() as store:
            expected.dump_to_store(store)
            zarr_group = store.ds
            # check that a variable hidden attribute is present and correct
            # JSON only has a single array type, which maps to list in Python.
            # In contrast, dims in xarray is always a tuple.
            for var in expected.variables.keys():
                dims = zarr_group[var].attrs[self.DIMENSION_KEY]
                assert dims == list(expected[var].dims)
            with xr.decode_cf(store):
                # make sure it is hidden
                for var in expected.variables.keys():
                    assert self.DIMENSION_KEY not in expected[var].attrs
            # put it back and try removing from a variable
            del zarr_group.var2.attrs[self.DIMENSION_KEY]
            with pytest.raises(KeyError):
                with xr.decode_cf(store):
                    pass
    def test_write_persistence_modes(self):
        original = create_test_data()
        # overwrite mode
        with self.roundtrip(original, save_kwargs={'mode': 'w'}) as actual:
            assert_identical(original, actual)
        # don't overwrite mode
        with self.roundtrip(original, save_kwargs={'mode': 'w-'}) as actual:
            assert_identical(original, actual)
        # make sure overwriting works as expected
        with self.create_zarr_target() as store:
            self.save(original, store)
            # should overwrite with no error
            self.save(original, store, mode='w')
            with self.open(store) as actual:
                assert_identical(original, actual)
            with pytest.raises(ValueError):
                self.save(original, store, mode='w-')
        # check that we can't use other persistence modes
        # TODO: reconsider whether other persistence modes should be supported
        with pytest.raises(ValueError):
            with self.roundtrip(original, save_kwargs={'mode': 'a'}) as actual:
                pass
    def test_compressor_encoding(self):
        original = create_test_data()
        # specify a custom compressor
        import zarr
        blosc_comp = zarr.Blosc(cname='zstd', clevel=3, shuffle=2)
        save_kwargs = dict(encoding={'var1': {'compressor': blosc_comp}})
        with self.roundtrip(original, save_kwargs=save_kwargs) as ds:
            actual = ds['var1'].encoding['compressor']
            # get_config returns a dictionary of compressor attributes
            assert actual.get_config() == blosc_comp.get_config()
    def test_group(self):
        original = create_test_data()
        group = 'some/random/path'
        with self.roundtrip(original, save_kwargs={'group': group},
                            open_kwargs={'group': group}) as actual:
            assert_identical(original, actual)
    # TODO: someone who understand caching figure out whether chaching
    # makes sense for Zarr backend
    # NOTE(review): the super() calls below are anchored at
    # CFEncodedDataTest rather than BaseZarrTest, so they skip
    # CFEncodedDataTest's own overrides in the MRO — confirm intentional.
    @pytest.mark.xfail(reason="Zarr caching not implemented")
    def test_dataset_caching(self):
        super(CFEncodedDataTest, self).test_dataset_caching()
    @pytest.mark.xfail(reason="Zarr stores can not be appended to")
    def test_append_write(self):
        super(CFEncodedDataTest, self).test_append_write()
    @pytest.mark.xfail(reason="Zarr stores can not be appended to")
    def test_append_overwrite_values(self):
        super(CFEncodedDataTest, self).test_append_overwrite_values()
    @pytest.mark.xfail(reason="Zarr stores can not be appended to")
    def test_append_with_invalid_dim_raises(self):
        super(CFEncodedDataTest, self).test_append_with_invalid_dim_raises()
    def test_to_zarr_compute_false_roundtrip(self):
        from dask.delayed import Delayed
        original = create_test_data().chunk()
        with self.create_zarr_target() as store:
            delayed_obj = self.save(original, store, compute=False)
            assert isinstance(delayed_obj, Delayed)
            delayed_obj.compute()
            with self.open(store) as actual:
                assert_identical(original, actual)
@requires_zarr
class ZarrDictStoreTest(BaseZarrTest, TestCase):
    # zarr tests backed by an in-memory dict store
    @contextlib.contextmanager
    def create_zarr_target(self):
        yield {}
@requires_zarr
class ZarrDirectoryStoreTest(BaseZarrTest, TestCase):
    # zarr tests backed by a temporary on-disk directory store
    @contextlib.contextmanager
    def create_zarr_target(self):
        with create_tmp_file(suffix='.zarr') as tmp:
            yield tmp
class ScipyWriteTest(CFEncodedDataTest, NetCDF3Only):
    """Shared write tests for the scipy-based netCDF3 backends.

    scipy 1.0.1 cannot append to existing netCDF files
    (https://github.com/scipy/scipy/issues/8625), so both append tests
    are marked as expected failures on exactly that release.
    """
    def _xfail_broken_scipy_append(self):
        # single place for the duplicated version check
        import scipy
        if scipy.__version__ == '1.0.1':
            pytest.xfail('https://github.com/scipy/scipy/issues/8625')
    def test_append_write(self):
        self._xfail_broken_scipy_append()
        super(ScipyWriteTest, self).test_append_write()
    def test_append_overwrite_values(self):
        self._xfail_broken_scipy_append()
        super(ScipyWriteTest, self).test_append_overwrite_values()
@requires_scipy
class ScipyInMemoryDataTest(ScipyWriteTest, TestCase):
    """scipy backend tests against an in-memory BytesIO buffer."""
    engine = 'scipy'
    @contextlib.contextmanager
    def create_store(self):
        fobj = BytesIO()
        yield backends.ScipyDataStore(fobj, 'w')
    def test_to_netcdf_explicit_engine(self):
        # regression test for GH1321
        Dataset({'foo': 42}).to_netcdf(engine='scipy')
    @pytest.mark.skipif(PY2, reason='cannot pickle BytesIO on Python 2')
    def test_bytesio_pickle(self):
        data = Dataset({'foo': ('x', [1, 2, 3])})
        fobj = BytesIO(data.to_netcdf())
        with open_dataset(fobj, autoclose=self.autoclose) as ds:
            unpickled = pickle.loads(pickle.dumps(ds))
            assert_identical(unpickled, data)
class ScipyInMemoryDataTestAutocloseTrue(ScipyInMemoryDataTest):
    # Re-run the in-memory scipy tests with autoclose=True.
    autoclose = True
@requires_scipy
class ScipyFileObjectTest(ScipyWriteTest, TestCase):
    """scipy backend tests reading/writing via open file objects."""
    engine = 'scipy'
    @contextlib.contextmanager
    def create_store(self):
        fobj = BytesIO()
        yield backends.ScipyDataStore(fobj, 'w')
    @contextlib.contextmanager
    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
                  allow_cleanup_failure=False):
        # save through a writable file handle, reopen through a readable one
        with create_tmp_file() as tmp_file:
            with open(tmp_file, 'wb') as f:
                self.save(data, f, **save_kwargs)
            with open(tmp_file, 'rb') as f:
                with self.open(f, **open_kwargs) as ds:
                    yield ds
    @pytest.mark.skip(reason='cannot pickle file objects')
    def test_pickle(self):
        pass
    @pytest.mark.skip(reason='cannot pickle file objects')
    def test_pickle_dataarray(self):
        pass
@requires_scipy
class ScipyFilePathTest(ScipyWriteTest, TestCase):
    """scipy backend tests addressing files by path."""
    engine = 'scipy'
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.ScipyDataStore(tmp_file, mode='w') as store:
                yield store
    def test_array_attrs(self):
        # scipy only supports 1-dimensional attribute values
        ds = Dataset(attrs={'foo': [[1, 2], [3, 4]]})
        with raises_regex(ValueError, 'must be 1-dimensional'):
            with self.roundtrip(ds):
                pass
    def test_roundtrip_example_1_netcdf_gz(self):
        # gzip-compressed netCDF3 files are readable via scipy
        with open_example_dataset('example_1.nc.gz') as expected:
            with open_example_dataset('example_1.nc') as actual:
                assert_identical(expected, actual)
    def test_netcdf3_endianness(self):
        # regression test for GH416
        expected = open_example_dataset('bears.nc', engine='scipy')
        for var in expected.variables.values():
            self.assertTrue(var.dtype.isnative)
    @requires_netCDF4
    def test_nc4_scipy(self):
        # opening a netCDF4 file with the scipy engine should point the
        # user at netcdf4 instead of failing obscurely
        with create_tmp_file(allow_cleanup_failure=True) as tmp_file:
            with nc4.Dataset(tmp_file, 'w', format='NETCDF4') as rootgrp:
                rootgrp.createGroup('foo')
            with raises_regex(TypeError, 'pip install netcdf4'):
                open_dataset(tmp_file, engine='scipy')
class ScipyFilePathTestAutocloseTrue(ScipyFilePathTest):
    # Re-run the file-path scipy tests with autoclose=True.
    autoclose = True
@requires_netCDF4
class NetCDF3ViaNetCDF4DataTest(CFEncodedDataTest, NetCDF3Only, TestCase):
    # netCDF3 files written through the netCDF4-python library
    engine = 'netcdf4'
    file_format = 'NETCDF3_CLASSIC'
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(
                    tmp_file, mode='w', format='NETCDF3_CLASSIC') as store:
                yield store
class NetCDF3ViaNetCDF4DataTestAutocloseTrue(NetCDF3ViaNetCDF4DataTest):
    # Re-run the netCDF3-via-netCDF4 tests with autoclose=True.
    autoclose = True
@requires_netCDF4
class NetCDF4ClassicViaNetCDF4DataTest(CFEncodedDataTest, NetCDF3Only,
                                       TestCase):
    # NETCDF4_CLASSIC files written through the netCDF4-python library
    engine = 'netcdf4'
    file_format = 'NETCDF4_CLASSIC'
    @contextlib.contextmanager
    def create_store(self):
        with create_tmp_file() as tmp_file:
            with backends.NetCDF4DataStore.open(
                    tmp_file, mode='w', format='NETCDF4_CLASSIC') as store:
                yield store
class NetCDF4ClassicViaNetCDF4DataTestAutocloseTrue(
        NetCDF4ClassicViaNetCDF4DataTest):
    # Re-run the NETCDF4_CLASSIC tests with autoclose=True.
    autoclose = True
@requires_scipy_or_netCDF4
class GenericNetCDFDataTest(CFEncodedDataTest, NetCDF3Only, TestCase):
    # verify that we can read and write netCDF3 files as long as we have scipy
    # or netCDF4-python installed
    file_format = 'netcdf3_64bit'
    def test_write_store(self):
        # there's no specific store to test here
        pass
    def test_engine(self):
        """Unknown or unusable engine names must raise informative errors."""
        data = create_test_data()
        with raises_regex(ValueError, 'unrecognized engine'):
            data.to_netcdf('foo.nc', engine='foobar')
        with raises_regex(ValueError, 'invalid engine'):
            # netcdf4 cannot serialize to an in-memory bytes object
            data.to_netcdf(engine='netcdf4')
        with create_tmp_file() as tmp_file:
            data.to_netcdf(tmp_file)
            with raises_regex(ValueError, 'unrecognized engine'):
                open_dataset(tmp_file, engine='foobar')
        netcdf_bytes = data.to_netcdf()
        with raises_regex(ValueError, 'can only read'):
            open_dataset(BytesIO(netcdf_bytes), engine='foobar')
    @pytest.mark.xfail(reason='https://github.com/pydata/xarray/issues/2050')
    def test_cross_engine_read_write_netcdf3(self):
        """Files written by one engine must be readable by the other."""
        data = create_test_data()
        valid_engines = set()
        if has_netCDF4:
            valid_engines.add('netcdf4')
        if has_scipy:
            valid_engines.add('scipy')
        for write_engine in valid_engines:
            for format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
                with create_tmp_file() as tmp_file:
                    data.to_netcdf(tmp_file, format=format,
                                   engine=write_engine)
                    for read_engine in valid_engines:
                        with open_dataset(tmp_file,
                                          engine=read_engine) as actual:
                            # hack to allow test to work:
                            # coord comes back as DataArray rather than coord,
                            # and so need to loop through here rather than in
                            # the test function (or we get recursion)
                            [assert_allclose(data[k].variable,
                                             actual[k].variable)
                             for k in data.variables]
    def test_encoding_unlimited_dims(self):
        """unlimited_dims survives a round-trip, given as list or string."""
        ds = Dataset({'x': ('y', np.arange(10.0))})
        with self.roundtrip(ds,
                            save_kwargs=dict(unlimited_dims=['y'])) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
        # Regression test for https://github.com/pydata/xarray/issues/2134
        with self.roundtrip(ds,
                            save_kwargs=dict(unlimited_dims='y')) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
        ds.encoding = {'unlimited_dims': ['y']}
        with self.roundtrip(ds) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
        # Regression test for https://github.com/pydata/xarray/issues/2134
        ds.encoding = {'unlimited_dims': 'y'}
        with self.roundtrip(ds) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
class GenericNetCDFDataTestAutocloseTrue(GenericNetCDFDataTest):
    # Re-run the generic netCDF3 tests with autoclose enabled.
    autoclose = True
@requires_h5netcdf
@requires_netCDF4
class H5NetCDFDataTest(BaseNetCDF4Test, TestCase):
    # Round-trip tests for the h5netcdf backend, including translation
    # between h5py-style and NetCDF4-Python-style compression encodings.
    engine = 'h5netcdf'
    @contextlib.contextmanager
    def create_store(self):
        """Yield a writable H5NetCDFStore backed by a temporary file."""
        with create_tmp_file() as tmp_file:
            yield backends.H5NetCDFStore(tmp_file, 'w')
    def test_complex(self):
        """h5netcdf supports complex dtypes (netCDF4-python does not)."""
        expected = Dataset({'x': ('y', np.ones(5) + 1j * np.ones(5))})
        with self.roundtrip(expected) as actual:
            assert_equal(expected, actual)
    @pytest.mark.xfail(reason='https://github.com/pydata/xarray/issues/535')
    def test_cross_engine_read_write_netcdf4(self):
        # Drop dim3, because its labels include strings. These appear to be
        # not properly read with python-netCDF4, which converts them into
        # unicode instead of leaving them as bytes.
        data = create_test_data().drop('dim3')
        data.attrs['foo'] = 'bar'
        valid_engines = ['netcdf4', 'h5netcdf']
        for write_engine in valid_engines:
            with create_tmp_file() as tmp_file:
                data.to_netcdf(tmp_file, engine=write_engine)
                for read_engine in valid_engines:
                    with open_dataset(tmp_file, engine=read_engine) as actual:
                        assert_identical(data, actual)
    def test_read_byte_attrs_as_unicode(self):
        """Bytes attributes written by netCDF4 come back decoded as str."""
        with create_tmp_file() as tmp_file:
            with nc4.Dataset(tmp_file, 'w') as nc:
                nc.foo = b'bar'
            with open_dataset(tmp_file) as actual:
                expected = Dataset(attrs={'foo': 'bar'})
                assert_identical(expected, actual)
    def test_encoding_unlimited_dims(self):
        """unlimited_dims round-trips through the h5netcdf backend."""
        ds = Dataset({'x': ('y', np.arange(10.0))})
        with self.roundtrip(ds,
                            save_kwargs=dict(unlimited_dims=['y'])) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
        ds.encoding = {'unlimited_dims': ['y']}
        with self.roundtrip(ds) as actual:
            self.assertEqual(actual.encoding['unlimited_dims'], set('y'))
            assert_equal(ds, actual)
    def test_compression_encoding_h5py(self):
        """h5py-style compression encodings round-trip as documented below."""
        ENCODINGS = (
            # h5py style compression with gzip codec will be converted to
            # NetCDF4-Python style on round-trip
            ({'compression': 'gzip', 'compression_opts': 9},
             {'zlib': True, 'complevel': 9}),
            # What can't be expressed in NetCDF4-Python style is
            # round-tripped unaltered
            ({'compression': 'lzf', 'compression_opts': None},
             {'compression': 'lzf', 'compression_opts': None}),
            # If both styles are used together, h5py format takes precedence
            ({'compression': 'lzf', 'compression_opts': None,
              'zlib': True, 'complevel': 9},
             {'compression': 'lzf', 'compression_opts': None}))
        for compr_in, compr_out in ENCODINGS:
            data = create_test_data()
            compr_common = {
                'chunksizes': (5, 5),
                'fletcher32': True,
                'shuffle': True,
                'original_shape': data.var2.shape
            }
            data['var2'].encoding.update(compr_in)
            data['var2'].encoding.update(compr_common)
            compr_out.update(compr_common)
            with self.roundtrip(data) as actual:
                for k, v in compr_out.items():
                    self.assertEqual(v, actual['var2'].encoding[k])
    def test_compression_check_encoding_h5py(self):
        """When mismatched h5py and NetCDF4-Python encodings are expressed
        in to_netcdf(encoding=...), must raise ValueError
        """
        data = Dataset({'x': ('y', np.arange(10.0))})
        # Compatible encodings are graciously supported
        with create_tmp_file() as tmp_file:
            data.to_netcdf(
                tmp_file, engine='h5netcdf',
                encoding={'x': {'compression': 'gzip', 'zlib': True,
                                'compression_opts': 6, 'complevel': 6}})
            with open_dataset(tmp_file, engine='h5netcdf') as actual:
                assert actual.x.encoding['zlib'] is True
                assert actual.x.encoding['complevel'] == 6
        # Incompatible encodings cause a crash
        with create_tmp_file() as tmp_file:
            with raises_regex(ValueError,
                              "'zlib' and 'compression' encodings mismatch"):
                data.to_netcdf(
                    tmp_file, engine='h5netcdf',
                    encoding={'x': {'compression': 'lzf', 'zlib': True}})
        with create_tmp_file() as tmp_file:
            with raises_regex(
                    ValueError,
                    "'complevel' and 'compression_opts' encodings mismatch"):
                data.to_netcdf(
                    tmp_file, engine='h5netcdf',
                    encoding={'x': {'compression': 'gzip',
                                    'compression_opts': 5, 'complevel': 6}})
    def test_dump_encodings_h5py(self):
        # regression test for #709
        ds = Dataset({'x': ('y', np.arange(10.0))})
        kwargs = {'encoding': {'x': {
            'compression': 'gzip', 'compression_opts': 9}}}
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.x.encoding['zlib'], True)
            self.assertEqual(actual.x.encoding['complevel'], 9)
        kwargs = {'encoding': {'x': {
            'compression': 'lzf', 'compression_opts': None}}}
        with self.roundtrip(ds, save_kwargs=kwargs) as actual:
            self.assertEqual(actual.x.encoding['compression'], 'lzf')
            self.assertEqual(actual.x.encoding['compression_opts'], None)
# tests pending h5netcdf fix
@unittest.skip('tests pending h5netcdf fix')
class H5NetCDFDataTestAutocloseTrue(H5NetCDFDataTest):
    # Bug fix: bare ``@unittest.skip`` (without a reason) does not mark a
    # class as skipped -- ``unittest.skip(reason)`` treats the class as the
    # *reason* argument and rebinds this name to the returned decorator
    # function, so the tests silently disappeared from collection instead of
    # being reported as skipped.  Passing a reason string makes unittest
    # properly skip the class and all of its tests.
    autoclose = True
@pytest.fixture(params=['scipy', 'netcdf4', 'h5netcdf', 'pynio'])
def readengine(request):
    """Parametrize tests over every candidate backend engine for reading."""
    engine_name = request.param
    return engine_name
@pytest.fixture(params=[1, 100])
def nfiles(request):
    """Parametrize tests over a small and a large number of files."""
    file_count = request.param
    return file_count
@pytest.fixture(params=[True, False])
def autoclose(request):
    """Parametrize tests over both autoclose settings."""
    flag = request.param
    return flag
@pytest.fixture(params=[True, False])
def parallel(request):
    """Parametrize tests over serial and parallel multi-file opening."""
    use_parallel = request.param
    return use_parallel
@pytest.fixture(params=[None, 5])
def chunks(request):
    """Parametrize tests over no chunking and a fixed chunk size."""
    chunk_spec = request.param
    return chunk_spec
# using pytest.mark.skipif does not work here, so this is a work-around
def skip_if_not_engine(engine):
    """Skip the calling test when *engine*'s backing library is not installed."""
    # Engine names do not always match the importable module name.
    module_name = {'netcdf4': 'netCDF4', 'pynio': 'Nio'}.get(engine, engine)
    pytest.importorskip(module_name)
def test_open_mfdataset_manyfiles(readengine, nfiles, autoclose, parallel,
                                  chunks):
    """Write one single-slice file per x value and re-open them all with
    open_mfdataset, checking dask-backed variables and identity of the data.

    Parametrized (via the fixtures above) over read engine, number of files,
    autoclose, parallel opening and chunking.
    """
    # skip combinations that are unsupported in this environment
    skip_if_not_engine(readengine)
    if not has_dask and parallel:
        pytest.skip('parallel requires dask')
    if readengine == 'h5netcdf' and autoclose:
        pytest.skip('h5netcdf does not support autoclose yet')
    if ON_WINDOWS:
        pytest.skip('Skipping on Windows')
    randdata = np.random.randn(nfiles)
    original = Dataset({'foo': ('x', randdata)})
    # test standard open_mfdataset approach with too many files
    with create_tmp_files(nfiles) as tmpfiles:
        # pynio is read-only, so fall back to netcdf4 for writing
        writeengine = (readengine if readengine != 'pynio' else 'netcdf4')
        # split into multiple sets of temp files, one x-slice per file
        for ii in original.x.values:
            subds = original.isel(x=slice(ii, ii + 1))
            subds.to_netcdf(tmpfiles[ii], engine=writeengine)
        # check that calculation on opened datasets works properly.
        # Bug fix: the dataset is now opened in a context manager so its
        # underlying file handles are released before create_tmp_files()
        # removes the temporary files; previously the dataset was left open
        # and the handles leaked for the rest of the test session.
        with open_mfdataset(tmpfiles, engine=readengine, parallel=parallel,
                            autoclose=autoclose, chunks=chunks) as actual:
            # check that using open_mfdataset returns dask arrays for
            # variables
            assert isinstance(actual['foo'].data, dask_array_type)
            assert_identical(original, actual)
@requires_scipy_or_netCDF4
class OpenMFDatasetWithDataVarsAndCoordsKwTest(TestCase):
    # Tests for the ``data_vars`` / ``coords`` keywords of open_mfdataset,
    # which control how non-concatenated variables are merged.
    coord_name = 'lon'
    var_name = 'v1'
    @contextlib.contextmanager
    def setup_files_and_datasets(self):
        """Yield ([path1, path2], [ds1, ds2]) with the datasets saved to disk."""
        ds1, ds2 = self.gen_datasets_with_common_coord_and_time()
        with create_tmp_file() as tmpfile1:
            with create_tmp_file() as tmpfile2:
                # save data to the temporary files
                ds1.to_netcdf(tmpfile1)
                ds2.to_netcdf(tmpfile2)
                yield [tmpfile1, tmpfile2], [ds1, ds2]
    def gen_datasets_with_common_coord_and_time(self):
        """Build two datasets sharing x and the 'lon' coord but disjoint in t."""
        # create coordinate data
        nx = 10
        nt = 10
        x = np.arange(nx)
        t1 = np.arange(nt)
        t2 = np.arange(nt, 2 * nt, 1)
        v1 = np.random.randn(nt, nx)
        v2 = np.random.randn(nt, nx)
        ds1 = Dataset(data_vars={self.var_name: (['t', 'x'], v1),
                                 self.coord_name: ('x', 2 * x)},
                      coords={
                          't': (['t', ], t1),
                          'x': (['x', ], x)
                      })
        ds2 = Dataset(data_vars={self.var_name: (['t', 'x'], v2),
                                 self.coord_name: ('x', 2 * x)},
                      coords={
                          't': (['t', ], t2),
                          'x': (['x', ], x)
                      })
        return ds1, ds2
    def test_open_mfdataset_does_same_as_concat(self):
        """open_mfdataset must match an explicit xr.concat for each option."""
        options = ['all', 'minimal', 'different', ]
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            for opt in options:
                with open_mfdataset(files, data_vars=opt) as ds:
                    kwargs = dict(data_vars=opt, dim='t')
                    ds_expect = xr.concat([ds1, ds2], **kwargs)
                    assert_identical(ds, ds_expect)
                with open_mfdataset(files, coords=opt) as ds:
                    kwargs = dict(coords=opt, dim='t')
                    ds_expect = xr.concat([ds1, ds2], **kwargs)
                    assert_identical(ds, ds_expect)
    def test_common_coord_when_datavars_all(self):
        """With data_vars='all' the shared coord is concatenated along t."""
        opt = 'all'
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # open the files with the data_var option
            with open_mfdataset(files, data_vars=opt) as ds:
                coord_shape = ds[self.coord_name].shape
                coord_shape1 = ds1[self.coord_name].shape
                coord_shape2 = ds2[self.coord_name].shape
                var_shape = ds[self.var_name].shape
                self.assertEqual(var_shape, coord_shape)
                self.assertNotEqual(coord_shape1, coord_shape)
                self.assertNotEqual(coord_shape2, coord_shape)
    def test_common_coord_when_datavars_minimal(self):
        """With data_vars='minimal' the shared coord keeps its original shape."""
        opt = 'minimal'
        with self.setup_files_and_datasets() as (files, [ds1, ds2]):
            # open the files using data_vars option
            with open_mfdataset(files, data_vars=opt) as ds:
                coord_shape = ds[self.coord_name].shape
                coord_shape1 = ds1[self.coord_name].shape
                coord_shape2 = ds2[self.coord_name].shape
                var_shape = ds[self.var_name].shape
                self.assertNotEqual(var_shape, coord_shape)
                self.assertEqual(coord_shape1, coord_shape)
                self.assertEqual(coord_shape2, coord_shape)
    def test_invalid_data_vars_value_should_fail(self):
        """Unknown data_vars / coords options must raise ValueError."""
        with self.setup_files_and_datasets() as (files, _):
            with pytest.raises(ValueError):
                with open_mfdataset(files, data_vars='minimum'):
                    pass
            # test invalid coord parameter
            with pytest.raises(ValueError):
                with open_mfdataset(files, coords='minimum'):
                    pass
@requires_dask
@requires_scipy
@requires_netCDF4
class DaskTest(TestCase, DatasetIOTestCases):
    # Run the generic dataset IO tests against dask-backed (chunked)
    # datasets.  "roundtrip" here is simply Dataset.chunk() -- no file IO --
    # so several file-format-specific tests from DatasetIOTestCases are
    # overridden as no-ops below.  Also tests open_mfdataset/save_mfdataset.
    @contextlib.contextmanager
    def create_store(self):
        """Yield an empty in-memory Dataset (there is no on-disk dask store)."""
        yield Dataset()
    @contextlib.contextmanager
    def roundtrip(self, data, save_kwargs={}, open_kwargs={},
                  allow_cleanup_failure=False):
        # "Round-trip" by chunking only; the kwargs are accepted for
        # interface compatibility but ignored (never mutated, so the
        # mutable defaults are harmless here).
        yield data.chunk()
    # Override methods in DatasetIOTestCases - not applicable to dask
    def test_roundtrip_string_encoded_characters(self):
        pass
    def test_roundtrip_coordinates_with_space(self):
        pass
    def test_roundtrip_numpy_datetime_data(self):
        # Override method in DatasetIOTestCases - remove not applicable
        # save_kwds
        times = pd.to_datetime(['2000-01-01', '2000-01-02', 'NaT'])
        expected = Dataset({'t': ('t', times), 't0': times[0]})
        with self.roundtrip(expected) as actual:
            assert_identical(expected, actual)
    def test_roundtrip_cftime_datetime_data_enable_cftimeindex(self):
        # Override method in DatasetIOTestCases - remove not applicable
        # save_kwds
        from .test_coding_times import _all_cftime_date_types
        date_types = _all_cftime_date_types()
        for date_type in date_types.values():
            times = [date_type(1, 1, 1), date_type(1, 1, 2)]
            expected = Dataset({'t': ('t', times), 't0': times[0]})
            expected_decoded_t = np.array(times)
            expected_decoded_t0 = np.array([date_type(1, 1, 1)])
            with xr.set_options(enable_cftimeindex=True):
                with self.roundtrip(expected) as actual:
                    abs_diff = abs(actual.t.values - expected_decoded_t)
                    self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
                    abs_diff = abs(actual.t0.values - expected_decoded_t0)
                    self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
    def test_roundtrip_cftime_datetime_data_disable_cftimeindex(self):
        # Override method in DatasetIOTestCases - remove not applicable
        # save_kwds
        from .test_coding_times import _all_cftime_date_types
        date_types = _all_cftime_date_types()
        for date_type in date_types.values():
            times = [date_type(1, 1, 1), date_type(1, 1, 2)]
            expected = Dataset({'t': ('t', times), 't0': times[0]})
            expected_decoded_t = np.array(times)
            expected_decoded_t0 = np.array([date_type(1, 1, 1)])
            with xr.set_options(enable_cftimeindex=False):
                with self.roundtrip(expected) as actual:
                    abs_diff = abs(actual.t.values - expected_decoded_t)
                    self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
                    abs_diff = abs(actual.t0.values - expected_decoded_t0)
                    self.assertTrue((abs_diff <= np.timedelta64(1, 's')).all())
    def test_write_store(self):
        # Override method in DatasetIOTestCases - not applicable to dask
        pass
    def test_dataset_caching(self):
        """Unlike file backends, dask values are never cached in memory."""
        expected = Dataset({'foo': ('x', [5, 6, 7])})
        with self.roundtrip(expected) as actual:
            assert not actual.foo.variable._in_memory
            actual.foo.values  # no caching
            assert not actual.foo.variable._in_memory
    def test_open_mfdataset(self):
        """open_mfdataset concatenates files into a chunked dask dataset."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2],
                                    autoclose=self.autoclose) as actual:
                    self.assertIsInstance(actual.foo.variable.data, da.Array)
                    self.assertEqual(actual.foo.variable.data.chunks,
                                     ((5, 5),))
                    assert_identical(original, actual)
                with open_mfdataset([tmp1, tmp2], chunks={'x': 3},
                                    autoclose=self.autoclose) as actual:
                    self.assertEqual(actual.foo.variable.data.chunks,
                                     ((3, 2, 3, 2),))
        with raises_regex(IOError, 'no files to open'):
            open_mfdataset('foo-bar-baz-*.nc', autoclose=self.autoclose)
        with raises_regex(ValueError, 'wild-card'):
            open_mfdataset('http://some/remote/uri', autoclose=self.autoclose)
    @requires_pathlib
    def test_open_mfdataset_pathlib(self):
        """open_mfdataset accepts pathlib.Path objects as file paths."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                tmp1 = Path(tmp1)
                tmp2 = Path(tmp2)
                original.isel(x=slice(5)).to_netcdf(tmp1)
                original.isel(x=slice(5, 10)).to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2],
                                    autoclose=self.autoclose) as actual:
                    assert_identical(original, actual)
    def test_attrs_mfdataset(self):
        """Only the first file's attributes are kept by open_mfdataset."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                ds1 = original.isel(x=slice(5))
                ds2 = original.isel(x=slice(5, 10))
                ds1.attrs['test1'] = 'foo'
                ds2.attrs['test2'] = 'bar'
                ds1.to_netcdf(tmp1)
                ds2.to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2]) as actual:
                    # presumes that attributes inherited from
                    # first dataset loaded
                    self.assertEqual(actual.test1, ds1.test1)
                    # attributes from ds2 are not retained, e.g.,
                    with raises_regex(AttributeError,
                                      'no attribute'):
                        actual.test2
    def test_preprocess_mfdataset(self):
        """The preprocess callback is applied to each dataset before merge."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            def preprocess(ds):
                return ds.assign_coords(z=0)
            expected = preprocess(original)
            with open_mfdataset(tmp, preprocess=preprocess,
                                autoclose=self.autoclose) as actual:
                assert_identical(expected, actual)
    def test_save_mfdataset_roundtrip(self):
        """save_mfdataset followed by open_mfdataset recovers the original."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        datasets = [original.isel(x=slice(5)),
                    original.isel(x=slice(5, 10))]
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                save_mfdataset(datasets, [tmp1, tmp2])
                with open_mfdataset([tmp1, tmp2],
                                    autoclose=self.autoclose) as actual:
                    assert_identical(actual, original)
    def test_save_mfdataset_invalid(self):
        """save_mfdataset rejects duplicate paths and mismatched lengths."""
        ds = Dataset()
        with raises_regex(ValueError, 'cannot use mode'):
            save_mfdataset([ds, ds], ['same', 'same'])
        with raises_regex(ValueError, 'same length'):
            save_mfdataset([ds, ds], ['only one path'])
    def test_save_mfdataset_invalid_dataarray(self):
        # regression test for GH1555
        da = DataArray([1, 2])
        with raises_regex(TypeError, 'supports writing Dataset'):
            save_mfdataset([da], ['dataarray'])
    @requires_pathlib
    def test_save_mfdataset_pathlib_roundtrip(self):
        """save_mfdataset accepts pathlib.Path objects as target paths."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        datasets = [original.isel(x=slice(5)),
                    original.isel(x=slice(5, 10))]
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                tmp1 = Path(tmp1)
                tmp2 = Path(tmp2)
                save_mfdataset(datasets, [tmp1, tmp2])
                with open_mfdataset([tmp1, tmp2],
                                    autoclose=self.autoclose) as actual:
                    assert_identical(actual, original)
    def test_open_and_do_math(self):
        """Arithmetic on a lazily opened dataset yields the expected values."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
                actual = 1.0 * ds
                assert_allclose(original, actual, decode_bytes=False)
    def test_open_mfdataset_concat_dim_none(self):
        """concat_dim=None merges instead of concatenating along a new dim."""
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                data = Dataset({'x': 0})
                data.to_netcdf(tmp1)
                Dataset({'x': np.nan}).to_netcdf(tmp2)
                with open_mfdataset([tmp1, tmp2], concat_dim=None,
                                    autoclose=self.autoclose) as actual:
                    assert_identical(data, actual)
    def test_open_dataset(self):
        """open_dataset returns dask arrays iff ``chunks`` is provided."""
        original = Dataset({'foo': ('x', np.random.randn(10))})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_dataset(tmp, chunks={'x': 5}) as actual:
                self.assertIsInstance(actual.foo.variable.data, da.Array)
                self.assertEqual(actual.foo.variable.data.chunks, ((5, 5),))
                assert_identical(original, actual)
            with open_dataset(tmp, chunks=5) as actual:
                assert_identical(original, actual)
            with open_dataset(tmp) as actual:
                self.assertIsInstance(actual.foo.variable.data, np.ndarray)
                assert_identical(original, actual)
    def test_open_single_dataset(self):
        # Test for issue GH #1988. This makes sure that the
        # concat_dim is utilized when specified in open_mfdataset().
        rnddata = np.random.randn(10)
        original = Dataset({'foo': ('x', rnddata)})
        dim = DataArray([100], name='baz', dims='baz')
        expected = Dataset({'foo': (('baz', 'x'), rnddata[np.newaxis, :])},
                           {'baz': [100]})
        with create_tmp_file() as tmp:
            original.to_netcdf(tmp)
            with open_mfdataset([tmp], concat_dim=dim,
                                autoclose=self.autoclose) as actual:
                assert_identical(expected, actual)
    def test_dask_roundtrip(self):
        """Data read through dask chunks can be written and re-read intact."""
        with create_tmp_file() as tmp:
            data = create_test_data()
            data.to_netcdf(tmp)
            chunks = {'dim1': 4, 'dim2': 4, 'dim3': 4, 'time': 10}
            with open_dataset(tmp, chunks=chunks) as dask_ds:
                assert_identical(data, dask_ds)
                with create_tmp_file() as tmp2:
                    dask_ds.to_netcdf(tmp2)
                    with open_dataset(tmp2) as on_disk:
                        assert_identical(data, on_disk)
    def test_deterministic_names(self):
        """Dask graph names are stable across repeated opens of one file."""
        with create_tmp_file() as tmp:
            data = create_test_data()
            data.to_netcdf(tmp)
            with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
                original_names = dict((k, v.data.name)
                                      for k, v in ds.data_vars.items())
            with open_mfdataset(tmp, autoclose=self.autoclose) as ds:
                repeat_names = dict((k, v.data.name)
                                    for k, v in ds.data_vars.items())
            for var_name, dask_name in original_names.items():
                self.assertIn(var_name, dask_name)
                self.assertEqual(dask_name[:13], 'open_dataset-')
            self.assertEqual(original_names, repeat_names)
    def test_dataarray_compute(self):
        # Test DataArray.compute() on dask backend.
        # The test for Dataset.compute() is already in DatasetIOTestCases;
        # however dask is the only tested backend which supports DataArrays
        actual = DataArray([1, 2]).chunk()
        computed = actual.compute()
        self.assertFalse(actual._in_memory)
        self.assertTrue(computed._in_memory)
        assert_allclose(actual, computed, decode_bytes=False)
    def test_to_netcdf_compute_false_roundtrip(self):
        """to_netcdf(compute=False) returns a Delayed that writes on compute."""
        from dask.delayed import Delayed
        original = create_test_data().chunk()
        with create_tmp_file() as tmp_file:
            # dataset, path, **kwargs):
            delayed_obj = self.save(original, tmp_file, compute=False)
            assert isinstance(delayed_obj, Delayed)
            delayed_obj.compute()
            with self.open(tmp_file) as actual:
                assert_identical(original, actual)
    def test_save_mfdataset_compute_false_roundtrip(self):
        """save_mfdataset(compute=False) defers the writes until compute()."""
        from dask.delayed import Delayed
        original = Dataset({'foo': ('x', np.random.randn(10))}).chunk()
        datasets = [original.isel(x=slice(5)),
                    original.isel(x=slice(5, 10))]
        with create_tmp_file() as tmp1:
            with create_tmp_file() as tmp2:
                delayed_obj = save_mfdataset(datasets, [tmp1, tmp2],
                                             engine=self.engine, compute=False)
                assert isinstance(delayed_obj, Delayed)
                delayed_obj.compute()
                with open_mfdataset([tmp1, tmp2],
                                    autoclose=self.autoclose) as actual:
                    assert_identical(actual, original)
class DaskTestAutocloseTrue(DaskTest):
    # Re-run the dask tests with autoclose enabled.
    autoclose = True
@requires_scipy_or_netCDF4
@requires_pydap
class PydapTest(TestCase):
    # Compare datasets served through an in-memory pydap model against the
    # same data read directly from the local example netCDF file.
    def convert_to_pydap_dataset(self, original):
        """Rebuild *original* as a pydap DatasetType of GridType variables."""
        from pydap.model import GridType, BaseType, DatasetType
        ds = DatasetType('bears', **original.attrs)
        for key, var in original.data_vars.items():
            v = GridType(key)
            v[key] = BaseType(key, var.values, dimensions=var.dims,
                              **var.attrs)
            for d in var.dims:
                v[d] = BaseType(d, var[d].values)
            ds[key] = v
        # check all dims are stored in ds
        for d in original.coords:
            ds[d] = BaseType(d, original[d].values, dimensions=(d, ),
                             **original[d].attrs)
        return ds
    @contextlib.contextmanager
    def create_datasets(self, **kwargs):
        """Yield (dataset via PydapDataStore, local reference dataset)."""
        with open_example_dataset('bears.nc') as expected:
            pydap_ds = self.convert_to_pydap_dataset(expected)
            actual = open_dataset(PydapDataStore(pydap_ds))
            # TODO solve this workaround:
            # netcdf converts string to byte not unicode
            expected['bears'] = expected['bears'].astype(str)
            yield actual, expected
    def test_cmp_local_file(self):
        """The pydap view must match the local file, including under indexing."""
        with self.create_datasets() as (actual, expected):
            assert_equal(actual, expected)
            # global attributes should be global attributes on the dataset
            self.assertNotIn('NC_GLOBAL', actual.attrs)
            self.assertIn('history', actual.attrs)
            # we don't check attributes exactly with assertDatasetIdentical()
            # because the test DAP server seems to insert some extra
            # attributes not found in the netCDF file.
            assert actual.attrs.keys() == expected.attrs.keys()
        with self.create_datasets() as (actual, expected):
            assert_equal(
                actual.isel(l=2), expected.isel(l=2))  # noqa: E741
        with self.create_datasets() as (actual, expected):
            assert_equal(actual.isel(i=0, j=-1),
                         expected.isel(i=0, j=-1))
        with self.create_datasets() as (actual, expected):
            assert_equal(actual.isel(j=slice(1, 2)),
                         expected.isel(j=slice(1, 2)))
        with self.create_datasets() as (actual, expected):
            indexers = {'i': [1, 0, 0], 'j': [1, 2, 0, 1]}
            assert_equal(actual.isel(**indexers),
                         expected.isel(**indexers))
        with self.create_datasets() as (actual, expected):
            indexers = {'i': DataArray([0, 1, 0], dims='a'),
                        'j': DataArray([0, 2, 1], dims='a')}
            assert_equal(actual.isel(**indexers),
                         expected.isel(**indexers))
    def test_compatible_to_netcdf(self):
        # make sure it can be saved as a netcdf
        with self.create_datasets() as (actual, expected):
            with create_tmp_file() as tmp_file:
                actual.to_netcdf(tmp_file)
                actual = open_dataset(tmp_file)
                actual['bears'] = actual['bears'].astype(str)
                assert_equal(actual, expected)
    @requires_dask
    def test_dask(self):
        """The pydap store also works when opened with dask chunks."""
        with self.create_datasets(chunks={'j': 2}) as (actual, expected):
            assert_equal(actual, expected)
@network
@requires_scipy_or_netCDF4
@requires_pydap
class PydapOnlineTest(PydapTest):
    # Same comparisons as PydapTest, but against a real OPeNDAP server
    # (requires network access).
    @contextlib.contextmanager
    def create_datasets(self, **kwargs):
        """Yield (remote dataset opened via pydap, local reference dataset)."""
        url = 'http://test.opendap.org/opendap/hyrax/data/nc/bears.nc'
        actual = open_dataset(url, engine='pydap', **kwargs)
        with open_example_dataset('bears.nc') as expected:
            # workaround to restore string which is converted to byte
            expected['bears'] = expected['bears'].astype(str)
            yield actual, expected
    def test_session(self):
        """PydapDataStore.open must forward the auth session to pydap."""
        from pydap.cas.urs import setup_session
        session = setup_session('XarrayTestUser', 'Xarray2017')
        with mock.patch('pydap.client.open_url') as mock_func:
            xr.backends.PydapDataStore.open('http://test.url', session=session)
            mock_func.assert_called_with('http://test.url', session=session)
@requires_scipy
@requires_pynio
class PyNioTest(ScipyWriteTest, TestCase):
    # pynio can only read, so files are written with scipy and read back
    # through pynio.
    def test_write_store(self):
        # pynio is read-only for now
        pass
    @contextlib.contextmanager
    def open(self, path, **kwargs):
        """Open *path* with the pynio engine (reads only)."""
        with open_dataset(path, engine='pynio', autoclose=self.autoclose,
                          **kwargs) as ds:
            yield ds
    def save(self, dataset, path, **kwargs):
        """Write with scipy, since pynio cannot write netCDF files."""
        return dataset.to_netcdf(path, engine='scipy', **kwargs)
    def test_weakrefs(self):
        """Renamed views must stay valid after the source dataset is GC'd."""
        example = Dataset({'foo': ('x', np.arange(5.0))})
        expected = example.rename({'foo': 'bar', 'x': 'y'})
        with create_tmp_file() as tmp_file:
            example.to_netcdf(tmp_file, engine='scipy')
            on_disk = open_dataset(tmp_file, engine='pynio')
            actual = on_disk.rename({'foo': 'bar', 'x': 'y'})
            del on_disk  # trigger garbage collection
            assert_identical(actual, expected)
class PyNioTestAutocloseTrue(PyNioTest):
    # Re-run the pynio tests with autoclose enabled.
    autoclose = True
@requires_rasterio
@contextlib.contextmanager
def create_tmp_geotiff(nx=4, ny=3, nz=3,
                       transform=None,
                       transform_args=[5000, 80000, 1000, 2000.],
                       crs={'units': 'm', 'no_defs': True, 'ellps': 'WGS84',
                            'proj': 'utm', 'zone': 18},
                       open_kwargs={}):
    # yields a temporary geotiff file and a corresponding expected DataArray
    # NOTE(review): the list/dict defaults above are mutable but are only
    # read (unpacked / passed through), never mutated, so they are safe.
    import rasterio
    from rasterio.transform import from_origin
    with create_tmp_file(suffix='.tif') as tmp_file:
        # allow 2d or 3d shapes
        if nz == 1:
            data_shape = ny, nx
            write_kwargs = {'indexes': 1}
        else:
            data_shape = nz, ny, nx
            write_kwargs = {}
        data = np.arange(
            nz * ny * nx,
            dtype=rasterio.float32).reshape(
                *data_shape)
        if transform is None:
            transform = from_origin(*transform_args)
        with rasterio.open(
                tmp_file, 'w',
                driver='GTiff', height=ny, width=nx, count=nz,
                crs=crs,
                transform=transform,
                dtype=rasterio.float32,
                **open_kwargs) as s:
            s.write(data, **write_kwargs)
            dx, dy = s.res[0], -s.res[1]
        # build the DataArray that xr.open_rasterio is expected to return;
        # coordinates are pixel centers computed from transform_args
        # (a, b = origin; c, d = pixel sizes)
        a, b, c, d = transform_args
        data = data[np.newaxis, ...] if nz == 1 else data
        expected = DataArray(data, dims=('band', 'y', 'x'),
                             coords={
                                 'band': np.arange(nz) + 1,
                                 'y': -np.arange(ny) * d + b + dy / 2,
                                 'x': np.arange(nx) * c + a + dx / 2,
                             })
        yield tmp_file, expected
@requires_rasterio
class TestRasterio(TestCase):
@requires_scipy_or_netCDF4
def test_serialization(self):
with create_tmp_geotiff() as (tmp_file, expected):
# Write it to a netcdf and read again (roundtrip)
with xr.open_rasterio(tmp_file) as rioda:
with create_tmp_file(suffix='.nc') as tmp_nc_file:
rioda.to_netcdf(tmp_nc_file)
with xr.open_dataarray(tmp_nc_file) as ncds:
assert_identical(rioda, ncds)
def test_utm(self):
with create_tmp_geotiff() as (tmp_file, expected):
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert isinstance(rioda.attrs['crs'], basestring)
assert isinstance(rioda.attrs['res'], tuple)
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert isinstance(rioda.attrs['transform'], tuple)
np.testing.assert_array_equal(rioda.attrs['nodatavals'],
[np.NaN, np.NaN, np.NaN])
# Check no parse coords
with xr.open_rasterio(tmp_file, parse_coordinates=False) as rioda:
assert 'x' not in rioda.coords
assert 'y' not in rioda.coords
def test_non_rectilinear(self):
from rasterio.transform import from_origin
# Create a geotiff file with 2d coordinates
with create_tmp_geotiff(transform=from_origin(0, 3, 1, 1).rotation(45),
crs=None) as (tmp_file, _):
# Default is to not parse coords
with xr.open_rasterio(tmp_file) as rioda:
assert 'x' not in rioda.coords
assert 'y' not in rioda.coords
assert 'crs' not in rioda.attrs
assert isinstance(rioda.attrs['res'], tuple)
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert isinstance(rioda.attrs['transform'], tuple)
# See if a warning is raised if we force it
with self.assertWarns("transformation isn't rectilinear"):
with xr.open_rasterio(tmp_file,
parse_coordinates=True) as rioda:
assert 'x' not in rioda.coords
assert 'y' not in rioda.coords
def test_platecarree(self):
with create_tmp_geotiff(8, 10, 1, transform_args=[1, 2, 0.5, 2.],
crs='+proj=latlong',
open_kwargs={'nodata': -9765}) \
as (tmp_file, expected):
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert isinstance(rioda.attrs['crs'], basestring)
assert isinstance(rioda.attrs['res'], tuple)
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert isinstance(rioda.attrs['transform'], tuple)
np.testing.assert_array_equal(rioda.attrs['nodatavals'],
[-9765.])
def test_notransform(self):
# regression test for https://github.com/pydata/xarray/issues/1686
import rasterio
import warnings
# Create a geotiff file
with warnings.catch_warnings():
# rasterio throws a NotGeoreferencedWarning here, which is
# expected since we test rasterio's defaults in this case.
warnings.filterwarnings('ignore', category=UserWarning,
message='Dataset has no geotransform set')
with create_tmp_file(suffix='.tif') as tmp_file:
# data
nx, ny, nz = 4, 3, 3
data = np.arange(nx * ny * nz,
dtype=rasterio.float32).reshape(nz, ny, nx)
with rasterio.open(
tmp_file, 'w',
driver='GTiff', height=ny, width=nx, count=nz,
dtype=rasterio.float32) as s:
s.write(data)
# Tests
expected = DataArray(data,
dims=('band', 'y', 'x'),
coords={'band': [1, 2, 3],
'y': [0.5, 1.5, 2.5],
'x': [0.5, 1.5, 2.5, 3.5],
})
with xr.open_rasterio(tmp_file) as rioda:
assert_allclose(rioda, expected)
assert isinstance(rioda.attrs['res'], tuple)
assert isinstance(rioda.attrs['is_tiled'], np.uint8)
assert isinstance(rioda.attrs['transform'], tuple)
def test_indexing(self):
with create_tmp_geotiff(8, 10, 3, transform_args=[1, 2, 0.5, 2.],
crs='+proj=latlong') as (tmp_file, expected):
with xr.open_rasterio(tmp_file, cache=False) as actual:
# tests
# assert_allclose checks all data + coordinates
assert_allclose(actual, expected)
assert not actual.variable._in_memory
# Basic indexer
ind = {'x': slice(2, 5), 'y': slice(5, 7)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {'band': slice(1, 2), 'x': slice(2, 5), 'y': slice(5, 7)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {'band': slice(1, 2), 'x': slice(2, 5), 'y': 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# orthogonal indexer
ind = {'band': np.array([2, 1, 0]),
'x': np.array([1, 0]), 'y': np.array([0, 2])}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {'band': np.array([2, 1, 0]),
'x': np.array([1, 0]), 'y': 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# minus-stepped slice
ind = {'band': np.array([2, 1, 0]),
'x': slice(-1, None, -1), 'y': 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {'band': np.array([2, 1, 0]),
'x': 1, 'y': slice(-1, 1, -2)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# None is selected
ind = {'band': np.array([2, 1, 0]),
'x': 1, 'y': slice(2, 2, 1)}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# vectorized indexer
ind = {'band': DataArray([2, 1, 0], dims='a'),
'x': DataArray([1, 0, 0], dims='a'),
'y': np.array([0, 2])}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
ind = {
'band': DataArray([[2, 1, 0], [1, 0, 2]], dims=['a', 'b']),
'x': DataArray([[1, 0, 0], [0, 1, 0]], dims=['a', 'b']),
'y': 0}
assert_allclose(expected.isel(**ind), actual.isel(**ind))
assert not actual.variable._in_memory
# Selecting lists of bands is fine
ex = expected.isel(band=[1, 2])
ac = actual.isel(band=[1, 2])
assert_allclose(ac, ex)
ex = expected.isel(band=[0, 2])
ac = actual.isel(band=[0, 2])
assert_allclose(ac, ex)
# Integer indexing
ex = expected.isel(band=1)
ac = actual.isel(band=1)
assert_allclose(ac, ex)
ex = expected.isel(x=1, y=2)
ac = actual.isel(x=1, y=2)
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=2)
ac = actual.isel(band=0, x=1, y=2)
assert_allclose(ac, ex)
# Mixed
ex = actual.isel(x=slice(2), y=slice(2))
ac = actual.isel(x=[0, 1], y=[0, 1])
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=1, y=slice(5, 7))
ac = actual.isel(band=0, x=1, y=slice(5, 7))
assert_allclose(ac, ex)
ex = expected.isel(band=0, x=slice(2, 5), y=2)
ac = actual.isel(band=0, x=slice(2, 5), y=2)
assert_allclose(ac, ex)
# One-element lists
ex = expected.isel(band=[0], x=slice(2, 5), y=[2])
ac = actual.isel(band=[0], x=slice(2, 5), y=[2])
assert_allclose(ac, ex)
    def test_caching(self):
        # Caching is the default in open_rasterio: a full read pulls the
        # whole array into memory, after which arbitrary (non-windowed)
        # indexing works without touching the file again.
        with create_tmp_geotiff(8, 10, 3, transform_args=[1, 2, 0.5, 2.],
                                crs='+proj=latlong') as (tmp_file, expected):
            # Cache is the default
            with xr.open_rasterio(tmp_file) as actual:
                # This should cache everything
                assert_allclose(actual, expected)

                # once cached, non-windowed indexing should become possible
                ac = actual.isel(x=[2, 4])
                ex = expected.isel(x=[2, 4])
                assert_allclose(ac, ex)
    @requires_dask
    def test_chunks(self):
        # Opening with chunks= must produce a dask-backed DataArray whose
        # lazy arithmetic matches eager results on the same file.
        with create_tmp_geotiff(8, 10, 3, transform_args=[1, 2, 0.5, 2.],
                                crs='+proj=latlong') as (tmp_file, expected):
            # Chunk at open time
            with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:
                import dask.array as da
                self.assertIsInstance(actual.data, da.Array)
                # the dask graph key should identify the source operation
                assert 'open_rasterio' in actual.data.name

                # do some arithmetic
                ac = actual.mean()
                ex = expected.mean()
                assert_allclose(ac, ex)

                ac = actual.sel(band=1).mean(dim='x')
                ex = expected.sel(band=1).mean(dim='x')
                assert_allclose(ac, ex)
    def test_ENVI_tags(self):
        # ENVI-namespace tags (wavelength, fwhm, ...) must surface as band
        # coordinates / attrs on the opened DataArray.
        rasterio = pytest.importorskip('rasterio', minversion='1.0a')
        from rasterio.transform import from_origin

        # Create an ENVI file with some tags in the ENVI namespace
        # this test uses a custom driver, so we can't use create_tmp_geotiff
        with create_tmp_file(suffix='.dat') as tmp_file:
            # data
            nx, ny, nz = 4, 3, 3
            data = np.arange(nx * ny * nz,
                             dtype=rasterio.float32).reshape(nz, ny, nx)
            transform = from_origin(5000, 80000, 1000, 2000.)
            with rasterio.open(
                    tmp_file, 'w',
                    driver='ENVI', height=ny, width=nx, count=nz,
                    crs={'units': 'm', 'no_defs': True, 'ellps': 'WGS84',
                         'proj': 'utm', 'zone': 18},
                    transform=transform,
                    dtype=rasterio.float32) as s:
                s.update_tags(
                    ns='ENVI',
                    description='{Tagged file}',
                    wavelength='{123.000000, 234.234000, 345.345678}',
                    fwhm='{1.000000, 0.234000, 0.000345}')
                s.write(data)
                dx, dy = s.res[0], -s.res[1]

            # Tests
            # Coordinates reproduce the affine transform: pixel centers,
            # hence the +dx/2 and +dy/2 offsets.
            coords = {
                'band': [1, 2, 3],
                'y': -np.arange(ny) * 2000 + 80000 + dy / 2,
                'x': np.arange(nx) * 1000 + 5000 + dx / 2,
                'wavelength': ('band', np.array([123, 234.234, 345.345678])),
                'fwhm': ('band', np.array([1, 0.234, 0.000345])),
            }
            expected = DataArray(data, dims=('band', 'y', 'x'), coords=coords)

            with xr.open_rasterio(tmp_file) as rioda:
                assert_allclose(rioda, expected)
                # GDAL-level metadata keeps plain Python / numpy types
                assert isinstance(rioda.attrs['crs'], basestring)
                assert isinstance(rioda.attrs['res'], tuple)
                assert isinstance(rioda.attrs['is_tiled'], np.uint8)
                assert isinstance(rioda.attrs['transform'], tuple)
                # from ENVI tags
                assert isinstance(rioda.attrs['description'], basestring)
                assert isinstance(rioda.attrs['map_info'], basestring)
                assert isinstance(rioda.attrs['samples'], basestring)
    def test_no_mftime(self):
        # rasterio can accept "filename" arguments that are actually urls,
        # including paths to remote files.
        # In issue #1816, we found that these caused dask to break, because
        # the modification time was used to determine the dask token. This
        # tests ensure we can still chunk such files when reading with
        # rasterio.
        with create_tmp_geotiff(8, 10, 3, transform_args=[1, 2, 0.5, 2.],
                                crs='+proj=latlong') as (tmp_file, expected):
            # Simulate a source whose mtime cannot be read (as for a URL).
            with mock.patch('os.path.getmtime', side_effect=OSError):
                with xr.open_rasterio(tmp_file, chunks=(1, 2, 2)) as actual:
                    import dask.array as da
                    self.assertIsInstance(actual.data, da.Array)
                    assert_allclose(actual, expected)
    @network
    def test_http_url(self):
        # Opening a remote GeoTIFF over HTTP, eagerly and chunked.
        # more examples urls here
        # http://download.osgeo.org/geotiff/samples/
        url = 'http://download.osgeo.org/geotiff/samples/made_up/ntf_nord.tif'
        with xr.open_rasterio(url) as actual:
            assert actual.shape == (1, 512, 512)
        # make sure chunking works
        with xr.open_rasterio(url, chunks=(1, 256, 256)) as actual:
            import dask.array as da
            self.assertIsInstance(actual.data, da.Array)
class TestEncodingInvalid(TestCase):
    # Encoding dicts handed to the netCDF4 backend must contain only keys the
    # backend understands; unknown keys either raise or are dropped.

    def test_extract_nc4_variable_encoding(self):
        var = xr.Variable(('x',), [1, 2, 3], {}, {'foo': 'bar'})
        with raises_regex(ValueError, 'unexpected encoding'):
            _extract_nc4_variable_encoding(var, raise_on_invalid=True)

        # without raise_on_invalid, unknown keys are silently dropped
        var = xr.Variable(('x',), [1, 2, 3], {}, {'chunking': (2, 1)})
        encoding = _extract_nc4_variable_encoding(var)
        self.assertEqual({}, encoding)

        # regression test
        var = xr.Variable(('x',), [1, 2, 3], {}, {'shuffle': True})
        encoding = _extract_nc4_variable_encoding(var, raise_on_invalid=True)
        self.assertEqual({'shuffle': True}, encoding)

    def test_extract_h5nc_encoding(self):
        # not supported with h5netcdf (yet)
        var = xr.Variable(('x',), [1, 2, 3], {},
                          {'least_sigificant_digit': 2})
        with raises_regex(ValueError, 'unexpected encoding'):
            _extract_nc4_variable_encoding(var, raise_on_invalid=True)
class MiscObject:
    # Arbitrary, non-serializable object used below to exercise attribute
    # name/value validation when writing netCDF.
    pass
@requires_netCDF4
class TestValidateAttrs(TestCase):
    def test_validating_attrs(self):
        # The helpers build a fresh Dataset plus a handle on one of its three
        # attribute dicts: dataset-level, variable-level and coordinate-level.
        def new_dataset():
            return Dataset({'data': ('y', np.arange(10.0))},
                           {'y': np.arange(10)})

        def new_dataset_and_dataset_attrs():
            ds = new_dataset()
            return ds, ds.attrs

        def new_dataset_and_data_attrs():
            ds = new_dataset()
            return ds, ds.data.attrs

        def new_dataset_and_coord_attrs():
            ds = new_dataset()
            return ds, ds.coords['y'].attrs

        for new_dataset_and_attrs in [new_dataset_and_dataset_attrs,
                                      new_dataset_and_data_attrs,
                                      new_dataset_and_coord_attrs]:
            # Invalid attr names: non-strings and the empty string.
            ds, attrs = new_dataset_and_attrs()
            attrs[123] = 'test'
            with raises_regex(TypeError, 'Invalid name for attr'):
                ds.to_netcdf('test.nc')

            ds, attrs = new_dataset_and_attrs()
            attrs[MiscObject()] = 'test'
            with raises_regex(TypeError, 'Invalid name for attr'):
                ds.to_netcdf('test.nc')

            ds, attrs = new_dataset_and_attrs()
            attrs[''] = 'test'
            with raises_regex(ValueError, 'Invalid name for attr'):
                ds.to_netcdf('test.nc')

            # This one should work
            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = 'test'
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            # Invalid attr values: dicts and arbitrary objects.
            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = {'a': 5}
            with raises_regex(TypeError, 'Invalid value for attr'):
                ds.to_netcdf('test.nc')

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = MiscObject()
            with raises_regex(TypeError, 'Invalid value for attr'):
                ds.to_netcdf('test.nc')

            # Valid values: ints, floats, sequences, ndarrays and strings.
            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = 5
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = 3.14
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = [1, 2, 3, 4]
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = (1.9, 2.5)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = np.arange(5)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = np.arange(12).reshape(3, 4)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = 'This is a string'
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = ''
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)

            # NOTE(review): exact duplicate of the 2-D ndarray case above —
            # possibly meant to cover a different dtype; confirm.
            ds, attrs = new_dataset_and_attrs()
            attrs['test'] = np.arange(12).reshape(3, 4)
            with create_tmp_file() as tmp_file:
                ds.to_netcdf(tmp_file)
@requires_scipy_or_netCDF4
class TestDataArrayToNetCDF(TestCase):
    # Round-trip DataArray.to_netcdf / open_dataarray across naming setups.

    def test_dataarray_to_netcdf_no_name(self):
        original_da = DataArray(np.arange(12).reshape((3, 4)))
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_dataarray_to_netcdf_with_name(self):
        original_da = DataArray(np.arange(12).reshape((3, 4)),
                                name='test')
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_dataarray_to_netcdf_coord_name_clash(self):
        # the array name 'x' collides with its own dimension name
        original_da = DataArray(np.arange(12).reshape((3, 4)),
                                dims=['x', 'y'],
                                name='x')
        with create_tmp_file() as tmp:
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)

    def test_open_dataarray_options(self):
        data = DataArray(
            np.arange(5), coords={'y': ('x', range(5))}, dims=['x'])
        with create_tmp_file() as tmp:
            data.to_netcdf(tmp)
            # drop_variables must be honoured by open_dataarray too
            expected = data.drop('y')
            with open_dataarray(tmp, drop_variables=['y']) as loaded:
                assert_identical(expected, loaded)

    def test_dataarray_to_netcdf_return_bytes(self):
        # regression test for GH1410
        data = xr.DataArray([1, 2, 3])
        output = data.to_netcdf()
        assert isinstance(output, bytes)

    @requires_pathlib
    def test_dataarray_to_netcdf_no_name_pathlib(self):
        original_da = DataArray(np.arange(12).reshape((3, 4)))
        with create_tmp_file() as tmp:
            tmp = Path(tmp)
            original_da.to_netcdf(tmp)
            with open_dataarray(tmp) as loaded_da:
                assert_identical(original_da, loaded_da)
|
jcmgray/xarray
|
xarray/tests/test_backends.py
|
Python
|
apache-2.0
| 121,213
|
[
"NetCDF"
] |
1cff333ed4a0093b5c44fe6878cf88d2f80409fb35599e7c20ff659d3a5b26fa
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from builtins import object, dict, str
import os
import json
import logging
import networkx as nx
from indra.statements import Influence
# Python 2/3 compatibility: on Python 3 the builtin ``basestring`` no longer
# exists, so alias it to ``str``. Catch only NameError — a bare ``except``
# would also hide unrelated failures.
try:
    basestring  # Python 2: builtin exists, nothing to do
except NameError:
    # Python 3
    basestring = str

logger = logging.getLogger(__name__)
class CAGAssembler(object):
    """Assembles a causal analysis graph from INDRA Statements.

    Parameters
    ----------
    stmts : Optional[list[indra.statement.Statements]]
        A list of INDRA Statements to be assembled. Currently supports
        Influence Statements.

    Attributes
    ----------
    statements : list[indra.statements.Statement]
        A list of INDRA Statements to be assembled.
    CAG : nx.MultiDiGraph
        A networkx MultiDiGraph object representing the causal analysis graph.
    """
    def __init__(self, stmts=None):
        if not stmts:
            self.statements = []
        else:
            self.statements = stmts
        # Eidos grounding filter settings; overridden via make_model().
        self.grounding_threshold = None
        self.grounding_ontology = 'UN'
        # Built lazily by make_model().
        self.CAG = None

    def add_statements(self, stmts):
        """Add a list of Statements to the assembler."""
        self.statements += stmts

    def make_model(self, grounding_ontology='UN', grounding_threshold=None):
        """Return a networkx MultiDiGraph representing a causal analysis graph.

        Parameters
        ----------
        grounding_ontology : Optional[str]
            The ontology from which the grounding should be taken
            (e.g. UN, FAO)
        grounding_threshold : Optional[float]
            Minimum threshold score for Eidos grounding.

        Returns
        -------
        nx.MultiDiGraph
            The assembled CAG.
        """
        if grounding_threshold is not None:
            self.grounding_threshold = grounding_threshold
        self.grounding_ontology = grounding_ontology

        # Filter to Influence Statements which are currently supported
        statements = [stmt for stmt in self.statements if
                      isinstance(stmt, Influence)]

        # Initialize graph
        self.CAG = nx.MultiDiGraph()

        # Add nodes and edges to the graph
        for s in statements:
            # Get standardized name of subject and object
            # subj, obj = (self._node_name(s.subj), self._node_name(s.obj))

            # See if both subject and object have polarities given
            has_both_polarity = (s.subj.delta['polarity'] is not None and
                                 s.obj.delta['polarity'] is not None)

            # Add the nodes to the graph
            for node, delta in zip((s.subj.concept, s.obj.concept),
                                   (s.subj.delta, s.obj.delta)):
                self.CAG.add_node(self._node_name(node),
                                  simulable=has_both_polarity,
                                  mods=delta['adjectives'])

            # Edge is solid if both nodes have polarity given
            linestyle = 'solid' if has_both_polarity else 'dotted'
            if has_both_polarity:
                same_polarity = (s.subj.delta['polarity'] ==
                                 s.obj.delta['polarity'])
                if same_polarity:
                    target_arrow_shape, linecolor = ('circle', 'green')
                else:
                    target_arrow_shape, linecolor = ('tee', 'maroon')
            else:
                target_arrow_shape, linecolor = ('triangle', 'maroon')

            # Add edge to the graph with metadata from statement
            # (only the first evidence's provenance/text is attached)
            provenance = []
            if s.evidence:
                provenance = s.evidence[0].annotations.get('provenance', [])
                if provenance:
                    provenance[0]['text'] = s.evidence[0].text
            self.CAG.add_edge(
                self._node_name(s.subj.concept),
                self._node_name(s.obj.concept),
                subj_polarity=s.subj.delta['polarity'],
                subj_adjectives=s.subj.delta['adjectives'],
                obj_polarity=s.obj.delta['polarity'],
                obj_adjectives=s.obj.delta['adjectives'],
                linestyle=linestyle,
                linecolor=linecolor,
                targetArrowShape=target_arrow_shape,
                provenance=provenance,
            )

        return self.CAG

    def print_tsv(self, file_name):
        """Write one TSV row per (Influence statement, evidence) pair."""
        def _get_factor(stmt, concept, delta, evidence, raw_name):
            # Normalized factor name depends on the extraction system.
            # NOTE(review): factor_norm is never assigned for source_apis
            # other than eidos/hume/cwms/sofia — an unknown source would
            # raise UnboundLocalError here; confirm the input space.
            if evidence.source_api == 'eidos':
                if concept.db_refs[self.grounding_ontology]:
                    factor_norm = concept.db_refs[self.grounding_ontology][0][0]
                else:
                    factor_norm = ''
            elif evidence.source_api == 'hume':
                factor_norm = concept.db_refs['HUME'][0][0]
            elif evidence.source_api == 'cwms':
                factor_norm = concept.db_refs['CWMS']
            elif evidence.source_api == 'sofia':
                # TODO extract ontology catgory here
                factor_norm = concept.name
            mods = ', '.join(delta.get('adjectives', []))
            if delta.get('polarity') == -1:
                pol = 'decrease'
            elif delta.get('polarity') == 1:
                pol = 'increase'
            else:
                pol = ''
            name = raw_name if raw_name else concept.name
            return name, factor_norm, mods, pol

        def _get_evidence(evidence):
            # TODO: add sentence ID
            sent_id = ''
            location = evidence.annotations.get('Location')
            location = location if location is not None else ''
            time = evidence.annotations.get('Time')
            time = time if time is not None else ''
            ref = evidence.pmid if evidence.pmid is not None else ''
            return ref, evidence.source_api, sent_id, location, \
                time, evidence.text

        header = ['Source', 'System', 'Sentence ID',
                  'Factor A Text', 'Factor A Normalization',
                  'Factor A Modifiers', 'Factor A Polarity',
                  'Relation Text', 'Relation Normalization',
                  'Relation Modifiers',
                  'Factor B Text', 'Factor B Normalization',
                  'Factor B Modifiers', 'Factor B Polarity',
                  'Location', 'Time', 'Evidence',
                  'Relation ID']
        # NOTE(review): consider a with-block here — fh is not closed if a
        # write below raises.
        fh = open(file_name, 'w')
        fh.write('\t'.join(header) + '\n')
        # Filter to Influence Statements which are currently supported
        statements = [stmt for stmt in self.statements if
                      isinstance(stmt, Influence)]
        all_rows = []
        for idx, stmt in enumerate(statements):
            for evidence in stmt.evidence:
                source, system, sent_id, location, time, text = \
                    _get_evidence(evidence)
                factor_a, factor_a_norm, mod_a, pol_a = \
                    _get_factor(stmt, stmt.subj, stmt.subj_delta, evidence,
                                evidence.annotations['subj_text'])
                factor_b, factor_b_norm, mod_b, pol_b = \
                    _get_factor(stmt, stmt.obj, stmt.obj_delta, evidence,
                                evidence.annotations['obj_text'])
                relation_text = 'influences'
                # Can we get a more specific relation type here?
                relation_norm = ''
                relation_mod = ''
                row = [source, system, sent_id,
                       factor_a, factor_a_norm, mod_a, pol_a,
                       relation_text, relation_norm, relation_mod,
                       factor_b, factor_b_norm, mod_b, pol_b,
                       location, time, text, str(idx)]
                # de-duplicate identical rows
                if row not in all_rows:
                    all_rows.append(row)
        # rows sorted by Source column
        for row in sorted(all_rows, key=lambda x: x[0]):
            fh.write('\t'.join(row) + '\n')
        fh.close()

    def export_to_cytoscapejs(self):
        """Return CAG in format readable by CytoscapeJS.

        Return
        ------
        dict
            A JSON-like dict representing the graph for use with
            CytoscapeJS.
        """
        def _create_edge_data_dict(e):
            """Return a dict from a MultiDiGraph edge for CytoscapeJS export."""
            # A hack to get rid of the redundant 'Provenance' label.
            if e[3].get('provenance'):
                tooltip = e[3]['provenance'][0]
                if tooltip.get('@type'):
                    del tooltip['@type']
            else:
                tooltip = None

            # e is a (u, v, key, data) tuple from edges(data=True, keys=True)
            edge_data_dict = {
                'id' : e[0]+'_'+e[1],
                'source' : e[0],
                'target' : e[1],
                'linestyle' : e[3]["linestyle"],
                'linecolor' : e[3]["linecolor"],
                'targetArrowShape' : e[3]["targetArrowShape"],
                'subj_adjectives' : e[3]["subj_adjectives"],
                'subj_polarity' : e[3]["subj_polarity"],
                'obj_adjectives' : e[3]["obj_adjectives"],
                'obj_polarity' : e[3]["obj_polarity"],
                'tooltip' : tooltip,
                'simulable' : False if (
                    e[3]['obj_polarity'] is None or
                    e[3]['subj_polarity'] is None) else True,
                }
            return edge_data_dict

        return {
            'nodes': [{'data': {
                'id': n[0],
                'simulable': n[1]['simulable'],
                'tooltip': 'Modifiers: '+json.dumps(n[1]['mods'])}
                } for n in self.CAG.nodes(data=True)],
            'edges': [{'data': _create_edge_data_dict(e)}
                      for e in self.CAG.edges(data=True, keys=True)]
            }

    def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
        """Generate Javascript from a template to run in Jupyter notebooks.

        Parameters
        ----------
        cyjs_style : Optional[dict]
            A dict that sets CytoscapeJS style as specified in
            https://github.com/cytoscape/cytoscape.js/blob/master/documentation/md/style.md.

        cyjs_layout : Optional[dict]
            A dict that sets CytoscapeJS
            `layout parameters <http://js.cytoscape.org/#core/layout>`_.

        Returns
        -------
        str
            A Javascript string to be rendered in a Jupyter notebook cell.
        """
        # First, export the CAG to CyJS
        cyjs_elements = self.export_to_cytoscapejs()
        # Load the Javascript template
        tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                             'cag_template.js')
        with open(tempf, 'r') as fh:
            template = fh.read()
        # Load the default style and layout
        stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              'cag_style.json')
        with open(stylef, 'r') as fh:
            style = json.load(fh)
        # Apply style and layout only if arg wasn't passed in
        if cyjs_style is None:
            cyjs_style = style['style']
        if cyjs_layout is None:
            cyjs_layout = style['layout']
        # Now fill in the template
        formatted_args = tuple(json.dumps(x, indent=2) for x in
                               (cyjs_elements, cyjs_style, cyjs_layout))
        js_str = template % formatted_args
        return js_str

    def _node_name(self, concept):
        """Return a standardized name for a node given a Concept."""
        if (# grounding threshold is specified
            self.grounding_threshold is not None
            # The particular eidos ontology grounding (un/wdi/fao) is present
            and concept.db_refs[self.grounding_ontology]
            # The grounding score is above the grounding threshold
            and (concept.db_refs[self.grounding_ontology][0][1] >
                 self.grounding_threshold)):
            # Use the leaf of the ontology path, e.g. "a/b/c_d" -> "C d"
            entry = concept.db_refs[self.grounding_ontology][0][0]
            return entry.split('/')[-1].replace('_', ' ').capitalize()
        else:
            return concept.name.capitalize()
|
pvtodorov/indra
|
indra/assemblers/cag/assembler.py
|
Python
|
bsd-2-clause
| 12,390
|
[
"Cytoscape"
] |
2dd5f7cb6d2b9da981886903434a9ae65f49a868c3ff94d63f12a3c12cbe73df
|
# $Id$
#
# Copyright (C) 2003 Rational Discovery LLC
# All Rights Reserved
#
import sys
from rdkit import six
from rdkit.VLib.Node import VLibNode
class TransformNode(VLibNode):
  """ base class for nodes which transform their input

  Assumptions:

    - transform function takes a number of arguments equal to the
      number of inputs we have. We return whatever it returns

    - inputs (parents) can be stepped through in lockstep

  Usage Example:

    >>> from rdkit.VLib.Supply import SupplyNode
    >>> def func(a,b):
    ...   return a+b
    >>> tform = TransformNode(func)
    >>> suppl1 = SupplyNode(contents=[1,2,3,3])
    >>> suppl2 = SupplyNode(contents=[1,2,3,1])
    >>> tform.AddParent(suppl1)
    >>> tform.AddParent(suppl2)
    >>> v = [x for x in tform]
    >>> v
    [2, 4, 6, 4]
    >>> tform.reset()
    >>> v = [x for x in tform]
    >>> v
    [2, 4, 6, 4]

  If we don't provide a function, just return the inputs:

    >>> tform = TransformNode()
    >>> suppl1 = SupplyNode(contents=[1,2,3,3])
    >>> suppl2 = SupplyNode(contents=[1,2,3,1])
    >>> tform.AddParent(suppl1)
    >>> tform.AddParent(suppl2)
    >>> v = [x for x in tform]
    >>> v
    [(1, 1), (2, 2), (3, 3), (3, 1)]

  """

  def __init__(self, func=None, **kwargs):
    VLibNode.__init__(self, **kwargs)
    # transform applied to each lockstep tuple; None means pass-through
    self._func = func

  def next(self):
    """ Pull one value from each parent in lockstep and return the transform
    of the tuple (or the tuple itself if no function was provided).

    StopIteration from any exhausted parent simply propagates; the previous
    ``except StopIteration: raise StopIteration`` was a no-op and has been
    removed along with two unused local variables.
    """
    args = []
    for parent in self.GetParents():
      args.append(parent.next())
    args = tuple(args)
    if self._func is not None:
      return self._func(*args)
    return args
# Python 3 renamed the iterator-protocol method to __next__; alias it so the
# same class iterates under both interpreters.
if six.PY3:
  TransformNode.__next__ = TransformNode.next
#------------------------------------
#
# doctest boilerplate
#
def _test():
  """Run this module's doctests; returns (#failed, #attempted)."""
  import doctest
  import sys
  return doctest.testmod(sys.modules["__main__"])
if __name__ == '__main__':
  import sys
  failed, tried = _test()
  # exit status is the number of failed doctests (0 == success)
  sys.exit(failed)
|
jandom/rdkit
|
rdkit/VLib/Transform.py
|
Python
|
bsd-3-clause
| 1,971
|
[
"RDKit"
] |
432d541534fd1caeb2c1a938244cd33144536604178bef85f45f7ae48f65ee6e
|
from ._MultiNEAT import *
def Scale(a, a_min, a_max, a_tr_min, a_tr_max):
    """Linearly map *a* from the range [a_min, a_max] onto
    [a_tr_min, a_tr_max]. A degenerate source range returns a_max.
    """
    source_span = a_max - a_min
    if source_span == 0:
        return a_max
    fraction = (a - a_min) / source_span
    target_span = a_tr_max - a_tr_min
    return a_tr_min + target_span * fraction
def Clamp(a, min, max):
    """Restrict *a* to the closed interval [min, max].

    (Parameter names shadow the builtins but are kept for API
    compatibility with existing keyword callers.)
    """
    if a < min:
        return min
    if a > max:
        return max
    return a
def AlmostEqual(a, b, margin):
    """True when *a* and *b* differ by at most *margin* (inclusive)."""
    return abs(a - b) <= margin
# Optional visualization dependencies. Each guard catches ImportError only —
# the previous bare ``except:`` clauses would also have hidden unrelated
# failures (e.g. KeyboardInterrupt, broken installs).
try:
    import numpy as np
    from numpy import array, clip
except ImportError:
    print('Install NumPy for visualization')

try:
    import cv2
    cvnumpy_installed = True
except ImportError:
    print ('Tip: install the OpenCV computer vision library (2.0+) with '
           'Python bindings to get convenient neural network visualization to NumPy arrays.')
    cvnumpy_installed = False

try:
    import matplotlib.pyplot as plt
    matplotlib_installed = True
except ImportError:
    matplotlib_installed = False
if matplotlib_installed:
    def render_nn(nn, ax=None,
                  is_substrate=False,
                  details=False,
                  invert_yaxis=True,
                  connection_alpha=1.0):
        # Draw network *nn* onto the matplotlib axes *ax* (current axes by
        # default). Substrate networks use each neuron's substrate_coords;
        # otherwise the precomputed n.x / n.y layout positions are used.
        if ax is None:
            ax = plt.gca()

        if is_substrate:
            ax.set_xlim(-1.1, 1.1)
            ax.set_ylim(-1.1, 1.1)
            node_radius = 0.05
        else:
            ax.set_xlim(-0.05, 1.05)
            ax.set_ylim(-0.05, 1.05)
            node_radius = 0.03

        if invert_yaxis: ax.invert_yaxis()

        # get the max weight (used to scale arrow transparency)
        max_weight = max([c.weight for c in nn.connections])

        # connections
        for connection in nn.connections:
            n1 = nn.neurons[connection.source_neuron_idx]
            n2 = nn.neurons[connection.target_neuron_idx]
            if is_substrate:
                n1_x, n1_y = n1.substrate_coords[0], n1.substrate_coords[1]
                n2_x, n2_y = n2.substrate_coords[0], n2.substrate_coords[1]
            else:
                n1_x, n1_y = n1.x, n1.y
                n2_x, n2_y = n2.x, n2.y

            offsetx = n2_x - n1_x
            offsety = n2_y - n1_y

            # NOTE(review): purely horizontal or vertical connections are
            # skipped entirely here — confirm this is intentional.
            if offsetx == 0 or offsety == 0:
                continue

            # if going left->right, offset is a bit to the left and vice versa
            # same for y
            if n1_x - offsetx < 0:
                ox = -node_radius * 0.9
            elif n1_x - offsetx > 0:
                ox = node_radius * 0.9
            else:
                ox = 0

            if n1_y - offsety < 0:
                oy = -node_radius * 0.9
            elif n1_y - offsety > 0:
                oy = node_radius * 0.9
            else:
                oy = 0

            # line width follows the weight, clipped to [-2, 2]
            wg = clip(connection.weight, -2, 2)
            if connection.weight > 0.0:
                # positive weights in red
                ax.arrow(n1_x, n1_y, offsetx+ox, offsety+oy, head_width = node_radius*0.8,
                         head_length = node_radius*1.2, fc='red', ec='red', length_includes_head=True,
                         linewidth = abs(wg),
                         alpha = connection_alpha*np.clip(0.1+abs(connection.weight)/max_weight, 0, 1))
            else:
                # non-positive weights in blue
                ax.arrow(n1_x, n1_y, offsetx+ox, offsety+oy, head_width = node_radius*0.8,
                         head_length = node_radius*1.2, fc='blue', ec='blue', length_includes_head=True,
                         linewidth = abs(wg),
                         alpha = connection_alpha*np.clip(0.1+abs(connection.weight)/max_weight, 0, 1))

        # neurons
        for index in range(len(nn.neurons)):
            n = nn.neurons[index]
            if is_substrate:
                nx, ny = n.substrate_coords[0], n.substrate_coords[1]
            else:
                nx, ny = n.x, n.y

            # fill colour shades grey->blue for negative activation,
            # grey->red for positive
            a = n.activation
            if a < 0:
                clr = array([0.3,0.3,0.3]) + array([0,0,0.5]) * (-a)
            else:
                clr = array([0.3,0.3,0.3]) + array([0.5,0,0]) * (a)
            clr = clip(clr, 0, 1)

            # edge colour encodes the neuron type
            if n.type == NeuronType.INPUT:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='green', fc=clr, linewidth=3, zorder=2))
            elif n.type == NeuronType.BIAS:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='black', fc=(1,1,1), linewidth=3, zorder=2))
            elif n.type == NeuronType.HIDDEN:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='grey', fc=clr, linewidth=3, zorder=2))
            elif n.type == NeuronType.OUTPUT:
                ax.add_patch(plt.Circle((nx, ny), node_radius, ec='brown', fc=clr, linewidth=3, zorder=2))
    def plot_nn(nn, ax=None,
                is_substrate=False,
                details=False,
                invert_yaxis=True,
                connection_alpha=1.0):
        # Lay out a network on the unit square (by split_y depth), then
        # delegate the actual drawing to render_nn. Genomes are first
        # converted to a phenotype NeuralNetwork.
        # if this is a genome, make a NN from it
        if type(nn) == Genome:
            kk = NeuralNetwork()
            nn.BuildPhenotype(kk)
            nn = kk

        # substrates carry their own coordinates; no layout needed
        if is_substrate:
            return render_nn(nn, ax,
                             is_substrate=True,
                             details=details,
                             invert_yaxis=invert_yaxis)

        # not a substrate, compute the node coordinates
        for i, n in enumerate(nn.neurons):
            nn.neurons[i].x = 0
            nn.neurons[i].y = 0

        rect_x = 0
        rect_y = 0
        rect_x_size = 1
        rect_y_size = 1
        neuron_radius = 0.03
        MAX_DEPTH = 64

        # for every depth, count how many nodes are on this depth
        all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
        for depth in all_depths:
            neuron_count = 0
            for neuron in nn.neurons:
                if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
                    neuron_count += 1
            if neuron_count == 0:
                continue

            # calculate x positions of neurons: spread evenly across the row
            xxpos = rect_x_size / (1 + neuron_count)
            j = 0
            for neuron in nn.neurons:
                if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
                    neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
                    j = j + 1

        # calculate y positions of nodes (split_y drives vertical placement;
        # the top row is nudged down by one neuron radius)
        for neuron in nn.neurons:
            base_y = rect_y + neuron.split_y
            size_y = rect_y_size - neuron_radius
            if neuron.split_y == 0.0:
                neuron.y = base_y * size_y + neuron_radius
            else:
                neuron.y = base_y * size_y

        # done, render the nn
        return render_nn(nn, ax,
                         is_substrate=False,
                         details=details,
                         invert_yaxis=invert_yaxis)
# Faster Neural Network display code
# image is a NumPy array
# rect is a tuple in the form (x, y, size_x, size_y)
if not cvnumpy_installed:
    def DrawPhenotype(image, rect, nn, neuron_radius=15,
                      max_line_thickness=3, substrate=False):
        # Stub kept so the module API is importable without OpenCV/NumPy.
        print("OpenCV/NumPy don't appear to be installed")
        raise NotImplementedError
else:
MAX_DEPTH = 64
def DrawPhenotype(image, rect, nn, neuron_radius=15,
max_line_thickness=3, substrate=False):
for i, n in enumerate(nn.neurons):
nn.neurons[i].x = 0
nn.neurons[i].y = 0
rect_x = rect[0]
rect_y = rect[1]
rect_x_size = rect[2]
rect_y_size = rect[3]
if not substrate:
depth = 0
# for every depth, count how many nodes are on this depth
all_depths = np.linspace(0.0, 1.0, MAX_DEPTH)
for depth in all_depths:
neuron_count = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron_count += 1
if neuron_count == 0:
continue
# calculate x positions of neurons
xxpos = rect_x_size / (1 + neuron_count)
j = 0
for neuron in nn.neurons:
if AlmostEqual(neuron.split_y, depth, 1.0 / (MAX_DEPTH+1)):
neuron.x = rect_x + xxpos + j * (rect_x_size / (2 + neuron_count))
j = j + 1
# calculate y positions of nodes
for neuron in nn.neurons:
base_y = rect_y + neuron.split_y
size_y = rect_y_size - neuron_radius
if neuron.split_y == 0.0:
neuron.y = base_y * size_y + neuron_radius
else:
neuron.y = base_y * size_y
else:
# HyperNEAT substrate
# only the first 2 dimensions are used for drawing
# if a layer is 1D, y values will be supplied to make 3 rows
# determine min/max coords in NN
xs = [(neuron.substrate_coords[0]) for neuron in nn.neurons]
ys = [(neuron.substrate_coords[1]) for neuron in nn.neurons]
min_x, min_y, max_x, max_y = min(xs), min(ys), max(xs), max(ys)
#dims = [len(neuron.substrate_coords) for neuron in nn.neurons]
for neuron in nn.neurons:
# TODO(jkoelker) Make the rect_x_size / 15 a variable
neuron.x = Scale(neuron.substrate_coords[0], min_x, max_x,
rect_x_size / 15,
rect_x_size - rect_x_size / 15)
neuron.y = Scale(neuron.substrate_coords[1], min_y, max_y,
rect_x_size / 15,
rect_y_size - rect_x_size / 15)
# the positions of neurons is computed, now we draw
# connections first
if len(nn.connections) > 0:
max_weight = max([abs(x.weight) for x in nn.connections])
else:
max_weight = 1.0
if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
np.int, np.int8, np.int16, np.int32]:
magn = 255.0
else:
magn = 1.0
for conn in nn.connections:
thickness = conn.weight
thickness = Scale(thickness, 0, max_weight, 1, max_line_thickness)
thickness = Clamp(thickness, 1, max_line_thickness)
w = Scale(abs(conn.weight), 0.0, max_weight, 0.0, 1.0)
w = Clamp(w, 0.75, 1.0)
if conn.recur_flag:
if conn.weight < 0:
# green weight
color = (0, magn * w, 0)
else:
# white weight
color = (magn * w, magn * w, magn * w)
else:
if conn.weight < 0:
# blue weight
color = (0, 0, magn * w)
else:
# red weight
color = (magn * w, 0, 0)
if magn == 255:
color = tuple(int(x) for x in color)
# if the link is looping back on the same neuron, draw it with
# ellipse
if conn.source_neuron_idx == conn.target_neuron_idx:
pass # todo: later
else:
# Draw a line
pt1 = (int(nn.neurons[conn.source_neuron_idx].x),
int(nn.neurons[conn.source_neuron_idx].y))
pt2 = (int(nn.neurons[conn.target_neuron_idx].x),
int(nn.neurons[conn.target_neuron_idx].y))
cv2.line(image, pt1, pt2, color, int(thickness))
# draw all neurons
for neuron in nn.neurons:
pt = (int(neuron.x), int(neuron.y))
a = neuron.activation
if a < 0:
clr = array([0.3,0.3,0.3]) + array([0, 0, .7]) * (-a)
else:
clr = array([0.3,0.3,0.3]) + array([.7, .7, .7]) * (a)
clr = clip(clr, 0, 1)
if image.dtype in [np.uint8, np.uint16, np.uint32, np.uint,
np.int, np.int8, np.int16, np.int32]:
clr = (clr*255).astype(np.uint8)
clr = tuple(int(x) for x in clr)
a = Clamp(a, 0.3, 2.0)
if np.isnan(a):
a = 0.0
if neuron.type == NeuronType.INPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,255,0), thickness=2) # outline
elif neuron.type == NeuronType.BIAS:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (0,0,0), thickness=2) # outline
elif neuron.type == NeuronType.HIDDEN:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled
cv2.circle(image, pt, neuron_radius, (127,127,127), thickness=2) # outline
elif neuron.type == NeuronType.OUTPUT:
cv2.circle(image, pt, int(neuron_radius*a), clr, thickness=-1) # filled first
cv2.circle(image, pt, neuron_radius, (255,255,0), thickness=2) # outline
# More general one that returns a NumPy image
def Draw(x, size=(300, 300)):
    """Render *x* — a NeuralNetwork, or a Genome (converted to its
    phenotype network first) — into a fresh dark-grey NumPy image of the
    given *size* and return that image.

    NOTE(review): the drawing rectangle is fixed at 250x250 regardless of
    *size* — confirm whether it should track *size*.
    """
    canvas = np.full((size[0], size[1], 3), 10, dtype=np.uint8)
    if isinstance(x, NeuralNetwork):
        net = x
    else:
        net = NeuralNetwork()
        x.BuildPhenotype(net)
    DrawPhenotype(canvas, (0, 0, 250, 250), net)
    return canvas
|
peter-ch/MultiNEAT
|
MultiNEAT/viz.py
|
Python
|
lgpl-3.0
| 13,883
|
[
"NEURON"
] |
730d7403255204a41e8b0142cc684a89686fed4c16a0407f5c99dc7ad18a7c2f
|
from ase import *

# Python 2 test script: seven hydrogen atoms — six fixed in a 2x3 grid plus
# one mobile atom above — relaxed with a Lennard-Jones potential.
atoms = Atoms('H7',
              positions=[(0, 0, 0),
                         (1, 0, 0),
                         (0, 1, 0),
                         (1, 1, 0),
                         (0, 2, 0),
                         (1, 2, 0),
                         (0.5, 0.5, 1)],
              constraint=[FixAtoms(range(6))],
              calculator=LennardJones())
traj = PickleTrajectory('H.traj', 'w', atoms)
dyn = QuasiNewton(atoms, maxstep=0.2)
# write a trajectory frame after every optimizer step
dyn.attach(traj.write)
dyn.run(fmax=0.01, steps=100)
# show the tail of the quasi-Newton Hessian
print dyn.H[-3:,-3:]

# Deleting an atom while a FixAtoms constraint references it must raise
# RuntimeError; anything else is a test failure.
try:
    del atoms[-1]
except RuntimeError:
    pass
else:
    raise RuntimeError
|
freephys/python_ase
|
ase/test/example.py
|
Python
|
gpl-3.0
| 627
|
[
"ASE"
] |
bcb75adf627f93fe7d0005c0f47acf782d255d59b5f6d224ef0867bb7c06e14f
|
#!/usr/bin/env python
#from sumatra.projects import load_project
#from sumatra.parameters import build_parameters
#from sumatra.decorators import capture
from ruffus import *
import sys
import os
import time
import datetime
import drmaa
from omics_pipe.utils import *
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.modules.fastqc import fastqc
from omics_pipe.modules.ChIP_trim import ChIP_trim
from omics_pipe.modules.bowtie import bowtie
from omics_pipe.modules.read_density import read_density
from omics_pipe.modules.macs import macs
from omics_pipe.modules.homer_peaks import homer_peaks
from omics_pipe.modules.peak_track import peak_track
from omics_pipe.modules.annotate_peaks import annotate_peaks
from omics_pipe.modules.find_motifs import find_motifs
# Pipeline parameters as attribute-style dict (Python 2 / ruffus script).
p = Bunch(default_parameters)
os.chdir(p.WORKING_DIR)
now = datetime.datetime.now()
date = now.strftime("%Y-%m-%d %H:%M")

print p

# For every per-sample step, build the [sample, flag-file] argument pairs
# consumed by the @parallel decorators; flag files mark completed work.
for step in p.STEPS:
    vars()['inputList_' + step] = []
    for sample in p.SAMPLE_LIST:
        vars()['inputList_' + step].append([sample, "%s/%s_%s_completed.flag" % (p.FLAG_PATH, step, sample)])
    print vars()['inputList_' + step]

# Steps in STEPS_PAIRS get a single [step, flag-file] entry each
# (presumably sample-pair/aggregate tasks — see the corresponding modules).
for steps in p.STEPS_PAIRS:
    vars()['inputList_' + steps] = []
    vars()['inputList_' + steps].append([steps, "%s/%s_completed.flag" % (p.FLAG_PATH, steps)])
    print vars()['inputList_' + steps]
@parallel(inputList_fastqc)
@check_if_uptodate(check_file_exists)
def run_fastqc(sample, fastqc_flag):
    # Ruffus task: FastQC quality report for one sample; the flag path is
    # checked by check_file_exists to skip completed work.
    fastqc(sample, fastqc_flag)
    return
@parallel(inputList_ChIP_trim)
@check_if_uptodate(check_file_exists)
def run_ChIP_trim(sample, ChIP_trim_flag):
    # Ruffus task: adapter/quality trimming of ChIP-seq reads for one sample.
    ChIP_trim(sample, ChIP_trim_flag)
    return
@parallel(inputList_bowtie)
@check_if_uptodate(check_file_exists)
@follows(run_ChIP_trim)
def run_bowtie(sample, bowtie_flag):
    # Ruffus task: Bowtie alignment; depends on trimming having finished.
    bowtie(sample, bowtie_flag)
    return
@parallel(inputList_read_density)
@check_if_uptodate(check_file_exists)
@follows(run_bowtie)
def run_read_density(sample, read_density_flag):
    # Ruffus task: read-density computation from the Bowtie alignments.
    read_density(sample, read_density_flag)
    return
@parallel(inputList_homer_peaks)
@check_if_uptodate(check_file_exists)
@follows(run_read_density)
def run_homer_peaks(step, homer_peaks_flag):
    # Ruffus task: HOMER peak calling (pair-level step, not per-sample).
    homer_peaks(step, homer_peaks_flag)
    return
@parallel(inputList_peak_track)
@check_if_uptodate(check_file_exists)
@follows(run_homer_peaks)
def run_peak_track(step, peak_track_flag):
    """Ruffus task: build a peak track (after HOMER peak calling)."""
    peak_track(step, peak_track_flag)
@parallel(inputList_annotate_peaks)
@check_if_uptodate(check_file_exists)
@follows(run_homer_peaks)
def run_annotate_peaks(step, annotate_peaks_flag):
    """Ruffus task: annotate called peaks (after HOMER peak calling)."""
    annotate_peaks(step, annotate_peaks_flag)
@parallel(inputList_find_motifs)
@check_if_uptodate(check_file_exists)
@follows(run_homer_peaks)
def run_find_motifs(step, find_motifs_flag):
    """Ruffus task: run motif discovery (after HOMER peak calling)."""
    find_motifs(step, find_motifs_flag)
@parallel(inputList_macs)
@check_if_uptodate(check_file_exists)
@follows(run_bowtie)
def run_macs(step, macs_flag):
    """Ruffus task: call peaks with MACS (after alignment)."""
    macs(step, macs_flag)
@parallel(inputList_last_function)
@check_if_uptodate(check_file_exists)
@follows(run_fastqc, run_find_motifs, run_peak_track, run_annotate_peaks, run_macs)
def last_function(sample, last_function_flag):
print "PIPELINE HAS FINISHED SUCCESSFULLY!!! YAY!"
pipeline_graph_output = p.FLAG_PATH + "/pipeline_" + sample + "_" + str(date) + ".pdf"
#pipeline_printout_graph(pipeline_graph_output,'pdf', [step,steps], no_key_legend=False)
stage = "last_function"
flag_file = "%s/%s_%s_completed.flag" % (p.FLAG_PATH, stage, sample)
open(flag_file, 'w').close()
return
if __name__ == '__main__':
    # Drive the ruffus pipeline up to the target task(s) named in p.STEP.
    pipeline_run(p.STEP, multiprocess = p.PIPE_MULTIPROCESS, verbose = p.PIPE_VERBOSE, gnu_make_maximal_rebuild_mode = p.PIPE_REBUILD)
|
adammaikai/OmicsPipe2.0
|
omics_pipe/ChIPseq.py
|
Python
|
mit
| 3,774
|
[
"Bowtie"
] |
a05fa93fcf195e639cb875810cfd0cdb54179efb39cc7a1bbcf1b5c27ad3f9b1
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Kovid Goyal <kovid at kovidgoyal.net>'
import httplib, hashlib, zlib, string, time
from io import BytesIO
from tempfile import NamedTemporaryFile
from calibre import guess_type
from calibre.srv.tests.base import BaseTest, TestServer
from calibre.utils.monotonic import monotonic
class TestHTTP(BaseTest):
    # NOTE(review): `self.ae` is presumably BaseTest's shorthand for
    # assertEqual — confirm against calibre.srv.tests.base.

    def test_header_parsing(self):  # {{{
        'Test parsing of HTTP headers'
        from calibre.srv.http_request import HTTPHeaderParser

        def test(name, *lines, **kwargs):
            # Push raw header lines and compare the parsed header dict with
            # the expected (title-cased) key/value pairs.
            p = HTTPHeaderParser()
            p.push(*lines)
            self.assertTrue(p.finished)
            self.assertSetEqual(set(p.hdict.items()), {(k.replace('_', '-').title(), v) for k, v in kwargs.iteritems()}, name + ' failed')
        test('Continuation line parsing',
            'a: one',
            'b: two',
            ' 2',
            '\t3',
            'c:three',
            '\r\n', a='one', b='two 2 3', c='three')
        test('Non-ascii headers parsing',
            b'a:mūs\r', '\r\n', a='mūs')
        test('Comma-separated parsing',
            'Accept-Encoding: one',
            'accept-Encoding: two',
            '\r\n', accept_encoding='one, two')

        def parse(*lines):
            # Malformed header blocks must raise ValueError.
            lines = list(lines)
            lines.append(b'\r\n')
            self.assertRaises(ValueError, HTTPHeaderParser().push, *lines)
        parse('Connection:mūs\r\n'.encode('utf-16'))
        parse(b'Connection\r\n')
        parse(b'Connection:a\r\n', b'\r\n')
        parse(b' Connection:a\n')
        parse(b':a\n')
    # }}}

    def test_accept_encoding(self):  # {{{
        'Test parsing of Accept-Encoding'
        from calibre.srv.http_response import acceptable_encoding

        def test(name, val, ans, allowed={'gzip'}):
            self.ae(acceptable_encoding(val, allowed), ans, name + ' failed')
        test('Empty field', '', None)
        test('Simple', 'gzip', 'gzip')
        test('Case insensitive', 'GZIp', 'gzip')
        test('Multiple', 'gzip, identity', 'gzip')
        # The encoding with the highest q-value wins.
        test('Priority', '1;q=0.5, 2;q=0.75, 3;q=1.0', '3', {'1', '2', '3'})
    # }}}

    def test_accept_language(self):  # {{{
        'Test parsing of Accept-Language'
        from calibre.srv.http_response import preferred_lang
        from calibre.utils.localization import get_translator

        def test(name, val, ans):
            self.ae(preferred_lang(val, lambda x:(True, x, None)), ans, name + ' failed')
        test('Empty field', '', 'en')
        test('Simple', 'de', 'de')
        test('Case insensitive', 'Es', 'es')
        test('Multiple', 'fr, es', 'fr')
        test('Priority', 'en;q=0.1, de;q=0.7, fr;q=0.5', 'de')

        def handler(data):
            # Echo the negotiated language plus a translated string, so the
            # response proves which translator was selected.
            return data.lang_code + data._('Unknown')
        with TestServer(handler, timeout=0.1) as server:
            conn = server.connect()

            def test(al, q):
                conn.request('GET', '/', headers={'Accept-Language': al})
                r = conn.getresponse()
                self.ae(r.status, httplib.OK)
                q += get_translator(q)[-1].ugettext('Unknown')
                self.ae(r.read(), q)
            test('en', 'en')
            test('eng', 'en')
            test('es', 'es')
    # }}}

    def test_range_parsing(self):  # {{{
        'Test parsing of Range header'
        from calibre.srv.http_response import get_ranges

        def test(val, *args):
            # Ranges are resolved against a content length of 100 bytes.
            pval = get_ranges(val, 100)
            if len(args) == 1 and args[0] is None:
                self.assertIsNone(pval, val)
            else:
                self.assertListEqual([tuple(x) for x in pval], list(args), val)
        test('crap', None)
        test('crap=', None)
        test('crap=1', None)
        test('crap=1-2', None)
        test('bytes=a-2')
        test('bytes=0-99', (0, 99, 100))
        test('bytes=0-0,-1', (0, 0, 1), (99, 99, 1))
        test('bytes=-5', (95, 99, 5))
        test('bytes=95-', (95, 99, 5))
        test('bytes=-200', (0, 99, 100))
    # }}}

    def test_http_basic(self):  # {{{
        'Test basic HTTP protocol conformance'
        from calibre.srv.errors import HTTPNotFound, HTTPRedirect
        body = 'Requested resource not found'

        def handler(data):
            raise HTTPNotFound(body)

        def raw_send(conn, raw):
            # Send raw bytes, then force httplib's client state machine into
            # "request sent" so getresponse() is willing to read the reply.
            conn.send(raw)
            conn._HTTPConnection__state = httplib._CS_REQ_SENT
            return conn.getresponse()

        with TestServer(handler, timeout=0.1, max_header_line_size=100./1024, max_request_body_size=100./(1024*1024)) as server:
            conn = server.connect()
            r = raw_send(conn, b'hello\n')
            self.ae(r.status, httplib.BAD_REQUEST)
            self.ae(r.read(), b'HTTP requires CRLF line terminators')

            # A single leading empty line is tolerated, multiple are not.
            r = raw_send(conn, b'\r\nGET /index.html HTTP/1.1\r\n\r\n')
            self.ae(r.status, httplib.NOT_FOUND), self.ae(r.read(), b'Requested resource not found')
            r = raw_send(conn, b'\r\n\r\nGET /index.html HTTP/1.1\r\n\r\n')
            self.ae(r.status, httplib.BAD_REQUEST)
            self.ae(r.read(), b'Multiple leading empty lines not allowed')

            r = raw_send(conn, b'hello world\r\n')
            self.ae(r.status, httplib.BAD_REQUEST)
            self.ae(r.read(), b'Malformed Request-Line')

            # Longer than max_header_line_size set above.
            r = raw_send(conn, b'x' * 200)
            self.ae(r.status, httplib.BAD_REQUEST)
            self.ae(r.read(), b'')

            r = raw_send(conn, b'XXX /index.html HTTP/1.1\r\n\r\n')
            self.ae(r.status, httplib.BAD_REQUEST), self.ae(r.read(), b'Unknown HTTP method')

            # Test 404
            conn.request('HEAD', '/moose')
            r = conn.getresponse()
            self.ae(r.status, httplib.NOT_FOUND)
            self.assertIsNotNone(r.getheader('Date', None))
            self.ae(r.getheader('Content-Length'), str(len(body)))
            self.ae(r.getheader('Content-Type'), 'text/plain; charset=UTF-8')
            self.ae(len(r.getheaders()), 3)
            # HEAD responses carry headers but no body.
            self.ae(r.read(), '')
            conn.request('GET', '/choose')
            r = conn.getresponse()
            self.ae(r.status, httplib.NOT_FOUND)
            self.ae(r.read(), b'Requested resource not found')

            # Test 500
            orig = server.loop.log.filter_level
            # Silence the expected traceback from the deliberate ZeroDivisionError.
            server.loop.log.filter_level = server.loop.log.ERROR + 10
            server.change_handler(lambda data:1/0)
            conn = server.connect()
            conn.request('GET', '/test/')
            r = conn.getresponse()
            self.ae(r.status, httplib.INTERNAL_SERVER_ERROR)
            server.loop.log.filter_level = orig

            # Test 301
            def handler(data):
                raise HTTPRedirect('/somewhere-else')
            server.change_handler(handler)
            conn = server.connect()
            conn.request('GET', '/')
            r = conn.getresponse()
            self.ae(r.status, httplib.MOVED_PERMANENTLY)
            self.ae(r.getheader('Location'), '/somewhere-else')
            self.ae('', r.read())

            # Handler echoes first path component plus the request body.
            server.change_handler(lambda data:data.path[0] + data.read().decode('ascii'))
            conn = server.connect()

            # Test simple GET
            conn.request('GET', '/test/')
            r = conn.getresponse()
            self.ae(r.status, httplib.OK)
            self.ae(r.read(), b'test')

            # Test TRACE
            lines = ['TRACE /xxx HTTP/1.1', 'Test: value', 'Xyz: abc, def', '', '']
            r = raw_send(conn, ('\r\n'.join(lines)).encode('ascii'))
            self.ae(r.status, httplib.OK)
            self.ae(r.read().decode('utf-8'), '\n'.join(lines[:-2]))

            # Test POST with simple body
            conn.request('POST', '/test', 'body')
            r = conn.getresponse()
            self.ae(r.status, httplib.CREATED)
            self.ae(r.read(), b'testbody')

            # Test POST with chunked transfer encoding
            conn.request('POST', '/test', headers={'Transfer-Encoding': 'chunked'})
            conn.send(b'4\r\nbody\r\na\r\n1234567890\r\n0\r\n\r\n')
            r = conn.getresponse()
            self.ae(r.status, httplib.CREATED)
            self.ae(r.read(), b'testbody1234567890')

            # Test various incorrect input
            orig_level, server.log.filter_level = server.log.filter_level, server.log.ERROR

            conn.request('GET', '/test' + ('a' * 200))
            r = conn.getresponse()
            self.ae(r.status, httplib.BAD_REQUEST)

            conn = server.connect()
            conn.request('GET', '/test', ('a' * 200))
            r = conn.getresponse()
            self.ae(r.status, httplib.REQUEST_ENTITY_TOO_LARGE)

            conn = server.connect()
            conn.request('POST', '/test', headers={'Transfer-Encoding': 'chunked'})
            conn.send(b'x\r\nbody\r\n0\r\n\r\n')
            r = conn.getresponse()
            self.ae(r.status, httplib.BAD_REQUEST)
            self.assertIn(b'not a valid chunk size', r.read())

            conn.request('POST', '/test', headers={'Transfer-Encoding': 'chunked'})
            conn.send(b'4\r\nbody\r\n200\r\n\r\n')
            r = conn.getresponse()
            self.ae(r.status, httplib.REQUEST_ENTITY_TOO_LARGE)

            conn.request('POST', '/test', body='a'*200)
            r = conn.getresponse()
            self.ae(r.status, httplib.REQUEST_ENTITY_TOO_LARGE)

            conn = server.connect()
            conn.request('POST', '/test', headers={'Transfer-Encoding': 'chunked'})
            # Declared chunk size (3) does not match the data sent.
            conn.send(b'3\r\nbody\r\n0\r\n\r\n')
            r = conn.getresponse()
            self.ae(r.status, httplib.BAD_REQUEST), self.ae(r.read(), b'Chunk does not have trailing CRLF')

            conn = server.connect(timeout=1)
            conn.request('POST', '/test', headers={'Transfer-Encoding': 'chunked'})
            # Declared chunk size larger than what is sent: server must time out.
            conn.send(b'30\r\nbody\r\n0\r\n\r\n')
            r = conn.getresponse()
            self.ae(r.status, httplib.REQUEST_TIMEOUT)
            self.assertIn(b'', r.read())
            server.log.filter_level = orig_level
            conn = server.connect()

            # Test pipelining
            responses = []
            for i in xrange(10):
                # Reset httplib's client state so multiple requests can be
                # queued on one connection before any response is read.
                conn._HTTPConnection__state = httplib._CS_IDLE
                conn.request('GET', '/%d'%i)
                responses.append(conn.response_class(conn.sock, strict=conn.strict, method=conn._method))
            for i in xrange(10):
                r = responses[i]
                r.begin()
                self.ae(r.read(), ('%d' % i).encode('ascii'))
            conn._HTTPConnection__state = httplib._CS_IDLE

            # Test closing
            server.loop.opts.timeout = 10  # ensure socket is not closed because of timeout
            conn.request('GET', '/close', headers={'Connection':'close'})
            r = conn.getresponse()
            self.ae(server.loop.num_active_connections, 1)
            self.ae(r.status, 200), self.ae(r.read(), 'close')
            server.loop.wakeup()
            num = 10
            # Poll briefly for the server to drop the connection.
            while num and server.loop.num_active_connections != 0:
                time.sleep(0.01)
                num -= 1
            self.ae(server.loop.num_active_connections, 0)
            self.assertIsNone(conn.sock)
    # }}}

    def test_http_response(self):  # {{{
        'Test HTTP protocol responses'
        from calibre.srv.http_response import parse_multipart_byterange

        def handler(conn):
            return conn.generate_static_output('test', lambda : ''.join(conn.path))
        with NamedTemporaryFile(suffix='test.epub') as f, open(P('localization/locales.zip'), 'rb') as lf, \
                TestServer(handler, timeout=0.2, compress_min_size=0) as server:
            fdata = string.ascii_letters * 100
            f.write(fdata), f.seek(0)

            # Test ETag
            conn = server.connect()
            conn.request('GET', '/an_etagged_path')
            r = conn.getresponse()
            self.ae(r.status, httplib.OK), self.ae(r.read(), b'an_etagged_path')
            etag = r.getheader('ETag')
            self.ae(etag, '"%s"' % hashlib.sha1('an_etagged_path').hexdigest())
            conn.request('GET', '/an_etagged_path', headers={'If-None-Match':etag})
            r = conn.getresponse()
            self.ae(r.status, httplib.NOT_MODIFIED)
            self.ae(r.read(), b'')

            # Test gzip
            raw = b'a'*20000
            server.change_handler(lambda conn: raw)
            conn = server.connect()
            conn.request('GET', '/an_etagged_path', headers={'Accept-Encoding':'gzip'})
            r = conn.getresponse()
            self.ae(r.status, httplib.OK), self.ae(zlib.decompress(r.read(), 16+zlib.MAX_WBITS), raw)

            # Test getting a filesystem file (both sendfile and read/write paths)
            for use_sendfile in (True, False):
                server.change_handler(lambda conn: f)
                server.loop.opts.use_sendfile = use_sendfile
                conn = server.connect()
                conn.request('GET', '/test')
                r = conn.getresponse()
                etag = type('')(r.getheader('ETag'))
                self.assertTrue(etag)
                self.ae(r.getheader('Content-Type'), guess_type(f.name)[0])
                self.ae(type('')(r.getheader('Accept-Ranges')), 'bytes')
                self.ae(int(r.getheader('Content-Length')), len(fdata))
                self.ae(r.status, httplib.OK), self.ae(r.read(), fdata)

                # Closed byte range
                conn.request('GET', '/test', headers={'Range':'bytes=2-25'})
                r = conn.getresponse()
                self.ae(r.status, httplib.PARTIAL_CONTENT)
                self.ae(type('')(r.getheader('Accept-Ranges')), 'bytes')
                self.ae(type('')(r.getheader('Content-Range')), 'bytes 2-25/%d' % len(fdata))
                self.ae(int(r.getheader('Content-Length')), 24)
                self.ae(r.read(), fdata[2:26])

                # Unsatisfiable range
                conn.request('GET', '/test', headers={'Range':'bytes=100000-'})
                r = conn.getresponse()
                self.ae(r.status, httplib.REQUESTED_RANGE_NOT_SATISFIABLE)
                self.ae(type('')(r.getheader('Content-Range')), 'bytes */%d' % len(fdata))

                # If-Range with matching ETag serves the partial content
                conn.request('GET', '/test', headers={'Range':'bytes=25-50', 'If-Range':etag})
                r = conn.getresponse()
                self.ae(r.status, httplib.PARTIAL_CONTENT), self.ae(r.read(), fdata[25:51])
                self.ae(int(r.getheader('Content-Length')), 26)

                conn.request('GET', '/test', headers={'Range':'bytes=0-1000000'})
                r = conn.getresponse()
                self.ae(r.status, httplib.PARTIAL_CONTENT), self.ae(r.read(), fdata)

                # If-Range mismatch falls back to the full resource
                conn.request('GET', '/test', headers={'Range':'bytes=25-50', 'If-Range':'"nomatch"'})
                r = conn.getresponse()
                self.ae(r.status, httplib.OK), self.ae(r.read(), fdata)
                self.assertFalse(r.getheader('Content-Range'))
                self.ae(int(r.getheader('Content-Length')), len(fdata))

                # Multiple ranges produce a multipart/byteranges body
                conn.request('GET', '/test', headers={'Range':'bytes=0-25,26-50'})
                r = conn.getresponse()
                self.ae(r.status, httplib.PARTIAL_CONTENT)
                clen = int(r.getheader('Content-Length'))
                data = r.read()
                self.ae(clen, len(data))
                buf = BytesIO(data)
                self.ae(parse_multipart_byterange(buf, r.getheader('Content-Type')), [(0, fdata[:26]), (26, fdata[26:51])])

            # Test sending of larger file
            start_time = monotonic()
            lf.seek(0)
            data = lf.read()
            server.change_handler(lambda conn: lf)
            conn = server.connect()
            conn.request('GET', '/test')
            r = conn.getresponse()
            self.ae(r.status, httplib.OK)
            rdata = r.read()
            self.ae(len(data), len(rdata))
            self.ae(hashlib.sha1(data).hexdigest(), hashlib.sha1(rdata).hexdigest())
            self.ae(data, rdata)
            time_taken = monotonic() - start_time
            self.assertLess(time_taken, 1, 'Large file transfer took too long')
    # }}}

    def test_static_generation(self):  # {{{
        'Test static generation'
        # The handler pops a fresh value on every regeneration; a cached
        # static output must therefore return the same body on every request.
        nums = list(map(str, xrange(10)))

        def handler(conn):
            return conn.generate_static_output('test', nums.pop)
        with TestServer(handler) as server:
            conn = server.connect()
            conn.request('GET', '/an_etagged_path')
            r = conn.getresponse()
            data = r.read()
            for i in xrange(5):
                conn.request('GET', '/an_etagged_path')
                r = conn.getresponse()
                self.assertEqual(data, r.read())
    # }}}
|
ashang/calibre
|
src/calibre/srv/tests/http.py
|
Python
|
gpl-3.0
| 17,003
|
[
"MOOSE"
] |
935ff2fbd919ff606fda17408e4aaab63b148154d0a822c12397411713b2c3df
|
import warnings
import numpy as np
import pandas as pd
import xarray
import scipy.stats as st
import numba
try:
import pymc3 as pm
except:
pass
import arviz as az
import arviz.plots.plot_utils
import scipy.ndimage
import skimage
import matplotlib._contour
from matplotlib.pyplot import get_cmap as mpl_get_cmap
import bokeh.application
import bokeh.application.handlers
import bokeh.models
import bokeh.palettes
import bokeh.plotting
import colorcet
try:
import datashader as ds
import datashader.bokeh_ext
except ImportError as e:
warnings.warn(
f"""DataShader import failed with error "{e}".
Features requiring DataShader will not work and you will get exceptions."""
)
from . import utils
from . import image
from . import az_utils
try:
from . import stan
except:
warnings.warn(
"Could not import `stan` submodule. Perhaps pystan or cmdstanpy is not properly installed."
)
def plot_with_error_bars(
    centers, confs, names, marker_kwargs={}, line_kwargs={}, **kwargs
):
    """Make a horizontal plot of centers/conf ints with error bars.

    Parameters
    ----------
    centers : array_like, shape (n,)
        Array of center points for error bar plot.
    confs : array_like, shape (n, 2)
        Array of low and high values of confidence intervals
    names : list of strings
        Names of the variables for the plot. These give the y-ticks.
    marker_kwargs : dict, default {}
        Kwargs to be passed to p.circle() for plotting centers.
    line_kwargs : dict, default {}
        Kwargs passed to p.line() to plot the confidence interval.
        A `line_width` entry here overrides the default thickness of 2.
    kwargs : dict
        Any additional kwargs are passed to bokeh.plotting.figure().

    Returns
    -------
    output : Bokeh figure
        Plot of error bars.
    """
    n = len(names)
    if len(centers) != n:
        raise ValueError("len(centers) ≠ len(names)")
    if confs.shape != (n, 2):
        raise ValueError("Shape of `confs` must be (len(names), 2).")

    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 50 * n
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 450

    # `line_width` may come from figure kwargs (popped so figure() does not
    # see it) or from line_kwargs; it controls the conf-interval lines.
    # Bug fix: this value was previously popped but ignored (p.line() used a
    # hard-coded 2) and `line_kwargs` was documented but never forwarded.
    line_width = kwargs.pop("line_width", 2)
    line_kwargs = dict(line_kwargs)  # copy: do not mutate caller's dict
    line_width = line_kwargs.pop("line_width", line_width)

    p = bokeh.plotting.figure(y_range=names[::-1], **kwargs)

    p.circle(x=centers, y=names, **marker_kwargs)
    for conf, name in zip(confs, names):
        p.line(x=conf, y=[name, name], line_width=line_width, **line_kwargs)

    return p
def fill_between(
    x1=None,
    y1=None,
    x2=None,
    y2=None,
    show_line=True,
    patch_kwargs={},
    line_kwargs={},
    p=None,
    **kwargs,
):
    """
    Create a filled region between two curves.

    Parameters
    ----------
    x1 : array_like
        Array of x-values for first curve
    y1 : array_like
        Array of y-values for first curve
    x2 : array_like
        Array of x-values for second curve
    y2 : array_like
        Array of y-values for second curve
    show_line : bool, default True
        If True, show the lines on the edges of the fill.
    patch_kwargs : dict
        Any kwargs passed into p.patch(), which generates the fill.
        `line_width` and `line_alpha` entries default to 0 (no outline).
    line_kwargs : dict
        Any kwargs passed into p.line() in generating the line around
        the fill.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    kwargs
        All other kwargs are passed to bokeh.plotting.figure() in
        creating the figure.

    Returns
    -------
    output : bokeh.plotting.Figure instance
        Plot populated with fill-between.
    """
    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 275
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 350

    if p is None:
        p = bokeh.plotting.figure(**kwargs)

    # Work on copies so the caller's dicts (and the shared mutable default
    # arguments) are not modified by the pops below.
    patch_kwargs = dict(patch_kwargs)
    line_kwargs = dict(line_kwargs)

    line_width = patch_kwargs.pop("line_width", 0)
    line_alpha = patch_kwargs.pop("line_alpha", 0)
    # The patch traces curve 1 forward and curve 2 backward to close the region.
    p.patch(
        x=np.concatenate((x1, x2[::-1])),
        y=np.concatenate((y1, y2[::-1])),
        line_width=line_width,
        line_alpha=line_alpha,  # bug fix: was `line_alpha=line_width`
        **patch_kwargs,
    )

    if show_line:
        line_width = line_kwargs.pop("line_width", 2)
        p.line(x1, y1, line_width=line_width, **line_kwargs)
        p.line(x2, y2, line_width=line_width, **line_kwargs)

    return p
def qqplot(
    data,
    gen_fun,
    n_samples=1000,
    args=(),
    patch_kwargs={},
    line_kwargs={},
    diag_kwargs={},
    p=None,
    **kwargs,
):
    """
    Make a Q-Q plot of `data` against samples drawn from a model.

    Parameters
    ----------
    data : array_like, shape (N,)
        Array of data to be used in making Q-Q plot.
    gen_fun : function
        Function to randomly draw a new data set out of the model
        distribution parametrized by the MLE. Must have call
        signature `gen_fun(*args, size)`. `size` is the number of
        samples to draw.
    n_samples : int, default 1000
        Number of samples to draw using gen_fun().
    args : tuple, default ()
        Arguments to be passed to gen_fun().
    patch_kwargs : dict
        Any kwargs passed into p.patch(), which generates the fill.
    line_kwargs : dict
        Any kwargs passed into p.line() in generating the line around
        the fill.
    diag_kwargs : dict
        Any kwargs to be passed into p.line() in generating diagonal
        reference line of Q-Q plot.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    kwargs
        All other kwargs are passed to bokeh.plotting.figure() in
        creating the figure.

    Returns
    -------
    output : bokeh.plotting.Figure instance
        Plot populated with the Q-Q envelope and diagonal reference line.
    """
    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 275
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 350

    x = np.sort(data)

    # For each of n_samples draws, a sorted theoretical sample the same
    # size as the data.
    theor_x = np.array([np.sort(gen_fun(*args, len(x))) for _ in range(n_samples)])

    # Upper and lower bounds (central 95% envelope of the theoretical samples)
    low_theor, up_theor = np.percentile(theor_x, (2.5, 97.5), axis=0)

    if p is None:
        p = bokeh.plotting.figure(**kwargs)

    if "fill_alpha" not in patch_kwargs:
        patch_kwargs["fill_alpha"] = 0.5

    p = fill_between(
        x,
        up_theor,
        x,
        low_theor,
        patch_kwargs=patch_kwargs,
        line_kwargs=line_kwargs,
        show_line=True,
        p=p,
    )

    # Plot 45 degree line
    color = diag_kwargs.pop("color", "black")
    alpha = diag_kwargs.pop("alpha", 0.5)
    line_width = diag_kwargs.pop("line_width", 4)
    p.line([0, x.max()], [0, x.max()], line_width=line_width, color=color, alpha=alpha)

    return p
def ecdf(
    data=None,
    p=None,
    x_axis_label=None,
    y_axis_label="ECDF",
    title=None,
    plot_height=300,
    plot_width=450,
    staircase=False,
    complementary=False,
    x_axis_type="linear",
    y_axis_type="linear",
    **kwargs,
):
    """
    Create a plot of an ECDF.

    Parameters
    ----------
    data : array_like
        One-dimensional array of data. Nan's are ignored.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    x_axis_label : str, default None
        Label for the x-axis. Ignored if `p` is not None.
    y_axis_label : str, default 'ECDF'
        Label for the y-axis. If left at the default and
        `complementary` is True, the label becomes 'ECCDF'.
        Ignored if `p` is not None.
    title : str, default None
        Title of the plot. Ignored if `p` is not None.
    plot_height : int, default 300
        Height of plot, in pixels. Ignored if `p` is not None.
    plot_width : int, default 450
        Width of plot, in pixels. Ignored if `p` is not None.
    staircase : bool, default False
        If True, make a plot of a staircase ECDF (staircase). If False,
        plot the ECDF as dots.
    complementary : bool, default False
        If True, plot the empirical complementary cumulative
        distribution functon.
    x_axis_type : str, default 'linear'
        Either 'linear' or 'log'.
    y_axis_type : str, default 'linear'
        Either 'linear' or 'log'.
    kwargs
        Any kwargs to be passed to either p.circle or p.line, for
        `staircase` being False or True, respectively.

    Returns
    -------
    output : bokeh.plotting.Figure instance
        Plot populated with ECDF.
    """
    # Check data to make sure legit
    data = utils._convert_data(data)

    # Data points on ECDF
    x, y = _ecdf_vals(data, staircase, complementary)

    # Instantiate Bokeh plot if not already passed in
    if p is None:
        # Bug fix: the old code did kwargs.pop("y_axis_label", ...), which
        # can never see the named parameter and therefore silently discarded
        # any explicitly passed label. Honor the explicit value and only
        # auto-switch the default label for complementary ECDFs.
        if complementary and y_axis_label == "ECDF":
            y_axis_label = "ECCDF"
        p = bokeh.plotting.figure(
            plot_height=plot_height,
            plot_width=plot_width,
            x_axis_label=x_axis_label,
            y_axis_label=y_axis_label,
            x_axis_type=x_axis_type,
            y_axis_type=y_axis_type,
            title=title,
        )

    if staircase:
        # Line of steps
        p.line(x, y, **kwargs)

        # Rays for ends (extend the ECDF to -inf and +inf)
        if complementary:
            p.ray(x[0], 1, None, np.pi, **kwargs)
            p.ray(x[-1], 0, None, 0, **kwargs)
        else:
            p.ray(x[0], 0, None, np.pi, **kwargs)
            p.ray(x[-1], 1, None, 0, **kwargs)
    else:
        p.circle(x, y, **kwargs)

    return p
def histogram(
    data=None,
    bins=10,
    p=None,
    density=False,
    kind="step",
    line_kwargs={},
    patch_kwargs={},
    **kwargs,
):
    """
    Make a plot of a histogram of a data set.

    Parameters
    ----------
    data : array_like
        1D array of data to make a histogram out of
    bins : int, array_like, or one of 'exact' or 'integer' default 10
        Setting for `bins` kwarg to be passed to `np.histogram()`. If
        `'exact'`, then each unique value in the data gets its own bin.
        If `integer`, then integer data is assumed and each integer gets
        its own bin.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    density : bool, default False
        If True, normalized the histogram. Otherwise, base the histogram
        on counts.
    kind : str, default 'step'
        The kind of histogram to display. Allowed values are 'step' and
        'step_filled'.
    line_kwargs : dict
        Any kwargs to be passed to p.line() in making the line of the
        histogram.
    patch_kwargs : dict
        Any kwargs to be passed to p.patch() in making the fill of the
        histogram.
    kwargs : dict
        All other kwargs are passed to bokeh.plotting.figure()

    Returns
    -------
    output : Bokeh figure
        Figure populated with histogram.
    """
    if data is None:
        raise RuntimeError("Input `data` must be specified.")

    # Normalize to an ndarray so .min()/.max()/np.round work for any
    # array_like input (plain lists previously crashed for 'integer' bins).
    data = np.asarray(data)

    # Instantiate Bokeh plot if not already passed in
    if p is None:
        y_axis_label = kwargs.pop("y_axis_label", "density" if density else "count")

        if "plot_height" not in kwargs and "frame_height" not in kwargs:
            kwargs["frame_height"] = 275
        if "plot_width" not in kwargs and "frame_width" not in kwargs:
            kwargs["frame_width"] = 400
        y_range = kwargs.pop("y_range", bokeh.models.DataRange1d(start=0))
        p = bokeh.plotting.figure(y_axis_label=y_axis_label, y_range=y_range, **kwargs)

    # Guard with isinstance: comparing an ndarray of bin edges to a string
    # would otherwise trigger NumPy's elementwise-comparison semantics.
    if isinstance(bins, str) and bins == "exact":
        a = np.unique(data)
        if len(a) == 1:
            bins = np.array([a[0] - 0.5, a[0] + 0.5])
        else:
            # Bin edges at the midpoints between consecutive unique values,
            # extrapolated half a gap beyond each end.
            bins = np.concatenate(
                (
                    (a[0] - (a[1] - a[0]) / 2,),
                    (a[1:] + a[:-1]) / 2,
                    (a[-1] + (a[-1] - a[-2]) / 2,),
                )
            )
    elif isinstance(bins, str) and bins == "integer":
        if np.any(data != np.round(data)):
            raise RuntimeError("'integer' bins chosen, but data are not integer.")
        bins = np.arange(data.min() - 1, data.max() + 1) + 0.5

    # Compute histogram
    f, e = np.histogram(data, bins=bins, density=density)

    # Duplicate each edge and interleave counts to get staircase coordinates,
    # with zeros at both ends so the outline closes to the baseline.
    e0 = np.empty(2 * len(e))
    f0 = np.empty(2 * len(e))
    e0[::2] = e
    e0[1::2] = e
    f0[0] = 0
    f0[-1] = 0
    f0[1:-1:2] = f
    f0[2:-1:2] = f

    if kind == "step":
        p.line(e0, f0, **line_kwargs)

    if kind == "step_filled":
        x2 = [e0.min(), e0.max()]
        y2 = [0, 0]
        p = fill_between(e0, f0, x2, y2, show_line=True, p=p, patch_kwargs=patch_kwargs)

    return p
def predictive_ecdf(
    samples,
    data=None,
    diff=False,
    percentiles=[80, 60, 40, 20],
    color="blue",
    data_color="orange",
    data_staircase=True,
    data_size=2,
    x=None,
    discrete=False,
    p=None,
    **kwargs,
):
    """Plot a predictive ECDF from samples.

    Parameters
    ----------
    samples : Numpy array or xarray, shape (n_samples, n) or xarray DataArray
        A Numpy array containing predictive samples.
    data : Numpy array, shape (n,) or xarray DataArray
        If not None, ECDF of measured data is overlaid with predictive
        ECDF.
    diff : bool, default False
        If True, the ECDFs minus median of the predictive ECDF are
        plotted.
    percentiles : list, default [80, 60, 40, 20]
        Percentiles for making colored envelopes for confidence
        intervals for the predictive ECDFs. Maximally four can be
        specified.
    color : str, default 'blue'
        One of ['green', 'blue', 'red', 'gray', 'purple', 'orange'].
        There are used to make the color scheme of shading of
        percentiles.
    data_color : str, default 'orange'
        String representing the color of the data to be plotted over the
        confidence interval envelopes.
    data_staircase : bool, default True
        If True, plot the ECDF of the data as a staircase.
        Otherwise plot it as dots.
    data_size : int, default 2
        Size of marker (if `data_line` if False) or thickness of line
        (if `data_staircase` is True) of plot of data.
    x : Numpy array, default None
        Points at which to evaluate the ECDF. If None, points are
        automatically generated based on the data range.
    discrete : bool, default False
        If True, the samples take on discrete values.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    kwargs
        All other kwargs are passed to bokeh.plotting.figure().

    Returns
    -------
    output : Bokeh figure
        Figure populated with glyphs describing range of values for the
        ECDF of the samples. The shading goes according to percentiles
        of samples of the ECDF, with the median ECDF plotted as line in
        the middle.
    """
    if type(samples) != np.ndarray:
        if type(samples) == xarray.core.dataarray.DataArray:
            samples = samples.squeeze().values
        else:
            raise RuntimeError("Samples can only be Numpy arrays and xarrays.")

    if len(percentiles) > 4:
        raise RuntimeError("Can specify maximally four percentiles.")

    # Build ptiles: symmetric lower/upper percentile pairs around the median,
    # e.g. [80, 60] -> [10, 20, 50, 80, 90].
    percentiles = np.sort(percentiles)[::-1]
    ptiles = [pt for pt in percentiles if pt > 0]
    ptiles = (
        [50 - pt / 2 for pt in percentiles]
        + [50]
        + [50 + pt / 2 for pt in percentiles[::-1]]
    )
    ptiles_str = [str(pt) for pt in ptiles]

    if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
        raise RuntimeError(
            "Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', 'orange'"
        )

    # Light-to-dark shade ramps, one per named scheme.
    colors = {
        "blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
        "green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
        "red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
        "orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
        "purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
        "gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
        "betancourt": [
            "#DCBCBC",
            "#C79999",
            "#B97C7C",
            "#A25050",
            "#8F2727",
            "#7C0000",
        ],
    }

    data_range = samples.max() - samples.min()
    if discrete and x is None:
        x = np.arange(samples.min(), samples.max() + 1)
    elif x is None:
        # Evaluate on a grid padded 5% beyond the sample range.
        x = np.linspace(
            samples.min() - 0.05 * data_range, samples.max() + 0.05 * data_range, 400
        )

    # One ECDF evaluation (on the common grid x) per predictive sample.
    ecdfs = np.array([_ecdf_arbitrary_points(sample, x) for sample in samples])

    df_ecdf = pd.DataFrame()
    for ptile in ptiles:
        # NOTE(review): `interpolation=` is deprecated in favor of `method=`
        # in NumPy >= 1.22 — confirm against the supported NumPy versions.
        df_ecdf[str(ptile)] = np.percentile(
            ecdfs, ptile, axis=0, interpolation="higher"
        )

    df_ecdf["x"] = x

    if data is not None and diff:
        # Median predictive ECDF evaluated at the data points, used below to
        # convert the data ECDF into a difference from the median.
        ecdfs = np.array(
            [_ecdf_arbitrary_points(sample, np.sort(data)) for sample in samples]
        )
        ecdf_data_median = np.percentile(ecdfs, 50, axis=0, interpolation="higher")

    if diff:
        for ptile in filter(lambda item: item != "50", ptiles_str):
            df_ecdf[ptile] -= df_ecdf["50"]
        df_ecdf["50"] = 0.0

    if p is None:
        x_axis_label = kwargs.pop("x_axis_label", "x")
        y_axis_label = kwargs.pop("y_axis_label", "ECDF difference" if diff else "ECDF")

        if "plot_height" not in kwargs and "frame_height" not in kwargs:
            kwargs["frame_height"] = 325
        if "plot_width" not in kwargs and "frame_width" not in kwargs:
            kwargs["frame_width"] = 400
        p = bokeh.plotting.figure(
            x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
        )

    # Shade between each symmetric lower/upper percentile pair, darkest last.
    for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
        if discrete:
            x, y1 = cdf_to_staircase(df_ecdf["x"].values, df_ecdf[ptile].values)
            _, y2 = cdf_to_staircase(
                df_ecdf["x"].values, df_ecdf[ptiles_str[-i - 1]].values
            )
        else:
            x = df_ecdf["x"]
            y1 = df_ecdf[ptile]
            y2 = df_ecdf[ptiles_str[-i - 1]]
        fill_between(
            x,
            y1,
            x,
            y2,
            p=p,
            show_line=False,
            patch_kwargs=dict(color=colors[color][i]),
        )

    # The median as a solid line
    if discrete:
        x, y = cdf_to_staircase(df_ecdf["x"], df_ecdf["50"])
    else:
        x, y = df_ecdf["x"], df_ecdf["50"]
    p.line(x, y, line_width=2, color=colors[color][-1])

    # Overlay data set
    if data is not None:
        x_data, y_data = _ecdf_vals(data, staircase=False)
        if diff:
            # subtracting off median wrecks y-coords for duplicated x-values...
            y_data -= ecdf_data_median
            # ...so take only unique values,...
            unique_x = np.unique(x_data)
            # ...find the (correct) max y-value for each...
            unique_inds = np.searchsorted(x_data, unique_x, side='right') - 1
            # ...and use only that going forward
            y_data = y_data[unique_inds]
            x_data = unique_x
        if data_staircase:
            x_data, y_data = cdf_to_staircase(x_data, y_data)
            p.line(x_data, y_data, color=data_color, line_width=data_size)
        else:
            p.circle(x_data, y_data, color=data_color, size=data_size)

    return p
def predictive_regression(
    samples,
    samples_x,
    data=None,
    diff=False,
    percentiles=[80, 60, 40, 20],
    color="blue",
    data_kwargs=None,
    p=None,
    **kwargs,
):
    """Plot a predictive regression plot from samples.

    Parameters
    ----------
    samples : Numpy array, shape (n_samples, n_x) or xarray DataArray
        Numpy array containing predictive samples of y-values.
    samples_x : Numpy array, shape (n_x,)
        x-values corresponding to the columns of `samples`.
    data : Numpy array, shape (n, 2) or xarray DataArray
        If not None, the measured data. The first column is the x-data,
        and the second the y-data. These are plotted as points over the
        predictive plot.
    diff : bool, default False
        If True, the predictive y-values minus the median of the
        predictive y-values are plotted.
    percentiles : list, default [80, 60, 40, 20]
        Percentiles for making colored envelopes for confidence
        intervals for the predictive ECDFs. Maximally four can be
        specified.
    color : str, default 'blue'
        One of ['green', 'blue', 'red', 'gray', 'purple', 'orange',
        'betancourt']. These are used to make the color scheme of
        shading of percentiles.
    data_kwargs : dict, default None
        Any kwargs to be passed to p.circle() when plotting the data
        points.
    p : bokeh.plotting.Figure instance, or None (default)
        If None, create a new figure. Otherwise, populate the existing
        figure `p`.
    kwargs
        All other kwargs are passed to bokeh.plotting.figure().

    Returns
    -------
    output : Bokeh figure
        Figure populated with glyphs describing range of values for the
        the samples. The shading goes according to percentiles of
        samples, with the median plotted as line in the middle.
    """
    # Copy so we never mutate a caller-owned dict (it is .pop()ed below).
    data_kwargs = {} if data_kwargs is None else dict(data_kwargs)

    if type(samples) != np.ndarray:
        if type(samples) == xarray.core.dataarray.DataArray:
            samples = samples.squeeze().values
        else:
            raise RuntimeError("Samples can only be Numpy arrays and xarrays.")

    if type(samples_x) != np.ndarray:
        if type(samples_x) == xarray.core.dataarray.DataArray:
            samples_x = samples_x.squeeze().values
        else:
            raise RuntimeError("`samples_x` can only be Numpy array or xarray.")

    if len(percentiles) > 4:
        raise RuntimeError("Can specify maximally four percentiles.")

    # Build percentile levels: lower envelope bounds, median, upper bounds.
    # (A dead intermediate list comprehension from the original was removed.)
    percentiles = np.sort(percentiles)[::-1]
    ptiles = (
        [50 - pt / 2 for pt in percentiles]
        + [50]
        + [50 + pt / 2 for pt in percentiles[::-1]]
    )
    ptiles_str = [str(pt) for pt in ptiles]

    if color not in ["green", "blue", "red", "gray", "purple", "orange", "betancourt"]:
        # Error message now lists every accepted value, including 'betancourt'.
        raise RuntimeError(
            "Only allowed colors are 'green', 'blue', 'red', 'gray', 'purple', "
            "'orange', 'betancourt'"
        )

    # Sequential palettes, light (wide envelope) to dark (median line).
    colors = {
        "blue": ["#9ecae1", "#6baed6", "#4292c6", "#2171b5", "#084594"],
        "green": ["#a1d99b", "#74c476", "#41ab5d", "#238b45", "#005a32"],
        "red": ["#fc9272", "#fb6a4a", "#ef3b2c", "#cb181d", "#99000d"],
        "orange": ["#fdae6b", "#fd8d3c", "#f16913", "#d94801", "#8c2d04"],
        "purple": ["#bcbddc", "#9e9ac8", "#807dba", "#6a51a3", "#4a1486"],
        "gray": ["#bdbdbd", "#969696", "#737373", "#525252", "#252525"],
        "betancourt": [
            "#DCBCBC",
            "#C79999",
            "#B97C7C",
            "#A25050",
            "#8F2727",
            "#7C0000",
        ],
    }

    if samples.shape[1] != len(samples_x):
        raise ValueError(
            "`samples_x` must have the same number of entries as `samples` does columns."
        )

    # It's useful to have data as a data frame
    if data is not None:
        df_data = pd.DataFrame(data=data, columns=["__data_x", "__data_y"])
        df_data = df_data.sort_values(by="__data_x")

        # Make sure all entries in x-data in samples_x
        if diff:
            if len(samples_x) != len(df_data) or not np.allclose(
                np.sort(samples_x), df_data["__data_x"].values
            ):
                raise ValueError(
                    "If `diff=True`, then samples_x must match the x-values of `data`."
                )

    df_pred = pd.DataFrame(
        data=np.percentile(samples, ptiles, axis=0).transpose(),
        columns=[str(ptile) for ptile in ptiles],
    )
    df_pred["__x"] = samples_x
    df_pred = df_pred.sort_values(by="__x")

    if p is None:
        x_axis_label = kwargs.pop("x_axis_label", "x")
        y_axis_label = kwargs.pop("y_axis_label", "y difference" if diff else "y")

        if "plot_height" not in kwargs and "frame_height" not in kwargs:
            kwargs["frame_height"] = 325
        if "plot_width" not in kwargs and "frame_width" not in kwargs:
            kwargs["frame_width"] = 400
        p = bokeh.plotting.figure(
            x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
        )

    # Shade percentile envelopes, widest (lightest) first so narrow
    # envelopes draw on top.
    for i, ptile in enumerate(ptiles_str[: len(ptiles_str) // 2]):
        if diff:
            y1 = df_pred[ptile] - df_pred["50"]
            y2 = df_pred[ptiles_str[-i - 1]] - df_pred["50"]
        else:
            y1 = df_pred[ptile]
            y2 = df_pred[ptiles_str[-i - 1]]

        fill_between(
            x1=df_pred["__x"],
            x2=df_pred["__x"],
            y1=y1,
            y2=y2,
            p=p,
            show_line=False,
            patch_kwargs=dict(fill_color=colors[color][i]),
        )

    # The median as a solid line
    if diff:
        p.line(
            df_pred["__x"],
            np.zeros_like(samples_x),
            line_width=2,
            color=colors[color][-1],
        )
    else:
        p.line(df_pred["__x"], df_pred["50"], line_width=2, color=colors[color][-1])

    # Overlay data set
    if data is not None:
        data_color = data_kwargs.pop("color", "orange")
        data_alpha = data_kwargs.pop("alpha", 1.0)
        data_size = data_kwargs.pop("size", 2)
        if diff:
            # NOTE(review): this subtraction aligns on the DataFrame index,
            # pairing data rows with predictions by their original (pre-sort)
            # position. That is correct only if `data` x-values are ordered
            # like `samples_x` -- confirm against callers.
            p.circle(
                df_data["__data_x"],
                df_data["__data_y"] - df_pred["50"],
                color=data_color,
                size=data_size,
                alpha=data_alpha,
                **data_kwargs,
            )
        else:
            p.circle(
                df_data["__data_x"],
                df_data["__data_y"],
                color=data_color,
                size=data_size,
                alpha=data_alpha,
                **data_kwargs,
            )

    return p
def sbc_rank_ecdf(
    sbc_output=None,
    parameters=None,
    diff=True,
    ptile=99.0,
    bootstrap_envelope=False,
    n_bs_reps=None,
    show_envelope=True,
    show_envelope_line=True,
    color_by_warning_code=False,
    staircase=False,
    p=None,
    marker_kwargs=None,
    envelope_patch_kwargs=None,
    envelope_line_kwargs=None,
    palette=None,
    show_legend=True,
    **kwargs,
):
    """Make a rank ECDF plot from simulation-based calibration.

    Parameters
    ----------
    sbc_output : DataFrame
        Output of bebi103.stan.sbc() containing results from an SBC
        calculation.
    parameters : list, default None
        List of parameters to include in the SBC rank ECDF plot. If
        None, use all parameters.
    diff : bool, default True
        If True, plot the ECDF minus the ECDF of a Uniform distribution.
        Otherwise, plot the ECDF of the rank statistic from SBC.
    ptile : float, default 99
        Which percentile to use as the envelope in the plot.
    bootstrap_envelope : bool, default False
        If True, use bootstrapping on the appropriate Uniform
        distribution to compute the envelope. Otherwise, use the
        Gaussian approximation for the envelope.
    n_bs_reps : bool, default None
        Number of bootstrap replicates to use when computing the
        envelope. If None, n_bs_reps is determined from the formula
        int(max(n, max(L+1, 100/(100-ptile))) * 100), where n is the
        number of simulations used in the SBC calculation.
    show_envelope : bool, default True
        If True, display the envelope encompassing the ptile percent
        confidence interval for the SBC ECDF.
    show_envelope_line : bool, default True
        If True, and `show_envelope` is also True, plot a line around
        the envelope.
    color_by_warning_code : bool, default False
        If True, color glyphs by diagnostics warning code instead of
        coloring the glyphs by parameter
    staircase : bool, default False
        If True, plot the ECDF as a staircase. Otherwise, plot with
        dots.
    p : bokeh.plotting.Figure instance, default None
        Plot to which to add the SBC rank ECDF plot. If None, create a
        new figure.
    marker_kwargs : dict, default None
        Dictionary of kwargs to pass to `p.circle()` or `p.line()` when
        plotting the SBC ECDF.
    envelope_patch_kwargs : dict, default None
        Any kwargs passed into p.patch(), which generates the fill of
        the envelope.
    envelope_line_kwargs : dict, default None
        Any kwargs passed into p.line() in generating the line around
        the fill of the envelope.
    palette : list of strings of hex colors, or single hex string
        If a list, color palette to use. If a single string representing
        a hex color, all glyphs are colored with that color. Default is
        colorcet.b_glasbey_category10 from the colorcet package.
    show_legend : bool, default True
        If True, show legend.
    kwargs : dict
        Any kwargs passed to `bokeh.plotting.figure()` when creating the
        plot.

    Returns
    -------
    output : bokeh.plotting.Figure instance
        A plot containing the SBC plot.

    Notes
    -----
    .. You can see example SBC ECDF plots in Fig. 14 b and c in this
       paper: https://arxiv.org/abs/1804.06788
    """
    if sbc_output is None:
        raise RuntimeError("Argument `sbc_output` must be specified.")

    # Work on copies so caller-supplied dicts are never mutated in place.
    marker_kwargs = {} if marker_kwargs is None else dict(marker_kwargs)
    envelope_patch_kwargs = (
        {} if envelope_patch_kwargs is None else dict(envelope_patch_kwargs)
    )
    envelope_line_kwargs = (
        {} if envelope_line_kwargs is None else dict(envelope_line_kwargs)
    )

    # Defaults
    if palette is None:
        palette = colorcet.b_glasbey_category10
    elif type(palette) not in [list, tuple]:
        # A single color string: use it for all glyphs. (Fixed bug: the
        # original tested `palette not in [list, tuple]`, which also
        # wrapped user-supplied list palettes into a nested list.)
        palette = [palette]

    if "x_axis_label" not in kwargs:
        kwargs["x_axis_label"] = "rank statistic"
    if "y_axis_label" not in kwargs:
        kwargs["y_axis_label"] = "ECDF difference" if diff else "ECDF"

    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 275
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 450
    # NOTE(review): popped but not forwarded to figure(); presumably meant
    # to be applied to the plot -- confirm intended use.
    toolbar_location = kwargs.pop("toolbar_location", "above")

    if "fill_color" not in envelope_patch_kwargs:
        envelope_patch_kwargs["fill_color"] = "gray"
    if "fill_alpha" not in envelope_patch_kwargs:
        envelope_patch_kwargs["fill_alpha"] = 0.5
    if "line_color" not in envelope_line_kwargs:
        envelope_line_kwargs["line_color"] = "gray"

    # Fixed bug: the original tested `"color" in "marker_kwargs"` (a string
    # literal), so this guard never fired.
    if "color" in marker_kwargs and color_by_warning_code:
        raise RuntimeError(
            "Cannot specify marker color when `color_by_warning_code` is True."
        )
    if staircase and color_by_warning_code:
        raise RuntimeError("Cannot color by warning code for staircase ECDFs.")

    if parameters is None:
        parameters = list(sbc_output["parameter"].unique())
    elif type(parameters) not in [list, tuple]:
        parameters = [parameters]

    L = sbc_output["L"].iloc[0]
    df = sbc_output.loc[
        sbc_output["parameter"].isin(parameters),
        ["parameter", "rank_statistic", "warning_code"],
    ]
    # Number of SBC simulations per parameter.
    n = (df["parameter"] == df["parameter"].unique()[0]).sum()

    if show_envelope:
        x, y_low, y_high = _sbc_rank_envelope(
            L,
            n,
            ptile=ptile,
            diff=diff,
            bootstrap=bootstrap_envelope,
            n_bs_reps=n_bs_reps,
        )
        p = fill_between(
            x1=x,
            x2=x,
            y1=y_high,
            y2=y_low,
            patch_kwargs=envelope_patch_kwargs,
            line_kwargs=envelope_line_kwargs,
            show_line=show_envelope_line,
            p=p,
            **kwargs,
        )
    else:
        p = bokeh.plotting.figure(**kwargs)

    if staircase:
        # Build staircase ECDF values per parameter.
        dfs = []
        for param in parameters:
            if diff:
                x_data, y_data = _ecdf_diff(
                    df.loc[df["parameter"] == param, "rank_statistic"],
                    L,
                    staircase=True,
                )
            else:
                x_data, y_data = _ecdf_vals(
                    df.loc[df["parameter"] == param, "rank_statistic"], staircase=True
                )
            dfs.append(
                pd.DataFrame(
                    data=dict(rank_statistic=x_data, __ECDF=y_data, parameter=param)
                )
            )
        df = pd.concat(dfs, ignore_index=True)
    else:
        df["__ECDF"] = df.groupby("parameter")["rank_statistic"].transform(_ecdf_y)
        df["warning_code"] = df["warning_code"].astype(str)
        if diff:
            # Subtract the CDF of the Uniform rank distribution.
            df["__ECDF"] -= (df["rank_statistic"] + 1) / L

    # Resolve one color per plotted group.
    if staircase:
        color = marker_kwargs.pop("color", palette)
        if type(color) == str:
            color = [color] * len(parameters)
    elif "color" not in marker_kwargs:
        color = palette
    else:
        color = [marker_kwargs.pop("color")] * len(parameters)

    if color_by_warning_code:
        if len(color) < len(df["warning_code"].unique()):
            raise RuntimeError(
                "Not enough colors in palette to cover all warning codes."
            )
    elif len(color) < len(parameters):
        raise RuntimeError("Not enough colors in palette to cover all parameters.")

    plot_cmd = p.line if staircase else p.circle

    # The two original loops were identical except for the grouping column;
    # consolidated here.
    groupby_col = "warning_code" if color_by_warning_code else "parameter"
    for i, (group_val, g) in enumerate(df.groupby(groupby_col)):
        if show_legend:
            plot_cmd(
                source=g,
                x="rank_statistic",
                y="__ECDF",
                color=color[i],
                legend_label=group_val,
                **marker_kwargs,
            )
        else:
            plot_cmd(
                source=g,
                x="rank_statistic",
                y="__ECDF",
                color=color[i],
                **marker_kwargs,
            )

    if show_legend:
        p.legend.click_policy = "hide"

    return p
def parcoord_plot(
    samples=None,
    pars=None,
    transformation=None,
    color_by_chain=False,
    palette=None,
    line_kwargs=None,
    divergence_kwargs=None,
    xtick_label_orientation="horizontal",
    **kwargs,
):
    """
    Make a parallel coordinate plot of MCMC samples. The x-axis is the
    parameter name and the y-axis is the value of the parameter,
    possibly transformed to so the scale of all parameters are similar.

    Parameters
    ----------
    samples : ArviZ InferenceData instance or xarray Dataset instance
        Result of MCMC sampling.
    pars : list of strings
        List of variables to include in the plot.
    transformation : function, str, or dict, default None
        A transformation to apply to each set of samples. The function
        must take a single array as input and return an array as the
        same size. If None, no transformation is done. If a dictionary,
        each key is the variable name and the corresponding value is a
        function for the transformation of that variable. Alternatively,
        if `transformation` is `'minmax'`, the data are scaled to range
        from zero to one, or if `transformation` is `'rank'`, the rank
        of the each data is used.
    color_by_chain : bool, default False
        If True, color the lines by chain.
    palette : list of strings of hex colors, or single hex string
        If a list, color palette to use. If a single string representing
        a hex color, all glyphs are colored with that color. Default is
        colorcet.b_glasbey_category10 from the colorcet package.
    line_kwargs: dict
        Dictionary of kwargs to be passed to `p.multi_line()` in making
        the plot of non-divergent samples.
    divergence_kwargs: dict
        Dictionary of kwargs to be passed to `p.multi_line()` in making
        the plot of divergent samples.
    xtick_label_orientation : str or float, default 'horizontal'
        Orientation of x tick labels. In some plots, horizontally
        labeled ticks will have label clashes, and this can fix that.
    kwargs
        Any kwargs to be passed to `bokeh.plotting.figure()` when
        instantiating the figure.

    Returns
    -------
    output : Bokeh plot
        Parallel coordinates plot.
    """
    # Copies so caller-supplied dicts survive the .pop() calls below.
    line_kwargs = {} if line_kwargs is None else dict(line_kwargs)
    divergence_kwargs = {} if divergence_kwargs is None else dict(divergence_kwargs)

    # Default properties
    if palette is None:
        palette = colorcet.b_glasbey_category10

    line_width = line_kwargs.pop("line_width", 0.5)
    alpha = line_kwargs.pop("alpha", 0.02)
    line_join = line_kwargs.pop("line_join", "bevel")
    if "color" in line_kwargs and color_by_chain:
        raise RuntimeError(
            "Cannot specify line color and also color by chain. If coloring by chain, use `palette` kwarg to specify color scheme."
        )
    color = line_kwargs.pop("color", "black")

    divergence_line_join = divergence_kwargs.pop("line_join", "bevel")
    divergence_line_width = divergence_kwargs.pop("line_width", 1)
    divergence_color = divergence_kwargs.pop("color", "orange")
    divergence_alpha = divergence_kwargs.pop("alpha", 1)

    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 175
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 600
    toolbar_location = kwargs.pop("toolbar_location", "above")

    if "x_range" in kwargs:
        raise RuntimeError("Cannot specify x_range; this is inferred.")

    # When not coloring by chain, every line gets the single `color`.
    if not color_by_chain:
        palette = [color] * len(palette)

    if type(samples) != az.data.inference_data.InferenceData:
        raise RuntimeError("Input must be an ArviZ InferenceData instance.")
    if not hasattr(samples, "posterior"):
        raise RuntimeError("Input samples do not have 'posterior' group.")
    if not (
        hasattr(samples, "sample_stats") and hasattr(samples.sample_stats, "diverging")
    ):
        warnings.warn("No divergence information available.")

    pars, df = _sample_pars_to_df(samples, pars)

    # Normalize `transformation` into a dict mapping parameter -> callable.
    # (Restructured as a single elif-chain; the original re-tested for None
    # after None had already been replaced, which was dead code.)
    if transformation is None:
        transformation = {par: lambda x: x for par in pars}
    elif transformation == "minmax":
        transformation = {
            par: lambda x: (x - x.min()) / (x.max() - x.min())
            if x.min() < x.max()
            else 0.0
            for par in pars
        }
    elif transformation == "rank":
        transformation = {par: lambda x: st.rankdata(x) for par in pars}
    elif callable(transformation):
        transformation = {par: transformation for par in pars}

    for col, trans in transformation.items():
        df[col] = trans(df[col])
    df = df.melt(id_vars=["divergent__", "chain__", "draw__"])

    p = bokeh.plotting.figure(
        x_range=bokeh.models.FactorRange(*pars),
        toolbar_location=toolbar_location,
        **kwargs,
    )

    # Plots for samples that were not divergent
    ys = np.array(
        [
            group["value"].values
            for _, group in df.loc[~df["divergent__"]].groupby(["chain__", "draw__"])
        ]
    )
    if len(ys) > 0:
        ys = [y for y in ys]
        xs = [list(df["variable"].unique())] * len(ys)

        p.multi_line(
            xs,
            ys,
            line_width=line_width,
            alpha=alpha,
            line_join=line_join,
            color=[palette[i % len(palette)] for i in range(len(ys))],
            **line_kwargs,
        )

    # Plots for samples that were divergent
    ys = np.array(
        [
            group["value"].values
            for _, group in df.loc[df["divergent__"]].groupby(["chain__", "draw__"])
        ]
    )
    if len(ys) > 0:
        ys = [y for y in ys]
        xs = [list(df["variable"].unique())] * len(ys)

        p.multi_line(
            xs,
            ys,
            alpha=divergence_alpha,
            line_join=line_join,
            color=divergence_color,
            line_width=divergence_line_width,
            **divergence_kwargs,
        )

    p.xaxis.major_label_orientation = xtick_label_orientation

    return p
def trace_plot(samples=None, pars=None, palette=None, line_kwargs=None, **kwargs):
    """
    Make a trace plot of MCMC samples.

    Parameters
    ----------
    samples : ArviZ InferenceData instance or xarray Dataset instance
        Result of MCMC sampling.
    pars : list of strings
        List of variables to include in the plot.
    palette : list of strings of hex colors, or single hex string
        If a list, color palette to use. If a single string representing
        a hex color, all glyphs are colored with that color. Default is
        colorcet.b_glasbey_category10 from the colorcet package.
    line_kwargs: dict
        Dictionary of kwargs to be passed to `p.line()` in making the
        plot of each chain's trace.
    kwargs
        Any kwargs to be passed to `bokeh.plotting.figure()`.

    Returns
    -------
    output : Bokeh gridplot
        Set of chain traces as a Bokeh gridplot.
    """
    # Copy so caller-supplied dicts survive the .pop() calls below.
    line_kwargs = {} if line_kwargs is None else dict(line_kwargs)

    # Default properties
    if palette is None:
        palette = colorcet.b_glasbey_category10

    line_width = line_kwargs.pop("line_width", 0.5)
    alpha = line_kwargs.pop("alpha", 0.5)
    line_join = line_kwargs.pop("line_join", "bevel")
    if "color" in line_kwargs:
        raise RuntimeError(
            "Cannot specify line color. Specify color scheme with `palette` kwarg."
        )

    if "plot_height" not in kwargs and "frame_height" not in kwargs:
        kwargs["frame_height"] = 150
    if "plot_width" not in kwargs and "frame_width" not in kwargs:
        kwargs["frame_width"] = 600
    x_axis_label = kwargs.pop("x_axis_label", "step")
    if "y_axis_label" in kwargs:
        raise RuntimeError(
            "`y_axis_label` cannot be specified; it is inferred from samples."
        )

    if type(samples) != az.data.inference_data.InferenceData:
        raise RuntimeError("Input must be an ArviZ InferenceData instance.")
    if not hasattr(samples, "posterior"):
        raise RuntimeError("Input samples do not have 'posterior' group.")

    pars, df = _sample_pars_to_df(samples, pars)

    plots = []
    grouped = df.groupby("chain__")
    # One figure per parameter, one line per chain.
    for par in pars:
        p = bokeh.plotting.figure(x_axis_label=x_axis_label, y_axis_label=par, **kwargs)
        for i, (chain, group) in enumerate(grouped):
            p.line(
                group["draw__"],
                group[par],
                line_width=line_width,
                line_join=line_join,
                # Cycle palette so many chains cannot raise IndexError.
                color=palette[i % len(palette)],
                # Fixed bugs: the original used `*line_kwargs` (positional
                # unpacking of dict keys, crashing for non-empty dicts) and
                # popped `alpha` without ever forwarding it.
                alpha=alpha,
                **line_kwargs,
            )
        plots.append(p)

    if len(plots) == 1:
        return plots[0]

    # Link x-ranges so panning one trace pans them all
    for i, p in enumerate(plots[:-1]):
        plots[i].x_range = plots[-1].x_range

    return bokeh.layouts.gridplot(plots, ncols=1)
def corner(
    samples=None,
    pars=None,
    labels=None,
    datashade=False,
    plot_width=150,
    plot_ecdf=False,
    cmap="black",
    color_by_chain=False,
    palette=None,
    divergence_color="orange",
    alpha=0.02,
    single_param_color="black",
    bins=20,
    show_contours=False,
    contour_color="black",
    bins_2d=50,
    levels=None,
    weights=None,
    smooth=0.02,
    extend_contour_domain=False,
    plot_width_correction=50,
    plot_height_correction=40,
    xtick_label_orientation="horizontal",
):
    """
    Make a corner plot of MCMC results. Heavily influenced by the corner
    package by Dan Foreman-Mackey.

    Parameters
    ----------
    samples : Pandas DataFrame or ArviZ InferenceData instance
        Results of sampling.
    pars : list
        List of variables as strings included in `samples` to construct
        corner plot.
    labels : list, default None
        List of labels for the respective variables given in `pars`. If
        None, the variable names from `pars` are used.
    datashade : bool, default False
        Whether or not to convert sampled points to a raster image using
        Datashader.
    plot_width : int, default 150
        Width of each plot in the corner plot in pixels. The height is
        computed from the width to make the plots roughly square.
    plot_ecdf : bool, default False
        If True, plot ECDFs of samples on the diagonal of the corner
        plot. If False, histograms are plotted.
    cmap : str, default 'black'
        Valid colormap string for DataShader or for coloring Bokeh
        glyphs.
    color_by_chain : bool, default False
        If True, color the glyphs by chain index.
    palette : list of strings of hex colors, or single hex string
        If a list, color palette to use. If a single string representing
        a hex color, all glyphs are colored with that color. Default is
        the default color cycle employed by Altair. Ignored if
        `color_by_chain` is False.
    divergence_color : str, default 'orange'
        Color to use for showing points where the sampler experienced a
        divergence.
    alpha : float, default 0.02
        Opacity of glyphs. Ignored if `datashade` is True.
    single_param_color : str, default 'black'
        Color of histogram or ECDF lines.
    bins : int, default 20
        Number of bins to use in constructing histograms. Ignored if
        `plot_ecdf` is True.
    show_contours : bool, default False
        If True, show contour plot on top of samples.
    contour_color : str, default 'black'
        Color of contour lines
    bins_2d : int, default 50
        Number of bins in each direction for binning 2D histograms when
        computing contours.
    levels : list of floats, default None
        Levels to use when constructing contours. By default, these are
        chosen according to this principle from Dan Foreman-Mackey:
        http://corner.readthedocs.io/en/latest/pages/sigmas.html
    weights : default None
        Value to pass as `weights` kwarg to np.histogram2d(), used in
        constructing contours.
    smooth : int or None, default 0.02
        Width of smoothing kernel for making contours.
    extend_contour_domain : bool, default False
        If True, extend the domain of the contours a little bit beyond
        the extend of the samples. This is done in the corner package,
        but I prefer not to do it.
    plot_width_correction : int, default 50
        Correction for width of plot taking into account tick and axis
        labels.
    plot_height_correction : int, default 40
        Correction for height of plot taking into account tick and axis
        labels.
    xtick_label_orientation : str or float, default 'horizontal'
        Orientation of x tick labels. In some plots, horizontally
        labeled ticks will have label clashes, and this can fix that.

    Returns
    -------
    output : Bokeh gridplot
        Corner plot as a Bokeh gridplot.
    """
    # Default properties
    if palette is None:
        palette = colorcet.b_glasbey_category10

    if color_by_chain:
        if datashade:
            raise NotImplementedError(
                "Can only color by chain if `datashade` is False."
            )
        if cmap not in ["black", None]:
            warnings.warn("Ignoring cmap values to color by chain.")

    if divergence_color is None:
        divergence_color = cmap

    if type(samples) == pd.core.frame.DataFrame:
        df = samples
        if pars is None:
            # Exclude bookkeeping columns ending in '__' (chain__, draw__, ...).
            pars = [col for col in df.columns if len(col) < 2 or col[-2:] != "__"]
    else:
        pars, df = _sample_pars_to_df(samples, pars)

    if color_by_chain:
        # Have to convert datatype to string to play nice with Bokeh
        df["chain__"] = df["chain__"].astype(str)
        factors = tuple(df["chain__"].unique())
        cmap = bokeh.transform.factor_cmap("chain__", palette=palette, factors=factors)

    # Add dummy divergent column if no divergence information is given
    if "divergent__" not in df.columns:
        df = df.copy()
        df["divergent__"] = 0

    # Add dummy chain column if no divergence information is given
    if "chain__" not in df.columns:
        df = df.copy()
        df["chain__"] = 0

    if len(pars) > 6:
        raise RuntimeError("For space purposes, can show only six variables.")

    for col in pars:
        if col not in df.columns:
            raise RuntimeError("Column " + col + " not in the columns of DataFrame.")

    if labels is None:
        labels = pars
    elif len(labels) != len(pars):
        raise RuntimeError("len(pars) must equal len(labels)")

    # Single-parameter case: one ECDF or histogram, no grid.
    if len(pars) == 1:
        x = pars[0]
        if plot_ecdf:
            if datashade:
                if plot_width == 150:
                    plot_height = 200
                    plot_width = 300
                else:
                    plot_width = 200
                    plot_height = 200
                x_range, _ = _data_range(df, pars[0], pars[0])
                p = bokeh.plotting.figure(
                    x_range=x_range,
                    y_range=[-0.02, 1.02],
                    plot_width=plot_width,
                    plot_height=plot_height,
                )
                x_ecdf, y_ecdf = _ecdf_vals(df[pars[0]], staircase=True)
                df_ecdf = pd.DataFrame(data={pars[0]: x_ecdf, "ECDF": y_ecdf})
                _ = datashader.bokeh_ext.InteractiveImage(
                    p,
                    _create_line_image,
                    df=df_ecdf,
                    x=x,
                    y="ECDF",
                    cmap=single_param_color,
                )
            else:
                p = ecdf(
                    df[pars[0]],
                    staircase=True,
                    line_width=2,
                    line_color=single_param_color,
                )
        else:
            p = histogram(
                df[pars[0]],
                bins=bins,
                density=True,
                line_width=2,
                color=single_param_color,
                x_axis_label=pars[0],
            )
        p.xaxis.major_label_orientation = xtick_label_orientation
        return p

    if not datashade:
        if len(df) > 10000:
            raise RuntimeError(
                "Cannot render more than 10,000 samples without DataShader."
            )
        elif len(df) > 5000:
            warnings.warn("Rendering so many points without DataShader is ill-advised.")

    # Lower-triangular grid of plots; only (i, j) with j <= i are filled.
    plots = [[None for _ in range(len(pars))] for _ in range(len(pars))]

    for i, j in zip(*np.tril_indices(len(pars))):
        pw = plot_width
        ph = plot_width
        # Leftmost column and bottom row get extra room for axis labels.
        if j == 0:
            pw += plot_width_correction
        if i == len(pars) - 1:
            ph += plot_height_correction

        x = pars[j]
        if i != j:
            # Off-diagonal: bivariate scatter (or datashaded raster).
            y = pars[i]
            x_range, y_range = _data_range(df, x, y)
            plots[i][j] = bokeh.plotting.figure(
                x_range=x_range, y_range=y_range, plot_width=pw, plot_height=ph
            )
            if datashade:
                _ = datashader.bokeh_ext.InteractiveImage(
                    plots[i][j], _create_points_image, df=df, x=x, y=y, cmap=cmap
                )
                # Divergent draws are overlaid as explicit glyphs.
                plots[i][j].circle(
                    df.loc[df["divergent__"] == 1, x],
                    df.loc[df["divergent__"] == 1, y],
                    size=2,
                    color=divergence_color,
                )
            else:
                if divergence_color is None:
                    plots[i][j].circle(df[x], df[y], size=2, alpha=alpha, color=cmap)
                else:
                    plots[i][j].circle(
                        source=df.loc[df["divergent__"] == 0, [x, y, "chain__"]],
                        x=x,
                        y=y,
                        size=2,
                        alpha=alpha,
                        color=cmap,
                    )
                    plots[i][j].circle(
                        df.loc[df["divergent__"] == 1, x],
                        df.loc[df["divergent__"] == 1, y],
                        size=2,
                        color=divergence_color,
                    )
            if show_contours:
                xs, ys = contour_lines_from_samples(
                    df[x].values,
                    df[y].values,
                    bins=bins_2d,
                    smooth=smooth,
                    levels=levels,
                    weights=weights,
                    extend_domain=extend_contour_domain,
                )
                plots[i][j].multi_line(xs, ys, line_color=contour_color, line_width=2)
        else:
            # Diagonal: marginal distribution as ECDF or histogram.
            if plot_ecdf:
                x_range, _ = _data_range(df, x, x)
                plots[i][i] = bokeh.plotting.figure(
                    x_range=x_range,
                    y_range=[-0.02, 1.02],
                    plot_width=pw,
                    plot_height=ph,
                )
                if datashade:
                    x_ecdf, y_ecdf = _ecdf_vals(df[x], staircase=True)
                    df_ecdf = pd.DataFrame(data={x: x_ecdf, "ECDF": y_ecdf})
                    _ = datashader.bokeh_ext.InteractiveImage(
                        plots[i][i],
                        _create_line_image,
                        df=df_ecdf,
                        x=x,
                        y="ECDF",
                        cmap=single_param_color,
                    )
                else:
                    plots[i][i] = ecdf(
                        df[x],
                        p=plots[i][i],
                        staircase=True,
                        line_width=2,
                        line_color=single_param_color,
                    )
            else:
                x_range, _ = _data_range(df, x, x)
                plots[i][i] = bokeh.plotting.figure(
                    x_range=x_range,
                    y_range=bokeh.models.DataRange1d(start=0.0),
                    plot_width=pw,
                    plot_height=ph,
                )
                f, e = np.histogram(df[x], bins=bins, density=True)
                # Duplicate bin edges to draw the histogram as a staircase
                # outline that starts and ends at zero.
                e0 = np.empty(2 * len(e))
                f0 = np.empty(2 * len(e))
                e0[::2] = e
                e0[1::2] = e
                f0[0] = 0
                f0[-1] = 0
                f0[1:-1:2] = f
                f0[2:-1:2] = f

                plots[i][i].line(e0, f0, line_width=2, color=single_param_color)
        plots[i][j].xaxis.major_label_orientation = xtick_label_orientation

    # Link axis ranges
    for i in range(1, len(pars)):
        for j in range(i):
            plots[i][j].x_range = plots[j][j].x_range
            plots[i][j].y_range = plots[i][i].x_range

    # Label axes
    for i, label in enumerate(labels):
        plots[-1][i].xaxis.axis_label = label

    for i, label in enumerate(labels[1:]):
        plots[i + 1][0].yaxis.axis_label = label

    if plot_ecdf:
        plots[0][0].yaxis.axis_label = "ECDF"

    # Take off tick labels
    for i in range(len(pars) - 1):
        for j in range(i + 1):
            plots[i][j].xaxis.major_label_text_font_size = "0pt"

    if not plot_ecdf:
        plots[0][0].yaxis.major_label_text_font_size = "0pt"

    for i in range(1, len(pars)):
        for j in range(1, i + 1):
            plots[i][j].yaxis.major_label_text_font_size = "0pt"

    grid = bokeh.layouts.gridplot(plots, toolbar_location="left")

    return grid
def contour(
    X,
    Y,
    Z,
    levels=None,
    p=None,
    overlaid=False,
    cmap=None,
    overlay_grid=False,
    fill=False,
    fill_palette=None,
    fill_alpha=0.75,
    line_kwargs={},
    **kwargs,
):
    """
    Make a contour plot, possibly overlaid on an image.

    Parameters
    ----------
    X : 2D Numpy array
        Array of x-values, as would be produced using np.meshgrid()
    Y : 2D Numpy array
        Array of y-values, as would be produced using np.meshgrid()
    Z : 2D Numpy array
        Array of z-values.
    levels : array_like
        Levels to plot, ranging from 0 to 1. The contour around a given
        level contains that fraction of the total probability if the
        contour plot is for a 2D probability density function. By
        default, the levels are given by the one, two, three, and four
        sigma levels corresponding to a marginalized distribution from
        a 2D Gaussian distribution.
    p : bokeh plotting object, default None
        If not None, the contour are added to `p`. This option is not
        allowed if `overlaid` is True.
    overlaid : bool, default False
        If True, `Z` is displayed as an image and the contours are
        overlaid.
    cmap : str or list of hex colors, default None
        If `im` is an intensity image, `cmap` is a mapping of
        intensity to color. If None, default is 256-level Viridis.
        If `im` is a color image, then `cmap` can either be
        'rgb' or 'cmy' (default), for RGB or CMY merge of channels.
    overlay_grid : bool, default False
        If True, faintly overlay the grid on top of image. Ignored if
        overlaid is False.
    fill : bool, default False
        If True, fill the regions between contour levels. Currently not
        implemented; setting `fill=True` raises NotImplementedError.
    fill_palette : list of strings of hex colors, default None
        Color palette for filled contours; must have one more entry
        than `levels`. Unused while `fill` is unimplemented.
    fill_alpha : float, default 0.75
        Opacity of filled contour patches. Unused while `fill` is
        unimplemented.
    line_kwargs : dict, default {}
        Keyword arguments passed to `p.multiline()` for rendering the
        contour.
    kwargs
        Any kwargs to be passed to `bokeh.plotting.figure()`.

    Returns
    -------
    output : Bokeh plotting object
        Plot populated with contours, possible with an image.
    """
    if len(X.shape) != 2 or Y.shape != X.shape or Z.shape != X.shape:
        raise RuntimeError("All arrays must be 2D and of same shape.")

    if overlaid and p is not None:
        raise RuntimeError("Cannot specify `p` if showing image.")

    # Set defaults
    x_axis_label = kwargs.pop("x_axis_label", "x")
    y_axis_label = kwargs.pop("y_axis_label", "y")

    if "line_color" not in line_kwargs:
        if overlaid:
            # White lines read better on top of an image.
            line_kwargs["line_color"] = "white"
        else:
            line_kwargs["line_color"] = "black"

    line_width = line_kwargs.pop("line_width", 2)

    if p is None:
        if overlaid:
            frame_height = kwargs.pop("frame_height", 300)
            frame_width = kwargs.pop("frame_width", 300)
            title = kwargs.pop("title", None)
            p = image.imshow(
                Z,
                cmap=cmap,
                frame_height=frame_height,
                frame_width=frame_width,
                x_axis_label=x_axis_label,
                y_axis_label=y_axis_label,
                x_range=[X.min(), X.max()],
                y_range=[Y.min(), Y.max()],
                no_ticks=False,
                flip=False,
                return_im=False,
            )
        else:
            if "plot_height" not in kwargs and "frame_height" not in kwargs:
                kwargs["frame_height"] = 300
            if "plot_width" not in kwargs and "frame_width" not in kwargs:
                kwargs["frame_width"] = 300
            p = bokeh.plotting.figure(
                x_axis_label=x_axis_label, y_axis_label=y_axis_label, **kwargs
            )

    # Set default levels: 1-2-3-4 sigma of a marginalized 2D Gaussian.
    if levels is None:
        levels = 1.0 - np.exp(-np.arange(0.5, 2.1, 0.5) ** 2 / 2)

    # Compute contour lines
    if fill or line_width:
        xs, ys = _contour_lines(X, Y, Z, levels)

    # Make fills. This is currently not supported
    if fill:
        raise NotImplementedError("Filled contours are not yet implemented.")
        # NOTE(review): everything below this raise is unreachable dead code,
        # apparently kept as a sketch for a future filled-contour feature.
        if fill_palette is None:
            if len(levels) <= 6:
                fill_palette = bokeh.palettes.Greys[len(levels) + 3][1:-1]
            elif len(levels) <= 10:
                fill_palette = bokeh.palettes.Viridis[len(levels) + 1]
            else:
                raise RuntimeError(
                    "Can only have maximally 10 levels with filled contours"
                    + " unless user specifies `fill_palette`."
                )
        elif len(fill_palette) != len(levels) + 1:
            raise RuntimeError(
                "`fill_palette` must have 1 more entry" + " than `levels`"
            )

        p.patch(
            xs[-1], ys[-1], color=fill_palette[0], alpha=fill_alpha, line_color=None
        )
        for i in range(1, len(levels)):
            x_p = np.concatenate((xs[-1 - i], xs[-i][::-1]))
            y_p = np.concatenate((ys[-1 - i], ys[-i][::-1]))
            p.patch(x_p, y_p, color=fill_palette[i], alpha=fill_alpha, line_color=None)

        p.background_fill_color = fill_palette[-1]

    # Populate the plot with contour lines
    p.multi_line(xs, ys, line_width=line_width, **line_kwargs)

    if overlay_grid and overlaid:
        p.grid.level = "overlay"
        p.grid.grid_line_alpha = 0.2

    return p
def ds_line_plot(
    df,
    x,
    y,
    cmap="#1f77b4",
    plot_height=300,
    plot_width=500,
    x_axis_label=None,
    y_axis_label=None,
    title=None,
    margin=0.02,
):
    """
    Render a line plot of a (possibly very large) data set with
    DataShader, wrapped in an interactive Bokeh figure.

    Parameters
    ----------
    df : pandas DataFrame
        Data to plot.
    x : valid column name of `df`
        Column holding x-values.
    y : valid column name of `df`
        Column holding y-values.
    cmap : str, default '#1f77b4'
        Colormap string understood by DataShader.
    plot_height : int, default 300
        Figure height in pixels.
    plot_width : int, default 500
        Figure width in pixels.
    x_axis_label : str, default None
        x-axis label; defaults to the column name `x` when it is a string.
    y_axis_label : str, default None
        y-axis label; defaults to the column name `y` when it is a string.
    title : str, default None
        Figure title.
    margin : float, default 0.02
        Fractional padding added around the data range.

    Returns
    -------
    output : datashader.bokeh_ext.InteractiveImage
        Interactive image of the plot. Do *not* pass the result to
        bokeh.io.show(); simply evaluate this call.
    """
    # Fall back to column names (or generic labels) for the axes.
    if x_axis_label is None:
        x_axis_label = x if type(x) == str else "x"
    if y_axis_label is None:
        y_axis_label = y if type(y) == str else "y"
    x_range, y_range = _data_range(df, x, y, margin=margin)
    fig = bokeh.plotting.figure(
        plot_height=plot_height,
        plot_width=plot_width,
        x_range=x_range,
        y_range=y_range,
        x_axis_label=x_axis_label,
        y_axis_label=y_axis_label,
        title=title,
    )
    # DataShader re-renders the image whenever the figure is zoomed/panned.
    return datashader.bokeh_ext.InteractiveImage(
        fig, _create_line_image, df=df, x=x, y=y, cmap=cmap
    )
def ds_point_plot(
    df,
    x,
    y,
    cmap="#1f77b4",
    plot_height=300,
    plot_width=500,
    x_axis_label=None,
    y_axis_label=None,
    title=None,
    margin=0.02,
):
    """
    Render a scatter plot of a (possibly very large) data set with
    DataShader, wrapped in an interactive Bokeh figure.

    Parameters
    ----------
    df : pandas DataFrame
        Data to plot.
    x : valid column name of `df`
        Column holding x-values.
    y : valid column name of `df`
        Column holding y-values.
    cmap : str, default '#1f77b4'
        Colormap string understood by DataShader.
    plot_height : int, default 300
        Figure height in pixels.
    plot_width : int, default 500
        Figure width in pixels.
    x_axis_label : str, default None
        x-axis label; defaults to the column name `x` when it is a string.
    y_axis_label : str, default None
        y-axis label; defaults to the column name `y` when it is a string.
    title : str, default None
        Figure title.
    margin : float, default 0.02
        Fractional padding added around the data range.

    Returns
    -------
    output : datashader.bokeh_ext.InteractiveImage
        Interactive image of the plot. Do *not* pass the result to
        bokeh.io.show(); simply evaluate this call.
    """
    # Fall back to column names (or generic labels) for the axes.
    if x_axis_label is None:
        x_axis_label = x if type(x) == str else "x"
    if y_axis_label is None:
        y_axis_label = y if type(y) == str else "y"
    x_range, y_range = _data_range(df, x, y, margin=margin)
    fig = bokeh.plotting.figure(
        plot_height=plot_height,
        plot_width=plot_width,
        x_range=x_range,
        y_range=y_range,
        x_axis_label=x_axis_label,
        y_axis_label=y_axis_label,
        title=title,
    )
    # DataShader re-renders the image whenever the figure is zoomed/panned.
    return datashader.bokeh_ext.InteractiveImage(
        fig, _create_points_image, df=df, x=x, y=y, cmap=cmap
    )
def mpl_cmap_to_color_mapper(cmap):
    """
    Convert a Matplotlib colormap to a bokeh.models.LinearColorMapper
    instance.
    Parameters
    ----------
    cmap : str
        A string giving the name of the color map.
    Returns
    -------
    output : bokeh.models.LinearColorMapper instance
        A linear color_mapper with 256 gradations.
    Notes
    -----
    .. See https://matplotlib.org/examples/color/colormaps_reference.html
       for available Matplotlib colormaps.
    """
    # Sample the Matplotlib colormap at all 256 positions, dropping the
    # alpha channel, and convert each RGB fraction triple to a hex string.
    cm = mpl_get_cmap(cmap)
    palette = [rgb_frac_to_hex(cm(i)[:3]) for i in range(256)]
    return bokeh.models.LinearColorMapper(palette=palette)
def _ecdf_vals(data, staircase=False, complementary=False):
"""Get x, y, values of an ECDF for plotting.
Parameters
----------
data : ndarray
One dimensional Numpy array with data.
staircase : bool, default False
If True, generate x and y values for staircase ECDF (staircase). If
False, generate x and y values for ECDF as dots.
complementary : bool
If True, return values for ECCDF.
Returns
-------
x : ndarray
x-values for plot
y : ndarray
y-values for plot
"""
x = np.sort(data)
y = np.arange(1, len(data) + 1) / len(data)
if staircase:
x, y = cdf_to_staircase(x, y)
if complementary:
y = 1 - y
elif complementary:
y = 1 - y + 1 / len(y)
return x, y
@numba.jit(nopython=True)
def _ecdf_arbitrary_points(data, x):
    """Give the value of an ECDF at arbitrary points x."""
    # y[k] = k / len(data); indexing with the right-sided insertion point
    # gives the fraction of data values <= each entry of x.
    y = np.arange(len(data) + 1) / len(data)
    return y[np.searchsorted(np.sort(data), x, side="right")]
def _ecdf_from_samples(df, name, ptiles, x):
    """Compute ECDFs and percentiles from samples.

    One ECDF is computed per (chain, chain_idx) group of `df`, evaluated
    on the common grid `x`; the requested percentiles are then taken
    across groups at each grid point.
    """
    df_ecdf = pd.DataFrame()
    df_ecdf_vals = pd.DataFrame()
    grouped = df.groupby(["chain", "chain_idx"])
    # One column of ECDF values per group.
    for i, g in grouped:
        df_ecdf_vals[i] = _ecdf_arbitrary_points(g[name].values, x)
    # 'higher' interpolation keeps percentile values on the discrete
    # ECDF grid (multiples of 1/n).
    for ptile in ptiles:
        df_ecdf[str(ptile)] = df_ecdf_vals.quantile(
            ptile / 100, axis=1, interpolation="higher"
        )
    df_ecdf["x"] = x
    return df_ecdf
def cdf_to_staircase(x, y):
    """Convert discrete values of CDF to staircase for plotting.

    Parameters
    ----------
    x : array_like, shape (n,)
        x-values for concave corners of CDF
    y : array_like, shape (n,)
        y-values of the concave corners of the CDF

    Returns
    -------
    x_staircase : array_like, shape (2*n, )
        x-values for staircase CDF.
    y_staircase : array_like, shape (2*n, )
        y-values for staircase CDF.
    """
    n = len(x)
    xs = np.empty(2 * n)
    ys = np.empty(2 * n)
    # Each x appears twice: once at the bottom of its riser, once at the top.
    xs[::2] = x
    xs[1::2] = x
    # The staircase starts at 0, rises to y[i] at each x[i], and holds that
    # value until the next step.
    ys[0] = 0
    ys[1::2] = y
    ys[2::2] = y[:-1]
    return xs, ys
@numba.jit(nopython=True)
def _y_ecdf(data, x):
    # ECDF of `data` evaluated at arbitrary points `x`: the fraction of
    # data values <= each entry of x (0 for x below all of the data).
    y = np.arange(len(data) + 1) / len(data)
    return y[np.searchsorted(np.sort(data), x, side="right")]
@numba.jit(nopython=True)
def _draw_ecdf_bootstrap(L, n, n_bs_reps=100000):
    """Bootstrap replicates of the ECDF of `n` draws from {0, ..., L}.

    Returns an (n_bs_reps, L+1) array; row i is the ECDF of one replicate
    evaluated at x = 0, 1, ..., L. Uses np.random, so results depend on
    the global NumPy random state.
    """
    x = np.arange(L + 1)
    ys = np.empty((n_bs_reps, len(x)))
    for i in range(n_bs_reps):
        draws = np.random.randint(0, L + 1, size=n)
        ys[i, :] = _y_ecdf(draws, x)
    return ys
def _sbc_rank_envelope(L, n, ptile=95, diff=True, bootstrap=False, n_bs_reps=None):
    """Staircase envelope of the expected rank ECDF for simulation-based
    calibration (SBC) plots.

    Under perfect calibration, ranks are uniform on {0, ..., L}; this
    computes the `ptile`-percent band around that uniform ECDF, either by
    bootstrap or by a normal approximation, and returns it as staircase
    x/y arrays. If `diff` is True, the uniform CDF is subtracted so the
    band is centered on zero.
    """
    x = np.arange(L + 1)
    # CDF of the discrete uniform distribution on {0, ..., L}.
    y = st.randint.cdf(x, 0, L + 1)
    # Standard error of an ECDF value estimated from n draws.
    std = np.sqrt(y * (1 - y) / n)
    if bootstrap:
        if n_bs_reps is None:
            # Heuristic: enough replicates to resolve the requested tail.
            n_bs_reps = int(max(n, max(L + 1, 100 / (100 - ptile))) * 100)
        ys = _draw_ecdf_bootstrap(L, n, n_bs_reps=n_bs_reps)
        y_low, y_high = np.percentile(ys, [50 - ptile / 2, 50 + ptile / 2], axis=0)
    else:
        # Normal approximation to the ECDF sampling distribution; the last
        # point is pinned to exactly 1.
        y_low = np.concatenate(
            (st.norm.ppf((50 - ptile / 2) / 100, y[:-1], std[:-1]), (1.0,))
        )
        y_high = np.concatenate(
            (st.norm.ppf((50 + ptile / 2) / 100, y[:-1], std[:-1]), (1.0,))
        )
    # Ensure that ends are appropriate
    y_low = np.maximum(0, y_low)
    y_high = np.minimum(1, y_high)
    # Make "staircase" stepped ECDFs
    _, y_low = cdf_to_staircase(x, y_low)
    x_staircase, y_high = cdf_to_staircase(x, y_high)
    if diff:
        _, y = cdf_to_staircase(x, y)
        y_low -= y
        y_high -= y
    return x_staircase, y_low, y_high
def _ecdf_diff(data, L, staircase=False):
    # Difference between the ECDF of `data` and the uniform CDF on
    # {0, ..., L-1}, for SBC rank-difference plots.
    x, y = _ecdf_vals(data)
    # Uniform CDF evaluated at the sorted data points.
    y_uniform = (x + 1) / L
    if staircase:
        x, y = cdf_to_staircase(x, y)
        # NOTE(review): the x-grid passed here (np.arange(len(data))) differs
        # from the sorted data used above — confirm this is intended.
        _, y_uniform = cdf_to_staircase(np.arange(len(data)), y_uniform)
    y -= y_uniform
    return x, y
def _get_cat_range(df, grouped, order, color_column, horizontal):
    """Determine the categorical factor range, the factors, and the
    factors used for coloring for a categorical plot."""
    if order is None:
        # Derive factor order from the groupby keys.
        keys = list(grouped.groups.keys())
        if isinstance(keys[0], tuple):
            factors = tuple(tuple(str(k) for k in key) for key in keys)
        else:
            factors = tuple(str(key) for key in keys)
    else:
        # Caller-specified ordering.
        if type(order[0]) in [list, tuple]:
            factors = tuple(tuple(str(k) for k in key) for key in order)
        else:
            factors = tuple(str(entry) for entry in order)
    # Horizontal plots list categories top-to-bottom, hence the reversal.
    if horizontal:
        cat_range = bokeh.models.FactorRange(*factors[::-1])
    else:
        cat_range = bokeh.models.FactorRange(*factors)
    if color_column is None:
        color_factors = factors
    else:
        color_factors = tuple(sorted(df[color_column].unique().astype(str)))
    return cat_range, factors, color_factors
def _cat_figure(
    df,
    grouped,
    plot_height,
    plot_width,
    x_axis_label,
    y_axis_label,
    title,
    order,
    color_column,
    tooltips,
    horizontal,
    val_axis_type,
):
    """Build a Bokeh figure with a categorical axis; return the figure,
    the factors, and the color factors."""
    cat_range, factors, color_factors = _get_cat_range(
        df, grouped, order, color_column, horizontal
    )
    fig_kwargs = dict(
        plot_height=plot_height,
        plot_width=plot_width,
        x_axis_label=x_axis_label,
        y_axis_label=y_axis_label,
        title=title,
        tooltips=tooltips,
    )
    # The categorical range goes on one axis; the quantitative value axis
    # type (e.g. 'linear' or 'log') goes on the other.
    if horizontal:
        fig_kwargs.update(y_range=cat_range, x_axis_type=val_axis_type)
    else:
        fig_kwargs.update(x_range=cat_range, y_axis_type=val_axis_type)
    return bokeh.plotting.figure(**fig_kwargs), factors, color_factors
def _cat_source(df, cats, cols, color_column):
    """Build a ColumnDataSource carrying the requested columns plus a
    combined categorical column ('cat') and a legend label ('__label')."""
    if type(cats) in [list, tuple]:
        # Multi-level categories become tuples of strings.
        cat_source = list(zip(*(df[cat].astype(str) for cat in cats)))
        labels = [", ".join(entry) for entry in cat_source]
    else:
        cat_source = list(df[cats].astype(str).values)
        labels = cat_source
    if type(cols) in [list, tuple, pd.core.indexes.base.Index]:
        source_dict = {col: list(df[col].values) for col in cols}
    else:
        source_dict = {cols: list(df[cols].values)}
    source_dict["cat"] = cat_source
    # Legend labels come from the category unless a color column is given.
    if color_column in [None, "cat"]:
        source_dict["__label"] = labels
    else:
        source_dict["__label"] = list(df[color_column].astype(str).values)
        source_dict[color_column] = list(df[color_column].astype(str).values)
    return bokeh.models.ColumnDataSource(source_dict)
def _tooltip_cols(tooltips):
if tooltips is None:
return []
if type(tooltips) not in [list, tuple]:
raise RuntimeError("`tooltips` must be a list or tuple of two-tuples.")
cols = []
for tip in tooltips:
if type(tip) not in [list, tuple] or len(tip) != 2:
raise RuntimeError("Invalid tooltip.")
if tip[1][0] == "@":
if tip[1][1] == "{":
cols.append(tip[1][2 : tip[1].find("}")])
elif "{" in tip[1]:
cols.append(tip[1][1 : tip[1].find("{")])
else:
cols.append(tip[1][1:])
return cols
def _cols_to_keep(cats, val, color_column, tooltips):
    """Collect the unique set of data frame columns a categorical plot
    needs: tooltip columns, the value column, the category column(s), and
    the optional color column."""
    cols = _tooltip_cols(tooltips) + [val]
    cols += list(cats) if type(cats) in [list, tuple] else [cats]
    if color_column is not None:
        cols.append(color_column)
    # Deduplicate; order is not significant to callers.
    return list(set(cols))
def _check_cat_input(df, cats, val, color_column, tooltips, palette, kwargs):
    """Validate the inputs of a categorical plotting function.

    Raises RuntimeError on any missing argument, unknown column, reserved
    name, or disallowed kwarg; returns the list of columns the plot needs
    (as computed by `_cols_to_keep`).
    """
    if df is None:
        raise RuntimeError("`df` argument must be provided.")
    if cats is None:
        raise RuntimeError("`cats` argument must be provided.")
    if val is None:
        raise RuntimeError("`val` argument must be provided.")
    if type(palette) not in [list, tuple]:
        raise RuntimeError("`palette` must be a list or tuple.")
    if val not in df.columns:
        raise RuntimeError(f"{val} is not a column in the inputted data frame")
    # `cats` may be a single column name or a list/tuple of them.
    cats_array = type(cats) in [list, tuple]
    if cats_array:
        for cat in cats:
            if cat not in df.columns:
                raise RuntimeError(f"{cat} is not a column in the inputted data frame")
    else:
        if cats not in df.columns:
            raise RuntimeError(f"{cats} is not a column in the inputted data frame")
    if color_column is not None and color_column not in df.columns:
        raise RuntimeError(f"{color_column} is not a column in the inputted data frame")
    cols = _cols_to_keep(cats, val, color_column, tooltips)
    for col in cols:
        if col not in df.columns:
            raise RuntimeError(f"{col} is not a column in the inputted data frame")
    # These kwargs are set internally by the plotting functions, so the
    # caller must not supply them.
    bad_kwargs = ["x", "y", "source", "cat", "legend"]
    if kwargs is not None and any([key in kwargs for key in bad_kwargs]):
        raise RuntimeError(", ".join(bad_kwargs) + " are not allowed kwargs.")
    # 'cat' and '__label' are reserved column names in the built source.
    if val == "cat":
        raise RuntimeError("`'cat'` cannot be used as `val`.")
    if val == "__label" or (cats == "__label" or (cats_array and "__label" in cats)):
        raise RuntimeError("'__label' cannot be used for `val` or `cats`.")
    return cols
def _outliers(data):
bottom, middle, top = np.percentile(data, [25, 50, 75])
iqr = top - bottom
outliers = data[(data > top + 1.5 * iqr) | (data < bottom - 1.5 * iqr)]
return outliers
def _box_and_whisker(data):
middle = data.median()
bottom = data.quantile(0.25)
top = data.quantile(0.75)
iqr = top - bottom
top_whisker = data[data <= top + 1.5 * iqr].max()
bottom_whisker = data[data >= bottom - 1.5 * iqr].min()
return pd.Series(
{
"middle": middle,
"bottom": bottom,
"top": top,
"top_whisker": top_whisker,
"bottom_whisker": bottom_whisker,
}
)
def _box_source(df, cats, val, cols):
    """Construct a data frame for making box plot.

    Returns a pair of ColumnDataSources: one holding the box/whisker
    statistics per category, one holding the outlier points.
    """
    # Need to reset index for use in slicing outliers
    df_source = df.reset_index(drop=True)
    # Level(s) to reset after the groupby-apply below.
    if type(cats) in [list, tuple]:
        level = list(range(len(cats)))
    else:
        level = 0
    if cats is None:
        grouped = df_source
    else:
        grouped = df_source.groupby(cats)
    # Data frame for boxes and whiskers
    df_box = grouped[val].apply(_box_and_whisker).unstack().reset_index()
    source_box = _cat_source(
        df_box, cats, ["middle", "bottom", "top", "top_whisker", "bottom_whisker"], None
    )
    # Data frame for outliers; re-attach the requested columns by original
    # row index so tooltips keep working.
    df_outliers = grouped[val].apply(_outliers).reset_index(level=level)
    df_outliers[cols] = df_source.loc[df_outliers.index, cols]
    source_outliers = _cat_source(df_outliers, cats, cols, None)
    return source_box, source_outliers
def _ecdf_y(data, complementary=False):
"""Give y-values of an ECDF for an unsorted column in a data frame.
Parameters
----------
data : Pandas Series
Series (or column of a DataFrame) from which to generate ECDF
values
complementary : bool, default False
If True, give the ECCDF values.
Returns
-------
output : Pandas Series
Corresponding y-values for an ECDF when plotted with dots.
Notes
-----
.. This only works for plotting an ECDF with points, not for staircase
ECDFs
"""
if complementary:
return 1 - data.rank(method="first") / len(data) + 1 / len(data)
else:
return data.rank(method="first") / len(data)
def _point_ecdf_source(data, val, cats, cols, complementary, colored):
    """DataFrame-backed source for a point-wise ECDF (or ECCDF)."""
    df = data.copy()
    col = "__ECCDF" if complementary else "__ECDF"
    if cats is None or colored:
        # One ECDF over the whole data set.
        df[col] = _ecdf_y(df[val], complementary)
    else:
        # One ECDF per category.
        df[col] = df.groupby(cats)[val].transform(_ecdf_y, complementary)
    # NOTE: this intentionally mirrors the original in-place extension of
    # the caller's `cols` list.
    cols += [col]
    return _cat_source(df, cats, cols, None)
def _ecdf_collection_dots(
    df, val, cats, cols, complementary, order, palette, show_legend, y, p, **kwargs
):
    """Plot one dot-style ECDF per category onto the figure `p`."""
    _, _, color_factors = _get_cat_range(df, df.groupby(cats), order, None, False)
    source = _point_ecdf_source(df, val, cats, cols, complementary, False)
    if "color" not in kwargs:
        # Color dots by category unless the caller chose a color.
        kwargs["color"] = bokeh.transform.factor_cmap(
            "cat", palette=palette, factors=color_factors
        )
    if show_legend:
        # NOTE(review): `legend` is the legacy (pre-1.4) Bokeh kwarg; newer
        # Bokeh uses `legend_field` — confirm against the pinned version.
        kwargs["legend"] = "__label"
    p.circle(source=source, x=val, y=y, **kwargs)
    return p
def _ecdf_collection_staircase(
    df, val, cats, complementary, order, palette, show_legend, p, **kwargs
):
    """Plot one staircase ECDF per category onto the figure `p`."""
    grouped = df.groupby(cats)
    # Only cycle the palette if the caller did not fix a color.
    color_not_in_kwargs = "color" not in kwargs
    if order is None:
        order = list(grouped.groups.keys())
    grouped_iterator = [
        (order_val, grouped.get_group(order_val)) for order_val in order
    ]
    for i, g in enumerate(grouped_iterator):
        if show_legend:
            if type(g[0]) == tuple:
                legend = ", ".join([str(c) for c in g[0]])
            else:
                legend = str(g[0])
        else:
            legend = None
        if color_not_in_kwargs:
            # One palette entry per category, wrapping around.
            kwargs["color"] = palette[i % len(palette)]
        # `ecdf` is defined elsewhere in this module — presumably draws a
        # single staircase ECDF onto `p`; confirm its signature there.
        ecdf(
            g[1][val],
            staircase=True,
            p=p,
            legend=legend,
            complementary=complementary,
            **kwargs,
        )
    return p
def _display_clicks(div, attributes=None, style="float:left;clear:left;font_size=0.5pt"):
    """Build a CustomJS callback that appends the current event's
    attribute values, formatted to 4 decimal places, to `div`.

    Parameters
    ----------
    div : bokeh.models.Div
        Div whose text accumulates one line per event (capped at 35 lines
        by the JS below).
    attributes : list of str, optional
        Names of event attributes to record; defaults to none. (Changed
        from a mutable default argument `[]` to the None-sentinel idiom;
        `[]` also renders as a valid JS array literal via %s.)
    style : str
        Inline style for each appended span.
        NOTE(review): 'font_size=0.5pt' looks like it was meant to be CSS
        'font-size:0.5pt' — it is part of the emitted markup, so confirm
        before changing.

    Returns
    -------
    bokeh.models.CustomJS
        Callback suitable for attaching to a Bokeh event.
    """
    if attributes is None:
        attributes = []
    return bokeh.models.CustomJS(
        args=dict(div=div),
        code="""
        var attrs = %s; var args = [];
        for (var i=0; i<attrs.length; i++ ) {
            args.push(Number(cb_obj[attrs[i]]).toFixed(4));
        }
        var line = "<span style=%r>[" + args.join(", ") + "], </span>\\n";
        var text = div.text.concat(line);
        var lines = text.split("\\n")
        if ( lines.length > 35 ) { lines.shift(); }
        div.text = lines.join("\\n");
        """
        % (attributes, style),
    )
def _data_range(df, x, y, margin=0.02):
x_range = df[x].max() - df[x].min()
y_range = df[y].max() - df[y].min()
return (
[df[x].min() - x_range * margin, df[x].max() + x_range * margin],
[df[y].min() - y_range * margin, df[y].max() + y_range * margin],
)
def _create_points_image(x_range, y_range, w, h, df, x, y, cmap):
    """DataShader callback: rasterize df[x], df[y] as a point-count image
    for the current viewport (called on zoom/pan by InteractiveImage)."""
    cvs = ds.Canvas(
        x_range=x_range, y_range=y_range, plot_height=int(h), plot_width=int(w)
    )
    agg = cvs.points(df, x, y, agg=ds.reductions.count())
    # dynspread grows isolated pixels so sparse points stay visible.
    return ds.transfer_functions.dynspread(
        ds.transfer_functions.shade(agg, cmap=cmap, how="linear")
    )
def _create_line_image(x_range, y_range, w, h, df, x, y, cmap=None):
    """DataShader callback: rasterize df[x], df[y] as a line image for the
    current viewport (called on zoom/pan by InteractiveImage)."""
    cvs = ds.Canvas(
        x_range=x_range, y_range=y_range, plot_height=int(h), plot_width=int(w)
    )
    agg = cvs.line(df, x, y)
    return ds.transfer_functions.dynspread(ds.transfer_functions.shade(agg, cmap=cmap))
def _contour_lines(X, Y, Z, levels):
    """
    Generate lines for contour plot.

    `levels` are cumulative-density levels (fractions of total mass);
    they are converted to absolute Z thresholds before contouring.
    Returns (xs, ys): parallel lists of coordinate arrays, one pair per
    contour path.
    """
    # Convert each cumulative level into the Z threshold enclosing that
    # fraction of the total mass.
    Zflat = Z.flatten()
    inds = np.argsort(Zflat)[::-1]
    Zflat = Zflat[inds]
    sm = np.cumsum(Zflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Zflat[sm <= v0][-1]
        except IndexError:
            # No cell is below this level; fall back to the densest cell.
            # (Was a bare `except:`, which would also mask unrelated errors.)
            V[i] = Zflat[0]
    V.sort()
    # Nudge duplicate thresholds apart so the contour generator receives
    # strictly distinct levels.
    m = np.diff(V) == 0
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = np.diff(V) == 0
    V.sort()
    # Make contours.
    # NOTE(review): matplotlib._contour is a private API that was removed
    # in matplotlib >= 3.8; consider migrating to contourpy. Confirm the
    # pinned matplotlib version before upgrading.
    c = matplotlib._contour.QuadContourGenerator(X, Y, Z, None, True, 0)
    xs = []
    ys = []
    for level in V:
        paths = c.create_contour(level)
        for line in paths:
            xs.append(line[:, 0])
            ys.append(line[:, 1])
    return xs, ys
def contour_lines_from_samples(
    x, y, smooth=0.02, levels=None, bins=50, weights=None, extend_domain=False
):
    """
    Get lines for a contour plot from (x, y) samples.
    Parameters
    ----------
    x : array_like, shape (n,)
        x-values of samples.
    y : array_like, shape (n,)
        y-values of samples.
    smooth : float, default 0.02
        Smoothing parameter for Gaussian smoothing of contour. A
        Gaussian filter is applied with standard deviation given by
        `smooth * bins`. If None, no smoothing is done.
    levels : float, list of floats, or None
        The levels of the contours. To enclose 95% of the samples, use
        `levels=0.95`. If provided as a list, multiple levels are used.
        If None, `levels` is approximated [0.12, 0.39, 0.68, 0.86].
    bins : int, default 50
        Binning of samples into square bins is necessary to construct
        the contours. `bins` gives the number of bins in each direction.
    weights : array_like, shape (n,), default None
        Weights to apply to each sample in constructing the histogram.
        Default is `None`, such that all samples are equally weighted.
    extend_domain : bool, default False
        If True, extend the domain of the contours beyond the domain
        of the min and max of the samples. This can be useful if the
        contours might clash with the edges of a plot.
    Returns
    -------
    xs : list of arrays
        Each array is the x-values for a plotted contour
    ys : list of arrays
        Each array is the y-values for a plotted contour
    Notes
    -----
    .. The method proceeds as follows: the samples are binned. The
       counts of samples landing in bins are thought of as values of a
       function f(xb, yb), where (xb, yb) denotes the center of the
       respective bins. This function is then optionally smoothed using
       a Gaussian blur, and then the result is used to construct a
       contour plot.
    .. Based heavily on code from the corner package by Dan
       Forman-Mackey.
    """
    # The code in this function is based on the corner package by Dan Forman-Mackey.
    # Following is the copyright notice from that pacakge.
    #
    # Copyright (c) 2013-2016 Daniel Foreman-Mackey
    # All rights reserved.
    # Redistribution and use in source and binary forms, with or without
    # modification, are permitted provided that the following conditions are met:
    # 1. Redistributions of source code must retain the above copyright notice, this
    # list of conditions and the following disclaimer.
    # 2. Redistributions in binary form must reproduce the above copyright notice,
    # this list of conditions and the following disclaimer in the documentation
    # and/or other materials provided with the distribution.
    # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
    # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
    # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
    # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
    # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
    # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
    # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
    # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
    # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
    # The views and conclusions contained in the software and documentation are those
    # of the authors and should not be interpreted as representing official policies,
    # either expressed or implied, of the FreeBSD Project.
    if type(bins) != int or bins <= 0:
        raise ValueError("`bins` must be a positive integer.")
    data_range = [[x.min(), x.max()], [y.min(), y.max()]]
    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5) ** 2)
    elif type(levels) not in [list, tuple, np.ndarray]:
        levels = [levels]
    for level in levels:
        if level <= 0 or level > 1:
            raise ValueError("All level values must be between zero and one.")
    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(
            x.flatten(),
            y.flatten(),
            bins=bins,
            range=list(map(np.sort, data_range)),
            weights=weights,
        )
    except ValueError:
        raise ValueError(
            "2D histogram generation failed. It could be that one of your sampling ranges has no dynamic range."
        )
    if smooth is not None:
        H = scipy.ndimage.gaussian_filter(H, smooth * bins)
    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])
    # Extend the array for the sake of the contours at the plot edges.
    # Pads the histogram with two rings of cells: the outer ring at the
    # global minimum, the inner ring copying the edge values, so contours
    # close instead of clipping at the data boundary.
    if extend_domain:
        H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
        H2[2:-2, 2:-2] = H
        H2[2:-2, 1] = H[:, 0]
        H2[2:-2, -2] = H[:, -1]
        H2[1, 2:-2] = H[0]
        H2[-2, 2:-2] = H[-1]
        H2[1, 1] = H[0, 0]
        H2[1, -2] = H[0, -1]
        H2[-2, 1] = H[-1, 0]
        H2[-2, -2] = H[-1, -1]
        # Extrapolate the bin-center grids outward by two steps on each side.
        X2 = np.concatenate(
            [
                X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
                X1,
                X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
            ]
        )
        Y2 = np.concatenate(
            [
                Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
                Y1,
                Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
            ]
        )
        X2, Y2 = np.meshgrid(X2, Y2)
    else:
        X2, Y2 = np.meshgrid(X1, Y1)
        H2 = H
    return _contour_lines(X2, Y2, H2.transpose(), levels)
def _sample_pars_to_df(samples, pars):
    """Convert ArviZ InferenceData posterior results to a data frame

    Returns (pars, df): the resolved list of parameter columns and a copy
    of the data frame restricted to those columns plus the bookkeeping
    columns 'chain__', 'draw__', 'divergent__'.
    """
    if pars is not None and type(pars) not in (list, tuple):
        raise RuntimeError("`pars` must be a list or tuple.")
    if pars is None:
        var_names = None
    else:
        # Strip indices ('alpha[0,1]' -> 'alpha') and deduplicate, then
        # verify each base variable exists in the posterior.
        var_names = az_utils.purge_duplicates([_get_var_name(par) for par in pars])
        sample_pars = list(samples.posterior.data_vars)
        for var_name in var_names:
            if var_name not in sample_pars:
                raise RuntimeError(f"parameter {var_name} not in the input.")
    df = stan.posterior_to_dataframe(samples, var_names=var_names)
    if pars is None:
        # Use every non-bookkeeping column.
        pars = [
            col for col in df.columns if col not in ["chain__", "draw__", "divergent__"]
        ]
        cols = df.columns
    else:
        cols = list(pars) + ["chain__", "draw__", "divergent__"]
    return pars, df[cols].copy()
def _get_var_name(name):
"""Convert a parameter name to a var_name. Example: 'alpha[0,1]'
return 'alpha'."""
if name[-1] != "]":
return name
ind = name.rfind("[")
if ind == 0 or ind == len(name) - 1:
return name
substr = name[ind + 1 : -1]
if len(substr) == 0:
return name
if not substr[0].isdigit():
return name
if not substr[-1].isdigit():
return name
for char in substr:
if not (char.isdigit() or char == ","):
return name
if ",," in substr:
return name
return name[:ind]
def box(**kwargs):
    """Removed API: box plots now live in the bokeh-catplot package."""
    raise RuntimeError(
        "`bebi103` no longer supports box plots. "
        "Instead, use the bokeh-catplot package."
    )
def boxwhisker(**kwargs):
    """Removed API: box plots now live in the bokeh-catplot package."""
    raise RuntimeError(
        "`bebi103` no longer supports box plots. "
        "Instead, use the bokeh-catplot package."
    )
def jitter(**kwargs):
    """Removed API: jitter plots now live in the bokeh-catplot package."""
    raise RuntimeError(
        "`bebi103` no longer supports jitter plots. "
        "Instead, use the bokeh-catplot package."
    )
def colored_ecdf(**kwargs):
    """Removed API: colored ECDFs now live in the bokeh-catplot package."""
    raise RuntimeError(
        "`bebi103` no longer supports colored ECDFs. "
        "Instead, use the bokeh-catplot package."
    )
def ecdf_collection(**kwargs):
    """Removed API: ECDF collections now live in the bokeh-catplot package."""
    raise RuntimeError(
        "`bebi103` no longer supports ECDF collections. "
        "Instead, use the bokeh-catplot package."
    )
def colored_scatter(**kwargs):
    """Removed API: use HoloViews for colored scatter plots."""
    raise RuntimeError(
        "`bebi103` no longer supports colored scatter plots. "
        "Instead, use HoloViews."
    )
def distribution_plot_app(**kwargs):
    """Removed API: see the standalone distribution-explorer-app project."""
    raise RuntimeError(
        "`bebi103` no longer supports the distribution plot app. "
        "Instead, see https://github.com/justinbois/distribution-explorer-app."
    )
def adjust_range(**kwargs):
    """Removed API: use the `padding` kwargs of HoloViews elements."""
    raise RuntimeError(
        "`bebi103` no longer supports the adjust_range. "
        "This feature is not part of HoloViews, available using `padding` kwargs for many plotting elements."
    )
|
justinbois/bebi103_utils
|
bebi103/viz.py
|
Python
|
mit
| 91,620
|
[
"Gaussian"
] |
499f89c0d03a6a665e152e44adee807e8f26c94fc66b0fa06405ee1e701cda3e
|
#===============================================================================
# This file is part of TEMPy.
#
# TEMPy is a software designed to help the user in the manipulation
# and analyses of macromolecular assemblies using 3D electron microscopy maps.
#
# Copyright 2015 Birkbeck College University of London.
#
# Authors: Maya Topf, Daven Vasishtan, Arun Prasad Pandurangan,
# Irene Farabella, Agnel-Praveen Joseph, Harpal Sahota
#
# This software is made available under GPL V3 license
# http://www.gnu.org/licenses/gpl-3.0.html
#
#
# Please cite your use of TEMPy in published work:
#
# Farabella, I., Vasishtan, D., Joseph, A.P., Pandurangan, A.P., Sahota, H. & Topf, M. (2015). J. Appl. Cryst. 48.
#
#===============================================================================
from TEMPy.StructureBlurrer import StructureBlurrer
from TEMPy.ScoringFunctions import ScoringFunctions
from TEMPy.Cluster import Cluster
from numpy import zeros,mean,median,asarray
from scipy.stats import mode
import sys
from collections import defaultdict
class Consensus:
"""A class to clustering an ensemble of structure instance"""
    def __init__(self):
        # Stateless helper class; nothing to initialise.
        pass
def _makedict_value(self,rankCCC):
"""
private function used in Consensus Module.
"""
#print rankCCC
rank_dict={}
for r in rankCCC:
rank_dict[r[0]]=r[2]
return rank_dict
def _makedict(self,rank_score):
"""
private function used in Consensus Module.
"""
namerank_score=[mod[0] for mod in rank_score]
d_rank={i:j for i,j in enumerate(namerank_score,start=1)}
return d_rank
def _makedict_list(self,list_score):
"""
private function used in Consensus Module.
"""
#print enumerate(rankCCC)
d_rank={i:j for i,j in list_score}
return d_rank
def _printdict(self,dict_score):
"""
private function used in Consensus Module.
"""
for k,v in list(dict_score.items()):
print(k,v)
def _modes(self,values):
"""
private function used in Consensus Module.
"""
count = defaultdict(int)
for v in values:
count[v] +=1
best = max(count.values())
print([k for k,v in list(count.items()) if v == best])
def _mode_here(self,arr):
"""
private function used in Consensus Module.
"""
m = max([arr.count(a) for a in arr])
print([x for x in arr if arr.count(x) == m][0] if m>1 else None)
def vote_mode(self,ensemble_list,score_list,res_target_map,sigma_coeff,number_top_mod=0,write=False,targetMap=False):
"""
Mode consensus scoring calculation between multiple "fits" using a user defined set of scores.
Arguments:
*ensemble_list*
Input list of Structure Instances.
*score_list*
Input list of scoring function to use.
See ScoringFunctions class for a list of the available Scoring Function.
E.g. set score='CCC' to use the Cross-correlation coefficient.
Score option are:
i 'CCC' - Cross-correlation coefficient;
ii 'LAP' - Laplacian-filtered cross-correlation coefficient: useful for maps with resolutions worse than 10-15 A;
iii 'MI' - Mutual information score: a good and robust score but relatively slow to calculate;
iv 'ENV' - Envelope score: the fastest score to calculate due to binarisation of the map.
v-vii 'NV','NV_Sobel','NV_Laplace'- Normal vector score: a vector-based surface superimposition score with or without Sobel/Laplace filter.
viii 'CD' - Chamfer Distance: a score used in computer vision algorithms as a fast similarity metric
*res_target_map*
the resolution, in Angstroms, of the target Map.
*sigma_coeff*
the sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.225R which makes the Fourier transform of the distribution fall to 1/e of its maximum value at wavenumber 1/resolution, the default in Chimera (Petterson et al, 2004)
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, an option in Chimera (Petterson et al, 2004);
0.425R the fullwidth half maximum being equal to the resolution, as used by FlexEM (Topf et al, 2008);
0.5R the distance between the two inflection points being the same length as the resolution, an option in Chimera (Petterson et al, 2004);
1R where the sigma value simply equal to the resolution, as used by NMFF (Tama et al, 2004).
*number_top_mod*
Number of Fits to cluster. Default is all.
*write*
True will write out a file that contains the list of the structure instances representing different fits scored and clustered.
note the lrms column is the Calpha RMSD of each fit from the first fit in its class
*targetMap*
Target Map Instance.
"""
cluster=Cluster()
list_dict=[]
if targetMap==False:
#targetMap = self.protMap(prot, min(resolution/4., 3.5), resolution)
print("WARNING:Need target map")
sys.exit()
score_select=[]
for score in score_list:
#check if score chosen are correct
if score not in ['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']:
print('Incorrect Scoring Function: %s' % score)
print('Please select from one of the following scoring functions: %s' % ', '.join(['CCC','LAP','MI','NV','NV_Sobel','NV_Laplace','ENV','CD']))
sys.exit()
if score not in score_select:
score_select.append(score)
else:
print('Chose the %s twice' % score)
sys.exit()
for score in score_list:
print("******",score)
if score=='CCC':
rankCCC=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictCCC=Consensus()._makedict(rankCCC)
list_dict.append(dictCCC)
Consensus()._printdict(dictCCC)
elif score=='LAP':
rankLAP=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictLAP=Consensus()._makedict(rankLAP)
list_dict.append(dictLAP)
Consensus()._printdict(dictLAP)
elif score=='MI':
rankMI=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictMI=Consensus()._makedict(rankMI)
list_dict.append(dictMI)
Consensus()._printdict(dictMI)
elif score=='NV':
rankNV=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNV=Consensus()._makedict(rankNV)
list_dict.append(dictNV)
Consensus()._printdict(dictNV)
elif score=='NV_Sobel':
rankNVS=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNVS=Consensus()._makedict(rankNVS)
list_dict.append(dictNVS)
Consensus()._printdict(dictNVS)
elif score=='NV_Laplace':
rankNVL=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictNVL=Consensus()._makedict(rankNVL)
list_dict.append(dictNVL)
Consensus()._printdict(dictNVL)
elif score=='ENV':
rankENV=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictENV=Consensus()._makedict(rankENV)
list_dict.append(dictENV)
Consensus()._printdict(dictENV)
if score=='CD':
rankCD=cluster.rank_fit_ensemble(ensemble_list,score,res_target_map,sigma_coeff,number_top_mod=number_top_mod,targetMap=targetMap.copy())
dictCD=Consensus()._makedict(rankCD)
list_dict.append(dictCD)
Consensus()._printdict(dictCD)
dict_count={}
mxcinsensus = zeros(shape=(7,number_top_mod))
for k,v in list(list_dict[0].items()):
dict_count[v]=[]
for k in dict_count:
for num in range(len(list_dict)):
for k2,v2 in list(list_dict[num].items()):
if k == v2:
dict_count[k].append(k2)
dict_out={}
for k,v in list(dict_count.items()):
median_list=median(v)
m = max([v.count(a) for a in v])
if m>1:
mode_list=[x for x in v if v.count(x) == m][0]
dict_out[k]=[median_list,mode_list]
else:
pass
mode_list=max(set(v), key=v.count)
sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1])
print("**************")
print("Consensus rank")
for fit in sorted_dict:
print(fit[1],fit[0])
return sorted_dict
def _borda_score(self,list_rank,candidate,voters):
"""
private function used in vote function.
It calculates the Borda count is a single-winner election method in which voters rank candidates in order of preference.
"""
score=0
for r in list_rank:
score+=(candidate-r)*voters
return score
def vote(self,ensemble_list,score_list,res_target_map,sigma_coeff,number_top_mod=0,write=False,targetMap=False):
    """
    Borda consensus ranking of multiple "fits" using a user defined set of scores.

    The Borda count is a single-winner election method in which voters rank
    candidates in order of preference.

    Arguments:
        *ensemble_list*
            Input list of Structure Instances.
        *score_list*
            Input list of scoring functions to use, each one of:
            'CCC', 'LAP', 'MI', 'NV', 'NV_Sobel', 'NV_Laplace', 'ENV', 'CD'.
        *res_target_map*
            the resolution, in Angstroms, of the target Map.
        *sigma_coeff*
            the sigma value (multiplied by the resolution) that controls the
            width of the Gaussian.  Common values: 0.187, 0.225, 0.356
            (default in several packages), 0.425, 0.5, 1.
        *number_top_mod*
            Number of Fits to cluster. Default is all.
        *write*
            Currently unused; kept for interface compatibility.
        *targetMap*
            Target Map Instance (required).

    Returns:
        The list of (fit, [borda_score, per-score rank positions]) pairs,
        sorted by decreasing Borda score.
    """
    cluster = Cluster()
    list_dict = []
    candidate = len(ensemble_list)
    voters = len(score_list)
    if targetMap is False:
        # a target map is mandatory; there is no sensible default to build here
        print("WARNING:Need target map")
        sys.exit()
    # Validate the requested scores: each must be known and used only once.
    valid_scores = ['CCC', 'LAP', 'MI', 'NV', 'NV_Sobel', 'NV_Laplace', 'ENV', 'CD']
    score_select = []
    for score in score_list:
        if score not in valid_scores:
            print('Incorrect Scoring Function: %s' % score)
            print('Please select from one of the following scoring functions: %s' % ', '.join(valid_scores))
            sys.exit()
        if score not in score_select:
            score_select.append(score)
        else:
            print('Chose the %s twice' % score)
            sys.exit()
    # Rank the ensemble once per scoring function.  The original code had one
    # near-identical branch per score name (plus an unused, misspelled
    # 'mxcinsensus' array and a stray 'if' where 'elif' was meant); every
    # branch performed exactly the steps below.
    for score in score_list:
        print("******", score)
        rank = cluster.rank_fit_ensemble(ensemble_list, score, res_target_map, sigma_coeff, number_top_mod=number_top_mod, targetMap=targetMap.copy())
        rank_dict = self._makedict(rank)
        list_dict.append(rank_dict)
        self._printdict(rank_dict)
        if score == 'NV':
            # extra diagnostic output for the plain normal-vector score,
            # matching the original behaviour
            for i in rank:
                print(i[0], i[2])
    # For every fit, gather its rank position under each scoring function.
    dict_count = {}
    for k, v in list(list_dict[0].items()):
        dict_count[v] = []
    for fit_key in dict_count:
        for rank_dict in list_dict:
            for position, fit in list(rank_dict.items()):
                if fit_key == fit:
                    dict_count[fit_key].append(position)
    # Borda score per fit; keep the per-score positions for the report below.
    dict_out = {}
    for fit_key, positions in list(dict_count.items()):
        borda_score = self._borda_score(positions, candidate, voters)
        dict_out[fit_key] = [borda_score, positions]
    sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1][0], reverse=True)
    print("**************")
    print("Consensus rank")
    line = ''
    line += "Borda_score\t"
    for score in score_list:
        line += '%s\t' % score
    line += "Fit\n"
    count = 0
    for fit in sorted_dict:
        count += 1
        line += '%s\t' % count
        b = fit[1][0]
        line += '%s\t' % b
        for s in fit[1][1]:
            line += '%s\t' % s
        m = fit[0]
        line += '%s\n' % m
    print(line)
    return sorted_dict
#need to make it more elegant this come from private scripting.
def vote_list(self,score_lists):
    """
    Borda consensus scoring between multiple "fits" using a user defined set of scores.

    The Borda count is a single-winner election method in which voters rank
    candidates in order of preference.

    Arguments:
        *score_lists*
            List of lists; each inner list contains Structure Instances
            already ranked by one scoring function.

    Returns:
        The list of (fit, [borda_score, per-list rank positions]) pairs,
        sorted by decreasing Borda score.
    """
    dict_count = {}
    list_dict = []
    voters = len(score_lists)
    # number of candidates per ranking list; only the first list's length is
    # used for the Borda score, as in the original implementation
    candidate = [len(ranked) for ranked in score_lists]
    for list_score in score_lists:
        list_dict.append(self._makedict(list_score))
    # For every fit, gather its rank position in each ranking list.
    for k, v in list(list_dict[0].items()):
        dict_count[v] = []
    for fit_key in dict_count:
        for rank_dict in list_dict:
            for position, fit in list(rank_dict.items()):
                if fit_key == fit:
                    dict_count[fit_key].append(position)
    dict_out = {}
    for fit_key, positions in list(dict_count.items()):
        borda_score = self._borda_score(positions, candidate[0], voters)
        # BUGFIX: also keep the per-list rank positions.  The report loop
        # below reads fit[1][1]; the original stored only [borda_score] and
        # therefore raised IndexError on every call.
        dict_out[fit_key] = [borda_score, positions]
    sorted_dict = sorted(list(dict_out.items()), key=lambda x: x[1][0], reverse=True)
    print("**************")
    print("Consensus rank")
    line = ''
    line += "Borda_score\t"
    count = 0
    for score in score_lists:
        count += 1
        line += '%s\t' % count
    line += "Fit\n"
    count = 0
    for fit in sorted_dict:
        count += 1
        line += '%s\t' % count
        b = fit[1][0]
        line += '%s\t' % b
        for s in fit[1][1]:
            line += '%s\t' % s
        m = fit[0]
        line += '%s\n' % m
    print(line)
    return sorted_dict
# def vote_list(self,score_lists):
#
# dict_count={}
# list_dict=[]
# for list_score in score_lists:
# dictScore=Consensus()._makedict_list(list_score)
# list_dict.append(dictScore)
# for k,v in list_dict[0].items():
# dict_count[v]=[]
# for k in dict_count:
# for num in range(len(list_dict)):
# for k2,v2 in list_dict[num].items():
# if k == v2:
# dict_count[k].append(k2)
# dict_out={}
# for k,v in dict_count.items():
# # print k
# # print sum(v)
# #print mean(v)
# #print median(v)
# most_frequent=mode(v)[0][0]
# dict_out[k]=most_frequent
# sorted_dict = sorted(dict_out.items(), key=lambda x: x[1])
# print "**************"
# print "Consensus rank"
# for fit in sorted_dict:
# print fit[1],fit[0]
# return sorted_dict
#
|
OniDaito/ChimeraXTempy
|
TEMPy/Consensus.py
|
Python
|
mit
| 24,096
|
[
"Gaussian"
] |
94340f6d31d3c47b29a5052713cc8aa463393b29e13eada40e1eb449f43407a9
|
from django import forms
from django.contrib.admin.widgets import AdminRadioSelect, AdminRadioFieldRenderer
from edc_base.form.forms import BaseModelForm
from microbiome.apps.mb.choices import VISIT_REASON, VISIT_INFO_SOURCE, MATERNAL_VISIT_STUDY_STATUS, INFO_PROVIDER
from ..models import MaternalVisit, MaternalConsent
from edc_constants.constants import ON_STUDY, MISSED_VISIT
from edc_visit_tracking.forms import VisitFormMixin
class MaternalVisitForm(VisitFormMixin, BaseModelForm):
    """Form for a maternal study visit.

    Collects the mother's study status, the reason for the visit and the
    source of the visit information, and re-checks subject eligibility on
    clean().
    """

    participant_label = 'mother'

    study_status = forms.ChoiceField(
        label='What is the mother\'s current study status',
        choices=MATERNAL_VISIT_STUDY_STATUS,
        initial=ON_STUDY,
        help_text="",
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    reason = forms.ChoiceField(
        label='Reason for visit',
        # list() instead of an identity comprehension; same resulting choices
        choices=list(VISIT_REASON),
        help_text="",
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    info_source = forms.ChoiceField(
        label='Source of information',
        required=False,
        choices=list(VISIT_INFO_SOURCE),
        widget=AdminRadioSelect(renderer=AdminRadioFieldRenderer))

    def clean(self):
        """Run mixin/base cleaning, then raise ValidationError if the
        subject failed eligibility."""
        cleaned_data = super(MaternalVisitForm, self).clean()
        if self.instance.id:
            instance = self.instance
        else:
            # unsaved form: build a transient model instance from the data
            instance = MaternalVisit(**self.cleaned_data)
        instance.subject_failed_eligibility(forms.ValidationError)
        return cleaned_data

    class Meta:
        model = MaternalVisit
        fields = '__all__'
|
botswana-harvard/microbiome
|
microbiome/apps/mb_maternal/forms/maternal_visit_form.py
|
Python
|
gpl-2.0
| 1,637
|
[
"VisIt"
] |
9708c0feb088fdd3ee1e587e9553718079976b7c33afb776c51c06090381789e
|
""" Python test discovery, setup and run of test functions. """
import enum
import fnmatch
import inspect
import itertools
import os
import sys
import typing
import warnings
from collections import Counter
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from typing import Callable
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
import py
import _pytest
from _pytest import fixtures
from _pytest import nodes
from _pytest._code import filter_traceback
from _pytest._code.code import ExceptionInfo
from _pytest._code.source import getfslineno
from _pytest._io import TerminalWriter
from _pytest._io.saferepr import saferepr
from _pytest.compat import ascii_escaped
from _pytest.compat import get_default_arg_names
from _pytest.compat import get_real_func
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import iscoroutinefunction
from _pytest.compat import NOTSET
from _pytest.compat import REGEX_TYPE
from _pytest.compat import safe_getattr
from _pytest.compat import safe_isclass
from _pytest.compat import STRING_TYPES
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.deprecated import FUNCARGNAMES
from _pytest.fixtures import FuncFixtureInfo
from _pytest.mark import MARK_GEN
from _pytest.mark import ParameterSet
from _pytest.mark.structures import get_unpacked_marks
from _pytest.mark.structures import Mark
from _pytest.mark.structures import normalize_mark_list
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.pathlib import parts
from _pytest.warning_types import PytestCollectionWarning
from _pytest.warning_types import PytestUnhandledCoroutineWarning
def pyobj_property(name):
    """Build a read-only property that returns the Python object of the
    nearest parent collector of the given node type ("Module", "Class",
    "Instance"), or None when no such parent exists."""
    doc = "python {} object this node was collected from (can be None).".format(
        name.lower()
    )

    def fget(self):
        # resolve the node class lazily off the pytest namespace to avoid
        # an import cycle at module import time
        parent = self.getparent(getattr(__import__("pytest"), name))
        return None if parent is None else parent.obj

    return property(fget, None, None, doc)
def pytest_addoption(parser):
    """Register python-test specific command line options and ini settings."""
    group = parser.getgroup("general")
    group.addoption(
        "--fixtures",
        "--funcargs",
        action="store_true",
        dest="showfixtures",
        default=False,
        help="show available fixtures, sorted by plugin appearance "
        "(fixtures with leading '_' are only shown with '-v')",
    )
    group.addoption(
        "--fixtures-per-test",
        action="store_true",
        dest="show_fixtures_per_test",
        default=False,
        help="show fixtures per test",
    )
    parser.addini(
        "python_files",
        type="args",
        # NOTE: default is also used in AssertionRewritingHook.
        default=["test_*.py", "*_test.py"],
        help="glob-style file patterns for Python test module discovery",
    )
    parser.addini(
        "python_classes",
        type="args",
        default=["Test"],
        help="prefixes or glob names for Python test class discovery",
    )
    parser.addini(
        "python_functions",
        type="args",
        default=["test"],
        help="prefixes or glob names for Python test function and method discovery",
    )
    parser.addini(
        "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
        type="bool",
        default=False,
        help="disable string escape non-ascii characters, might cause unwanted "
        "side effects(use at your own risk)",
    )
    group.addoption(
        "--import-mode",
        default="prepend",
        choices=["prepend", "append"],
        dest="importmode",
        help="prepend/append to sys.path when importing test modules, "
        "default is to prepend.",
    )
def pytest_cmdline_main(config):
    """Short-circuit the normal test run for --fixtures / --fixtures-per-test:
    print the requested fixture report and exit with status 0."""
    options = config.option
    if options.showfixtures:
        showfixtures(config)
        return 0
    if options.show_fixtures_per_test:
        show_fixtures_per_test(config)
        return 0
    return None
def pytest_generate_tests(metafunc: "Metafunc") -> None:
    """Apply every @pytest.mark.parametrize marker found on the test
    definition by expanding it into concrete call specs on *metafunc*."""
    for marker in metafunc.definition.iter_markers(name="parametrize"):
        # TODO: Fix this type-ignore (overlapping kwargs).
        metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker)  # type: ignore[misc] # noqa: F821
def pytest_configure(config):
    """Register the markers this plugin understands (parametrize,
    usefixtures) so that `pytest --markers` documents them."""
    config.addinivalue_line(
        "markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in different arguments in turn. argvalues generally "
        "needs to be a list of values if argnames specifies only one name "
        "or a list of tuples of values if argnames specifies multiple names. "
        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
        "decorated test function, one with arg1=1 and another with arg1=2."
        "see https://docs.pytest.org/en/latest/parametrize.html for more info "
        "and examples.",
    )
    config.addinivalue_line(
        "markers",
        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
        "all of the specified fixtures. see "
        "https://docs.pytest.org/en/latest/fixture.html#usefixtures ",
    )
def async_warn(nodeid: str) -> None:
    """Warn that an async test function was collected but cannot be run
    without a suitable async plugin, then skip the test.

    *nodeid* identifies the offending test.  It is currently not
    interpolated into the warning text: the original code called
    ``msg.format(nodeid)``, which was a no-op because *msg* contains no
    replacement field.  The dead format call is removed here; the parameter
    is kept for interface compatibility.
    """
    msg = "async def functions are not natively supported and have been skipped.\n"
    msg += (
        "You need to install a suitable plugin for your async framework, for example:\n"
    )
    msg += " - pytest-asyncio\n"
    msg += " - pytest-trio\n"
    msg += " - pytest-tornasync\n"
    msg += " - pytest-twisted"
    warnings.warn(PytestUnhandledCoroutineWarning(msg))
    # skip() raises, so nothing runs after this line
    skip(msg="async def function and no async plugin installed (see warnings)")
@hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem: "Function"):
    """Call the underlying test function with its resolved fixture arguments.

    Async test functions are warned about and skipped unless they are bound
    to a unittest.IsolatedAsyncioTestCase, which can run them itself.
    Returns True to signal the hook chain that the call was handled.
    """
    testfunction = pyfuncitem.obj
    try:
        # ignoring type as the import is invalid in py37 and mypy thinks its a error
        from unittest import IsolatedAsyncioTestCase  # type: ignore
    except ImportError:
        async_ok_in_stdlib = False
    else:
        # the stdlib can run coroutines when the function is a bound method
        # of an IsolatedAsyncioTestCase instance
        async_ok_in_stdlib = isinstance(
            getattr(testfunction, "__self__", None), IsolatedAsyncioTestCase
        )
    if (
        iscoroutinefunction(testfunction)
        or (sys.version_info >= (3, 6) and inspect.isasyncgenfunction(testfunction))
    ) and not async_ok_in_stdlib:
        # async_warn ends in skip(), so execution stops here for plain async defs
        async_warn(pyfuncitem.nodeid)
    funcargs = pyfuncitem.funcargs
    # only pass the fixtures the test function actually declared as parameters
    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
    result = testfunction(**testargs)
    if hasattr(result, "__await__") or hasattr(result, "__aiter__"):
        if async_ok_in_stdlib:
            # todo: investigate moving this to the unittest plugin
            # by a test call result hook
            testcase = testfunction.__self__
            testcase._callMaybeAsync(lambda: result)
        else:
            async_warn(pyfuncitem.nodeid)
    return True
def pytest_collect_file(path, parent):
    """Collect ``*.py`` files as test modules.

    Non-python files are ignored.  For paths that were not given explicitly
    on the command line, the file name must additionally match the
    ``python_files`` patterns (or be an ``__init__.py``).
    """
    if path.ext != ".py":
        return None
    if not parent.session.isinitpath(path):
        allowed = parent.config.getini("python_files") + ["__init__.py"]
        if not path_matches_patterns(path, allowed):
            return None
    ihook = parent.session.gethookproxy(path)
    return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def path_matches_patterns(path, patterns):
    """Return True if the given py.path.local matches at least one of the
    glob patterns in *patterns*, False otherwise."""
    for pattern in patterns:
        if path.fnmatch(pattern):
            return True
    return False
def pytest_pycollect_makemodule(path, parent):
    """Create the collector node for a python file: a Package for
    ``__init__.py``, a plain Module for everything else."""
    collector_cls = Package if path.basename == "__init__.py" else Module
    return collector_cls.from_parent(parent, fspath=path)
@hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    """Turn a (name, obj) pair found in a module or class namespace into
    collection node(s), unless another plugin already produced a result."""
    outcome = yield
    res = outcome.get_result()
    if res is not None:
        # another hook implementation already handled this object
        return
    # nothing was collected elsewhere, let's do it here
    if safe_isclass(obj):
        if collector.istestclass(obj, name):
            outcome.force_result(Class.from_parent(collector, name=name, obj=obj))
    elif collector.istestfunction(obj, name):
        # mock seems to store unbound methods (issue473), normalize it
        obj = getattr(obj, "__func__", obj)
        # We need to try and unwrap the function if it's a functools.partial
        # or a functools.wrapped.
        # We mustn't if it's been wrapped with mock.patch (python 2 only)
        if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
            filename, lineno = getfslineno(obj)
            warnings.warn_explicit(
                message=PytestCollectionWarning(
                    "cannot collect %r because it is not a function." % name
                ),
                category=None,
                filename=str(filename),
                lineno=lineno + 1,
            )
        elif getattr(obj, "__test__", True):
            if is_generator(obj):
                # yield-style tests are collected but marked xfail(run=False)
                res = Function.from_parent(collector, name=name)
                reason = "yield tests were removed in pytest 4.0 - {name} will be ignored".format(
                    name=name
                )
                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
                res.warn(PytestCollectionWarning(reason))
            else:
                res = list(collector._genfunctions(name, obj))
            outcome.force_result(res)
class PyobjMixin:
    """Mixin giving a collection node lazy access to the underlying Python
    object it was collected from, plus helpers to report its location."""

    # nearest enclosing Module/Class/Instance collector's python object
    module = pyobj_property("Module")
    cls = pyobj_property("Class")
    instance = pyobj_property("Instance")
    # subclasses (Instance) disable this to avoid duplicating markers
    _ALLOW_MARKERS = True

    @property
    def obj(self):
        """Underlying Python object."""
        obj = getattr(self, "_obj", None)
        if obj is None:
            # compute and cache on first access
            self._obj = obj = self._getobj()
            # XXX evil hack
            # used to avoid Instance collector marker duplication
            if self._ALLOW_MARKERS:
                self.own_markers.extend(get_unpacked_marks(self.obj))
        return obj

    @obj.setter
    def obj(self, value):
        self._obj = value

    def _getobj(self):
        """Gets the underlying Python object. May be overwritten by subclasses."""
        return getattr(self.parent.obj, self.name)

    def getmodpath(self, stopatmodule=True, includemodule=False):
        """ return python path relative to the containing module. """
        chain = self.listchain()
        chain.reverse()
        parts = []
        for node in chain:
            if isinstance(node, Instance):
                # Instance nodes are an implementation detail; skip them
                continue
            name = node.name
            if isinstance(node, Module):
                name = os.path.splitext(name)[0]
                if stopatmodule:
                    if includemodule:
                        parts.append(name)
                    break
            parts.append(name)
        parts.reverse()
        return ".".join(parts)

    def reportinfo(self) -> Tuple[Union[py.path.local, str], int, str]:
        """Return (fspath, lineno, modpath) used for test reporting."""
        # XXX caching?
        obj = self.obj
        compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None)
        if isinstance(compat_co_firstlineno, int):
            # nose compatibility
            file_path = sys.modules[obj.__module__].__file__
            if file_path.endswith(".pyc"):
                file_path = file_path[:-1]
            fspath = file_path  # type: Union[py.path.local, str]
            lineno = compat_co_firstlineno
        else:
            fspath, lineno = getfslineno(obj)
        modpath = self.getmodpath()
        assert isinstance(lineno, int)
        return fspath, lineno, modpath
class PyCollector(PyobjMixin, nodes.Collector):
    """Base collector for python namespaces: decides which names in a module
    or class body count as tests and turns them into collection nodes."""

    def funcnamefilter(self, name):
        # a name looks like a test function if it matches python_functions
        return self._matches_prefix_or_glob_option("python_functions", name)

    def isnosetest(self, obj):
        """ Look for the __test__ attribute, which is applied by the
        @nose.tools.istest decorator
        """
        # We explicitly check for "is True" here to not mistakenly treat
        # classes with a custom __getattr__ returning something truthy (like a
        # function) as test classes.
        return safe_getattr(obj, "__test__", False) is True

    def classnamefilter(self, name):
        # a name looks like a test class if it matches python_classes
        return self._matches_prefix_or_glob_option("python_classes", name)

    def istestfunction(self, obj, name):
        """Return True if *obj* under *name* should be collected as a test
        function (callable, test-named or nose-marked, and not a fixture)."""
        if self.funcnamefilter(name) or self.isnosetest(obj):
            if isinstance(obj, staticmethod):
                # static methods need to be unwrapped
                obj = safe_getattr(obj, "__func__", False)
            return (
                safe_getattr(obj, "__call__", False)
                and fixtures.getfixturemarker(obj) is None
            )
        else:
            return False

    def istestclass(self, obj, name):
        """Return True if *obj* under *name* should be collected as a test class."""
        return self.classnamefilter(name) or self.isnosetest(obj)

    def _matches_prefix_or_glob_option(self, option_name, name):
        """
        checks if the given name matches the prefix or glob-pattern defined
        in ini configuration.
        """
        for option in self.config.getini(option_name):
            if name.startswith(option):
                return True
            # check that name looks like a glob-string before calling fnmatch
            # because this is called for every name in each collected module,
            # and fnmatch is somewhat expensive to call
            elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch(
                name, option
            ):
                return True
        return False

    def collect(self):
        """Collect items from this object's namespace, including inherited
        attributes, sorted by source (file, line) position."""
        if not getattr(self.obj, "__test__", True):
            return []
        # NB. we avoid random getattrs and peek in the __dict__ instead
        # (XXX originally introduced from a PyPy need, still true?)
        dicts = [getattr(self.obj, "__dict__", {})]
        for basecls in inspect.getmro(self.obj.__class__):
            dicts.append(basecls.__dict__)
        seen = {}
        values = []
        for dic in dicts:
            for name, obj in list(dic.items()):
                if name in seen:
                    # first (most-derived) occurrence of a name wins
                    continue
                seen[name] = True
                res = self._makeitem(name, obj)
                if res is None:
                    continue
                if not isinstance(res, list):
                    res = [res]
                values.extend(res)

        def sort_key(item):
            # preserve definition order: sort by (file, line)
            fspath, lineno, _ = item.reportinfo()
            return (str(fspath), lineno)

        values.sort(key=sort_key)
        return values

    def _makeitem(self, name, obj):
        # delegate item creation to the pytest_pycollect_makeitem hook
        # assert self.ihook.fspath == self.fspath, self
        return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj)

    def _genfunctions(self, name, funcobj):
        """Yield Function items for *funcobj*: one plain item, or one item
        per parametrized call spec."""
        module = self.getparent(Module).obj
        clscol = self.getparent(Class)
        cls = clscol and clscol.obj or None
        fm = self.session._fixturemanager
        definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj)
        fixtureinfo = definition._fixtureinfo
        metafunc = Metafunc(
            definition, fixtureinfo, self.config, cls=cls, module=module
        )
        # module- and class-level pytest_generate_tests hooks participate too
        methods = []
        if hasattr(module, "pytest_generate_tests"):
            methods.append(module.pytest_generate_tests)
        if hasattr(cls, "pytest_generate_tests"):
            methods.append(cls().pytest_generate_tests)
        self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc))
        if not metafunc._calls:
            yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo)
        else:
            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
            # add_funcarg_pseudo_fixture_def may have shadowed some fixtures
            # with direct parametrization, so make sure we update what the
            # function really needs.
            fixtureinfo.prune_dependency_tree()
            for callspec in metafunc._calls:
                subname = "{}[{}]".format(name, callspec.id)
                yield Function.from_parent(
                    self,
                    name=subname,
                    callspec=callspec,
                    callobj=funcobj,
                    fixtureinfo=fixtureinfo,
                    keywords={callspec.id: True},
                    originalname=name,
                )
class Module(nodes.File, PyCollector):
    """ Collector for test classes and functions. """

    def _getobj(self):
        # the underlying python object of a Module node is the imported module
        return self._importtestmodule()

    def collect(self):
        """Import the module, inject xunit-style fixtures, register its
        fixtures, then collect its contents."""
        self._inject_setup_module_fixture()
        self._inject_setup_function_fixture()
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def _inject_setup_module_fixture(self):
        """Injects a hidden autouse, module scoped fixture into the collected module object
        that invokes setUpModule/tearDownModule if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )
        if setup_module is None and teardown_module is None:
            return

        @fixtures.fixture(autouse=True, scope="module")
        def xunit_setup_module_fixture(request):
            if setup_module is not None:
                _call_with_optional_argument(setup_module, request.module)
            yield
            if teardown_module is not None:
                _call_with_optional_argument(teardown_module, request.module)

        self.obj.__pytest_setup_module = xunit_setup_module_fixture

    def _inject_setup_function_fixture(self):
        """Injects a hidden autouse, function scoped fixture into the collected module object
        that invokes setup_function/teardown_function if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",))
        teardown_function = _get_first_non_fixture_func(
            self.obj, ("teardown_function",)
        )
        if setup_function is None and teardown_function is None:
            return

        @fixtures.fixture(autouse=True, scope="function")
        def xunit_setup_function_fixture(request):
            if request.instance is not None:
                # in this case we are bound to an instance, so we need to let
                # setup_method handle this
                yield
                return
            if setup_function is not None:
                _call_with_optional_argument(setup_function, request.function)
            yield
            if teardown_function is not None:
                _call_with_optional_argument(teardown_function, request.function)

        self.obj.__pytest_setup_function = xunit_setup_function_fixture

    def _importtestmodule(self):
        """Import this file as a python module, translating the usual import
        failure modes into CollectError with a helpful message."""
        # we assume we are only called once per module
        importmode = self.config.getoption("--import-mode")
        try:
            mod = self.fspath.pyimport(ensuresyspath=importmode)
        except SyntaxError:
            raise self.CollectError(ExceptionInfo.from_current().getrepr(style="short"))
        except self.fspath.ImportMismatchError as e:
            raise self.CollectError(
                "import file mismatch:\n"
                "imported module %r has this __file__ attribute:\n"
                " %s\n"
                "which is not the same as the test file we want to collect:\n"
                " %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules" % e.args
            )
        except ImportError:
            exc_info = ExceptionInfo.from_current()
            if self.config.getoption("verbose") < 2:
                # hide pytest-internal frames unless running very verbose
                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
            exc_repr = (
                exc_info.getrepr(style="short")
                if exc_info.traceback
                else exc_info.exconly()
            )
            formatted_tb = str(exc_repr)
            raise self.CollectError(
                "ImportError while importing test module '{fspath}'.\n"
                "Hint: make sure your test modules/packages have valid Python names.\n"
                "Traceback:\n"
                "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
            )
        except _pytest.runner.Skipped as e:
            if e.allow_module_level:
                raise
            raise self.CollectError(
                "Using pytest.skip outside of a test is not allowed. "
                "To decorate a test function, use the @pytest.mark.skip "
                "or @pytest.mark.skipif decorators instead, and to skip a "
                "module use `pytestmark = pytest.mark.{skip,skipif}."
            )
        self.config.pluginmanager.consider_module(mod)
        return mod
class Package(Module):
    """Collector for a python package directory (one containing __init__.py)."""

    def __init__(
        self,
        fspath: py.path.local,
        parent: nodes.Collector,
        # NOTE: following args are unused:
        config=None,
        session=None,
        nodeid=None,
    ) -> None:
        # NOTE: could be just the following, but kept as-is for compat.
        # nodes.FSCollector.__init__(self, fspath, parent=parent)
        session = parent.session
        nodes.FSCollector.__init__(
            self, fspath, parent=parent, config=config, session=session, nodeid=nodeid
        )
        # the package node is named after its directory, not "__init__.py"
        self.name = fspath.dirname

    def setup(self):
        """Run xunit-style setUpModule/setup_module for the package's
        __init__ module and register the matching teardown finalizer."""
        # not using fixtures to call setup_module here because autouse fixtures
        # from packages are not called automatically (#4085)
        setup_module = _get_first_non_fixture_func(
            self.obj, ("setUpModule", "setup_module")
        )
        if setup_module is not None:
            _call_with_optional_argument(setup_module, self.obj)
        teardown_module = _get_first_non_fixture_func(
            self.obj, ("tearDownModule", "teardown_module")
        )
        if teardown_module is not None:
            func = partial(_call_with_optional_argument, teardown_module, self.obj)
            self.addfinalizer(func)

    def gethookproxy(self, fspath: py.path.local):
        # public accessor for the fs-collector's per-path hook proxy
        return super()._gethookproxy(fspath)

    def isinitpath(self, path):
        # True if *path* was passed explicitly on the command line / ini
        return path in self.session._initialpaths

    def collect(self):
        """Collect the package's __init__ module plus all contained test
        files, skipping anything that belongs to a nested sub-package."""
        this_path = self.fspath.dirpath()
        init_module = this_path.join("__init__.py")
        if init_module.check(file=1) and path_matches_patterns(
            init_module, self.config.getini("python_files")
        ):
            yield Module.from_parent(self, fspath=init_module)
        pkg_prefixes = set()
        for path in this_path.visit(rec=self._recurse, bf=True, sort=True):
            # We will visit our own __init__.py file, in which case we skip it.
            is_file = path.isfile()
            if is_file:
                if path.basename == "__init__.py" and path.dirpath() == this_path:
                    continue
            parts_ = parts(path.strpath)
            if any(
                pkg_prefix in parts_ and pkg_prefix.join("__init__.py") != path
                for pkg_prefix in pkg_prefixes
            ):
                # path lives inside an already-seen sub-package; its own
                # Package collector is responsible for it
                continue
            if is_file:
                yield from self._collectfile(path)
            elif not path.isdir():
                # Broken symlink or invalid/missing file.
                continue
            elif path.join("__init__.py").check(file=1):
                pkg_prefixes.add(path)
def _call_with_optional_argument(func, arg):
"""Call the given function with the given argument if func accepts one argument, otherwise
calls func without arguments"""
arg_count = func.__code__.co_argcount
if inspect.ismethod(func):
arg_count -= 1
if arg_count:
func(arg)
else:
func()
def _get_first_non_fixture_func(obj, names):
    """Return the attribute from the given object to be used as a
    setup/teardown xunit-style function.

    Attributes that carry a fixture marker are skipped so the function is
    not invoked twice; returns None when no suitable attribute exists.
    """
    for candidate_name in names:
        candidate = getattr(obj, candidate_name, None)
        if candidate is None:
            continue
        if fixtures.getfixturemarker(candidate) is None:
            return candidate
    return None
class Class(PyCollector):
    """ Collector for test methods. """

    @classmethod
    def from_parent(cls, parent, *, name, obj=None):
        """
        The public constructor
        """
        # *obj* is accepted for API compatibility but intentionally unused
        return super().from_parent(name=name, parent=parent)

    def collect(self):
        """Collect the class's single Instance node, refusing classes that
        define __init__ or __new__ (pytest cannot instantiate those)."""
        if not safe_getattr(self.obj, "__test__", True):
            return []
        if hasinit(self.obj):
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__init__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []
        elif hasnew(self.obj):
            self.warn(
                PytestCollectionWarning(
                    "cannot collect test class %r because it has a "
                    "__new__ constructor (from: %s)"
                    % (self.obj.__name__, self.parent.nodeid)
                )
            )
            return []
        self._inject_setup_class_fixture()
        self._inject_setup_method_fixture()
        return [Instance.from_parent(self, name="()")]

    def _inject_setup_class_fixture(self):
        """Injects a hidden autouse, class scoped fixture into the collected class object
        that invokes setup_class/teardown_class if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
        teardown_class = getattr(self.obj, "teardown_class", None)
        if setup_class is None and teardown_class is None:
            return

        @fixtures.fixture(autouse=True, scope="class")
        def xunit_setup_class_fixture(cls):
            if setup_class is not None:
                func = getimfunc(setup_class)
                _call_with_optional_argument(func, self.obj)
            yield
            if teardown_class is not None:
                func = getimfunc(teardown_class)
                _call_with_optional_argument(func, self.obj)

        self.obj.__pytest_setup_class = xunit_setup_class_fixture

    def _inject_setup_method_fixture(self):
        """Injects a hidden autouse, function scoped fixture into the collected class object
        that invokes setup_method/teardown_method if either or both are available.

        Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with
        other fixtures (#517).
        """
        setup_method = _get_first_non_fixture_func(self.obj, ("setup_method",))
        teardown_method = getattr(self.obj, "teardown_method", None)
        if setup_method is None and teardown_method is None:
            return

        @fixtures.fixture(autouse=True, scope="function")
        def xunit_setup_method_fixture(self, request):
            method = request.function
            if setup_method is not None:
                func = getattr(self, "setup_method")
                _call_with_optional_argument(func, method)
            yield
            if teardown_method is not None:
                func = getattr(self, "teardown_method")
                _call_with_optional_argument(func, method)

        self.obj.__pytest_setup_method = xunit_setup_method_fixture
class Instance(PyCollector):
    # Collector node representing the single instance of a test class on
    # which the test methods actually run.
    _ALLOW_MARKERS = False  # hack, destroy later
    # instances share the object with their parents in a way
    # that duplicates markers instances if not taken out
    # can be removed at node structure reorganization time

    def _getobj(self):
        # Instantiate the enclosing class (self.parent.obj is the class).
        return self.parent.obj()

    def collect(self):
        # Register any fixtures defined on the instance before collecting.
        self.session._fixturemanager.parsefactories(self)
        return super().collect()

    def newinstance(self):
        # Create and memoize a fresh class instance (one per test function).
        self.obj = self._getobj()
        return self.obj
def hasinit(obj):
    """Return True when *obj* defines an ``__init__`` of its own (i.e. one
    different from ``object.__init__``), False when it only inherits the
    default, and None when no ``__init__`` attribute is found at all."""
    candidate = getattr(obj, "__init__", None)
    if not candidate:
        return None
    return candidate != object.__init__
def hasnew(obj):
    """Return True when *obj* defines a ``__new__`` of its own (i.e. one
    different from ``object.__new__``), False when it only inherits the
    default, and None when no ``__new__`` attribute is found at all."""
    candidate = getattr(obj, "__new__", None)
    if not candidate:
        return None
    return candidate != object.__new__
class CallSpec2:
    """One concrete set of argument values for a parametrized test call."""

    def __init__(self, metafunc):
        self.metafunc = metafunc
        # Values passed directly as test-function arguments.
        self.funcargs = {}
        self._idlist = []
        # Values routed to fixtures via ``request.param``.
        self.params = {}
        self._arg2scopenum = {}  # used for sorting parametrized resources
        self.marks = []
        self.indices = {}

    def copy(self):
        """Return an independent duplicate of this callspec."""
        clone = CallSpec2(self.metafunc)
        clone.funcargs.update(self.funcargs)
        clone.params.update(self.params)
        clone.marks.extend(self.marks)
        clone.indices.update(self.indices)
        clone._arg2scopenum.update(self._arg2scopenum)
        clone._idlist = list(self._idlist)
        return clone

    def _checkargnotcontained(self, arg):
        # Guard against the same argname being parametrized twice.
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate {!r}".format(arg))

    def getparam(self, name):
        """Return the fixture parameter stored for *name* (ValueError if absent)."""
        try:
            return self.params[name]
        except KeyError:
            raise ValueError(name)

    @property
    def id(self):
        # The test id is the dash-joined ids of all applied parametrizations.
        return "-".join(str(part) for part in self._idlist)

    def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index):
        """Record one parameter set: route each value to funcargs or params."""
        for arg, val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            # valtypes[arg] is either "funcargs" or "params".
            getattr(self, valtypes[arg])[arg] = val
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
        self._idlist.append(id)
        self.marks.extend(normalize_mark_list(marks))
class Metafunc:
    """
    Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook.

    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.
    """

    def __init__(
        self,
        definition: "FunctionDefinition",
        fixtureinfo: fixtures.FuncFixtureInfo,
        config: Config,
        cls=None,
        module=None,
    ) -> None:
        self.definition = definition

        #: access to the :class:`_pytest.config.Config` object for the test session
        self.config = config

        #: the module object where the test function is defined in.
        self.module = module

        #: underlying python test function
        self.function = definition.obj

        #: set of fixture names required by the test function
        self.fixturenames = fixtureinfo.names_closure

        #: class object where the test function is defined in or ``None``.
        self.cls = cls

        # Accumulates one CallSpec2 per generated test invocation.
        self._calls = []  # type: List[CallSpec2]
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

    @property
    def funcargnames(self):
        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
        warnings.warn(FUNCARGNAMES, stacklevel=2)
        return self.fixturenames

    def parametrize(
        self,
        argnames: Union[str, List[str], Tuple[str, ...]],
        argvalues: Iterable[Union[ParameterSet, typing.Sequence[object], object]],
        indirect: Union[bool, typing.Sequence[str]] = False,
        ids: Optional[
            Union[
                Iterable[Union[None, str, float, int, bool]],
                Callable[[object], Optional[object]],
            ]
        ] = None,
        scope: "Optional[str]" = None,
        *,
        _param_mark: Optional[Mark] = None
    ) -> None:
        """ Add new invocations to the underlying test function using the list
        of argvalues for the given argnames.  Parametrization is performed
        during the collection phase.  If you need to setup expensive resources
        see about setting indirect to do it rather at test setup time.

        :arg argnames: a comma-separated string denoting one or more argument
                       names, or a list/tuple of argument strings.

        :arg argvalues: The list of argvalues determines how often a
            test is invoked with different argument values.  If only one
            argname was specified argvalues is a list of values.  If N
            argnames were specified, argvalues must be a list of N-tuples,
            where each tuple-element specifies a value for its respective
            argname.

        :arg indirect: The list of argnames or boolean. A list of arguments'
            names (subset of argnames). If True the list contains all names from
            the argnames. Each argvalue corresponding to an argname in this list will
            be passed as request.param to its respective argname fixture
            function so that it can perform more expensive setups during the
            setup phase of a test rather than at collection time.

        :arg ids: sequence of (or generator for) ids for ``argvalues``,
            or a callable to return part of the id for each argvalue.

            With sequences (and generators like ``itertools.count()``) the
            returned ids should be of type ``string``, ``int``, ``float``,
            ``bool``, or ``None``.
            They are mapped to the corresponding index in ``argvalues``.
            ``None`` means to use the auto-generated id.

            If it is a callable it will be called for each entry in
            ``argvalues``, and the return value is used as part of the
            auto-generated id for the whole set (where parts are joined with
            dashes ("-")).
            This is useful to provide more specific ids for certain items, e.g.
            dates.  Returning ``None`` will use an auto-generated id.

            If no ids are provided they will be generated automatically from
            the argvalues.

        :arg scope: if specified it denotes the scope of the parameters.
            The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            to set a dynamic scope using test context or configuration.
        """
        from _pytest.fixtures import scope2index

        # Normalize argnames/argvalues into a list of names and ParameterSets.
        argnames, parameters = ParameterSet._for_parametrize(
            argnames,
            argvalues,
            self.function,
            self.config,
            function_definition=self.definition,
        )
        del argvalues

        if "request" in argnames:
            fail(
                "'request' is a reserved name and cannot be used in @pytest.mark.parametrize",
                pytrace=False,
            )

        if scope is None:
            scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

        self._validate_if_using_arg_names(argnames, indirect)

        arg_values_types = self._resolve_arg_value_types(argnames, indirect)

        self._validate_explicit_parameters(argnames, indirect)

        # Use any already (possibly) generated ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from:
            generated_ids = _param_mark._param_ids_from._param_ids_generated
            if generated_ids is not None:
                ids = generated_ids

        ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition)

        # Store used (possibly generated) ids with parametrize Marks.
        if _param_mark and _param_mark._param_ids_from and generated_ids is None:
            object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)

        scopenum = scope2index(
            scope, descr="parametrize() call in {}".format(self.function.__name__)
        )

        # create the new calls: if we are parametrize() multiple times (by applying the decorator
        # more than once) then we accumulate those calls generating the cartesian product
        # of all calls
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)):
                newcallspec = callspec.copy()
                newcallspec.setmulti2(
                    arg_values_types,
                    argnames,
                    param_set.values,
                    param_id,
                    param_set.marks,
                    scopenum,
                    param_index,
                )
                newcalls.append(newcallspec)
        self._calls = newcalls

    def _resolve_arg_ids(
        self,
        argnames: typing.Sequence[str],
        ids: Optional[
            Union[
                Iterable[Union[None, str, float, int, bool]],
                Callable[[object], Optional[object]],
            ]
        ],
        parameters: typing.Sequence[ParameterSet],
        item,
    ) -> List[str]:
        """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given
        to ``parametrize``.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param ids: the ids parameter of the parametrized call (see docs).
        :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``.
        :param Item item: the item that generated this parametrized call.
        :rtype: List[str]
        :return: the list of ids for each argname given
        """
        if ids is None:
            idfn = None
            ids_ = None
        elif callable(ids):
            idfn = ids
            ids_ = None
        else:
            idfn = None
            ids_ = self._validate_ids(ids, parameters, self.function.__name__)
        return idmaker(argnames, parameters, idfn, ids_, self.config, item=item)

    def _validate_ids(
        self,
        ids: Iterable[Union[None, str, float, int, bool]],
        parameters: typing.Sequence[ParameterSet],
        func_name: str,
    ) -> List[Union[None, str]]:
        try:
            num_ids = len(ids)  # type: ignore[arg-type]  # noqa: F821
        except TypeError:
            try:
                iter(ids)
            except TypeError:
                raise TypeError("ids must be a callable or an iterable")
            # Unsized iterable (e.g. a generator): assume one id per parameter.
            num_ids = len(parameters)

        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
        if num_ids != len(parameters) and num_ids != 0:
            msg = "In {}: {} parameter sets specified, with different number of ids: {}"
            fail(msg.format(func_name, len(parameters), num_ids), pytrace=False)

        new_ids = []
        for idx, id_value in enumerate(itertools.islice(ids, num_ids)):
            if id_value is None or isinstance(id_value, str):
                new_ids.append(id_value)
            elif isinstance(id_value, (float, int, bool)):
                new_ids.append(str(id_value))
            else:
                msg = "In {}: ids must be list of string/float/int/bool, found: {} (type: {!r}) at index {}"
                fail(
                    msg.format(func_name, saferepr(id_value), type(id_value), idx),
                    pytrace=False,
                )
        return new_ids

    def _resolve_arg_value_types(
        self,
        argnames: typing.Sequence[str],
        indirect: Union[bool, typing.Sequence[str]],
    ) -> Dict[str, str]:
        """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg"
        to the function, based on the ``indirect`` parameter of the parametrized() call.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param indirect: same ``indirect`` parameter of ``parametrize()``.
        :rtype: Dict[str, str]
            A dict mapping each arg name to either:
            * "params" if the argname should be the parameter of a fixture of the same name.
            * "funcargs" if the argname should be a parameter to the parametrized test function.
        """
        if isinstance(indirect, bool):
            valtypes = dict.fromkeys(argnames, "params" if indirect else "funcargs")
        elif isinstance(indirect, Sequence):
            valtypes = dict.fromkeys(argnames, "funcargs")
            for arg in indirect:
                if arg not in argnames:
                    fail(
                        "In {}: indirect fixture '{}' doesn't exist".format(
                            self.function.__name__, arg
                        ),
                        pytrace=False,
                    )
                valtypes[arg] = "params"
        else:
            fail(
                "In {func}: expected Sequence or boolean for indirect, got {type}".format(
                    type=type(indirect).__name__, func=self.function.__name__
                ),
                pytrace=False,
            )
        return valtypes

    def _validate_if_using_arg_names(
        self,
        argnames: typing.Sequence[str],
        indirect: Union[bool, typing.Sequence[str]],
    ) -> None:
        """
        Check if all argnames are being used, by default values, or directly/indirectly.

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param indirect: same ``indirect`` parameter of ``parametrize()``.
        :raise ValueError: if validation fails.
        """
        default_arg_names = set(get_default_arg_names(self.function))
        func_name = self.function.__name__
        for arg in argnames:
            if arg not in self.fixturenames:
                if arg in default_arg_names:
                    fail(
                        "In {}: function already takes an argument '{}' with a default value".format(
                            func_name, arg
                        ),
                        pytrace=False,
                    )
                else:
                    if isinstance(indirect, Sequence):
                        name = "fixture" if arg in indirect else "argument"
                    else:
                        name = "fixture" if indirect else "argument"
                    fail(
                        "In {}: function uses no {} '{}'".format(func_name, name, arg),
                        pytrace=False,
                    )

    def _validate_explicit_parameters(
        self,
        argnames: typing.Sequence[str],
        indirect: Union[bool, typing.Sequence[str]],
    ) -> None:
        """
        The argnames in *parametrize* should either be declared explicitly via
        indirect list or in the function signature

        :param List[str] argnames: list of argument names passed to ``parametrize()``.
        :param indirect: same ``indirect`` parameter of ``parametrize()``.
        :raise ValueError: if validation fails
        """
        if isinstance(indirect, bool):
            parametrized_argnames = [] if indirect else argnames
        else:
            parametrized_argnames = [arg for arg in argnames if arg not in indirect]

        if not parametrized_argnames:
            return

        funcargnames = _pytest.compat.getfuncargnames(self.function)
        usefixtures = fixtures.get_use_fixtures_for_node(self.definition)

        for arg in parametrized_argnames:
            if arg not in funcargnames and arg not in usefixtures:
                func_name = self.function.__name__
                msg = (
                    'In function "{func_name}":\n'
                    'Parameter "{arg}" should be declared explicitly via indirect or in function itself'
                ).format(func_name=func_name, arg=arg)
                fail(msg, pytrace=False)
def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
    """Find the most appropriate scope for a parametrized call based on its arguments.

    When there's at least one direct argument, always use "function" scope.

    When a test function is parametrized and all its arguments are indirect
    (e.g. fixtures), return the most narrow scope based on the fixtures used.

    Related to issue #1832, based on code posted by @Kingdread.
    """
    from _pytest.fixtures import scopes

    if isinstance(indirect, (list, tuple)):
        everything_indirect = len(indirect) == len(argnames)
    else:
        everything_indirect = bool(indirect)
    if everything_indirect:
        used_scopes = [
            fixturedef[0].scope
            for name, fixturedef in (arg2fixturedefs or {}).items()
            if name in argnames
        ]
        # Take the most narrow scope among the fixtures actually used;
        # scopes is ordered broad -> narrow, hence the reversed iteration.
        for candidate in reversed(scopes):
            if candidate in used_scopes:
                return candidate

    return "function"
def _ascii_escaped_by_config(val: Union[str, bytes], config: Optional[Config]) -> str:
    """Escape *val* for use in a test id unless escaping is disabled via ini."""
    escape_option = False
    if config is not None:
        escape_option = config.getini(
            "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
        )
    # TODO: If escaping is turned off and the user passes bytes,
    #       this will return bytes. For now we ignore this but the
    #       code *probably* doesn't handle this case.
    if escape_option:
        return val  # type: ignore
    return ascii_escaped(val)
def _idval(
    val: object,
    argname: str,
    idx: int,
    idfn: Optional[Callable[[object], Optional[object]]],
    item,
    config: Optional[Config],
) -> str:
    """Return the id fragment for a single parametrized value.

    Resolution order: user-supplied ``idfn`` callable, then the
    ``pytest_make_parametrize_id`` hook, then built-in handling per type,
    and finally the ``argname + index`` fallback.
    """
    if idfn:
        try:
            generated_id = idfn(val)
            if generated_id is not None:
                val = generated_id
        except Exception as e:
            msg = "{}: error raised while trying to determine id of parameter '{}' at position {}"
            msg = msg.format(item.nodeid, argname, idx)
            raise ValueError(msg) from e
    elif config:
        hook_id = config.hook.pytest_make_parametrize_id(
            config=config, val=val, argname=argname
        )  # type: Optional[str]
        if hook_id:
            return hook_id

    if isinstance(val, STRING_TYPES):
        return _ascii_escaped_by_config(val, config)
    elif val is None or isinstance(val, (float, int, bool)):
        return str(val)
    elif isinstance(val, REGEX_TYPE):
        return ascii_escaped(val.pattern)
    elif isinstance(val, enum.Enum):
        return str(val)
    elif isinstance(getattr(val, "__name__", None), str):
        # name of a class, function, module, etc.
        name = getattr(val, "__name__")  # type: str
        return name
    # Fallback: opaque object, use position-based id.
    return str(argname) + str(idx)
def _idvalset(
    idx: int,
    parameterset: ParameterSet,
    argnames: Iterable[str],
    idfn: Optional[Callable[[object], Optional[object]]],
    ids: Optional[List[Union[None, str]]],
    item,
    config: Optional[Config],
):
    """Return the id for one parameter set.

    An explicit id on the ParameterSet always wins, then a user-supplied id
    from *ids*, then the dash-joined per-value ids.
    """
    if parameterset.id is not None:
        return parameterset.id
    explicit = None if ids is None or idx >= len(ids) else ids[idx]
    if explicit is not None:
        return _ascii_escaped_by_config(explicit, config)
    fragments = [
        _idval(val, argname, idx, idfn, item=item, config=config)
        for val, argname in zip(parameterset.values, argnames)
    ]
    return "-".join(fragments)
def idmaker(
    argnames: Iterable[str],
    parametersets: Iterable[ParameterSet],
    idfn: Optional[Callable[[object], Optional[object]]] = None,
    ids: Optional[List[Union[None, str]]] = None,
    config: Optional[Config] = None,
    item=None,
) -> List[str]:
    """Compute one id per parameter set, numbering duplicates to make all ids unique."""
    resolved_ids = [
        _idvalset(index, parameterset, argnames, idfn, ids, config=config, item=item)
        for index, parameterset in enumerate(parametersets)
    ]

    # All IDs must be unique!
    if len(set(resolved_ids)) == len(resolved_ids):
        return resolved_ids

    # Append a per-id running counter ("x0", "x1", ...) to every occurrence
    # of any id that appears more than once.
    id_counts = Counter(resolved_ids)
    id_suffixes = defaultdict(int)  # type: Dict[str, int]
    for index, test_id in enumerate(resolved_ids):
        if id_counts[test_id] > 1:
            resolved_ids[index] = "{}{}".format(test_id, id_suffixes[test_id])
            id_suffixes[test_id] += 1
    return resolved_ids
def show_fixtures_per_test(config):
    """Entry point for the ``--fixtures-per-test`` option."""
    from _pytest.main import wrap_session

    return wrap_session(config, _show_fixtures_per_test)
def _show_fixtures_per_test(config, session):
    """Collect the session and print, per test item, the fixtures it uses."""
    import _pytest.config

    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    def get_best_relpath(func):
        # Shortest readable path of *func* relative to the invocation dir.
        loc = getlocation(func, curdir)
        return curdir.bestrelpath(loc)

    def write_fixture(fixture_def):
        # Print one fixture's name (plus location when verbose) and docstring.
        argname = fixture_def.argname
        if verbose <= 0 and argname.startswith("_"):
            # Private fixtures are hidden unless -v is given.
            return
        if verbose > 0:
            bestrel = get_best_relpath(fixture_def.func)
            funcargspec = "{} -- {}".format(argname, bestrel)
        else:
            funcargspec = argname
        tw.line(funcargspec, green=True)
        fixture_doc = inspect.getdoc(fixture_def.func)
        if fixture_doc:
            write_docstring(tw, fixture_doc)
        else:
            tw.line(" no docstring available", red=True)

    def write_item(item):
        # Print the fixtures used by a single collected test item.
        try:
            info = item._fixtureinfo
        except AttributeError:
            # doctests items have no _fixtureinfo attribute
            return
        if not info.name2fixturedefs:
            # this test item does not use any fixtures
            return
        tw.line()
        tw.sep("-", "fixtures used by {}".format(item.name))
        tw.sep("-", "({})".format(get_best_relpath(item.function)))
        # dict key not used in loop but needed for sorting
        for _, fixturedefs in sorted(info.name2fixturedefs.items()):
            assert fixturedefs is not None
            if not fixturedefs:
                continue
            # last item is expected to be the one used by the test item
            write_fixture(fixturedefs[-1])

    for session_item in session.items:
        write_item(session_item)
def showfixtures(config):
    """Entry point for the ``--fixtures`` option."""
    from _pytest.main import wrap_session

    return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
    """Collect the session and print every available fixture, grouped by module."""
    import _pytest.config

    session.perform_collect()
    curdir = py.path.local()
    tw = _pytest.config.create_terminal_writer(config)
    verbose = config.getvalue("verbose")

    fm = session._fixturemanager

    available = []
    seen = set()

    for argname, fixturedefs in fm._arg2fixturedefs.items():
        assert fixturedefs is not None
        if not fixturedefs:
            continue
        for fixturedef in fixturedefs:
            loc = getlocation(fixturedef.func, curdir)
            # De-duplicate fixtures registered under several collectors.
            if (fixturedef.argname, loc) in seen:
                continue
            seen.add((fixturedef.argname, loc))
            available.append(
                (
                    # Sort key: baseid length groups plugin-level before
                    # more deeply nested definitions.
                    len(fixturedef.baseid),
                    fixturedef.func.__module__,
                    curdir.bestrelpath(loc),
                    fixturedef.argname,
                    fixturedef,
                )
            )

    available.sort()
    currentmodule = None
    for baseid, module, bestrel, argname, fixturedef in available:
        if currentmodule != module:
            if not module.startswith("_pytest."):
                # Emit a separator header when we move to a new source module.
                tw.line()
                tw.sep("-", "fixtures defined from {}".format(module))
                currentmodule = module
        if verbose <= 0 and argname[0] == "_":
            # Private fixtures are hidden unless -v is given.
            continue
        tw.write(argname, green=True)
        if fixturedef.scope != "function":
            tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
        if verbose > 0:
            tw.write(" -- %s" % bestrel, yellow=True)
        tw.write("\n")
        loc = getlocation(fixturedef.func, curdir)
        doc = inspect.getdoc(fixturedef.func)
        if doc:
            write_docstring(tw, doc)
        else:
            tw.line(" {}: no docstring available".format(loc), red=True)
        tw.line()
def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None:
    """Write *doc* to *tw*, prefixing every line with *indent*."""
    for doc_line in doc.split("\n"):
        tw.write(indent + doc_line + "\n")
class Function(PyobjMixin, nodes.Item):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """

    # disable since functions handle it themselves
    _ALLOW_MARKERS = False

    def __init__(
        self,
        name,
        parent,
        args=None,
        config=None,
        callspec: Optional[CallSpec2] = None,
        callobj=NOTSET,
        keywords=None,
        session=None,
        fixtureinfo: Optional[FuncFixtureInfo] = None,
        originalname=None,
    ) -> None:
        super().__init__(name, parent, config=config, session=session)
        self._args = args
        if callobj is not NOTSET:
            # An explicit callable overrides the lazy lookup in _getobj().
            self.obj = callobj

        self.keywords.update(self.obj.__dict__)
        self.own_markers.extend(get_unpacked_marks(self.obj))
        if callspec:
            self.callspec = callspec
            # this is total hostile and a mess
            # keywords are broken by design by now
            # this will be redeemed later
            for mark in callspec.marks:
                # feel free to cry, this was broken for years before
                # and keywords cant fix it per design
                self.keywords[mark.name] = mark
            self.own_markers.extend(normalize_mark_list(callspec.marks))
        if keywords:
            self.keywords.update(keywords)

        # todo: this is a hell of a hack
        # https://github.com/pytest-dev/pytest/issues/4569
        self.keywords.update(
            {
                mark.name: True
                for mark in self.iter_markers()
                if mark.name not in self.keywords
            }
        )

        if fixtureinfo is None:
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self, self.obj, self.cls, funcargs=True
            )
        self._fixtureinfo = fixtureinfo  # type: FuncFixtureInfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

        #: original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname

    @classmethod
    def from_parent(cls, parent, **kw):  # todo: determine sound type limitations
        """
        The public constructor
        """
        return super().from_parent(parent=parent, **kw)

    def _initrequest(self):
        # Reset funcargs and build a fresh FixtureRequest for this item.
        self.funcargs = {}
        self._request = fixtures.FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        return getimfunc(self.obj)

    def _getobj(self):
        name = self.name
        i = name.find("[")  # parametrization
        if i != -1:
            # Strip the "[...]" parametrization suffix to get the attribute name.
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    @property
    def funcargnames(self):
        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
        warnings.warn(FUNCARGNAMES, stacklevel=2)
        return self.fixturenames

    def runtest(self) -> None:
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self) -> None:
        if isinstance(self.parent, Instance):
            # Give each test method a fresh instance of the test class.
            self.parent.newinstance()
            self.obj = self._getobj()
        fixtures.fillfixtures(self)

    def _prunetraceback(self, excinfo: ExceptionInfo) -> None:
        # Trim internal/irrelevant traceback entries unless --fulltrace is set.
        if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False):
            code = _pytest._code.Code(get_real_func(self.obj))
            path, firstlineno = code.path, code.firstlineno
            traceback = excinfo.traceback
            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(path=path)
                if ntraceback == traceback:
                    ntraceback = ntraceback.filter(filter_traceback)
                    if not ntraceback:
                        ntraceback = traceback

            excinfo.traceback = ntraceback.filter()
            # issue364: mark all but first and last frames to
            # only show a single-line message for each frame
            if self.config.getoption("tbstyle", "auto") == "auto":
                if len(excinfo.traceback) > 2:
                    for entry in excinfo.traceback[1:-1]:
                        entry.set_repr_style("short")

    def repr_failure(self, excinfo, outerr=None):
        assert outerr is None, "XXX outerr usage is deprecated"
        style = self.config.getoption("tbstyle", "auto")
        if style == "auto":
            style = "long"
        return self._repr_failure_py(excinfo, style=style)
class FunctionDefinition(Function):
    """
    internal hack until we get actual definition nodes instead of the
    crappy metafunc hack
    """

    def runtest(self) -> None:
        # A definition node must never be executed as a test.
        raise RuntimeError("function definitions are not supposed to be used")

    # Definitions have no setup either; reuse the guard above.
    setup = runtest
|
alfredodeza/pytest
|
src/_pytest/python.py
|
Python
|
mit
| 58,223
|
[
"VisIt"
] |
c7faf15a568c66b1240dbcb7c27891b4b842ceeb978d4cbe1d84d892b6462028
|
"""A quick DOM implementation.
Python's xml.dom is very slow. The xml.sax module is also slow (as it imports urllib2).
This is our light-weight version.
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from xml.parsers import expat
class Element(object):
    """An XML element.

    @ivar uri: the element's namespace
    @type uri: str
    @ivar name: the element's localName
    @type name: str
    @ivar attrs: the element's attributes (key is in the form [namespace " "] localName)
    @type attrs: {str: str}
    @ivar childNodes: children
    @type childNodes: [L{Element}]
    @ivar content: the text content
    @type content: str"""
    __slots__ = ['uri', 'name', 'attrs', 'childNodes', 'content']

    def __init__(self, uri, name, attrs):
        self.uri = uri
        self.name = name
        # Copy so later mutation of the caller's dict does not affect us.
        self.attrs = attrs.copy()
        self.content = None
        self.childNodes = []

    def __str__(self):
        pairs = ['{0}={1}'.format(key, self.attrs[key]) for key in self.attrs]
        opening = '<{%s}%s %s' % (self.uri, self.name, ' '.join(pairs))
        if self.childNodes:
            inner = '\n'.join(str(child) for child in self.childNodes)
            return opening + '>' + inner + ('</%s>' % self.name)
        if self.content:
            return opening + '>' + self.content + ('</%s>' % self.name)
        return opening + '/>'

    def getAttribute(self, name):
        """Return the attribute value for *name*, or None if absent."""
        return self.attrs.get(name, None)

    def toDOM(self, doc, prefixes):
        """Create a DOM Element for this qdom.Element.
        @param doc: document to use to create the element
        @return: the new element
        """
        elem = prefixes.createElementNS(doc, self.uri, self.name)
        for fullname, value in self.attrs.items():
            # Attribute keys are either "namespace localName" or plain localName.
            if ' ' in fullname:
                ns, localName = fullname.split(' ', 1)
            else:
                ns, localName = None, fullname
            prefixes.setAttributeNS(elem, ns, localName, value)
        for child in self.childNodes:
            elem.appendChild(child.toDOM(doc, prefixes))
        if self.content:
            elem.appendChild(doc.createTextNode(self.content))
        return elem
class QSAXhandler:
    """SAXHandler that builds a tree of L{Element}s"""

    def __init__(self):
        # Stack of open (not yet closed) elements; the root ends up in self.doc.
        self.stack = []
        # Text accumulated for the element currently being parsed.  Initialised
        # here so the handler methods are safe even if characters() or
        # endElementNS() is invoked before the first startElementNS() callback
        # (previously that raised AttributeError).
        self.contents = ''

    def startElementNS(self, fullname, attrs):
        # expat delivers namespaced names as "uri localName" (space-separated).
        split = fullname.split(' ', 1)
        if len(split) == 2:
            self.stack.append(Element(split[0], split[1], attrs))
        else:
            self.stack.append(Element(None, fullname, attrs))
        self.contents = ''

    def characters(self, data):
        self.contents += data

    def endElementNS(self, name):
        contents = self.contents.strip()
        self.stack[-1].content = contents
        self.contents = ''
        new = self.stack.pop()
        if self.stack:
            self.stack[-1].childNodes.append(new)
        else:
            # Popped the root element: expose the finished tree.
            self.doc = new
def parse(source):
    """Parse an XML stream into a tree of L{Element}s.
    @param source: data to parse
    @type source: file
    @return: the root
    @rtype: L{Element}"""
    handler = QSAXhandler()
    sax_parser = expat.ParserCreate(namespace_separator=' ')
    # Wire the expat callbacks to our tree-building handler.
    sax_parser.StartElementHandler = handler.startElementNS
    sax_parser.EndElementHandler = handler.endElementNS
    sax_parser.CharacterDataHandler = handler.characters
    sax_parser.ParseFile(source)
    return handler.doc
class Prefixes:
    """Keep track of namespace prefixes. Used when serialising a document.
    @since: 0.54
    """
    def __init__(self, default_ns):
        self.prefixes = {}
        self.default_ns = default_ns

    def get(self, ns):
        """Return the prefix for *ns*, allocating a fresh 'nsN' name on first use."""
        existing = self.prefixes.get(ns, None)
        if existing:
            return existing
        allocated = 'ns%d' % len(self.prefixes)
        self.prefixes[ns] = allocated
        return allocated

    def setAttributeNS(self, elem, uri, localName, value):
        """Set an attribute on *elem*, qualifying the name when *uri* is given."""
        if uri is None:
            elem.setAttributeNS(None, localName, value)
        else:
            elem.setAttributeNS(uri, self.get(uri) + ':' + localName, value)

    def createElementNS(self, doc, uri, localName):
        """Create an element on *doc*; the default namespace gets no prefix."""
        if uri == self.default_ns:
            return doc.createElementNS(uri, localName)
        return doc.createElementNS(uri, self.get(uri) + ':' + localName)
|
timdiels/0install
|
zeroinstall/injector/qdom.py
|
Python
|
lgpl-2.1
| 3,773
|
[
"VisIt"
] |
1714883cfab613e7907c6365d11b22b219c826115da978ea82fe40ef2c136cd0
|
# This file is part of Merlin/Arthur.
# Merlin/Arthur is the Copyright (C)2009,2010 of Elliot Rosemarine.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.conf.urls import include, url
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from sqlalchemy import and_
from sqlalchemy.sql import asc, desc
from Core.config import Config
from Core.db import session
from Core.maps import Updates, Galaxy, Planet, Alliance
from Arthur.context import menu, render
from Arthur.errors import page_not_found
from Arthur.loadable import loadable, load, require_user
# Bot nick and alliance name from the shared Core configuration; used below
# for menu labels and as template context.
bot = Config.get("Connection","nick")
name = Config.get("Alliance", "name")
@load
@require_user
class login(loadable):
    # Post-login landing view: members go straight to their dashboard,
    # everyone else is shown the public home page.
    def execute(self, request, user):
        # Imported here to avoid a circular import at module load time.
        from Arthur.views.dashboard import dashboard
        if user.is_member():
            return dashboard.execute(request, user, dashuser=user)
        else:
            return home.execute(request, user)
@menu("Home")
@load
class home(loadable):
    # Public landing page: renders top/roiding/roided/xp/bashed rankings for
    # planets and galaxies plus the top alliances, appending the viewer's own
    # planet/galaxy to each list when it would otherwise be missing.
    def execute(self, request, user):
        # Fall back to empty (transient) objects when the user has no planet.
        planet, galaxy = (user.planet, user.planet.galaxy,) if user.planet else (Planet(), Galaxy(),)
        planets = session.query(Planet).filter(Planet.active == True)
        galaxies = session.query(Galaxy).filter(Galaxy.active == True)
        alliances = session.query(Alliance).filter(Alliance.active == True)
        # dup(l, o, c): append o to list l when o is a persistent object
        # (o in session), condition c holds, and o is not already ranked in l.
        dup = lambda l,o,c=True: l+[o] if o in session and c and o not in l else l
        return render("index.tpl", request,
                      topplanets = dup(planets.order_by(asc(Planet.score_rank))[:20],
                                       planet),
                      roidingplanets = dup(planets.filter(Planet.size_growth > 0).order_by(desc(Planet.size_growth))[:5],
                                           planet, planet.size_growth > 0),
                      roidedplanets = dup(planets.filter(Planet.size_growth < 0).order_by(asc(Planet.size_growth))[:5],
                                          planet, planet.size_growth < 0),
                      xpplanets = dup(planets.filter(Planet.xp_growth > 0).order_by(desc(Planet.xp_growth))[:5],
                                      planet, planet.xp_growth > 0),
                      bashedplanets = dup(planets.filter(Planet.value_growth < 0).order_by(asc(Planet.value_growth))[:5],
                                          planet, planet.value_growth < 0),
                      topgalaxies = dup(galaxies.order_by(asc(Galaxy.score_rank))[:10],
                                        galaxy),
                      roidinggalaxies = dup(galaxies.filter(Galaxy.size_growth > 0).order_by(desc(Galaxy.size_growth))[:5],
                                            galaxy, galaxy.size_growth > 0),
                      roidedgalaxies = dup(galaxies.filter(Galaxy.size_growth < 0).order_by(asc(Galaxy.size_growth))[:5],
                                           galaxy, galaxy.size_growth < 0),
                      xpgalaxies = dup(galaxies.filter(Galaxy.xp_growth > 0).order_by(desc(Galaxy.xp_growth))[:5],
                                       galaxy, galaxy.xp_growth > 0),
                      bashedgalaxies = dup(galaxies.filter(Galaxy.value_growth < 0).order_by(asc(Galaxy.value_growth))[:5],
                                           galaxy, galaxy.value_growth < 0),
                      topalliances = alliances.order_by(asc(Alliance.score_rank))[:8],
                      )
@menu(name, "Intel", suffix = name)
@menu("Planetarion", "BCalc", suffix = "bcalc")
@menu("Planetarion", "Forums", suffix = "forums")
@menu("Planetarion", "Game", suffix = "game")
@load
@require_user
class links(loadable):
    """Redirect helper: maps a short link key from the URL to an external
    or internal destination and issues an HTTP redirect."""
    def execute(self, request, user, link):
        targets = {
            "game" : Config.get("URL","game"),
            "forums" : "http://pirate.planetarion.com",
            "bcalc" : Config.get("URL","bcalc"),
            name : reverse("alliance_members", kwargs={"name":name}),
        }
        destination = targets.get(link)
        # Unknown keys behave like any other missing page.
        if destination is None:
            return page_not_found(request)
        return HttpResponseRedirect(self.url(destination, user))
@menu(bot, "Guide to %s"%(Config.get("Connection","nick"),))
@load
@require_user
class guide(loadable):
    """Render the usage guide for the IRC bot."""
    def execute(self, request, user):
        nick = Config.get("Connection","nick")
        return render("guide.tpl", request, bot=nick, alliance=name)
# URL routes for the public home section of the site.
urlpatterns = [
    url(r'^(?:home|logout)?/?$', home),
    url(r'^login/', login),
    url(r'^guide/$', guide),
    url(r'^links/(?P<link>[^/]+)/$', links),
]
|
d7415/merlin
|
Arthur/views/home.py
|
Python
|
gpl-2.0
| 5,398
|
[
"Galaxy"
] |
9e39357e12c8fdce65d7d4e0d8259bda798476da09640eeae03b411c97c265bc
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import math
import os
import json
import collections
import itertools
from abc import ABCMeta, abstractmethod
import random
import warnings
from fnmatch import fnmatch
import re
try:
# New Py>=3.5 import
from math import gcd
except ImportError:
# Deprecated import from Py3.5 onwards.
from fractions import gcd
import six
import numpy as np
from pymatgen.core.operations import SymmOp
from pymatgen.core.lattice import Lattice
from pymatgen.core.periodic_table import Element, Specie, get_el_sp, DummySpecie
from monty.json import MSONable
from pymatgen.core.sites import Site, PeriodicSite
from pymatgen.core.bonds import CovalentBond, get_bond_length
from pymatgen.core.composition import Composition
from pymatgen.util.coord import get_angle, all_distances, \
lattice_points_in_supercell
from pymatgen.core.units import Mass, Length
from monty.io import zopen
"""
This module provides classes used to define a non-periodic molecule and a
periodic structure.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
class SiteCollection(six.with_metaclass(ABCMeta, collections.Sequence)):
    """
    Basic SiteCollection. Essentially a sequence of Sites or PeriodicSites.
    This serves as a base class for Molecule (a collection of Site, i.e., no
    periodicity) and Structure (a collection of PeriodicSites, i.e.,
    periodicity). Not meant to be instantiated directly.
    """
    # Tolerance in Angstrom for determining if sites are too close.
    DISTANCE_TOLERANCE = 0.5
    @property
    @abstractmethod
    def sites(self):
        """
        Returns a tuple of sites.
        """
        return
    @abstractmethod
    def get_distance(self, i, j):
        """
        Returns distance between sites at index i and j.
        Args:
            i (int): Index of first site
            j (int): Index of second site
        Returns:
            (float) Distance between sites at index i and index j.
        """
        return
    @property
    def distance_matrix(self):
        """
        Returns the distance matrix between all sites in the structure. For
        periodic structures, this is overwritten to return the nearest image
        distance.
        """
        return all_distances(self.cart_coords, self.cart_coords)
    @property
    def species(self):
        """
        Only works for ordered structures.
        Disordered structures will raise an AttributeError.
        Returns:
            ([Specie]) List of species at each site of the structure.
        """
        return [site.specie for site in self]
    @property
    def species_and_occu(self):
        """
        List of species and occupancies at each site of the structure.
        """
        return [site.species_and_occu for site in self]
    @property
    def ntypesp(self):
        """Number of types of atoms."""
        return len(self.types_of_specie)
    @property
    def types_of_specie(self):
        """
        List of types of specie. Only works for ordered structures.
        Disordered structures will raise an AttributeError.
        """
        # Cannot use set since we want a deterministic algorithm.
        types = []
        for site in self:
            if site.specie not in types:
                types.append(site.specie)
        return types
    def group_by_types(self):
        """Iterate over sites grouped by specie type (in first-seen order)."""
        for t in self.types_of_specie:
            for site in self:
                if site.specie == t:
                    yield site
    def indices_from_symbol(self, symbol):
        """
        Returns a tuple with the sequential indices of the sites
        that contain an element with the given chemical symbol.
        """
        return tuple((i for i, specie in enumerate(self.species)
                      if specie.symbol == symbol))
    @property
    def symbol_set(self):
        """
        Tuple with the set of chemical symbols.
        Note that len(symbol_set) == len(types_of_specie)
        """
        return tuple((specie.symbol for specie in self.types_of_specie))
    @property
    def atomic_numbers(self):
        """List of atomic numbers."""
        return [site.specie.number for site in self]
    @property
    def site_properties(self):
        """
        Returns the site properties as a dict of sequences. E.g.,
        {"magmom": (5,-5), "charge": (-4,4)}.
        """
        props = {}
        prop_keys = set()
        # Union of keys over all sites first, so sites missing a property
        # get an explicit None placeholder in the per-key sequence.
        for site in self:
            prop_keys.update(site.properties.keys())
        for k in prop_keys:
            props[k] = [site.properties.get(k, None) for site in self]
        return props
    def __contains__(self, site):
        return site in self.sites
    def __iter__(self):
        return self.sites.__iter__()
    def __getitem__(self, ind):
        return self.sites[ind]
    def __len__(self):
        return len(self.sites)
    def __hash__(self):
        # for now, just use the composition hash code.
        return self.composition.__hash__()
    @property
    def num_sites(self):
        """
        Number of sites.
        """
        return len(self)
    @property
    def cart_coords(self):
        """
        Returns a np.array of the cartesian coordinates of sites in the
        structure.
        """
        return np.array([site.coords for site in self])
    @property
    def formula(self):
        """
        (str) Returns the formula.
        """
        return self.composition.formula
    @property
    def composition(self):
        """
        (Composition) Returns the composition
        """
        # Accumulate fractional occupancies per species over all sites.
        elmap = collections.defaultdict(float)
        for site in self:
            for species, occu in site.species_and_occu.items():
                elmap[species] += occu
        return Composition(elmap)
    @property
    def charge(self):
        """
        Returns the net charge of the structure based on oxidation states. If
        Elements are found, a charge of 0 is assumed.
        """
        charge = 0
        for site in self:
            for specie, amt in site.species_and_occu.items():
                # Plain Elements have no oxi_state attribute -> contribute 0.
                charge += getattr(specie, "oxi_state", 0) * amt
        return charge
    @property
    def is_ordered(self):
        """
        Checks if structure is ordered, meaning no partial occupancies in any
        of the sites.
        """
        return all((site.is_ordered for site in self))
    def get_angle(self, i, j, k):
        """
        Returns angle specified by three sites, with the vertex at site j.
        Args:
            i (int): Index of first site.
            j (int): Index of second site.
            k (int): Index of third site.
        Returns:
            (float) Angle in degrees.
        """
        v1 = self[i].coords - self[j].coords
        v2 = self[k].coords - self[j].coords
        return get_angle(v1, v2, units="degrees")
    def get_dihedral(self, i, j, k, l):
        """
        Returns dihedral angle specified by four sites.
        Args:
            i (int): Index of first site
            j (int): Index of second site
            k (int): Index of third site
            l (int): Index of fourth site
        Returns:
            (float) Dihedral angle in degrees.
        """
        v1 = self[k].coords - self[l].coords
        v2 = self[j].coords - self[k].coords
        v3 = self[i].coords - self[j].coords
        v23 = np.cross(v2, v3)
        v12 = np.cross(v1, v2)
        return math.degrees(math.atan2(np.linalg.norm(v2) * np.dot(v1, v23),
                                       np.dot(v12, v23)))
    def is_valid(self, tol=DISTANCE_TOLERANCE):
        """
        True if SiteCollection does not contain atoms that are too close
        together. Note that the distance definition is based on type of
        SiteCollection. Cartesian distances are used for non-periodic
        Molecules, while PBC is taken into account for periodic structures.
        Args:
            tol (float): Distance tolerance. Defaults to
                DISTANCE_TOLERANCE (0.5 Angstrom).
        Returns:
            (bool) True if SiteCollection does not contain atoms that are too
            close together.
        """
        if len(self.sites) == 1:
            # A single site can never clash with another site.
            return True
        # Strict upper triangle holds every distinct pair exactly once.
        all_dists = self.distance_matrix[np.triu_indices(len(self), 1)]
        return bool(np.min(all_dists) > tol)
    @abstractmethod
    def to(self, fmt=None, filename=None):
        """
        Generates well-known string representations of SiteCollections (e.g.,
        molecules / structures). Should return a string type or write to a file.
        """
        pass
    @classmethod
    @abstractmethod
    def from_str(cls, input_string, fmt):
        """
        Reads in SiteCollection from a string.
        """
        pass
    @classmethod
    @abstractmethod
    def from_file(cls, filename):
        """
        Reads in SiteCollection from a filename.
        """
        pass
class IStructure(SiteCollection, MSONable):
"""
Basic immutable Structure object with periodicity. Essentially a sequence
of PeriodicSites having a common lattice. IStructure is made to be
(somewhat) immutable so that they can function as keys in a dict. To make
modifications, use the standard Structure object instead. Structure
extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a
structure is equivalent to going through the sites in sequence.
"""
    def __init__(self, lattice, species, coords, charge=None,
                 validate_proximity=False, to_unit_cell=False,
                 coords_are_cartesian=False, site_properties=None):
        """
        Create a periodic structure.
        Args:
            lattice (Lattice/3x3 array): The lattice, either as a
                :class:`pymatgen.core.lattice.Lattice` or
                simply as any 2D array. Each row should correspond to a lattice
                vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
                lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
            species ([Specie]): Sequence of species on each site. Can take in
                flexible input, including:
                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.
                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords (Nx3 array): list of fractional/cartesian coordinates of
                each species.
            charge (int): overall charge of the structure. Defaults to behavior
                in SiteCollection where total charge is the sum of the oxidation states
            validate_proximity (bool): Whether to check if there are sites
                that are less than 0.01 Ang apart. Defaults to False.
            to_unit_cell (bool): Whether to map all sites into the unit cell.
                Defaults to False.
            coords_are_cartesian (bool): Set to True if you are providing
                coordinates in cartesian coordinates. Defaults to False.
            site_properties (dict): Properties associated with the sites as a
                dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
                have to be the same length as the atomic species and
                fractional_coords. Defaults to None for no properties.
        Raises:
            StructureError: if species/coords lengths differ, or (with
                validate_proximity=True) if any sites are too close together.
        """
        if len(species) != len(coords):
            raise StructureError("The list of atomic species must be of the"
                                 " same length as the list of fractional"
                                 " coordinates.")
        if isinstance(lattice, Lattice):
            self._lattice = lattice
        else:
            self._lattice = Lattice(lattice)
        # Build one PeriodicSite per species entry, attaching this site's
        # slice of every per-site property sequence.
        sites = []
        for i in range(len(species)):
            prop = None
            if site_properties:
                prop = {k: v[i]
                        for k, v in site_properties.items()}
            sites.append(
                PeriodicSite(species[i], coords[i], self._lattice,
                             to_unit_cell,
                             coords_are_cartesian=coords_are_cartesian,
                             properties=prop))
        self._sites = tuple(sites)
        # NOTE(review): the message says 0.01 Angstrom, but is_valid() defaults
        # to DISTANCE_TOLERANCE (0.5 A) -- confirm the intended wording.
        if validate_proximity and not self.is_valid():
            raise StructureError(("Structure contains sites that are ",
                                  "less than 0.01 Angstrom apart!"))
        self._charge = charge
    @classmethod
    def from_sites(cls, sites, charge=None, validate_proximity=False,
                   to_unit_cell=False):
        """
        Convenience constructor to make a Structure from a list of sites.
        Args:
            sites: Sequence of PeriodicSites. Sites must have the same
                lattice.
            charge: Overall charge of the structure. Defaults to None, in
                which case the charge is computed from oxidation states.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 0.01 Ang apart. Defaults to False.
            to_unit_cell (bool): Whether to translate sites into the unit
                cell.
        Returns:
            (Structure) Note that missing properties are set as None.
        """
        if len(sites) < 1:
            raise ValueError("You need at least one site to construct a %s" %
                             cls)
        if (not validate_proximity) and (not to_unit_cell):
            # This is not really a good solution, but if we are not changing
            # the sites, initializing an empty structure and setting _sites
            # to be sites is much faster than doing the full initialization.
            lattice = sites[0].lattice
            for s in sites[1:]:
                if s.lattice != lattice:
                    raise ValueError("Sites must belong to the same lattice")
            s_copy = cls(lattice=lattice, charge=charge, species=[], coords=[])
            s_copy._sites = list(sites)
            return s_copy
        # Slow path: gather per-site properties (padding missing keys with
        # None) and rebuild every site via the normal constructor.
        prop_keys = []
        props = {}
        lattice = None
        for i, site in enumerate(sites):
            if not lattice:
                lattice = site.lattice
            elif site.lattice != lattice:
                raise ValueError("Sites must belong to the same lattice")
            for k, v in site.properties.items():
                if k not in prop_keys:
                    prop_keys.append(k)
                    props[k] = [None] * len(sites)
                props[k][i] = v
        for k, v in props.items():
            if any((vv is None for vv in v)):
                warnings.warn("Not all sites have property %s. Missing values "
                              "are set to None." % k)
        return cls(lattice, [site.species_and_occu for site in sites],
                   [site.frac_coords for site in sites],
                   charge=charge,
                   site_properties=props,
                   validate_proximity=validate_proximity,
                   to_unit_cell=to_unit_cell)
@classmethod
def from_spacegroup(cls, sg, lattice, species, coords, site_properties=None,
coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a spacegroup. Note that only symmetrically
distinct species and coords should be provided. All equivalent sites
are generated from the spacegroup operations.
Args:
sg (str/int): The spacegroup. If a string, it will be interpreted
as one of the notations supported by
pymatgen.symmetry.groups.Spacegroup. E.g., "R-3c" or "Fm-3m".
If an int, it will be interpreted as an international number.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Defaults to None for no properties.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.symmetry.groups import SpaceGroup
try:
i = int(sg)
sgp = SpaceGroup.from_int_number(i)
except ValueError:
sgp = SpaceGroup(sg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not sgp.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
sgp.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
frac_coords = np.array(coords, dtype=np.float) \
if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
props = {} if site_properties is None else site_properties
all_sp = []
all_coords = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c) in enumerate(zip(species, frac_coords)):
cc = sgp.get_orbit(c, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
for k, v in props.items():
all_site_properties[k].extend([v[i]] * len(cc))
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
@classmethod
def from_magnetic_spacegroup(
cls, msg, lattice, species, coords, site_properties,
transform_setting=None, coords_are_cartesian=False, tol=1e-5):
"""
Generate a structure using a magnetic spacegroup. Note that only
symmetrically distinct species, coords and magmoms should be provided.]
All equivalent sites are generated from the spacegroup operations.
Args:
msg (str/list/:class:`pymatgen.symmetry.maggroups.MagneticSpaceGroup`):
The magnetic spacegroup.
If a string, it will be interpreted as one of the notations
supported by MagneticSymmetryGroup, e.g., "R-3'c" or "Fm'-3'm".
If a list of two ints, it will be interpreted as the number of
the spacegroup in its Belov, Neronova and Smirnova (BNS) setting.
lattice (Lattice/3x3 array): The lattice, either as a
:class:`pymatgen.core.lattice.Lattice` or
simply as any 2D array. Each row should correspond to a lattice
vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
Note that no attempt is made to check that the lattice is
compatible with the spacegroup specified. This may be
introduced in a future version.
species ([Specie]): Sequence of species on each site. Can take in
flexible input, including:
i. A sequence of element / specie specified either as string
symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
e.g., (3, 56, ...) or actual Element or Specie objects.
ii. List of dict of elements/species and occupancies, e.g.,
[{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
disordered structures.
coords (Nx3 array): list of fractional/cartesian coordinates of
each species.
site_properties (dict): Properties associated with the sites as a
dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
have to be the same length as the atomic species and
fractional_coords. Unlike Structure.from_spacegroup(),
this argument is mandatory, since magnetic moment information
has to be included. Note that the *direction* of the supplied
magnetic moment relative to the crystal is important, even if
the resulting structure is used for collinear calculations.
coords_are_cartesian (bool): Set to True if you are providing
coordinates in cartesian coordinates. Defaults to False.
tol (float): A fractional tolerance to deal with numerical
precision issues in determining if orbits are the same.
"""
from pymatgen.electronic_structure.core import Magmom
from pymatgen.symmetry.maggroups import MagneticSpaceGroup
if 'magmom' not in site_properties:
raise ValueError('Magnetic moments have to be defined.')
else:
magmoms = [Magmom(m) for m in site_properties['magmom']]
if not isinstance(msg, MagneticSpaceGroup):
msg = MagneticSpaceGroup(msg)
if isinstance(lattice, Lattice):
latt = lattice
else:
latt = Lattice(lattice)
if not msg.is_compatible(latt):
raise ValueError(
"Supplied lattice with parameters %s is incompatible with "
"supplied spacegroup %s!" % (latt.lengths_and_angles,
msg.symbol)
)
if len(species) != len(coords):
raise ValueError(
"Supplied species and coords lengths (%d vs %d) are "
"different!" % (len(species), len(coords))
)
if len(species) != len(magmoms):
raise ValueError(
"Supplied species and magmom lengths (%d vs %d) are "
"different!" % (len(species), len(magmoms))
)
frac_coords = coords if not coords_are_cartesian else \
lattice.get_fractional_coords(coords)
all_sp = []
all_coords = []
all_magmoms = []
all_site_properties = collections.defaultdict(list)
for i, (sp, c, m) in enumerate(zip(species, frac_coords, magmoms)):
cc, mm = msg.get_orbit(c, m, tol=tol)
all_sp.extend([sp] * len(cc))
all_coords.extend(cc)
all_magmoms.extend(mm)
for k, v in site_properties.items():
if k != 'magmom':
all_site_properties[k].extend([v[i]] * len(cc))
all_site_properties['magmom'] = all_magmoms
return cls(latt, all_sp, all_coords,
site_properties=all_site_properties)
@property
def charge(self):
"""
Overall charge of the structure
"""
if self._charge is None:
return super(IStructure, self).charge
else:
return self._charge
@property
def distance_matrix(self):
"""
Returns the distance matrix between all sites in the structure. For
periodic structures, this should return the nearest image distance.
"""
return self.lattice.get_all_distances(self.frac_coords,
self.frac_coords)
    @property
    def sites(self):
        """
        Returns the tuple of PeriodicSites in the Structure.
        """
        return self._sites
    @property
    def lattice(self):
        """
        Lattice of the structure, as a
        :class:`pymatgen.core.lattice.Lattice` object.
        """
        return self._lattice
@property
def density(self):
"""
Returns the density in units of g/cc
"""
m = Mass(self.composition.weight, "amu")
return m.to("g") / (self.volume * Length(1, "ang").to("cm") ** 3)
    def get_space_group_info(self, symprec=1e-2, angle_tolerance=5.0):
        """
        Convenience method to quickly get the spacegroup of a structure.
        Args:
            symprec (float): Same definition as in SpacegroupAnalyzer.
                Defaults to 1e-2.
            angle_tolerance (float): Same definition as in SpacegroupAnalyzer.
                Defaults to 5 degrees.
        Returns:
            (spacegroup_symbol, international_number) tuple.
        """
        # Import within method needed to avoid cyclic dependency.
        from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
        a = SpacegroupAnalyzer(self, symprec=symprec,
                               angle_tolerance=angle_tolerance)
        return a.get_space_group_symbol(), a.get_space_group_number()
    def matches(self, other, **kwargs):
        """
        Check whether this structure is similar to another structure.
        Basically a convenience method to call structure matching fitting.
        Args:
            other (IStructure/Structure): Another structure.
            **kwargs: Same **kwargs as in
                :class:`pymatgen.analysis.structure_matcher.StructureMatcher`.
        Returns:
            (bool) True is the structures are similar under some affine
            transformation.
        """
        from pymatgen.analysis.structure_matcher import StructureMatcher
        m = StructureMatcher(**kwargs)
        # Both operands are normalized to mutable Structure objects so that
        # subclasses and IStructure compare on equal footing.
        return m.fit(Structure.from_sites(self), Structure.from_sites(other))
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.lattice != other.lattice:
return False
for site in self:
if site not in other:
return False
return True
    def __ne__(self, other):
        # Explicit inverse of __eq__ (needed for Python 2 compatibility).
        return not self.__eq__(other)
    def __hash__(self):
        # For now, just use the composition hash code. Weaker than __eq__
        # (equal compositions can differ in sites), but consistent with it.
        return self.composition.__hash__()
def __mul__(self, scaling_matrix):
"""
Makes a supercell. Allowing to have sites outside the unit cell
Args:
scaling_matrix: A scaling matrix for transforming the lattice
vectors. Has to be all integers. Several options are possible:
a. A full 3x3 scaling matrix defining the linear combination
the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
1]] generates a new structure with lattice vectors a' =
2a + b, b' = 3b, c' = c where a, b, and c are the lattice
vectors of the original structure.
b. An sequence of three scaling factors. E.g., [2, 1, 1]
specifies that the supercell should have dimensions 2a x b x
c.
c. A number, which simply scales all lattice vectors by the
same factor.
Returns:
Supercell structure. Note that a Structure is always returned,
even if the input structure is a subclass of Structure. This is
to avoid different arguments signatures from causing problems. If
you prefer a subclass to return its own type, you need to override
this method in the subclass.
"""
scale_matrix = np.array(scaling_matrix, np.int16)
if scale_matrix.shape != (3, 3):
scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)
new_lattice = Lattice(np.dot(scale_matrix, self._lattice.matrix))
f_lat = lattice_points_in_supercell(scale_matrix)
c_lat = new_lattice.get_cartesian_coords(f_lat)
new_sites = []
for site in self:
for v in c_lat:
s = PeriodicSite(site.species_and_occu, site.coords + v,
new_lattice, properties=site.properties,
coords_are_cartesian=True, to_unit_cell=False)
new_sites.append(s)
new_charge = self._charge * np.linalg.det(scale_matrix) if self._charge else None
return Structure.from_sites(new_sites,charge=new_charge)
    def __rmul__(self, scaling_matrix):
        """
        Similar to __mul__ to preserve commutativeness.
        """
        # Delegate so that `3 * s` behaves exactly like `s * 3`.
        return self.__mul__(scaling_matrix)
    @property
    def frac_coords(self):
        """
        Fractional coordinates as a Nx3 numpy array, one row per site
        in site order.
        """
        return np.array([site.frac_coords for site in self._sites])
    @property
    def volume(self):
        """
        Returns the volume of the structure (delegates to the lattice).
        """
        return self._lattice.volume
    def get_distance(self, i, j, jimage=None):
        """
        Get distance between site i and j assuming periodic boundary
        conditions. If the index jimage of atom j is not specified, the
        nearest periodic image to atom i is selected. If jimage is
        specified, the distance between atom i and that particular image
        of atom j is returned.
        Args:
            i (int): Index of first site
            j (int): Index of second site
            jimage: Number of lattice translations in each lattice direction.
                Default is None for nearest image.
        Returns:
            distance
        """
        return self[i].distance(self[j], jimage)
    def get_sites_in_sphere(self, pt, r, include_index=False):
        """
        Find all sites within a sphere from the point. This includes sites
        in other periodic images.
        Algorithm:
        1. place sphere of radius r in crystal and determine minimum supercell
           (parallelpiped) which would contain a sphere of radius r. for this
           we need the projection of a_1 on a unit vector perpendicular
           to a_2 & a_3 (i.e. the unit vector in the direction b_1) to
           determine how many a_1"s it will take to contain the sphere.
           Nxmax = r * length_of_b_1 / (2 Pi)
        2. keep points falling within r.
        Args:
            pt (3x1 array): cartesian coordinates of center of sphere.
            r (float): Radius of sphere.
            include_index (bool): Whether the non-supercell site index
                is included in the returned data
        Returns:
            [(site, dist) ...] since most of the time, subsequent processing
            requires the distance. (site, dist, index) tuples if
            include_index is True.
        """
        # Fold all sites into the unit cell before the sphere search.
        site_fcoords = np.mod(self.frac_coords, 1)
        neighbors = []
        for fcoord, dist, i in self._lattice.get_points_in_sphere(
                site_fcoords, pt, r):
            # Rebuild a site at the matched periodic image, carrying over the
            # original site's species and properties.
            nnsite = PeriodicSite(self[i].species_and_occu,
                                  fcoord, self._lattice,
                                  properties=self[i].properties)
            neighbors.append((nnsite, dist) if not include_index
                             else (nnsite, dist, i))
        return neighbors
def get_neighbors(self, site, r, include_index=False):
"""
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site:
site, which is the center of the sphere.
r:
radius of sphere.
include_index:
boolean that determines whether the non-supercell site index
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
"""
nn = self.get_sites_in_sphere(site.coords, r,
include_index=include_index)
return [d for d in nn if site != d[0]]
    def get_all_neighbors(self, r, include_index=False):
        """
        Get neighbors for each atom in the unit cell, out to a distance r
        Returns a list of list of neighbors for each site in structure.
        Use this method if you are planning on looping over all sites in the
        crystal. If you only want neighbors for a particular site, use the
        method get_neighbors as it may not have to build such a large supercell
        However if you are looping over all sites in the crystal, this method
        is more efficient since it only performs one pass over a large enough
        supercell to contain all possible atoms out to a distance r.
        The return type is a [(site, dist) ...] since most of the time,
        subsequent processing requires the distance.
        Args:
            r (float): Radius of sphere.
            include_index (bool): Whether to include the non-supercell site
                in the returned data
        Returns:
            A list of a list of nearest neighbors for each site, i.e.,
            [[(site, dist, index) ...], ..]
            Index only supplied if include_index = True.
            The index is the index of the site in the original (non-supercell)
            structure. This is needed for ewaldmatrix by keeping track of which
            sites contribute to the ewald sum.
        """
        # Use same algorithm as get_sites_in_sphere to determine supercell but
        # loop over all atoms in crystal
        recp_len = np.array(self.lattice.reciprocal_lattice.abc)
        # Number of lattice images needed in each direction to cover r,
        # with a small pad (0.15) for sites near cell boundaries.
        maxr = np.ceil((r + 0.15) * recp_len / (2 * math.pi))
        nmin = np.floor(np.min(self.frac_coords, axis=0)) - maxr
        nmax = np.ceil(np.max(self.frac_coords, axis=0)) + maxr
        all_ranges = [np.arange(x, y) for x, y in zip(nmin, nmax)]
        latt = self._lattice
        neighbors = [list() for _ in range(len(self._sites))]
        all_fcoords = np.mod(self.frac_coords, 1)
        coords_in_cell = latt.get_cartesian_coords(all_fcoords)
        site_coords = self.cart_coords
        indices = np.arange(len(self))
        for image in itertools.product(*all_ranges):
            # Translate every in-cell site by this lattice image and compute
            # all image-site-to-original-site distances in one vectorized call.
            coords = latt.get_cartesian_coords(image) + coords_in_cell
            all_dists = all_distances(coords, site_coords)
            # The 1e-8 lower bound excludes each site's own zero-distance image.
            all_within_r = np.bitwise_and(all_dists <= r, all_dists > 1e-8)
            for (j, d, within_r) in zip(indices, all_dists, all_within_r):
                nnsite = PeriodicSite(self[j].species_and_occu, coords[j],
                                      latt, properties=self[j].properties,
                                      coords_are_cartesian=True)
                for i in indices[within_r]:
                    item = (nnsite, d[i], j) if include_index else (
                        nnsite, d[i])
                    neighbors[i].append(item)
        return neighbors
def get_neighbors_in_shell(self, origin, r, dr, include_index=False):
"""
Returns all sites in a shell centered on origin (coords) between radii
r-dr and r+dr.
Args:
origin (3x1 array): Cartesian coordinates of center of sphere.
r (float): Inner radius of shell.
dr (float): Width of shell.
include_index (bool): Whether to include the non-supercell site
in the returned data
Returns:
[(site, dist, index) ...] since most of the time, subsequent
processing
requires the distance. Index only supplied if include_index = True.
The index is the index of the site in the original (non-supercell)
structure. This is needed for ewaldmatrix by keeping track of which
sites contribute to the ewald sum.
"""
outer = self.get_sites_in_sphere(origin, r + dr,
include_index=include_index)
inner = r - dr
return [t for t in outer if t[1] > inner]
    def get_sorted_structure(self, key=None, reverse=False):
        """
        Get a sorted copy of the structure. The parameters have the same
        meaning as in list.sort. By default, sites are sorted by the
        electronegativity of the species.
        Args:
            key: Specifies a function of one argument that is used to extract
                a comparison key from each list element: key=str.lower. The
                default value is None (compare the elements directly).
            reverse (bool): If set to True, then the list elements are sorted
                as if each comparison were reversed.
        """
        # With key=None, the sites' own ordering is used (per the docstring,
        # this sorts by species electronegativity).
        sites = sorted(self, key=key, reverse=reverse)
        return self.__class__.from_sites(sites, charge=self._charge)
    def get_reduced_structure(self, reduction_algo="niggli"):
        """
        Get a reduced structure.
        Args:
            reduction_algo (str): The lattice reduction algorithm to use.
                Currently supported options are "niggli" or "LLL".
        Returns:
            A structure with a reduced lattice, or an unmodified copy if the
            lattice is already reduced.
        """
        if reduction_algo == "niggli":
            reduced_latt = self._lattice.get_niggli_reduced_lattice()
        elif reduction_algo == "LLL":
            reduced_latt = self._lattice.get_lll_reduced_lattice()
        else:
            raise ValueError("Invalid reduction algo : {}"
                             .format(reduction_algo))
        if reduced_latt != self.lattice:
            # Rebuild from cartesian coords so sites keep absolute positions,
            # folding them into the new reduced cell.
            return self.__class__(reduced_latt, self.species_and_occu,
                                  self.cart_coords,
                                  coords_are_cartesian=True, to_unit_cell=True,
                                  site_properties=self.site_properties, charge=self._charge)
        else:
            return self.copy()
def copy(self, site_properties=None, sanitize=False):
    """
    Convenience method to get a copy of the structure, with options to add
    site properties.

    Args:
        site_properties (dict): Properties to add or override. The
            properties are specified in the same way as the constructor,
            i.e., as a dict of the form {property: [values]}. The
            properties should be in the order of the *original* structure
            if you are performing sanitization.
        sanitize (bool): If True, this method will return a sanitized
            structure. Sanitization performs a few things: (i) The sites are
            sorted by electronegativity, (ii) a LLL lattice reduction is
            carried out to obtain a relatively orthogonalized cell,
            (iii) all fractional coords for sites are mapped into the
            unit cell.

    Returns:
        A copy of the Structure, with optionally new site_properties and
        optionally sanitized.
    """
    if (not site_properties) and (not sanitize):
        # This is not really a good solution, but if we are not changing
        # the site_properties or sanitizing, initializing an empty
        # structure and setting _sites to be sites is much faster (~100x)
        # than doing the full initialization.
        s_copy = self.__class__(lattice=self._lattice, species=[],
                                charge=self._charge, coords=[])
        s_copy._sites = list(self._sites)
        return s_copy
    props = self.site_properties
    if site_properties:
        # Caller-supplied values override the existing properties.
        props.update(site_properties)
    if not sanitize:
        return self.__class__(self._lattice,
                              self.species_and_occu,
                              self.frac_coords,
                              charge=self._charge,
                              site_properties=props)
    else:
        # Sanitize: LLL-reduce the lattice, remap all fractional coords
        # into the unit cell, then sort (default order: electronegativity).
        reduced_latt = self._lattice.get_lll_reduced_lattice()
        new_sites = []
        for i, site in enumerate(self):
            frac_coords = reduced_latt.get_fractional_coords(site.coords)
            site_props = {}
            for p in props:
                # Properties are matched to sites by original order, which
                # is why overriding props must follow the original layout.
                site_props[p] = props[p][i]
            new_sites.append(PeriodicSite(site.species_and_occu,
                                          frac_coords, reduced_latt,
                                          to_unit_cell=True,
                                          properties=site_props))
        new_sites = sorted(new_sites)
        return self.__class__.from_sites(new_sites, charge=self._charge)
def interpolate(self, end_structure, nimages=10,
                interpolate_lattices=False, pbc=True, autosort_tol=0):
    """
    Interpolate between this structure and end_structure. Useful for
    construction of NEB inputs.

    Args:
        end_structure (Structure): structure to interpolate between this
            structure and end.
        nimages (int): No. of interpolation images. Defaults to 10 images.
        interpolate_lattices (bool): Whether to interpolate the lattices.
            Interpolates the lengths and angles (rather than the matrix)
            so orientation may be affected.
        pbc (bool): Whether to use periodic boundary conditions to find
            the shortest path between endpoints.
        autosort_tol (float): A distance tolerance in angstrom in
            which to automatically sort end_structure to match to the
            closest points in this particular structure. This is usually
            what you want in a NEB calculation. 0 implies no sorting.
            Otherwise, a 0.5 value usually works pretty well.

    Returns:
        List of interpolated structures. The starting and ending
        structures included as the first and last structures respectively.
        A total of (nimages + 1) structures are returned.

    Raises:
        ValueError: If the structures have different lengths, different
            lattices (when not interpolating lattices), different
            species, or if autosort cannot find an unambiguous mapping.
    """
    # Check length of structures
    if len(self) != len(end_structure):
        raise ValueError("Structures have different lengths!")

    if not (interpolate_lattices or self.lattice == end_structure.lattice):
        raise ValueError("Structures with different lattices!")

    # Check that both structures have the same species
    for i in range(len(self)):
        if self[i].species_and_occu != end_structure[i].species_and_occu:
            raise ValueError("Different species!\nStructure 1:\n" +
                             str(self) + "\nStructure 2\n" +
                             str(end_structure))

    start_coords = np.array(self.frac_coords)
    end_coords = np.array(end_structure.frac_coords)

    if autosort_tol:
        # Pair each start site with the unique end site that lies within
        # autosort_tol of it; anything ambiguous is collected for later.
        dist_matrix = self.lattice.get_all_distances(start_coords,
                                                     end_coords)
        site_mappings = collections.defaultdict(list)
        unmapped_start_ind = []
        for i, row in enumerate(dist_matrix):
            ind = np.where(row < autosort_tol)[0]
            if len(ind) == 1:
                site_mappings[i].append(ind[0])
            else:
                unmapped_start_ind.append(i)

        if len(unmapped_start_ind) > 1:
            raise ValueError("Unable to reliably match structures "
                             "with auto_sort_tol = %f. unmapped indices "
                             "= %s" % (autosort_tol, unmapped_start_ind))

        sorted_end_coords = np.zeros_like(end_coords)
        matched = []
        for i, j in site_mappings.items():
            if len(j) > 1:
                raise ValueError("Unable to reliably match structures "
                                 "with auto_sort_tol = %f. More than one "
                                 "site match!" % autosort_tol)
            sorted_end_coords[i] = end_coords[j[0]]
            matched.append(j[0])

        if len(unmapped_start_ind) == 1:
            # Exactly one leftover on each side: pair them by elimination.
            i = unmapped_start_ind[0]
            j = list(set(range(len(start_coords))).difference(matched))[0]
            sorted_end_coords[i] = end_coords[j]

        end_coords = sorted_end_coords

    vec = end_coords - start_coords
    if pbc:
        # Use the shortest periodic image of each displacement vector.
        vec -= np.round(vec)
    sp = self.species_and_occu
    structs = []

    if interpolate_lattices:
        # interpolate lattice matrices using polar decomposition
        from scipy.linalg import polar
        # u is unitary (rotation), p is stretch
        u, p = polar(np.dot(end_structure.lattice.matrix.T,
                            np.linalg.inv(self.lattice.matrix.T)))
        lvec = p - np.identity(3)
        lstart = self.lattice.matrix.T

    for x in range(nimages + 1):
        if interpolate_lattices:
            # Linearly ramp the stretch component of the transform.
            l_a = np.dot(np.identity(3) + x / nimages * lvec, lstart).T
            l = Lattice(l_a)
        else:
            l = self.lattice
        # NOTE(review): ``x / nimages`` assumes true division (Python 3 or
        # a __future__ import at module level) -- confirm.
        fcoords = start_coords + x / nimages * vec
        structs.append(self.__class__(l, sp, fcoords,
                                      site_properties=self.site_properties))
    return structs
def get_primitive_structure(self, tolerance=0.25, use_site_props=False,
                            constrain_latt=[False, False, False, False,
                                            False, False]):
    """
    This finds a smaller unit cell than the input. Sometimes it doesn't
    find the smallest possible one, so this method is recursively called
    until it is unable to find a smaller cell.

    NOTE: if the tolerance is greater than 1/2 the minimum inter-site
    distance in the primitive cell, the algorithm will reject this lattice.

    NOTE(review): ``constrain_latt`` is a mutable default argument. It is
    only read here (never mutated), so this is currently harmless, but a
    ``None`` sentinel would be the safer idiom -- confirm before changing.

    Args:
        tolerance (float), Angstroms: Tolerance for each coordinate of a
            particular site. For example, [0.1, 0, 0.1] in cartesian
            coordinates will be considered to be on the same coordinates
            as [0, 0, 0] for a tolerance of 0.25. Defaults to 0.25.
        use_site_props (bool): Whether to account for site properties in
            differentiating sites.
        constrain_latt (list of bools): Determines which lattice constant
            we want to preserve (True), if any. Order of bools in the list
            corresponds to [a, b, c, alpha, beta, gamma].

    Returns:
        The most primitive structure found.
    """

    def site_label(site):
        # Grouping key for "equivalent" sites; optionally folds in site
        # properties so that differing properties break the grouping.
        if not use_site_props:
            return site.species_string
        else:
            d = [site.species_string]
            for k in sorted(site.properties.keys()):
                d.append(k + "=" + str(site.properties[k]))
            return ", ".join(d)

    # group sites by species string
    sites = sorted(self._sites, key=site_label)

    grouped_sites = [
        list(a[1])
        for a in itertools.groupby(sites, key=site_label)]
    grouped_fcoords = [np.array([s.frac_coords for s in g])
                       for g in grouped_sites]

    # min_vecs are approximate periodicities of the cell. The exact
    # periodicities from the supercell matrices are checked against these
    # first
    min_fcoords = min(grouped_fcoords, key=lambda x: len(x))
    min_vecs = min_fcoords - min_fcoords[0]

    # fractional tolerance in the supercell
    super_ftol = np.divide(tolerance, self.lattice.abc)
    super_ftol_2 = super_ftol * 2

    def pbc_coord_intersection(fc1, fc2, tol):
        """
        Returns the fractional coords in fc1 that have coordinates
        within tolerance to some coordinate in fc2
        """
        d = fc1[:, None, :] - fc2[None, :, :]
        d -= np.round(d)
        np.abs(d, d)
        return fc1[np.any(np.all(d < tol, axis=-1), axis=-1)]

    # here we reduce the number of min_vecs by enforcing that every
    # vector in min_vecs approximately maps each site onto a similar site.
    # The subsequent processing is O(fu^3 * min_vecs) = O(n^4) if we do no
    # reduction.
    # This reduction is O(n^3) so usually is an improvement. Using double
    # the tolerance because both vectors are approximate
    for g in sorted(grouped_fcoords, key=lambda x: len(x)):
        for f in g:
            min_vecs = pbc_coord_intersection(min_vecs, g - f, super_ftol_2)

    def get_hnf(fu):
        """
        Returns all possible distinct supercell matrices given a
        number of formula units in the supercell. Batches the matrices
        by the values in the diagonal (for less numpy overhead).
        Computational complexity is O(n^3), and difficult to improve.
        Might be able to do something smart with checking combinations of a
        and b first, though unlikely to reduce to O(n^2).
        """

        def factors(n):
            # All positive divisors of n, in increasing order.
            for i in range(1, n + 1):
                if n % i == 0:
                    yield i

        for det in factors(fu):
            if det == 1:
                continue
            for a in factors(det):
                for e in factors(det // a):
                    g = det // a // e
                    # Hermite normal form: upper triangular with the
                    # off-diagonal entries bounded by the diagonal.
                    yield det, np.array(
                        [[[a, b, c], [0, e, f], [0, 0, g]]
                         for b, c, f in
                         itertools.product(range(a), range(a),
                                           range(e))])

    # we cant let sites match to their neighbors in the supercell
    grouped_non_nbrs = []
    for gfcoords in grouped_fcoords:
        fdist = gfcoords[None, :, :] - gfcoords[:, None, :]
        fdist -= np.round(fdist)
        np.abs(fdist, fdist)
        non_nbrs = np.any(fdist > 2 * super_ftol[None, None, :], axis=-1)
        # since we want sites to match to themselves
        np.fill_diagonal(non_nbrs, True)
        grouped_non_nbrs.append(non_nbrs)

    # Number of formula units = gcd of the per-species site counts.
    num_fu = six.moves.reduce(gcd, map(len, grouped_sites))
    for size, ms in get_hnf(num_fu):
        inv_ms = np.linalg.inv(ms)

        # find sets of lattice vectors that are are present in min_vecs
        dist = inv_ms[:, :, None, :] - min_vecs[None, None, :, :]
        dist -= np.round(dist)
        np.abs(dist, dist)
        is_close = np.all(dist < super_ftol, axis=-1)
        any_close = np.any(is_close, axis=-1)
        inds = np.all(any_close, axis=-1)

        for inv_m, m in zip(inv_ms[inds], ms[inds]):
            new_m = np.dot(inv_m, self.lattice.matrix)
            # Per-axis fractional tolerance in the candidate cell.
            ftol = np.divide(tolerance, np.sqrt(np.sum(new_m ** 2, axis=1)))

            valid = True
            new_coords = []
            new_sp = []
            new_props = collections.defaultdict(list)
            for gsites, gfcoords, non_nbrs in zip(grouped_sites,
                                                  grouped_fcoords,
                                                  grouped_non_nbrs):
                all_frac = np.dot(gfcoords, m)

                # calculate grouping of equivalent sites, represented by
                # adjacency matrix
                fdist = all_frac[None, :, :] - all_frac[:, None, :]
                fdist = np.abs(fdist - np.round(fdist))
                close_in_prim = np.all(fdist < ftol[None, None, :], axis=-1)
                groups = np.logical_and(close_in_prim, non_nbrs)

                # check that groups are correct
                if not np.all(np.sum(groups, axis=0) == size):
                    valid = False
                    break

                # check that groups are all cliques
                for g in groups:
                    if not np.all(groups[g][:, g]):
                        valid = False
                        break
                if not valid:
                    break

                # add the new sites, averaging positions
                added = np.zeros(len(gsites))
                new_fcoords = all_frac % 1
                for i, group in enumerate(groups):
                    if not added[i]:
                        added[group] = True
                        inds = np.where(group)[0]
                        coords = new_fcoords[inds[0]]
                        for n, j in enumerate(inds[1:]):
                            # Running average using the PBC-shortest
                            # offset of each subsequent member.
                            offset = new_fcoords[j] - coords
                            coords += (offset - np.round(offset)) / (n + 2)
                        new_sp.append(gsites[inds[0]].species_and_occu)
                        for k in gsites[inds[0]].properties:
                            new_props[k].append(gsites[inds[0]].properties[k])
                        new_coords.append(coords)

            if valid:
                inv_m = np.linalg.inv(m)
                new_l = Lattice(np.dot(inv_m, self.lattice.matrix))
                s = Structure(new_l, new_sp, new_coords,
                              site_properties=new_props,
                              coords_are_cartesian=False)

                # Default behavior
                p = s.get_primitive_structure(
                    tolerance=tolerance, use_site_props=use_site_props,
                    constrain_latt=constrain_latt
                ).get_reduced_structure()
                if not any(constrain_latt):
                    return p

                # Only return primitive structures that
                # satisfy the restriction condition
                p_l, s_l = p._lattice, self._lattice
                p_latt = [p_l.a, p_l.b, p_l.c, p_l.alpha, p_l.beta, p_l.gamma]
                s_latt = [s_l.a, s_l.b, s_l.c, s_l.alpha, s_l.beta, s_l.gamma]
                if all([p_latt[i] == s_latt[i] for i, b in enumerate(constrain_latt) if b]):
                    return p

    return self.copy()
def __repr__(self):
    """Verbose representation: lattice, overall charge, then every site.

    Returns:
        str: Multi-line summary of the structure.
    """
    outs = ["Structure Summary", repr(self.lattice)]
    if self._charge:
        # BUG FIX: the previous "-{}".format(charge) branch rendered a
        # negative charge with a doubled sign ("--2"). "{:+}" always
        # emits exactly one explicit sign for either polarity.
        outs.append("Overall Charge: {:+}".format(self._charge))
    for s in self:
        outs.append(repr(s))
    return "\n".join(outs)
def __str__(self):
    """Tabulated summary: formula, lattice parameters, charge, and a
    per-site table of species and fractional coordinates.

    Returns:
        str: Multi-line, human-readable description.
    """
    outs = ["Full Formula ({s})".format(s=self.composition.formula),
            "Reduced Formula: {}"
            .format(self.composition.reduced_formula)]

    def to_s(x):
        # Fixed-precision formatting for lattice/coordinate columns.
        return "%0.6f" % x

    outs.append("abc : " + " ".join([to_s(i).rjust(10)
                                     for i in self.lattice.abc]))
    outs.append("angles: " + " ".join([to_s(i).rjust(10)
                                       for i in self.lattice.angles]))
    if self._charge:
        # BUG FIX: the previous "-{}".format(charge) branch rendered a
        # negative charge with a doubled sign ("--2"). "{:+}" always
        # emits exactly one explicit sign for either polarity.
        outs.append("Overall Charge: {:+}".format(self._charge))
    outs.append("Sites ({i})".format(i=len(self)))
    data = []
    props = self.site_properties
    keys = sorted(props.keys())
    for i, site in enumerate(self):
        row = [str(i), site.species_string]
        row.extend([to_s(j) for j in site.frac_coords])
        for k in keys:
            row.append(props[k][i])
        data.append(row)
    # Imported lazily: tabulate is only needed for display.
    from tabulate import tabulate
    outs.append(tabulate(data, headers=["#", "SP", "a", "b", "c"] + keys,
                         ))
    return "\n".join(outs)
def as_dict(self, verbosity=1, fmt=None, **kwargs):
    """
    Dict representation of Structure.

    Args:
        verbosity (int): Verbosity level. Default of 1 includes both
            direct and cartesian coordinates for all sites, lattice
            parameters, etc. Useful for reading and for insertion into a
            database. Set to 0 for an extremely lightweight version
            that only includes sufficient information to reconstruct the
            object.
        fmt (str): Specifies a format for the dict. Defaults to None,
            which is the default format used in pymatgen. Other options
            include "abivars".
        **kwargs: Allow passing of other kwargs needed for certain
            formats, e.g., "abivars".

    Returns:
        JSON serializable dict representation.
    """
    if fmt == "abivars":
        # Delegate entirely to the ABINIT translation layer.
        from pymatgen.io.abinit.abiobjects import structure_to_abivars
        return structure_to_abivars(self, **kwargs)

    latt_dict = self._lattice.as_dict(verbosity=verbosity)
    # Strip MSONable bookkeeping from the nested dicts; only the
    # top-level dict carries @module/@class.
    for key in ("@module", "@class"):
        del latt_dict[key]

    site_dicts = []
    for site in self:
        sd = site.as_dict(verbosity=verbosity)
        for key in ("lattice", "@module", "@class"):
            del sd[key]
        site_dicts.append(sd)

    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "charge": self._charge,
            "lattice": latt_dict,
            "sites": site_dicts}
@classmethod
def from_dict(cls, d, fmt=None):
    """
    Reconstitute a Structure object from a dict representation of Structure
    created using as_dict().

    Args:
        d (dict): Dict representation of structure.
        fmt (str): If "abivars", *d* is interpreted as a dict of ABINIT
            variables instead of the native pymatgen format.

    Returns:
        Structure object
    """
    if fmt == "abivars":
        from pymatgen.io.abinit.abiobjects import structure_from_abivars
        return structure_from_abivars(cls=cls, **d)

    lattice = Lattice.from_dict(d["lattice"])
    sites = [PeriodicSite.from_dict(site_d, lattice)
             for site_d in d["sites"]]
    # Older serializations may lack "charge"; default to None.
    return cls.from_sites(sites, charge=d.get("charge", None))
def to(self, fmt=None, filename=None, **kwargs):
    """
    Outputs the structure to a file or string.

    Args:
        fmt (str): Format to output to. Defaults to JSON unless filename
            is provided. If fmt is specified, it overrides whatever the
            filename is. Options include "cif", "poscar", "cssr", "json".
            Non-case sensitive.
        filename (str): If provided, output will be written to a file. If
            fmt is not specified, the format is determined from the
            filename. Defaults is None, i.e. string output.

    Returns:
        (str) if filename is None. None otherwise.
    """
    from pymatgen.io.cif import CifWriter
    from pymatgen.io.vasp import Poscar
    from pymatgen.io.cssr import Cssr
    from pymatgen.io.xcrysden import XSF
    from pymatgen.io.atat import Mcsqs
    filename = filename or ""
    fmt = "" if fmt is None else fmt.lower()
    # Basename is only used for pattern matching; all writes must use
    # the full ``filename`` path.
    fname = os.path.basename(filename)
    if fmt == "cif" or fnmatch(fname, "*.cif*"):
        writer = CifWriter(self)
    elif fmt == "mcif" or fnmatch(fname, "*.mcif*"):
        writer = CifWriter(self, write_magmoms=True)
    elif fmt == "poscar" or fnmatch(fname, "*POSCAR*"):
        writer = Poscar(self)
    elif fmt == "cssr" or fnmatch(fname.lower(), "*.cssr*"):
        writer = Cssr(self)
    elif fmt == "json" or fnmatch(fname.lower(), "*.json"):
        s = json.dumps(self.as_dict())
        if filename:
            with zopen(filename, "wt") as f:
                f.write("%s" % s)
            return
        else:
            return s
    elif fmt == "xsf" or fnmatch(fname.lower(), "*.xsf*"):
        if filename:
            # BUG FIX: was zopen(fname, ...), which silently wrote the
            # basename into the current working directory instead of the
            # requested path.
            with zopen(filename, "wt", encoding='utf8') as f:
                s = XSF(self).to_string()
                f.write(s)
            return s
        else:
            return XSF(self).to_string()
    elif fmt == 'mcsqs' or fnmatch(fname, "*rndstr.in*") \
            or fnmatch(fname, "*lat.in*") \
            or fnmatch(fname, "*bestsqs*"):
        if filename:
            # BUG FIX: same basename-vs-full-path issue as the xsf branch.
            with zopen(filename, "wt", encoding='ascii') as f:
                s = Mcsqs(self).to_string()
                f.write(s)
            return
        else:
            return Mcsqs(self).to_string()
    else:
        # No recognised format: fall back to YAML.
        import ruamel.yaml as yaml
        if filename:
            with zopen(filename, "wt") as f:
                yaml.safe_dump(self.as_dict(), f)
            return
        else:
            return yaml.safe_dump(self.as_dict())

    # Writer-based formats (cif/mcif/poscar/cssr) converge here.
    if filename:
        writer.write_file(filename)
    else:
        return writer.__str__()
@classmethod
def from_str(cls, input_string, fmt, primitive=False, sort=False,
             merge_tol=0.0):
    """
    Reads a structure from a string.

    Args:
        input_string (str): String to parse.
        fmt (str): A format specification.
        primitive (bool): Whether to find a primitive cell. Defaults to
            False.
        sort (bool): Whether to sort the sites in accordance to the default
            ordering criteria, i.e., electronegativity.
        merge_tol (float): If this is some positive number, sites that
            are within merge_tol from each other will be merged. Usually
            0.01 should be enough to deal with common numerical issues.

    Returns:
        IStructure / Structure

    Raises:
        ValueError: If ``fmt`` is not a supported format.
    """
    from pymatgen.io.cif import CifParser
    from pymatgen.io.vasp import Poscar
    from pymatgen.io.cssr import Cssr
    from pymatgen.io.xcrysden import XSF
    from pymatgen.io.atat import Mcsqs
    fmt = fmt.lower()
    if fmt == "cif":
        parser = CifParser.from_string(input_string)
        # A cif may contain multiple structures; the first is taken.
        s = parser.get_structures(primitive=primitive)[0]
    elif fmt == "poscar":
        s = Poscar.from_string(input_string, False,
                               read_velocities=False).structure
    elif fmt == "cssr":
        cssr = Cssr.from_string(input_string)
        s = cssr.structure
    elif fmt == "json":
        d = json.loads(input_string)
        s = Structure.from_dict(d)
    elif fmt == "yaml":
        import ruamel.yaml as yaml
        d = yaml.safe_load(input_string)
        s = Structure.from_dict(d)
    elif fmt == "xsf":
        s = XSF.from_string(input_string).structure
    elif fmt == "mcsqs":
        s = Mcsqs.structure_from_string(input_string)
    else:
        raise ValueError("Unrecognized format `%s`!" % fmt)

    if sort:
        s = s.get_sorted_structure()
    if merge_tol:
        s.merge_sites(merge_tol)
    # NOTE(review): the parsed structure is passed to from_sites as a
    # plain site sequence, so any overall charge on ``s`` appears to be
    # dropped here -- confirm whether charge should be forwarded.
    return cls.from_sites(s)
@classmethod
def from_file(cls, filename, primitive=False, sort=False, merge_tol=0.0):
    """
    Reads a structure from a file. For example, anything ending in
    a "cif" is assumed to be a Crystallographic Information Format file.
    Supported formats include CIF, POSCAR/CONTCAR, CHGCAR, LOCPOT,
    vasprun.xml, CSSR, Netcdf and pymatgen's JSON serialized structures.

    Args:
        filename (str): The filename to read from.
        primitive (bool): Whether to convert to a primitive cell
            Only available for cifs. Defaults to False.
        sort (bool): Whether to sort sites. Default to False.
        merge_tol (float): If this is some positive number, sites that
            are within merge_tol from each other will be merged. Usually
            0.01 should be enough to deal with common numerical issues.

    Returns:
        Structure.

    Raises:
        ValueError: If the file extension is not recognized.
    """
    if filename.endswith(".nc"):
        # Read Structure from a netcdf file.
        from pymatgen.io.abinit.netcdf import structure_from_ncdata
        s = structure_from_ncdata(filename, cls=cls)
        if sort:
            s = s.get_sorted_structure()
        return s

    from pymatgen.io.lmto import LMTOCtrl
    from pymatgen.io.vasp import Vasprun, Chgcar
    from pymatgen.io.exciting import ExcitingInput
    from monty.io import zopen
    fname = os.path.basename(filename)
    with zopen(filename, "rt") as f:
        contents = f.read()
    # Dispatch on filename patterns. The order of the branches matters:
    # more specific patterns (e.g. POSCAR/CONTCAR) are tested first.
    if fnmatch(fname.lower(), "*.cif*") or fnmatch(fname.lower(), "*.mcif*"):
        return cls.from_str(contents, fmt="cif",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "*POSCAR*") or fnmatch(fname, "*CONTCAR*"):
        s = cls.from_str(contents, fmt="poscar",
                         primitive=primitive, sort=sort,
                         merge_tol=merge_tol)
    elif fnmatch(fname, "CHGCAR*") or fnmatch(fname, "LOCPOT*"):
        s = Chgcar.from_file(filename).structure
    elif fnmatch(fname, "vasprun*.xml*"):
        s = Vasprun(filename).final_structure
    elif fnmatch(fname.lower(), "*.cssr*"):
        return cls.from_str(contents, fmt="cssr",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        return cls.from_str(contents, fmt="json",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "*.yaml*"):
        return cls.from_str(contents, fmt="yaml",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "*.xsf"):
        return cls.from_str(contents, fmt="xsf",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "input*.xml"):
        # NOTE(review): uses basename ``fname`` rather than the full
        # ``filename`` path -- confirm this is intended.
        return ExcitingInput.from_file(fname).structure
    elif fnmatch(fname, "*rndstr.in*") \
            or fnmatch(fname, "*lat.in*") \
            or fnmatch(fname, "*bestsqs*"):
        return cls.from_str(contents, fmt="mcsqs",
                            primitive=primitive, sort=sort,
                            merge_tol=merge_tol)
    elif fnmatch(fname, "CTRL*"):
        return LMTOCtrl.from_file(filename=filename).structure
    else:
        raise ValueError("Unrecognized file extension!")
    if sort:
        s = s.get_sorted_structure()
    if merge_tol:
        s.merge_sites(merge_tol)

    # Force the result to the requested class (parsers above may return
    # a plain Structure).
    s.__class__ = cls
    return s
class IMolecule(SiteCollection, MSONable):
"""
Basic immutable Molecule object without periodicity. Essentially a
sequence of sites. IMolecule is made to be immutable so that they can
function as keys in a dict. For a mutable molecule,
use the :class:Molecule.
Molecule extends Sequence and Hashable, which means that in many cases,
it can be used like any Python sequence. Iterating through a molecule is
equivalent to going through the sites in sequence.
"""
def __init__(self, species, coords, charge=0,
             spin_multiplicity=None, validate_proximity=False,
             site_properties=None):
    """
    Creates a Molecule.

    Args:
        species: list of atomic species. Possible kinds of input include a
            list of dict of elements/species and occupancies, a List of
            elements/specie specified as actual Element/Specie, Strings
            ("Fe", "Fe2+") or atomic numbers (1,56).
        coords (3x1 array): list of cartesian coordinates of each species.
        charge (float): Charge for the molecule. Defaults to 0.
        spin_multiplicity (int): Spin multiplicity for molecule.
            Defaults to None, which means that the spin multiplicity is
            set to 1 if the molecule has no unpaired electrons and to 2
            if there are unpaired electrons.
        validate_proximity (bool): Whether to check if there are sites
            that are less than 1 Ang apart. Defaults to False.
        site_properties (dict): Properties associated with the sites as
            a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
            sequences have to be the same length as the atomic species
            and fractional_coords. Defaults to None for no properties.

    Raises:
        StructureError: If species and coords differ in length, or if
            validate_proximity is True and sites are too close.
        ValueError: If the requested spin multiplicity is inconsistent
            with the electron count.
    """
    if len(species) != len(coords):
        # NOTE(review): the message is passed as a tuple of fragments, so
        # str(exc) shows the tuple rather than one joined sentence.
        raise StructureError(("The list of atomic species must be of the",
                              " same length as the list of fractional ",
                              "coordinates."))

    sites = []
    for i in range(len(species)):
        prop = None
        if site_properties:
            # Pick out this site's entry from each property sequence.
            prop = {k: v[i] for k, v in site_properties.items()}
        sites.append(Site(species[i], coords[i], properties=prop))
    # Tuple makes the site list immutable, supporting hashability.
    self._sites = tuple(sites)
    if validate_proximity and not self.is_valid():
        raise StructureError(("Molecule contains sites that are ",
                              "less than 0.01 Angstrom apart!"))

    self._charge = charge
    # Electron count: occupancy-weighted sum of atomic numbers minus
    # the overall charge.
    nelectrons = 0
    for site in sites:
        for sp, amt in site.species_and_occu.items():
            nelectrons += sp.Z * amt
    nelectrons -= charge
    self._nelectrons = nelectrons
    if spin_multiplicity:
        # Parity check: nelectrons + multiplicity must be odd for a
        # physically realisable spin state.
        if (nelectrons + spin_multiplicity) % 2 != 1:
            raise ValueError(
                "Charge of %d and spin multiplicity of %d is"
                " not possible for this molecule" %
                (self._charge, spin_multiplicity))
        self._spin_multiplicity = spin_multiplicity
    else:
        # Default: singlet for an even electron count, doublet for odd.
        self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
@property
def charge(self):
    """Overall charge on the molecule, as given at construction."""
    return self._charge
@property
def spin_multiplicity(self):
    """Spin multiplicity of the molecule (explicit or derived default)."""
    return self._spin_multiplicity
@property
def nelectrons(self):
    """Total electron count, as computed at construction."""
    return self._nelectrons
@property
def center_of_mass(self):
    """Mass-weighted average of the site coordinates (3-vector)."""
    weighted_sum = np.zeros(3)
    mass_total = 0.0
    for site in self:
        mass = site.species_and_occu.weight
        weighted_sum += mass * site.coords
        mass_total += mass
    return weighted_sum / mass_total
@property
def sites(self):
    """Immutable tuple of the sites making up this Molecule."""
    return self._sites
@classmethod
def from_sites(cls, sites, charge=0, spin_multiplicity=None,
               validate_proximity=False):
    """
    Convenience constructor to make a Molecule from a list of sites.

    Args:
        sites ([Site]): Sequence of Sites.
        charge (int): Charge of molecule. Defaults to 0.
        spin_multiplicity (int): Spin multiplicity. Defaults to None,
            in which case it is determined automatically.
        validate_proximity (bool): Whether to check that atoms are too
            close.
    """
    # Re-assemble the per-site property dicts into the columnar
    # {name: [values]} layout that the constructor expects.
    site_props = collections.defaultdict(list)
    for site in sites:
        for name, value in site.properties.items():
            site_props[name].append(value)

    species = [site.species_and_occu for site in sites]
    coords = [site.coords for site in sites]
    return cls(species, coords,
               charge=charge, spin_multiplicity=spin_multiplicity,
               validate_proximity=validate_proximity,
               site_properties=site_props)
def break_bond(self, ind1, ind2, tol=0.2):
    """
    Returns two molecules based on breaking the bond between atoms at index
    ind1 and ind2.

    Args:
        ind1 (int): Index of first site.
        ind2 (int): Index of second site.
        tol (float): Relative tolerance to test. Basically, the code
            checks if the distance between the sites is less than (1 +
            tol) * typical bond distances. Defaults to 0.2, i.e.,
            20% longer.

    Returns:
        Two Molecule objects representing the two clusters formed from
        breaking the bond.

    Raises:
        ValueError: If some sites cannot be assigned to either cluster,
            i.e. the molecule does not split into two connected parts
            at this bond.
    """
    sites = self._sites
    # Seed one cluster with each endpoint of the broken bond.
    clusters = [[sites[ind1]], [sites[ind2]]]

    sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)]

    def belongs_to_cluster(site, cluster):
        # A site joins a cluster if it is covalently bonded to any
        # current member of that cluster.
        for test_site in cluster:
            if CovalentBond.is_bonded(site, test_site, tol=tol):
                return True
        return False

    # Fixpoint iteration: keep growing the clusters until every site is
    # attached or no further progress can be made.
    while len(sites) > 0:
        unmatched = []
        for site in sites:
            for cluster in clusters:
                if belongs_to_cluster(site, cluster):
                    cluster.append(site)
                    break
            else:
                unmatched.append(site)
        # A full pass with no assignments means the remainder is
        # disconnected from both clusters.
        if len(unmatched) == len(sites):
            raise ValueError("Not all sites are matched!")
        sites = unmatched

    return (self.__class__.from_sites(cluster)
            for cluster in clusters)
def get_covalent_bonds(self, tol=0.2):
    """
    Determines the covalent bonds in a molecule.

    Args:
        tol (float): The tol to determine bonds in a structure. See
            CovalentBond.is_bonded.

    Returns:
        List of bonds
    """
    # Every unordered pair of sites is a bond candidate.
    return [CovalentBond(first, second)
            for first, second in itertools.combinations(self._sites, 2)
            if CovalentBond.is_bonded(first, second, tol)]
def __eq__(self, other):
if other is None:
return False
if len(self) != len(other):
return False
if self.charge != other.charge:
return False
if self.spin_multiplicity != other.spin_multiplicity:
return False
for site in self:
if site not in other:
return False
return True
def __ne__(self, other):
    # Delegate to __eq__ so the two operators stay consistent
    # (needed explicitly for Python 2 compatibility).
    return not self.__eq__(other)
def __hash__(self):
    # For now, just use the composition hash code. This is consistent
    # with __eq__ (equal molecules share a composition) but coarse:
    # isomers of the same formula collide.
    return self.composition.__hash__()
def __repr__(self):
    """Verbose representation: header line followed by one repr per site."""
    lines = ["Molecule Summary"]
    lines.extend(repr(site) for site in self)
    return "\n".join(lines)
def __str__(self):
    """Human-readable summary: formula, charge/spin, and a site table."""
    header = ["Full Formula (%s)" % self.composition.formula,
              "Reduced Formula: " + self.composition.reduced_formula,
              "Charge = %s, Spin Mult = %s" % (
                  self._charge, self._spin_multiplicity),
              "Sites (%d)" % len(self)]
    rows = []
    for idx, site in enumerate(self):
        # Fixed-precision, right-justified cartesian coordinates.
        coord_text = " ".join([("%0.6f" % c).rjust(12)
                               for c in site.coords])
        rows.append(" ".join([str(idx), site.species_string, coord_text]))
    return "\n".join(header + rows)
def as_dict(self):
    """
    Json-serializable dict representation of Molecule
    """
    site_dicts = []
    for site in self:
        sd = site.as_dict()
        # Only the top-level dict carries the MSONable bookkeeping keys.
        for key in ("@module", "@class"):
            del sd[key]
        site_dicts.append(sd)
    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "charge": self._charge,
            "spin_multiplicity": self._spin_multiplicity,
            "sites": site_dicts}
@classmethod
def from_dict(cls, d):
    """
    Reconstitute a Molecule object from a dict representation created using
    as_dict().

    Args:
        d (dict): dict representation of Molecule.

    Returns:
        Molecule object
    """
    species = []
    coords = []
    props = collections.defaultdict(list)

    for site_dict in d["sites"]:
        # Rebuild each site's species/occupancy mapping: entries with an
        # oxidation state become Specie, the rest plain Element.
        species.append({Specie(sp["element"], sp["oxidation_state"])
                        if "oxidation_state" in sp else
                        Element(sp["element"]): sp["occu"]
                        for sp in site_dict["species"]})
        coords.append(site_dict["xyz"])
        # Collect per-site properties back into columnar form.
        siteprops = site_dict.get("properties", {})
        for k, v in siteprops.items():
            props[k].append(v)

    return cls(species, coords, charge=d.get("charge", 0),
               spin_multiplicity=d.get("spin_multiplicity"),
               site_properties=props)
def get_distance(self, i, j):
    """
    Get distance between site i and j.

    Args:
        i (int): Index of first site
        j (int): Index of second site

    Returns:
        Distance between the two sites.
    """
    site_i = self[i]
    site_j = self[j]
    return site_i.distance(site_j)
def get_sites_in_sphere(self, pt, r):
    """
    Find all sites within a sphere from a point.

    Args:
        pt (3x1 array): Cartesian coordinates of center of sphere.
        r (float): Radius of sphere.

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance.
    """
    # Compute every distance lazily, then keep only those within radius.
    pairs = ((site, site.distance_from_point(pt))
             for site in self._sites)
    return [(site, dist) for site, dist in pairs if dist <= r]
def get_neighbors(self, site, r):
    """
    Get all neighbors to a site within a sphere of radius r. Excludes the
    site itself.

    Args:
        site (Site): Site at the center of the sphere.
        r (float): Radius of sphere.

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance.
    """
    within_sphere = self.get_sites_in_sphere(site.coords, r)
    # Drop the central site itself from the results.
    return [(other, dist) for other, dist in within_sphere
            if site != other]
def get_neighbors_in_shell(self, origin, r, dr):
    """
    Returns all sites in a shell centered on origin (coords) between radii
    r-dr and r+dr.

    Args:
        origin (3x1 array): Cartesian coordinates of center of sphere.
        r (float): Inner radius of shell.
        dr (float): Width of shell.

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance.
    """
    inner = r - dr
    # Query the enclosing sphere, then strip everything inside the shell.
    candidates = self.get_sites_in_sphere(origin, r + dr)
    return [(site, dist) for site, dist in candidates if dist > inner]
def get_boxed_structure(self, a, b, c, images=(1, 1, 1),
                        random_rotation=False, min_dist=1, cls=None,
                        offset=None, no_cross=False):
    """
    Creates a Structure from a Molecule by putting the Molecule in the
    center of a orthorhombic box. Useful for creating Structure for
    calculating molecules using periodic codes.

    Args:
        a (float): a-lattice parameter.
        b (float): b-lattice parameter.
        c (float): c-lattice parameter.
        images: No. of boxed images in each direction. Defaults to
            (1, 1, 1), meaning single molecule with 1 lattice parameter
            in each direction.
        random_rotation (bool): Whether to apply a random rotation to
            each molecule. This jumbles all the molecules so that they
            are not exact images of each other.
        min_dist (float): The minimum distance that atoms should be from
            each other. This is only used if random_rotation is True.
            The randomized rotations are searched such that no two atoms
            are less than min_dist from each other.
        cls: The Structure class to instantiate (defaults to pymatgen
            structure)
        offset: Translation to offset molecule from center of mass coords
        no_cross: Whether to forbid molecule coords from extending beyond
            boundary of box.

    Returns:
        Structure containing molecule in a box.

    Raises:
        ValueError: If the box is too small for the molecule, or if
            no_cross is True and the molecule crosses a box boundary.
    """
    if offset is None:
        offset = np.array([0, 0, 0])

    coords = np.array(self.cart_coords)
    # Molecule extent along each axis; the box must exceed it.
    x_range = max(coords[:, 0]) - min(coords[:, 0])
    y_range = max(coords[:, 1]) - min(coords[:, 1])
    z_range = max(coords[:, 2]) - min(coords[:, 2])
    if a <= x_range or b <= y_range or c <= z_range:
        raise ValueError("Box is not big enough to contain Molecule.")
    lattice = Lattice.from_parameters(a * images[0], b * images[1],
                                      c * images[2],
                                      90, 90, 90)
    nimages = images[0] * images[1] * images[2]
    coords = []

    # Coordinates relative to the center of mass, plus the user offset.
    centered_coords = self.cart_coords - self.center_of_mass + offset

    for i, j, k in itertools.product(list(range(images[0])),
                                     list(range(images[1])),
                                     list(range(images[2]))):
        box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
        if random_rotation:
            # Keep drawing random rotations until the rotated copy keeps
            # at least min_dist from all previously placed atoms.
            while True:
                op = SymmOp.from_origin_axis_angle(
                    (0, 0, 0), axis=np.random.rand(3),
                    angle=random.uniform(-180, 180))
                m = op.rotation_matrix
                new_coords = np.dot(m, centered_coords.T).T + box_center
                if no_cross:
                    x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                    y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                    z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                    if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                        raise ValueError("Molecule crosses boundary of box.")
                if len(coords) == 0:
                    break
                distances = lattice.get_all_distances(
                    lattice.get_fractional_coords(new_coords),
                    lattice.get_fractional_coords(coords))
                if np.amin(distances) > min_dist:
                    break
        else:
            new_coords = centered_coords + box_center
            if no_cross:
                x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                    raise ValueError("Molecule crosses boundary of box.")
        coords.extend(new_coords)
    # Replicate per-site properties once per image.
    sprops = {k: v * nimages for k, v in self.site_properties.items()}

    if cls is None:
        cls = Structure

    return cls(lattice, self.species * nimages, coords,
               coords_are_cartesian=True,
               site_properties=sprops).get_sorted_structure()
def get_centered_molecule(self):
    """
    Returns a copy of this Molecule translated so that its center of
    mass lies at the origin.

    Returns:
        Molecule centered with center of mass at origin.
    """
    shifted_coords = np.array(self.cart_coords) - self.center_of_mass
    # Use self.__class__ so subclasses round-trip to their own type.
    return self.__class__(self.species_and_occu, shifted_coords,
                          charge=self._charge,
                          spin_multiplicity=self._spin_multiplicity,
                          site_properties=self.site_properties)
def to(self, fmt=None, filename=None):
    """
    Outputs the molecule to a file or string.

    Args:
        fmt (str): Format to output to. Defaults to JSON unless filename
            is provided. If fmt is specified, it overrides whatever the
            filename is. Options include "xyz", "gjf", "g03", "json". If
            you have OpenBabel installed, any of the formats supported by
            OpenBabel. Non-case sensitive.
        filename (str): If provided, output will be written to a file. If
            fmt is not specified, the format is determined from the
            filename. Defaults is None, i.e. string output.

    Returns:
        (str) if filename is None. None otherwise.
    """
    from pymatgen.io.xyz import XYZ
    from pymatgen.io.gaussian import GaussianInput
    from pymatgen.io.babel import BabelMolAdaptor
    fmt = "" if fmt is None else fmt.lower()
    # Only the basename is used for extension matching; the full path is
    # still what gets opened for writing.
    fname = os.path.basename(filename or "")
    if fmt == "xyz" or fnmatch(fname.lower(), "*.xyz*"):
        writer = XYZ(self)
    elif any([fmt == r or fnmatch(fname.lower(), "*.{}*".format(r))
              for r in ["gjf", "g03", "g09", "com", "inp"]]):
        writer = GaussianInput(self)
    elif fmt == "json" or fnmatch(fname, "*.json*") or fnmatch(fname,
                                                               "*.mson*"):
        if filename:
            with zopen(filename, "wt", encoding='utf8') as f:
                return json.dump(self.as_dict(), f)
        else:
            return json.dumps(self.as_dict())
    elif fmt == "yaml" or fnmatch(fname, "*.yaml*"):
        import ruamel.yaml as yaml
        if filename:
            # BUG FIX: open the caller-supplied path, not its basename
            # (which silently wrote to the current working directory).
            with zopen(filename, "wt", encoding='utf8') as f:
                return yaml.safe_dump(self.as_dict(), f)
        else:
            return yaml.safe_dump(self.as_dict())
    else:
        # Fall back to OpenBabel for any other recognized extension.
        m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
                      fname.lower())
        if (not fmt) and m:
            fmt = m.group(1)
        writer = BabelMolAdaptor(self)
        return writer.write_file(filename, file_format=fmt)
    if filename:
        writer.write_file(filename)
    else:
        return str(writer)
@classmethod
def from_str(cls, input_string, fmt):
    """
    Reads the molecule from a string.

    Args:
        input_string (str): String to parse.
        fmt (str): Format of the string. Options include "xyz", "gjf",
            "g03", "json", "yaml". If you have OpenBabel installed, any
            of the formats supported by OpenBabel. Non-case sensitive.

    Returns:
        IMolecule or Molecule.
    """
    from pymatgen.io.xyz import XYZ
    from pymatgen.io.gaussian import GaussianInput
    # Normalize once so ALL named branches are case-insensitive (the
    # original lowered fmt only for the "xyz" comparison).
    fmt_low = fmt.lower()
    if fmt_low == "xyz":
        m = XYZ.from_string(input_string).molecule
    elif fmt_low in ["gjf", "g03", "g09", "com", "inp"]:
        m = GaussianInput.from_string(input_string).molecule
    elif fmt_low == "json":
        d = json.loads(input_string)
        return cls.from_dict(d)
    elif fmt_low == "yaml":
        import ruamel.yaml as yaml
        d = yaml.safe_load(input_string)
        return cls.from_dict(d)
    else:
        from pymatgen.io.babel import BabelMolAdaptor
        # Pass the original fmt through untouched for OpenBabel.
        m = BabelMolAdaptor.from_string(input_string,
                                        file_format=fmt).pymatgen_mol
    return cls.from_sites(m)
@classmethod
def from_file(cls, filename):
    """
    Reads a molecule from a file. Supported formats include xyz,
    gaussian input (gjf|g03|g09|com|inp), Gaussian output (.out) and
    pymatgen's JSON serialized molecules. Using openbabel,
    many more extensions are supported but requires openbabel to be
    installed.

    Args:
        filename (str): The filename to read from.

    Returns:
        Molecule
    """
    from pymatgen.io.gaussian import GaussianOutput
    with zopen(filename) as f:
        contents = f.read()
    fname = filename.lower()
    if fnmatch(fname, "*.xyz*"):
        return cls.from_str(contents, fmt="xyz")
    elif any([fnmatch(fname.lower(), "*.{}*".format(r))
              for r in ["gjf", "g03", "g09", "com", "inp"]]):
        # All Gaussian input flavors are handled by the same parser.
        return cls.from_str(contents, fmt="g09")
    elif any([fnmatch(fname.lower(), "*.{}*".format(r))
              for r in ["out", "lis", "log"]]):
        return GaussianOutput(filename).final_structure
    elif fnmatch(fname, "*.json*") or fnmatch(fname, "*.mson*"):
        return cls.from_str(contents, fmt="json")
    elif fnmatch(fname, "*.yaml*"):
        return cls.from_str(contents, fmt="yaml")
    else:
        from pymatgen.io.babel import BabelMolAdaptor
        m = re.search(r"\.(pdb|mol|mdl|sdf|sd|ml2|sy2|mol2|cml|mrv)",
                      filename.lower())
        if m:
            new = BabelMolAdaptor.from_file(filename,
                                            m.group(1)).pymatgen_mol
            # Re-tag the parsed Molecule as this (sub)class in place.
            new.__class__ = cls
            return new
    raise ValueError("Unrecognized file extension!")
def extract_cluster(self, target_sites, **kwargs):
    """
    Extracts a cluster of atoms from a molecule based on bond lengths

    Args:
        target_sites ([Site]): List of initial sites to nucleate cluster.
        \\*\\*kwargs: kwargs passed through to CovalentBond.is_bonded.

    Returns:
        (Molecule) Cluster of atoms.
    """
    cluster = list(target_sites)
    others = [site for site in self if site not in cluster]
    # Fixed-point iteration: keep sweeping until no new site joins the
    # cluster (len(cluster) stops growing).
    size = 0
    while len(cluster) > size:
        size = len(cluster)
        new_others = []
        for site in others:
            for site2 in cluster:
                if CovalentBond.is_bonded(site, site2, **kwargs):
                    cluster.append(site)
                    break
            else:
                # for/else: runs only if no bond was found to any
                # cluster member; site stays in the candidate pool.
                new_others.append(site)
        others = new_others
    # NOTE(review): returns a plain Molecule rather than cls/self.__class__,
    # so subclass identity is not preserved — confirm if intended.
    return Molecule.from_sites(cluster)
class Structure(IStructure, collections.MutableSequence):
    """
    Mutable version of structure.
    """
    # Mutable containers are unhashable by convention.
    __hash__ = None

    def __init__(self, lattice, species, coords, charge=None, validate_proximity=False,
                 to_unit_cell=False, coords_are_cartesian=False,
                 site_properties=None):
        """
        Create a periodic structure.

        Args:
            lattice: The lattice, either as a pymatgen.core.lattice.Lattice or
                simply as any 2D array. Each row should correspond to a lattice
                vector. E.g., [[10,0,0], [20,10,0], [0,0,30]] specifies a
                lattice with lattice vectors [10,0,0], [20,10,0] and [0,0,30].
            species: List of species on each site. Can take in flexible input,
                including:

                i.  A sequence of element / specie specified either as string
                    symbols, e.g. ["Li", "Fe2+", "P", ...] or atomic numbers,
                    e.g., (3, 56, ...) or actual Element or Specie objects.

                ii. List of dict of elements/species and occupancies, e.g.,
                    [{"Fe" : 0.5, "Mn":0.5}, ...]. This allows the setup of
                    disordered structures.
            coords: list of fractional coordinates of each species (or
                cartesian if coords_are_cartesian is True).
            charge (float): Overall charge of the structure.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 0.01 Ang apart. Defaults to False.
            to_unit_cell (bool): Whether to map all sites into the unit cell.
            coords_are_cartesian (bool): Set to True if you are providing
                coordinates in cartesian coordinates. Defaults to False.
            site_properties (dict): Properties associated with the sites as a
                dict of sequences, e.g., {"magmom":[5,5,5,5]}. The sequences
                have to be the same length as the atomic species and
                fractional_coords. Defaults to None for no properties.
        """
        super(Structure, self).__init__(
            lattice, species, coords, charge=charge,
            validate_proximity=validate_proximity, to_unit_cell=to_unit_cell,
            coords_are_cartesian=coords_are_cartesian,
            site_properties=site_properties)
        # Re-bind sites as a list so that in-place edits are possible
        # (the immutable parent presumably stores a tuple).
        self._sites = list(self._sites)
def __setitem__(self, i, site):
    """
    Modify a site in the structure.

    Args:
        i (int, [int], slice, Specie-like): Indices to change. You can
            specify these as an int, a list of int, or a species-like
            string.
        site (PeriodicSite/Specie/Sequence): Three options exist. You
            can provide a PeriodicSite directly (lattice will be
            checked). Or more conveniently, you can provide a
            specie-like object or a tuple of up to length 3.

    Examples:
        s[0] = "Fe"
        s[0] = Element("Fe")
        both replaces the species only.
        s[0] = "Fe", [0.5, 0.5, 0.5]
        Replaces site and *fractional* coordinates. Any properties
        are inherited from current site.
        s[0] = "Fe", [0.5, 0.5, 0.5], {"spin": 2}
        Replaces site and *fractional* coordinates and properties.

        s[(0, 2, 3)] = "Fe"
        Replaces sites 0, 2 and 3 with Fe.

        s[0::2] = "Fe"
        Replaces all even index sites with Fe.

        s["Mn"] = "Fe"
        Replaces all Mn in the structure with Fe. This is
        a short form for the more complex replace_species.

        s["Mn"] = "Fe0.5Co0.5"
        Replaces all Mn in the structure with Fe: 0.5, Co: 0.5, i.e.,
        creates a disordered structure!
    """
    # Normalize the index argument into a list of integer indices, or
    # delegate entirely to replace_species for species-like keys.
    if isinstance(i, int):
        indices = [i]
    elif isinstance(i, six.string_types + (Element, Specie)):
        self.replace_species({i: site})
        return
    elif isinstance(i, slice):
        to_mod = self[i]
        indices = [ii for ii, s in enumerate(self._sites)
                   if s in to_mod]
    else:
        indices = list(i)
    for ii in indices:
        if isinstance(site, PeriodicSite):
            # Direct site assignment: lattice must match, and assigning
            # the same object to multiple indices would alias it.
            if site.lattice != self._lattice:
                raise ValueError("PeriodicSite added must have same lattice "
                                 "as Structure!")
            elif len(indices) != 1:
                raise ValueError("Site assignments makes sense only for "
                                 "single int indices!")
            self._sites[ii] = site
        else:
            if isinstance(site, six.string_types) or (
                    not isinstance(site, collections.Sequence)):
                # Bare species-like value: keep coords and properties.
                sp = site
                frac_coords = self._sites[ii].frac_coords
                properties = self._sites[ii].properties
            else:
                # (species[, frac_coords[, properties]]) tuple; missing
                # entries are inherited from the current site.
                sp = site[0]
                frac_coords = site[1] if len(site) > 1 else \
                    self._sites[ii].frac_coords
                properties = site[2] if len(site) > 2 else \
                    self._sites[ii].properties
            self._sites[ii] = PeriodicSite(sp, frac_coords, self._lattice,
                                           properties=properties)
def __delitem__(self, i):
    """
    Deletes a site (or slice of sites) from the Structure.
    """
    del self._sites[i]
def append(self, species, coords, coords_are_cartesian=False,
           validate_proximity=False, properties=None):
    """
    Append a site to the structure.

    Args:
        species: Species of inserted site
        coords (3x1 array): Coordinates of inserted site
        coords_are_cartesian (bool): Whether coordinates are cartesian.
            Defaults to False.
        validate_proximity (bool): Whether to check if inserted site is
            too close to an existing site. Defaults to False.
        properties (dict): Properties of the site.

    Returns:
        New structure with inserted site.
    """
    # Appending is simply an insert at the tail position.
    end = len(self)
    return self.insert(end, species, coords,
                       coords_are_cartesian=coords_are_cartesian,
                       validate_proximity=validate_proximity,
                       properties=properties)
def insert(self, i, species, coords, coords_are_cartesian=False,
           validate_proximity=False, properties=None):
    """
    Insert a site to the structure.

    Args:
        i (int): Index to insert site
        species (species-like): Species of inserted site
        coords (3x1 array): Coordinates of inserted site
        coords_are_cartesian (bool): Whether coordinates are cartesian.
            Defaults to False.
        validate_proximity (bool): Whether to check if inserted site is
            too close to an existing site. Defaults to False.
        properties (dict): Properties associated with the site.

    Returns:
        New structure with inserted site.
    """
    # Work in fractional coordinates internally; convert if needed.
    if coords_are_cartesian:
        frac = self._lattice.get_fractional_coords(coords)
    else:
        frac = coords
    new_site = PeriodicSite(species, frac, self._lattice,
                            properties=properties)
    if validate_proximity:
        for existing in self:
            if existing.distance(new_site) < self.DISTANCE_TOLERANCE:
                raise ValueError("New site is too close to an existing "
                                 "site!")
    self._sites.insert(i, new_site)
def add_site_property(self, property_name, values):
    """
    Adds a property to all sites.

    Args:
        property_name (str): The name of the property to add.
        values: A sequence of values. Must be same length as number of
            sites.

    Raises:
        ValueError: If len(values) != number of sites.
    """
    if len(values) != len(self._sites):
        raise ValueError("Values must be same length as sites.")
    for i in range(len(self._sites)):
        site = self._sites[i]
        # NOTE(review): this mutates the dict returned by site.properties
        # in place before building the replacement site — assumes that is
        # safe (i.e. the old site is discarded anyway); confirm
        # site.properties does not alias shared state.
        props = site.properties
        if not props:
            props = {}
        props[property_name] = values[i]
        self._sites[i] = PeriodicSite(site.species_and_occu,
                                      site.frac_coords, self._lattice,
                                      properties=props)
def remove_site_property(self, property_name):
    """
    Removes a property from all sites.

    Args:
        property_name (str): The name of the property to remove.
    """
    # Rebuild each site with a filtered copy of its properties dict.
    for i in range(len(self._sites)):
        site = self._sites[i]
        props = {k: v
                 for k, v in site.properties.items()
                 if k != property_name}
        self._sites[i] = PeriodicSite(site.species_and_occu,
                                      site.frac_coords, self._lattice,
                                      properties=props)
def replace_species(self, species_mapping):
    """
    Swap species in a structure.

    Args:
        species_mapping (dict): Dict of species to swap. Species can be
            elements too. e.g., {Element("Li"): Element("Na")} performs
            a Li for Na substitution. The second species can be a
            sp_and_occu dict. For example, a site with 0.5 Si that is
            passed the mapping {Element('Si'): {Element('Ge'): 0.75,
            Element('C'): 0.25} } will have .375 Ge and .125 C. You can
            also supply strings that represent elements or species and
            the code will try to figure out the meaning. E.g.,
            {"C": "C0.5Si0.5"} will replace all C with 0.5 C and 0.5 Si,
            i.e., a disordered site.
    """
    latt = self._lattice
    # Normalize string keys into Element/Specie objects.
    species_mapping = {get_el_sp(k): v
                       for k, v in species_mapping.items()}
    sp_to_replace = set(species_mapping.keys())
    sp_in_structure = set(self.composition.keys())
    if not sp_in_structure.issuperset(sp_to_replace):
        warnings.warn("Some species to be substituted are not present in "
                      "structure. Pls check your input. Species to be "
                      "substituted = %s; Species in structure = %s"
                      % (sp_to_replace, sp_in_structure))

    def mod_site(site):
        # Only rebuild sites that actually contain a mapped species.
        if sp_to_replace.intersection(site.species_and_occu):
            c = Composition()
            for sp, amt in site.species_and_occu.items():
                new_sp = species_mapping.get(sp, sp)
                try:
                    c += Composition(new_sp) * amt
                except Exception:
                    # new_sp is not Composition-compatible; merge as a
                    # raw {species: amount} mapping instead.
                    c += {new_sp: amt}
            return PeriodicSite(c, site.frac_coords, latt,
                                properties=site.properties)
        return site

    self._sites = [mod_site(site) for site in self._sites]
def replace(self, i, species, coords=None, coords_are_cartesian=False,
            properties=None):
    """
    Replace a single site. Takes either a species or a dict of species and
    occupations.

    Args:
        i (int): Index of the site in the _sites list.
        species (species-like): Species of replacement site
        coords (3x1 array): Coordinates of replacement site. If None,
            the current coordinates are assumed.
        coords_are_cartesian (bool): Whether coordinates are cartesian.
            Defaults to False.
        properties (dict): Properties associated with the site.
    """
    if coords is None:
        # Keep the site where it is.
        fcoords = self[i].frac_coords
    else:
        fcoords = (self._lattice.get_fractional_coords(coords)
                   if coords_are_cartesian else coords)
    self._sites[i] = PeriodicSite(species, fcoords, self._lattice,
                                  properties=properties)
def remove_species(self, species):
    """
    Remove all occurrences of several species from a structure.

    Args:
        species: Sequence of species to remove, e.g., ["Li", "Na"].
    """
    to_remove = [get_el_sp(s) for s in species]
    kept_sites = []
    for site in self._sites:
        remaining = {sp: amt for sp, amt in site.species_and_occu.items()
                     if sp not in to_remove}
        # Drop sites whose occupancy becomes entirely empty.
        if len(remaining) > 0:
            kept_sites.append(PeriodicSite(
                remaining, site.frac_coords, self._lattice,
                properties=site.properties))
    self._sites = kept_sites
def remove_sites(self, indices):
    """
    Delete sites at the given indices.

    Args:
        indices: Sequence of indices of sites to delete.
    """
    # Use a set so membership testing is O(1) per site instead of
    # O(len(indices)).
    to_remove = set(indices)
    self._sites = [s for i, s in enumerate(self._sites)
                   if i not in to_remove]
def apply_operation(self, symmop, fractional=False):
    """
    Apply a symmetry operation to the structure and return the new
    structure. The lattice is operated by the rotation matrix only.
    Coords are operated in full and then transformed to the new lattice.

    Args:
        symmop (SymmOp): Symmetry operation to apply.
        fractional (bool): Whether the symmetry operation is applied in
            fractional space. Defaults to False, i.e., symmetry operation
            is applied in cartesian coordinates.
    """
    # NOTE: the lattice must be updated BEFORE operating on sites, since
    # both operate_site closures reference the new self._lattice.
    if not fractional:
        self._lattice = Lattice([symmop.apply_rotation_only(row)
                                 for row in self._lattice.matrix])

        def operate_site(site):
            # Operate in cartesian space, then re-express in the
            # rotated lattice.
            new_cart = symmop.operate(site.coords)
            new_frac = self._lattice.get_fractional_coords(new_cart)
            return PeriodicSite(site.species_and_occu, new_frac,
                                self._lattice,
                                properties=site.properties)
    else:
        new_latt = np.dot(symmop.rotation_matrix, self._lattice.matrix)
        self._lattice = Lattice(new_latt)

        def operate_site(site):
            # Fractional mode: operate directly on fractional coords.
            return PeriodicSite(site.species_and_occu,
                                symmop.operate(site.frac_coords),
                                self._lattice,
                                properties=site.properties)

    self._sites = [operate_site(s) for s in self._sites]
def modify_lattice(self, new_lattice):
    """
    Modify the lattice of the structure. Mainly used for changing the
    basis.

    Args:
        new_lattice (Lattice): New lattice
    """
    self._lattice = new_lattice
    # Rebuild every site against the new lattice, preserving fractional
    # coordinates and properties.
    self._sites = [PeriodicSite(site.species_and_occu,
                                site.frac_coords,
                                self._lattice,
                                properties=site.properties)
                   for site in self._sites]
def apply_strain(self, strain):
    """
    Apply a strain to the lattice.

    Args:
        strain (float or list): Amount of strain to apply. Can be a float,
            or a sequence of 3 numbers. E.g., 0.01 means all lattice
            vectors are increased by 1%. This is equivalent to calling
            modify_lattice with a lattice with lattice parameters that
            are 1% larger.
    """
    # Diagonal scaling matrix: (1 + strain) per axis (a scalar strain
    # broadcasts to all three axes).
    s = (1 + np.array(strain)) * np.eye(3)
    self.modify_lattice(Lattice(np.dot(self._lattice.matrix.T, s).T))
def sort(self, key=None, reverse=False):
    """
    Sort a structure in place. The parameters have the same meaning as in
    list.sort. By default, sites are sorted by the electronegativity of
    the species. The difference between this method and
    get_sorted_structure (which also works in IStructure) is that the
    latter returns a new Structure, while this just sorts the Structure
    in place.

    Args:
        key: Specifies a function of one argument that is used to extract
            a comparison key from each list element: key=str.lower. The
            default value is None (compare the elements directly).
        reverse (bool): If set to True, then the list elements are sorted
            as if each comparison were reversed.
    """
    # Sort the site list in place; list.sort is stable, like sorted().
    self._sites.sort(key=key, reverse=reverse)
def translate_sites(self, indices, vector, frac_coords=True,
                    to_unit_cell=True):
    """
    Translate specific sites by some vector, keeping the sites within the
    unit cell.

    Args:
        indices: Integer or List of site indices on which to perform the
            translation.
        vector: Translation vector for sites.
        frac_coords (bool): Whether the vector corresponds to fractional or
            cartesian coordinates.
        to_unit_cell (bool): Whether new sites are transformed to unit
            cell
    """
    # Accept a bare int as a single-element index list.
    if not isinstance(indices, collections.Iterable):
        indices = [indices]
    for i in indices:
        site = self._sites[i]
        if frac_coords:
            fcoords = site.frac_coords + vector
        else:
            # Cartesian vector: translate in cartesian space, then
            # convert back to fractional coordinates.
            fcoords = self._lattice.get_fractional_coords(
                site.coords + vector)
        new_site = PeriodicSite(site.species_and_occu, fcoords,
                                self._lattice, to_unit_cell=to_unit_cell,
                                coords_are_cartesian=False,
                                properties=site.properties)
        self._sites[i] = new_site
def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None,
                 to_unit_cell=True):
    """
    Rotate specific sites by some angle around vector at anchor.

    Args:
        indices (list): List of site indices on which to perform the
            rotation. Defaults to all sites.
        theta (float): Angle in radians
        axis (3x1 array): Rotation axis vector. Defaults to the z axis.
        anchor (3x1 array): Point of rotation. Defaults to the origin.
        to_unit_cell (bool): Whether new sites are transformed to unit
            cell
    """
    from numpy.linalg import norm
    from numpy import cross, eye
    from scipy.linalg import expm

    if indices is None:
        indices = range(len(self))
    if axis is None:
        axis = [0, 0, 1]
    if anchor is None:
        anchor = [0, 0, 0]

    anchor = np.array(anchor)
    axis = np.array(axis)

    theta %= 2 * np.pi

    # Rodrigues-style rotation matrix via the matrix exponential of the
    # skew-symmetric cross-product matrix of the (normalized) axis.
    rm = expm(cross(eye(3), axis / norm(axis)) * theta)

    for i in indices:
        site = self._sites[i]
        # Rotate about the anchor point. Uses plain ndarray ops instead
        # of the deprecated np.matrix arithmetic; numerically identical.
        coords = np.dot(rm, site.coords - anchor) + anchor
        new_site = PeriodicSite(
            site.species_and_occu, coords, self._lattice,
            to_unit_cell=to_unit_cell, coords_are_cartesian=True,
            properties=site.properties)
        self._sites[i] = new_site
def perturb(self, distance):
    """
    Performs a random perturbation of the sites in a structure to break
    symmetries.

    Args:
        distance (float): Distance in angstroms by which to perturb each
            site.
    """
    def random_displacement():
        # Draw an isotropic direction; re-draw on the (measure-zero)
        # chance of a zero vector to avoid division by zero.
        v = np.random.randn(3)
        length = np.linalg.norm(v)
        if length == 0:
            return random_displacement()
        return v / length * distance

    for idx in range(len(self._sites)):
        self.translate_sites([idx], random_displacement(),
                             frac_coords=False)
def add_oxidation_state_by_element(self, oxidation_states):
    """
    Add oxidation states to a structure.

    Args:
        oxidation_states (dict): Dict of oxidation states.
            E.g., {"Li":1, "Fe":2, "P":5, "O":-2}

    Raises:
        ValueError: If an element in the structure is missing from the
            oxidation_states dict.
    """
    try:
        for i, site in enumerate(self._sites):
            new_sp = {}
            for el, occu in site.species_and_occu.items():
                sym = el.symbol
                # KeyError here (missing element) is converted below.
                new_sp[Specie(sym, oxidation_states[sym])] = occu
            new_site = PeriodicSite(new_sp, site.frac_coords,
                                    self._lattice,
                                    coords_are_cartesian=False,
                                    properties=site.properties)
            self._sites[i] = new_site
    except KeyError:
        raise ValueError("Oxidation state of all elements must be "
                         "specified in the dictionary.")
def add_oxidation_state_by_site(self, oxidation_states):
    """
    Add oxidation states to a structure by site.

    Args:
        oxidation_states (list): List of oxidation states, one per site.
            E.g., [1, 1, 1, 1, 2, 2, 2, 2, 5, 5, 5, 5, -2, -2, -2, -2]

    Raises:
        ValueError: If fewer oxidation states than sites are supplied.
    """
    try:
        for i, site in enumerate(self._sites):
            new_sp = {}
            for el, occu in site.species_and_occu.items():
                sym = el.symbol
                # IndexError here (list too short) is converted below.
                new_sp[Specie(sym, oxidation_states[i])] = occu
            new_site = PeriodicSite(new_sp, site.frac_coords,
                                    self._lattice,
                                    coords_are_cartesian=False,
                                    properties=site.properties)
            self._sites[i] = new_site
    except IndexError:
        # BUG FIX: the argument is a list, not a dictionary — the old
        # message incorrectly said "specified in the dictionary".
        raise ValueError("Oxidation state of all sites must be "
                         "specified in the list.")
def remove_oxidation_states(self):
    """
    Removes oxidation states from a structure.
    """
    for i, site in enumerate(self._sites):
        # defaultdict(float) accumulates occupancies of species that
        # collapse onto the same element once charges are dropped
        # (e.g. Fe2+ and Fe3+ both become Fe).
        new_sp = collections.defaultdict(float)
        for el, occu in site.species_and_occu.items():
            sym = el.symbol
            new_sp[Element(sym)] += occu
        new_site = PeriodicSite(new_sp, site.frac_coords,
                                self._lattice,
                                coords_are_cartesian=False,
                                properties=site.properties)
        self._sites[i] = new_site
def add_oxidation_state_by_guess(self, **kwargs):
    """
    Decorates the structure with oxidation state, guessing
    using Composition.oxi_state_guesses()

    Args:
        **kwargs: parameters to pass into oxi_state_guesses()
    """
    guesses = self.composition.oxi_state_guesses(**kwargs)
    if not guesses:
        # No valid guess: fall back to all-zero oxidation states.
        guesses = [dict([(e.symbol, 0) for e in self.composition])]
    self.add_oxidation_state_by_element(guesses[0])
def add_spin_by_element(self, spins):
    """
    Add spin states to a structure.

    Args:
        spins (dict): Dict of spins associated with
            elements or species, e.g. {"Ni":+5} or {"Ni2+":5}
    """
    for i, site in enumerate(self._sites):
        new_sp = {}
        for sp, occu in site.species_and_occu.items():
            sym = sp.symbol
            # Preserve any existing oxidation state on the species.
            oxi_state = getattr(sp, "oxi_state", None)
            # Species-string keys (e.g. "Ni2+") take precedence over
            # bare element symbols (e.g. "Ni").
            new_sp[Specie(sym, oxidation_state=oxi_state,
                          properties={'spin': spins.get(str(sp), spins.get(sym, None))})] = occu
        new_site = PeriodicSite(new_sp, site.frac_coords,
                                self._lattice,
                                coords_are_cartesian=False,
                                properties=site.properties)
        self._sites[i] = new_site
def add_spin_by_site(self, spins):
    """
    Add spin states to a structure by site.

    Args:
        spins (list): List of spins, one per site.
            E.g., [+5, -5, 0, 0]

    Raises:
        ValueError: If fewer spins than sites are supplied.
    """
    try:
        for i, site in enumerate(self._sites):
            new_sp = {}
            for sp, occu in site.species_and_occu.items():
                sym = sp.symbol
                # Preserve any existing oxidation state on the species.
                oxi_state = getattr(sp, "oxi_state", None)
                new_sp[Specie(sym, oxidation_state=oxi_state,
                              properties={'spin': spins[i]})] = occu
            new_site = PeriodicSite(new_sp, site.frac_coords,
                                    self._lattice,
                                    coords_are_cartesian=False,
                                    properties=site.properties)
            self._sites[i] = new_site
    except IndexError:
        # BUG FIX: the argument is a list, not a dictionary — the old
        # message incorrectly said "specified in the dictionary".
        raise ValueError("Spin of all sites must be "
                         "specified in the list.")
def remove_spin(self):
    """
    Removes spin states from a structure.
    """
    for i, site in enumerate(self._sites):
        # defaultdict(float) merges occupancies of species that become
        # identical once the spin property is dropped.
        new_sp = collections.defaultdict(float)
        for sp, occu in site.species_and_occu.items():
            # Oxidation states are preserved; only spin is removed.
            oxi_state = getattr(sp, "oxi_state", None)
            new_sp[Specie(sp.symbol, oxidation_state=oxi_state)] += occu
        new_site = PeriodicSite(new_sp, site.frac_coords,
                                self._lattice,
                                coords_are_cartesian=False,
                                properties=site.properties)
        self._sites[i] = new_site
def make_supercell(self, scaling_matrix, to_unit_cell=True):
    """
    Create a supercell.

    Args:
        scaling_matrix: A scaling matrix for transforming the lattice
            vectors. Has to be all integers. Several options are possible:

            a. A full 3x3 scaling matrix defining the linear combination
               the old lattice vectors. E.g., [[2,1,0],[0,3,0],[0,0,
               1]] generates a new structure with lattice vectors a' =
               2a + b, b' = 3b, c' = c where a, b, and c are the lattice
               vectors of the original structure.
            b. An sequence of three scaling factors. E.g., [2, 1, 1]
               specifies that the supercell should have dimensions 2a x b x
               c.
            c. A number, which simply scales all lattice vectors by the
               same factor.

        to_unit_cell: Whether or not to fall back sites into the unit cell
    """
    # Delegate the heavy lifting to __mul__, which builds the supercell.
    s = self*scaling_matrix
    if to_unit_cell:
        for isite, site in enumerate(s):
            s[isite] = site.to_unit_cell
    # Adopt the supercell's sites and lattice in place.
    self._sites = s.sites
    self._lattice = s.lattice
def scale_lattice(self, volume):
    """
    Performs a scaling of the lattice vectors so that length proportions
    and angles are preserved.

    Args:
        volume (float): New volume of the unit cell in A^3.
    """
    scaled = self._lattice.scale(volume)
    self.modify_lattice(scaled)
def merge_sites(self, tol=0.01, mode="sum"):
    """
    Merges sites (adding occupancies) within tol of each other.
    Removes site properties.

    Args:
        tol (float): Tolerance for distance to merge sites.
        mode (str): Two modes supported. "delete" means duplicate sites are
            deleted. "sum" means the occupancies are summed for the sites.
            Only first letter is considered.
    """
    mode = mode.lower()[0]
    from scipy.spatial.distance import squareform
    from scipy.cluster.hierarchy import fcluster, linkage
    d = self.distance_matrix
    np.fill_diagonal(d, 0)
    # Single-linkage hierarchical clustering on the symmetrized
    # distance matrix; sites closer than tol end up in one cluster.
    clusters = fcluster(linkage(squareform((d + d.T) / 2)),
                        tol, 'distance')
    sites = []
    for c in np.unique(clusters):
        inds = np.where(clusters == c)[0]
        species = self[inds[0]].species_and_occu
        coords = self[inds[0]].frac_coords
        for n, i in enumerate(inds[1:]):
            sp = self[i].species_and_occu
            if mode == "s":
                species += sp
            # Running average of fractional coords; np.round handles
            # periodic images so averaging works across cell boundaries.
            offset = self[i].frac_coords - coords
            coords += ((offset - np.round(offset)) / (n + 2)).astype(
                coords.dtype)
        sites.append(PeriodicSite(species, coords, self.lattice))

    self._sites = sites
class Molecule(IMolecule, collections.MutableSequence):
    """
    Mutable Molecule. It has all the methods in IMolecule, but in addition,
    it allows a user to perform edits on the molecule.
    """
    # Mutable containers are unhashable by convention.
    __hash__ = None

    def __init__(self, species, coords, charge=0,
                 spin_multiplicity=None, validate_proximity=False,
                 site_properties=None):
        """
        Creates a MutableMolecule.

        Args:
            species: list of atomic species. Possible kinds of input include a
                list of dict of elements/species and occupancies, a List of
                elements/specie specified as actual Element/Specie, Strings
                ("Fe", "Fe2+") or atomic numbers (1,56).
            coords (3x1 array): list of cartesian coordinates of each species.
            charge (float): Charge for the molecule. Defaults to 0.
            spin_multiplicity (int): Spin multiplicity for molecule.
                Defaults to None, which means that the spin multiplicity is
                set to 1 if the molecule has no unpaired electrons and to 2
                if there are unpaired electrons.
            validate_proximity (bool): Whether to check if there are sites
                that are less than 1 Ang apart. Defaults to False.
            site_properties (dict): Properties associated with the sites as
                a dict of sequences, e.g., {"magmom":[5,5,5,5]}. The
                sequences have to be the same length as the atomic species
                and fractional_coords. Defaults to None for no properties.
        """
        super(Molecule, self).__init__(species, coords, charge=charge,
                                       spin_multiplicity=spin_multiplicity,
                                       validate_proximity=validate_proximity,
                                       site_properties=site_properties)
        # Re-bind sites as a list so that in-place edits are possible.
        self._sites = list(self._sites)
def __setitem__(self, i, site):
    """
    Modify a site in the molecule.

    Args:
        i (int, [int], slice, Specie-like): Indices to change. You can
            specify these as an int, a list of int, or a species-like
            string.
        site (PeriodicSite/Specie/Sequence): Three options exist. You can
            provide a Site directly, or for convenience, you can provide
            simply a Specie-like string/object, or finally a (Specie,
            coords) sequence, e.g., ("Fe", [0.5, 0.5, 0.5]).
    """
    # Normalize the index argument into a list of integer indices, or
    # delegate entirely to replace_species for species-like keys.
    if isinstance(i, int):
        indices = [i]
    elif isinstance(i, six.string_types + (Element, Specie)):
        self.replace_species({i: site})
        return
    elif isinstance(i, slice):
        to_mod = self[i]
        indices = [ii for ii, s in enumerate(self._sites)
                   if s in to_mod]
    else:
        indices = list(i)

    for ii in indices:
        if isinstance(site, Site):
            self._sites[ii] = site
        else:
            if isinstance(site, six.string_types) or (
                    not isinstance(site, collections.Sequence)):
                # Bare species-like value: keep coords and properties.
                sp = site
                coords = self._sites[ii].coords
                properties = self._sites[ii].properties
            else:
                # (species[, coords[, properties]]) tuple; missing
                # entries are inherited from the current site.
                sp = site[0]
                coords = site[1] if len(site) > 1 else self._sites[
                    ii].coords
                properties = site[2] if len(site) > 2 else self._sites[ii] \
                    .properties

            self._sites[ii] = Site(sp, coords, properties=properties)
def __delitem__(self, i):
    """
    Deletes a site (or slice of sites) from the Molecule.
    """
    del self._sites[i]
def append(self, species, coords, validate_proximity=True, properties=None):
    """
    Appends a site to the molecule.

    Args:
        species: Species of inserted site
        coords: Coordinates of inserted site
        validate_proximity (bool): Whether to check if inserted site is
            too close to an existing site. Defaults to True.
        properties (dict): A dict of properties for the Site.

    Returns:
        New molecule with inserted site.
    """
    # Appending is simply an insert at the tail position.
    end = len(self)
    return self.insert(end, species, coords,
                       validate_proximity=validate_proximity,
                       properties=properties)
def set_charge_and_spin(self, charge, spin_multiplicity=None):
    """
    Set the charge and spin multiplicity.

    Args:
        charge (int): Charge for the molecule. Defaults to 0.
        spin_multiplicity (int): Spin multiplicity for molecule.
            Defaults to None, which means that the spin multiplicity is
            set to 1 if the molecule has no unpaired electrons and to 2
            if there are unpaired electrons.

    Raises:
        ValueError: If the given spin multiplicity is inconsistent with
            the electron count implied by the charge.
    """
    self._charge = charge
    # Count electrons from atomic numbers (dummy species carry none),
    # then remove electrons according to the net charge.
    nelectrons = 0
    for site in self._sites:
        for sp, amt in site.species_and_occu.items():
            if not isinstance(sp, DummySpecie):
                nelectrons += sp.Z * amt
    nelectrons -= charge
    self._nelectrons = nelectrons
    if spin_multiplicity:
        # Electron count and multiplicity must have opposite parity
        # (multiplicity = n_unpaired + 1).
        if (nelectrons + spin_multiplicity) % 2 != 1:
            raise ValueError(
                "Charge of {} and spin multiplicity of {} is"
                " not possible for this molecule".format(
                    self._charge, spin_multiplicity))
        self._spin_multiplicity = spin_multiplicity
    else:
        # Default: singlet for even electron count, doublet for odd.
        self._spin_multiplicity = 1 if nelectrons % 2 == 0 else 2
def add_oxidation_state_by_element(self, oxidation_states):
    """
    Add oxidation states to a molecule.

    Args:
        oxidation_states (dict): Dict of oxidation states.
            E.g., {"Li":1, "Fe":2, "P":5, "O":-2}

    Raises:
        ValueError: If an element in the molecule is missing from the
            oxidation_states dict.
    """
    try:
        for i, site in enumerate(self._sites):
            new_sp = {}
            for el, occu in site.species_and_occu.items():
                sym = el.symbol
                # KeyError here (missing element) is converted below.
                new_sp[Specie(sym, oxidation_states[sym])] = occu
            new_site = Site(new_sp, site.coords,
                            properties=site.properties)
            self._sites[i] = new_site
    except KeyError:
        raise ValueError("Oxidation state of all elements must be "
                         "specified in the dictionary.")
def insert(self, i, species, coords, validate_proximity=False,
           properties=None):
    """
    Insert a site to the molecule.

    Args:
        i (int): Index to insert site
        species: species of inserted site
        coords (3x1 array): coordinates of inserted site
        validate_proximity (bool): Whether to check if inserted site is
            too close to an existing site. Defaults to False.
        properties (dict): Dict of properties for the Site.

    Returns:
        New molecule with inserted site.
    """
    new_site = Site(species, coords, properties=properties)
    if validate_proximity:
        for site in self:
            if site.distance(new_site) < self.DISTANCE_TOLERANCE:
                raise ValueError("New site is too close to an existing "
                                 "site!")
    self._sites.insert(i, new_site)
def add_site_property(self, property_name, values):
    """
    Adds a property to all sites.

    Args:
        property_name (str): The name of the property to add.
        values (list): A sequence of values. Must be same length as
            number of sites.

    Raises:
        ValueError: If len(values) != number of sites.
    """
    if len(values) != len(self._sites):
        raise ValueError("Values must be same length as sites.")
    for i in range(len(self._sites)):
        site = self._sites[i]
        # NOTE(review): this mutates the dict returned by site.properties
        # in place before building the replacement site — assumes that is
        # safe; confirm site.properties does not alias shared state.
        props = site.properties
        if not props:
            props = {}
        props[property_name] = values[i]
        self._sites[i] = Site(site.species_and_occu, site.coords,
                              properties=props)
def remove_site_property(self, property_name):
    """
    Removes a property from all sites.

    Args:
        property_name (str): The name of the property to remove.
    """
    # Rebuild each site with a filtered copy of its properties dict.
    for i in range(len(self._sites)):
        site = self._sites[i]
        props = {k: v
                 for k, v in site.properties.items()
                 if k != property_name}
        self._sites[i] = Site(site.species_and_occu, site.coords,
                              properties=props)
    def replace_species(self, species_mapping):
        """
        Swap species in a molecule, in place.
        Args:
            species_mapping (dict): dict of species to swap. Species can be
                elements too. E.g., {Element("Li"): Element("Na")} performs
                a Li for Na substitution. The second species can be a
                sp_and_occu dict. For example, a site with 0.5 Si that is
                passed the mapping {Element('Si'): {Element('Ge'): 0.75,
                Element('C'): 0.25}} will have .375 Ge and .125 C.
        """
        # Normalize mapping keys to Element/Specie objects so they compare
        # equal to the keys stored on each site.
        species_mapping = {get_el_sp(k): v
                           for k, v in species_mapping.items()}

        def mod_site(site):
            # Rebuild the site's composition, substituting mapped species.
            c = Composition()
            for sp, amt in site.species_and_occu.items():
                new_sp = species_mapping.get(sp, sp)
                try:
                    c += Composition(new_sp) * amt
                except TypeError:
                    # Fallback path when new_sp cannot be fed to
                    # Composition(...) * amt directly — presumably a dict
                    # mapping case; TODO confirm which inputs hit this.
                    c += {new_sp: amt}
            return Site(c, site.coords, properties=site.properties)
        self._sites = [mod_site(site) for site in self._sites]
def remove_species(self, species):
"""
Remove all occurrences of a species from a molecule.
Args:
species: Species to remove.
"""
new_sites = []
species = [get_el_sp(sp) for sp in species]
for site in self._sites:
new_sp_occu = {sp: amt for sp, amt in site.species_and_occu.items()
if sp not in species}
if len(new_sp_occu) > 0:
new_sites.append(Site(new_sp_occu, site.coords,
properties=site.properties))
self._sites = new_sites
def remove_sites(self, indices):
"""
Delete sites with at indices.
Args:
indices: Sequence of indices of sites to delete.
"""
self._sites = [self._sites[i] for i in range(len(self._sites))
if i not in indices]
def translate_sites(self, indices=None, vector=None):
"""
Translate specific sites by some vector, keeping the sites within the
unit cell.
Args:
indices (list): List of site indices on which to perform the
translation.
vector (3x1 array): Translation vector for sites.
"""
if indices is None:
indices = range(len(self))
if vector is None:
vector == [0, 0, 0]
for i in indices:
site = self._sites[i]
new_site = Site(site.species_and_occu, site.coords + vector,
properties=site.properties)
self._sites[i] = new_site
    def rotate_sites(self, indices=None, theta=0, axis=None, anchor=None):
        """
        Rotate specific sites by some angle around vector at anchor, in place.
        Args:
            indices (list): List of site indices on which to perform the
                rotation. Defaults to all sites.
            theta (float): Angle in radians
            axis (3x1 array): Rotation axis vector. Defaults to the z-axis.
            anchor (3x1 array): Point of rotation. Defaults to the origin.
        """
        from numpy.linalg import norm
        from numpy import cross, eye
        from scipy.linalg import expm
        if indices is None:
            indices = range(len(self))
        if axis is None:
            axis = [0, 0, 1]
        if anchor is None:
            anchor = [0, 0, 0]
        anchor = np.array(anchor)
        axis = np.array(axis)
        theta %= 2 * np.pi
        # Build the rotation matrix as the matrix exponential of the
        # skew-symmetric cross-product matrix of the unit axis times theta
        # (Rodrigues' rotation via expm).
        rm = expm(cross(eye(3), axis / norm(axis)) * theta)
        for i in indices:
            site = self._sites[i]
            # Rotate about the anchor: shift to origin, rotate, shift back.
            # NOTE(review): relies on the legacy np.matrix API (.A1 flattens
            # the result back to a 1-D array).
            s = ((rm * np.matrix(site.coords - anchor).T).T + anchor).A1
            new_site = Site(site.species_and_occu, s,
                            properties=site.properties)
            self._sites[i] = new_site
def perturb(self, distance):
"""
Performs a random perturbation of the sites in a structure to break
symmetries.
Args:
distance (float): Distance in angstroms by which to perturb each
site.
"""
def get_rand_vec():
# deals with zero vectors.
vector = np.random.randn(3)
vnorm = np.linalg.norm(vector)
return vector / vnorm * distance if vnorm != 0 else get_rand_vec()
for i in range(len(self._sites)):
self.translate_sites([i], get_rand_vec())
def apply_operation(self, symmop):
"""
Apply a symmetry operation to the molecule.
Args:
symmop (SymmOp): Symmetry operation to apply.
"""
def operate_site(site):
new_cart = symmop.operate(site.coords)
return Site(site.species_and_occu, new_cart,
properties=site.properties)
self._sites = [operate_site(s) for s in self._sites]
def copy(self):
"""
Convenience method to get a copy of the molecule.
Returns:
A copy of the Molecule.
"""
return self.__class__.from_sites(self)
    def substitute(self, index, func_grp, bond_order=1):
        """
        Substitute atom at index with a functional group, in place.
        Args:
            index (int): Index of atom to substitute.
            func_grp: Substituent molecule. There are two options:
                1. Providing an actual molecule as the input. The first atom
                   must be a DummySpecie X, indicating the position of
                   nearest neighbor. The second atom must be the next
                   nearest atom. For example, for a methyl group
                   substitution, func_grp should be X-CH3, where X is the
                   first site and C is the second site. What the code will
                   do is to remove the index site, and connect the nearest
                   neighbor to the C atom in CH3. The X-C bond indicates the
                   directionality to connect the atoms.
                2. A string name. The molecule will be obtained from the
                   relevant template in func_groups.json.
            bond_order (int): A specified bond order to calculate the bond
                length between the attached functional group and the nearest
                neighbor site. Defaults to 1.
        """
        # Find the nearest neighbor that is not a terminal atom.
        all_non_terminal_nn = []
        for nn, dist in self.get_neighbors(self[index], 3):
            # Check that the nn has neighbors within a sensible distance but
            # is not the site being substituted.
            for inn, dist2 in self.get_neighbors(nn, 3):
                if inn != self[index] and \
                        dist2 < 1.2 * get_bond_length(nn.specie, inn.specie):
                    all_non_terminal_nn.append((nn, dist))
                    break
        if len(all_non_terminal_nn) == 0:
            raise RuntimeError("Can't find a non-terminal neighbor to attach"
                               " functional group to.")
        # Attach to the closest of the non-terminal neighbors.
        non_terminal_nn = min(all_non_terminal_nn, key=lambda d: d[1])[0]
        # Set the origin point to be the coordinates of the nearest
        # non-terminal neighbor.
        origin = non_terminal_nn.coords
        # Pass value of functional group--either from user-defined or from
        # functional.json
        if isinstance(func_grp, Molecule):
            func_grp = func_grp
        else:
            # Check to see whether the functional group is in database.
            if func_grp not in FunctionalGroups:
                raise RuntimeError("Can't find functional group in list. "
                                   "Provide explicit coordinate instead")
            else:
                func_grp = FunctionalGroups[func_grp]
        # If a bond length can be found, modify func_grp so that the X-group
        # bond length is equal to the bond length.
        bl = get_bond_length(non_terminal_nn.specie, func_grp[1].specie,
                             bond_order=bond_order)
        if bl is not None:
            # Work on a copy so the (possibly shared) template is untouched.
            func_grp = func_grp.copy()
            vec = func_grp[0].coords - func_grp[1].coords
            vec /= np.linalg.norm(vec)
            func_grp[0] = "X", func_grp[1].coords + float(bl) * vec
        # Align X to the origin.
        x = func_grp[0]
        func_grp.translate_sites(list(range(len(func_grp))), origin - x.coords)
        # Find angle between the attaching bond and the bond to be replaced.
        v1 = func_grp[1].coords - origin
        v2 = self[index].coords - origin
        angle = get_angle(v1, v2)
        if 1 < abs(angle % 180) < 179:
            # For angles which are not 0 or 180, we perform a rotation about
            # the origin along an axis perpendicular to both bonds to align
            # bonds.
            axis = np.cross(v1, v2)
            op = SymmOp.from_origin_axis_angle(origin, axis, angle)
            func_grp.apply_operation(op)
        elif abs(abs(angle) - 180) < 1:
            # We have a 180 degree angle. Simply do an inversion about the
            # origin
            for i in range(len(func_grp)):
                func_grp[i] = (func_grp[i].species_and_occu,
                               origin - (func_grp[i].coords - origin))
        # Remove the atom to be replaced, and add the rest of the functional
        # group (skipping the dummy X at position 0).
        del self[index]
        for site in func_grp[1:]:
            self._sites.append(site)
class StructureError(Exception):
    """
    Exception class for Structure.
    Raised when the structure has problems, e.g., atoms that are too close.
    """
    # No additional state: this class exists only to give structure-related
    # failures a distinct, catchable type.
    pass
# Load the functional-group templates shipped alongside this module.
# Keys are group names; values are Molecule templates whose first site is the
# attachment dummy atom X (see Molecule.substitute for the convention).
with open(os.path.join(os.path.dirname(__file__),
                       "func_groups.json"), "rt") as f:
    FunctionalGroups = {k: Molecule(v["species"], v["coords"])
                        for k, v in json.load(f).items()}
|
nisse3000/pymatgen
|
pymatgen/core/structure.py
|
Python
|
mit
| 140,131
|
[
"ABINIT",
"CRYSTAL",
"Gaussian",
"NetCDF",
"VASP",
"exciting",
"pymatgen"
] |
813b028daa79cc00a88a9007dba59f180bda8550f7acc61b26e6b97c99cf916d
|
"""Internal helper files for user output."""
__author__ = ("Luc Anselin luc.anselin@asu.edu, "
"David C. Folch david.folch@asu.edu, "
"Levi John Wolf levi.john.wolf@gmail.com, "
"Jing Yao jingyao@asu.edu")
import numpy as np
import copy as COPY
from . import diagnostics
from . import sputils as spu
from pysal.lib import weights
from scipy.sparse.csr import csr_matrix
__all__ = []
def set_name_ds(name_ds):
    """Return the user-provided dataset name, or a generic fallback when the
    user provides no explicit name.
    Parameters
    ----------
    name_ds : string
              User provided dataset name.
    Returns
    -------
    name_ds : string
    """
    return name_ds if name_ds else 'unknown'
def set_name_y(name_y):
    """Return the user-provided dependent-variable name, or a generic
    fallback when the user provides no explicit name.
    Parameters
    ----------
    name_y : string
             User provided dependent variable name.
    Returns
    -------
    name_y : string
    """
    return name_y if name_y else 'dep_var'
def set_name_x(name_x, x, constant=False):
    """Return the exogenous-variable names, generating generic names when the
    user provides none, and prepending 'CONSTANT' unless it is already there.
    Parameters
    ----------
    name_x   : list of string
               User provided exogenous variable names.
    x        : array
               User provided exogenous variables.
    constant : boolean
               If False (default), 'CONSTANT' is prepended to the list.
    Returns
    -------
    name_x : list of strings
    """
    if name_x:
        # Copy so the caller's list is never mutated by the insert below.
        names = list(name_x)
    else:
        names = ['var_' + str(i + 1) for i in range(x.shape[1])]
    if not constant:
        names.insert(0, 'CONSTANT')
    return names
def set_name_yend(name_yend, yend):
    """Return the endogenous-variable names, generating generic names when
    the user provides none; empty list when there are no endogenous variables.
    Parameters
    ----------
    name_yend : list of strings
                User provided endogenous variable names.
    yend      : array or None
                Endogenous variables (one name per column is produced).
    Returns
    -------
    name_yend : list of strings
    """
    if yend is None:
        return []
    if name_yend:
        return list(name_yend)
    return ['endogenous_' + str(i + 1) for i in range(len(yend[0]))]
def set_name_q(name_q, q):
    """Return the external-instrument names, generating generic names when
    the user provides none; empty list when there are no instruments.
    Parameters
    ----------
    name_q : list of strings
             User provided instrument names.
    q      : array or None
             Array of instruments (one name per column is produced).
    Returns
    -------
    name_q : list of strings
    """
    if q is None:
        return []
    if name_q:
        return list(name_q)
    return ['instrument_' + str(i + 1) for i in range(len(q[0]))]
def set_name_yend_sp(name_y):
    """Return the name of the spatial lag of the dependent variable.
    Parameters
    ----------
    name_y : string
             User provided dependent variable name.
    Returns
    -------
    name_yend_sp : string
                   The dependent variable name prefixed with 'W_'.
    """
    return 'W_{0}'.format(name_y)
def set_name_q_sp(name_x, w_lags, name_q, lag_q, force_all=False):
    """Return the spatial-instrument names: each base name prefixed with
    'W_' for the first lag, then 'W2_', 'W3_', ... for higher lags.
    Parameters
    ----------
    name_x    : list of strings
                User provided exogenous variable names (first entry is the
                constant unless force_all is True).
    w_lags    : int
                User provided number of spatial instrument lags.
    name_q    : list of strings
                External instrument names, appended to the base names when
                lag_q is True.
    lag_q     : boolean
                Whether the external instruments are also lagged.
    force_all : boolean
                When True, keep the first entry of name_x (the constant).
    Returns
    -------
    sp_inst_names : list of strings
    """
    base = list(name_x) if force_all else list(name_x[1:])
    if lag_q:
        base = base + list(name_q)
    sp_inst_names = []
    for lag in range(1, w_lags + 1):
        prefix = 'W_' if lag == 1 else 'W' + str(lag) + '_'
        sp_inst_names.extend(prefix + nm for nm in base)
    return sp_inst_names
def set_name_h(name_x, name_q):
    """Return the full instrument name list: exogenous names followed by
    instrument names, as a new list.
    Parameters
    ----------
    name_x : list of strings
             User provided exogenous variable names.
    name_q : list of strings
             User provided instrument variable names.
    Returns
    -------
    name_h : list of strings
    """
    return list(name_x) + list(name_q)
def set_robust(robust):
    """Return the robust-estimation label, or 'unadjusted' when the user
    passed None. Note: validity of a non-None value is already verified in
    check_robust().
    Parameters
    ----------
    robust : string or None
             Object passed by the user to a regression class
    Returns
    -------
    robust : string
    """
    return robust or 'unadjusted'
def set_name_w(name_w, w):
    """Return the weights-matrix name: the user-provided name when given,
    'unknown' when a weights object exists without a name, and None when
    there is no weights object.
    Parameters
    ----------
    name_w : string
             Name passed in by user. Default is None.
    w      : W object
             pysal W object passed in by user
    Returns
    -------
    name_w : string or None
    """
    # Use identity comparison with None: a W object may define __eq__, so
    # the original "w != None" equality tests could misbehave (and are
    # unidiomatic in any case).
    if w is not None:
        if name_w is not None:
            return name_w
        return 'unknown'
    return None
def set_name_multi(multireg, multi_set, name_multiID, y, x, name_y, name_x, name_ds, title, name_w, robust, endog=False, sp_lag=False):
    """Returns multiple regression objects with generic names.

    Fills in, for each regime r in multi_set, the title and all variable
    names on multireg[r], prefixing each name with the regime identifier.
    Parameters
    ----------
    endog  : tuple
             If the regression object contains endogenous variables, endog must have the
             following parameters in the following order: (yend, q, name_yend, name_q)
    sp_lag : tuple
             If the regression object contains spatial lag, sp_lag must have the
             following parameters in the following order: (w_lags, lag_q)
    """
    # Normalize all base names first (fall back to generic names).
    name_ds = set_name_ds(name_ds)
    name_y = set_name_y(name_y)
    name_x = set_name_x(name_x, x)
    name_multiID = set_name_ds(name_multiID)
    if endog or sp_lag:
        # NOTE(review): endog is indexed even when only sp_lag is set;
        # presumably callers always pass endog together with sp_lag — confirm.
        name_yend = set_name_yend(endog[2], endog[0])
        name_q = set_name_q(endog[3], endog[1])
    for r in multi_set:
        multireg[r].title = title + "%s" % r
        multireg[r].name_ds = name_ds
        multireg[r].robust = set_robust(robust)
        multireg[r].name_w = name_w
        # Per-regime names carry the regime id as a prefix.
        multireg[r].name_y = '%s_%s' % (str(r), name_y)
        multireg[r].name_x = ['%s_%s' % (str(r), i) for i in name_x]
        multireg[r].name_multiID = name_multiID
        if endog or sp_lag:
            multireg[r].name_yend = ['%s_%s' % (str(r), i) for i in name_yend]
            multireg[r].name_q = ['%s_%s' % (str(r), i) for i in name_q]
            if sp_lag:
                # Add the spatially lagged dependent variable and instruments.
                multireg[r].name_yend.append(
                    set_name_yend_sp(multireg[r].name_y))
                multireg[r].name_q.extend(
                    set_name_q_sp(multireg[r].name_x, sp_lag[0], multireg[r].name_q, sp_lag[1]))
            multireg[r].name_z = multireg[r].name_x + multireg[r].name_yend
            multireg[r].name_h = multireg[r].name_x + multireg[r].name_q
    return multireg
def check_arrays(*arrays):
    """Check if the objects passed by a user to a regression class are
    correctly structured. If the user's data is correctly formed this function
    returns nothing, if not then an exception is raised. Note, this does not
    check for model setup, simply the shape and types of the objects.
    Parameters
    ----------
    *arrays : anything
              Objects passed by the user to a regression class; any type
              object can be passed and any number of objects can be passed
    Returns
    -------
    Returns : int
              number of observations
    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> # Extract CRIME column from the dbf file
    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("HOVAL"))
    >>> X = np.array(X).T
    >>> n = check_arrays(y, X)
    >>> print(n)
    49
    """
    rows = []
    for i in arrays:
        # None entries are allowed and simply skipped.
        if i is None:
            continue
        if not isinstance(i, (np.ndarray, csr_matrix)):
            raise Exception("all input data must be either numpy arrays or sparse csr matrices")
        shape = i.shape
        # Each array must be strictly 2-D (n x k).
        if len(shape) > 2:
            raise Exception("all input arrays must have exactly two dimensions")
        if len(shape) == 1:
            raise Exception("all input arrays must have exactly two dimensions")
        if shape[0] < shape[1]:
            raise Exception("one or more input arrays have more columns than rows")
        if not spu.spisfinite(i):
            raise Exception("one or more input arrays have missing/NaN values")
        rows.append(shape[0])
    # All arrays must share the same number of observations.
    if len(set(rows)) > 1:
        raise Exception("arrays not all of same length")
    # NOTE(review): raises IndexError if every argument is None — presumably
    # callers always pass at least one array; confirm.
    return rows[0]
def check_y(y, n):
    """Check if the y object passed by a user to a regression class is
    correctly structured: a 2-D numpy array of shape (n, 1). Returns nothing
    on success; raises an exception otherwise. Note, this does not check for
    model setup, simply the shape and types of the objects.
    Parameters
    ----------
    y : anything
        Object passed by the user to a regression class; any type
        object can be passed
    n : int
        number of observations
    Returns
    -------
    Returns : nothing
              Nothing is returned
    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> # Extract CRIME column from the dbf file
    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))
    >>> check_y(y, 49)
    >>> # should not raise an exception
    """
    if not isinstance(y, np.ndarray):
        print(y.__class__.__name__)
        raise Exception("y must be a numpy array")
    ndim = len(y.shape)
    # Must be strictly 2-D (a 0-d array falls through to the shape check).
    if ndim > 2 or ndim == 1:
        raise Exception("all input arrays must have exactly two dimensions")
    if y.shape != (n, 1):
        raise Exception("y must be a single column array matching the length of other arrays")
def check_weights(w, y, w_required=False):
    """Check if the w parameter passed by the user is a pysal.lib.W object and
    check that its dimensionality matches the y parameter. Note that this
    check is not performed if w is None and not required.
    Parameters
    ----------
    w          : any python object
                 Object passed by the user to a regression class; any type
                 object can be passed
    y          : numpy array
                 Any shape numpy array can be passed. Note: if y passed
                 check_arrays, then it will be valid for this function
    w_required : boolean
                 When True, a missing w raises an exception.
    Returns
    -------
    Returns : nothing
              Nothing is returned
    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> # Extract CRIME column from the dbf file
    >>> y = np.array(db.by_col("CRIME"))
    >>> y = np.reshape(y, (49,1))
    >>> w = pysal.lib.io.open(pysal.lib.examples.get_path("columbus.gal"), 'r').read()
    >>> check_weights(w, y)
    >>> # should not raise an exception
    """
    # Use identity comparison with None: W objects may define rich
    # comparisons, so the original "w != None" / "w == None" equality tests
    # could misbehave (and are unidiomatic).
    if w_required or w is not None:
        if w is None:
            raise Exception("A weights matrix w must be provided to run this method.")
        if not isinstance(w, weights.W):
            from warnings import warn
            warn("w must be API-compatible pysal weights object")
        if w.n != y.shape[0]:
            raise Exception("y must be nx1, and w must be an nxn PySAL W object")
        diag = w.sparse.diagonal()
        # check to make sure all entries equal 0
        if diag.min() != 0:
            raise Exception("All entries on diagonal must equal 0.")
        if diag.max() != 0:
            raise Exception("All entries on diagonal must equal 0.")
def check_robust(robust, wk):
    """Check if the combination of robust and wk parameters passed by the
    user is valid. Note: this does not check if the W object is a valid
    adaptive kernel weights matrix needed for the HAC.
    Parameters
    ----------
    robust : string or None
             Object passed by the user to a regression class
    wk     : any python object
             Object passed by the user to a regression class; any type
             object can be passed
    Returns
    -------
    Returns : nothing
              Nothing is returned
    Examples
    --------
    >>> check_robust('White', None)
    >>> # should not raise an exception
    """
    if not robust:
        return
    kind = robust.lower()
    if kind == 'hac':
        if not isinstance(wk, weights.Kernel):
            raise Exception("HAC requires that wk be a Kernel Weights object")
        diag = wk.sparse.diagonal()
        # All diagonal entries of the kernel weights must equal exactly 1.
        if diag.min() < 1.0:
            print(diag.min())
            raise Exception("All entries on diagonal of kernel weights matrix must equal 1.")
        if diag.max() > 1.0:
            print(diag.max())
            raise Exception("All entries on diagonal of kernel weights matrix must equal 1.")
        # Off-diagonal entries must lie in [0, 1).
        wegt = wk.weights
        for ids in wk.id_order:
            neighbor_vals = wegt[ids]
            if min(neighbor_vals) < 0.0:
                raise Exception("Off-diagonal entries must be greater than or equal to 0.")
            if max(neighbor_vals) > 1.0:
                # NOTE: we are not checking for the case of exactly 1.0 ###
                raise Exception("Off-diagonal entries must be less than 1.")
    elif kind in ('white', 'ogmm'):
        if wk:
            raise Exception("White requires that wk be set to None")
    else:
        raise Exception("invalid value passed to robust, see docs for valid options")
def check_spat_diag(spat_diag, w):
    """Check that a spatial weights object is present when the user requests
    spatial diagnostics; no-op when spat_diag is False.
    Parameters
    ----------
    spat_diag : boolean
                Value passed by a user to a regression class
    w         : any python object
                Object passed by the user to a regression class; any type
                object can be passed
    Returns
    -------
    Returns : nothing
              Nothing is returned
    Examples
    --------
    >>> check_spat_diag(False, None)
    >>> # should not raise an exception
    """
    if spat_diag and not isinstance(w, weights.W):
        raise Exception("w must be a pysal.lib.W object to run spatial diagnostics")
def check_regimes(reg_set, N=None, K=None):
    """Check that there are at least two regimes and, when the problem size
    is known, that each regime has enough observations.
    Parameters
    ----------
    reg_set : list
              List of the regimes IDs
    N       : int, optional
              Number of observations.
    K       : int, optional
              Number of variables.
    Returns
    -------
    Returns : nothing
              Nothing is returned
    """
    if len(reg_set) < 2:
        raise Exception("At least 2 regimes are needed to run regimes methods. Please check your regimes variable.")
    # BUG FIX: the original dereferenced N and K unconditionally, raising a
    # TypeError when called with the default None values. Only run the
    # observations-per-regime check when both are supplied.
    if N is not None and K is not None:
        if 1.0 * N / len(reg_set) < K + 1:
            raise Exception("There aren't enough observations for the given number of regimes and variables. Please check your regimes variable.")
def check_constant(x):
    """Check that the X matrix does not already contain a constant column,
    then return a copy of X with a column of ones prepended.
    Parameters
    ----------
    x : array
        Value passed by a user to a regression class
    Returns
    -------
    Returns : array
              x with a constant column of ones prepended.
    Examples
    --------
    >>> import numpy as np
    >>> import pysal.lib
    >>> db = pysal.lib.io.open(pysal.lib.examples.get_path('columbus.dbf'),'r')
    >>> X = []
    >>> X.append(db.by_col("INC"))
    >>> X.append(db.by_col("HOVAL"))
    >>> X = np.array(X).T
    >>> x_constant = check_constant(X)
    >>> x_constant.shape
    (49, 3)
    """
    if diagnostics.constant_check(x):
        # A user-supplied constant would be collinear with the one added here.
        raise Exception("x array cannot contain a constant vector; constant will be added automatically")
    else:
        x_constant = COPY.copy(x)
        # sphstack handles both dense and sparse inputs.
        return spu.sphstack(np.ones((x_constant.shape[0], 1)), x_constant)
def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()


if __name__ == '__main__':
    _test()
|
lixun910/pysal
|
pysal/model/spreg/user_output.py
|
Python
|
bsd-3-clause
| 18,352
|
[
"COLUMBUS"
] |
150b747e7469b942a7f89dab493c94afdce4cecad043e8e4ab10b0f8616e43fb
|
# ***************************************************************************
# *
# * Copyright (C) 2013-2016 University of Dundee
# * All rights reserved.
# *
# * This file is part of SAMoS (Soft Active Matter on Surfaces) program.
# *
# * SAMoS is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2 of the License, or
# * (at your option) any later version.
# *
# * SAMoS is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# *
# *****************************************************************************
from Configuration import *
from Tesselation import *
import vtk
from glob import glob
from datetime import *
class Writer:
    # Python 2 module (uses print statements): writes SAMoS particle
    # configurations, topological defects and tesselation patches to VTK
    # .vtp files for visualization (e.g. in ParaView).

    def __init__(self,nematic=False,alpha=0,connected=False):
        # nematic: emit head/tail half-directors instead of full directors.
        # alpha: contractility coefficient used by writePatches.
        # connected: also write convex-hull edges between particles.
        self.nematic=nematic
        self.connected=connected
        self.alpha=alpha

    def writeConfigurationVTK(self,conf,outfile):
        """Write one particle configuration (positions, radii, types,
        velocities, directors) to the .vtp file *outfile*."""
        # Data which goes into file: positions, directors, velocities
        # radii
        r = conf.radius
        # types
        tp = conf.ptype
        # positions
        x = conf.rval[:,0]
        y = conf.rval[:,1]
        z = conf.rval[:,2]
        # directors
        nx = conf.nval[:,0]
        ny = conf.nval[:,1]
        nz = conf.nval[:,2]
        # velocities
        vx = conf.vval[:,0]
        vy = conf.vval[:,1]
        vz = conf.vval[:,2]
        # Preparing the vtk structures
        Points = vtk.vtkPoints()
        Radii = vtk.vtkDoubleArray()
        Radii.SetNumberOfComponents(1)
        Radii.SetName('Radius')
        Type = vtk.vtkDoubleArray()
        Type.SetNumberOfComponents(1)
        Type.SetName('Type')
        Velocities = vtk.vtkDoubleArray()
        Velocities.SetNumberOfComponents(3)
        Velocities.SetName("Velocity")
        Directors = vtk.vtkDoubleArray()
        Directors.SetNumberOfComponents(3)
        Directors.SetName("Directors")
        if self.nematic:
            NDirectors = vtk.vtkDoubleArray()
            NDirectors.SetNumberOfComponents(3)
            NDirectors.SetName("NDirectors")
        # Adding the data to the vtk structures
        for (xx,yy,zz,rr,tt) in zip(x,y,z,r,tp):
            Points.InsertNextPoint(xx,yy,zz)
            Radii.InsertNextValue(rr)
            Type.InsertNextValue(tt)
        for (vvx,vvy,vvz) in zip(vx,vy,vz):
            Velocities.InsertNextTuple3(vvx,vvy,vvz)
        for (nnx,nny,nnz) in zip(nx,ny,nz):
            if self.nematic:
                # Nematic: split each director into +/- half-vectors so the
                # glyph has no head/tail distinction.
                Directors.InsertNextTuple3(0.5*nnx,0.5*nny,0.5*nnz)
                NDirectors.InsertNextTuple3(-0.5*nnx,-0.5*nny,-0.5*nnz)
            else:
                Directors.InsertNextTuple3(nnx,nny,nnz)
        # Connected, using convex hull (? ask Rastko ...?)
        if self.connected:
            Lines = vtk.vtkCellArray()
            Line = vtk.vtkLine()
            points = np.column_stack((x,y,z))
            hull = ConvexHull(points)
            # Collect unique edges from the hull's triangular facets.
            edges = []
            for h in hull.simplices:
                i, j, k = h
                if not sorted([i,j]) in edges: edges.append(sorted([i,j]))
                if not sorted([i,k]) in edges: edges.append(sorted([i,k]))
                if not sorted([j,k]) in edges: edges.append(sorted([j,k]))
            for (i,j) in edges:
                Line.GetPointIds().SetId(0,i)
                Line.GetPointIds().SetId(1,j)
                Lines.InsertNextCell(Line)
        # Putting the results into a polydata structure
        polydata = vtk.vtkPolyData()
        polydata.SetPoints(Points)
        if self.connected:
            polydata.SetLines(Lines)
        polydata.GetPointData().AddArray(Radii)
        polydata.GetPointData().AddArray(Type)
        polydata.GetPointData().AddArray(Velocities)
        polydata.GetPointData().AddArray(Directors)
        if self.nematic:
            polydata.GetPointData().AddArray(NDirectors)
        polydata.Modified()
        # Finally, output via binary writer
        writer = vtk.vtkXMLPolyDataWriter()
        #outname = '.'.join(f.split('.')[:-1])
        writer.SetFileName(outfile)
        # VTK 5 and 6+ use different input-setting APIs.
        if vtk.VTK_MAJOR_VERSION <= 5:
            writer.SetInput(polydata)
        else:
            writer.SetInputData(polydata)
        #writer.SetDataModeToAscii()
        writer.SetDataModeToBinary()
        writer.SetCompressorTypeToZLib()
        writer.Write()

    def writeDefects(self,defects, numdefect,outfile):
        """Write defect positions and charges to the .vtp file *outfile*.
        Each entry of *defects* is (charge, x, y, z)."""
        # Preparing the vtp output
        # Create point structure in vtk
        Points = vtk.vtkPoints()
        print "Created Points"
        Charge = vtk.vtkDoubleArray()
        Charge.SetNumberOfComponents(1)
        Charge.SetName('Charge')
        for u in range(numdefect):
            Points.InsertNextPoint(defects[u][1],defects[u][2],defects[u][3])
            Charge.InsertNextValue(defects[u][0])
        #lines = vtk.vtkCellArray()
        #line = vtk.vtkLine()
        #for i in range(numdefect_n):
            #line = vtk.vtkLine()
            #line.GetPointIds().SetId(0,0)
            #line.GetPointIds().SetId(1,i+1)
            #lines.InsertNextCell(line)
        #for i in range(numdefect_v):
            #line = vtk.vtkLine()
            #line.GetPointIds().SetId(0,0)
            #line.GetPointIds().SetId(1,numdefect_n+i+1)
            #lines.InsertNextCell(line)
        #print "Added lines"
        polydata = vtk.vtkPolyData()
        polydata.SetPoints(Points)
        #polydata.SetLines(lines)
        polydata.GetPointData().AddArray(Charge)
        print "Finished Polydata"
        polydata.Modified()
        writer = vtk.vtkXMLPolyDataWriter()
        writer.SetFileName(outfile)
        # Python 2.7 vs. 3 incompatibility?
        if vtk.VTK_MAJOR_VERSION <= 5:
            writer.SetInput(polydata)
        else:
            writer.SetInputData(polydata)
        #writer.SetDataModeToAscii()
        writer.SetDataModeToBinary()
        writer.SetCompressorTypeToZLib()
        writer.Write()
        print "Wrote File"

    def writePatches(self,tess,outname,contractile=False):
        """Write the tesselation patches (one polygon per particle, built
        from its loop centers) plus per-patch data to *outname*."""
        print outname
        points = vtk.vtkPoints()
        polygons = vtk.vtkCellArray()
        v=0
        polygon = vtk.vtkPolygon()
        havePoly=[]
        for k in range(len(tess.ParList)):
            nedge=len(tess.ParList[k])
            # Patches with fewer than 2 edges are degenerate and skipped.
            if nedge<2:
                huh=0
                #print nedge
                #print k
                #print tess.ParList[k]
            else:
                havePoly.append(k)
                #for k in range(300):
                # Create the points of the polygon: the loop centers
                polygon = vtk.vtkPolygon()
                for l in tess.ParList[k]:
                    if tess.geom.periodic:
                        # Unwrap loop centers across periodic boundaries
                        # relative to the particle position.
                        dl=tess.geom.ApplyPeriodic11(tess.rval[k,:],tess.LoopCen[l])
                        dl+=tess.rval[k,:]
                        points.InsertNextPoint(dl[0],dl[1],dl[2])
                    else:
                        points.InsertNextPoint(tess.LoopCen[l][0],tess.LoopCen[l][1],tess.LoopCen[l][2])
                polygon.GetPointIds().SetNumberOfIds(nedge)
                for l in range(nedge):
                    #print l
                    polygon.GetPointIds().SetId(l,v+l)
                polygons.InsertNextCell(polygon)
                # v tracks the running global point index offset.
                v+=nedge
        # Create the matching polydata
        polygonPolyData = vtk.vtkPolyData()
        polygonPolyData.SetPoints(points)
        polygonPolyData.SetPolys(polygons)
        # Add stresses ... (best effort; ignored if unavailable)
        try:
            eng, press,ncon,stress = tess.conf.compute_energy_and_pressure()
        except:
            pass
        # NOTE(review): this unconditionally overrides the contractile
        # parameter, making the block below dead code — confirm intent.
        contractile = False
        if contractile:
            print "Are we actually going here??"
            press_c=tess.computeContractile(self.alpha)
            print press_c
            print np.mean(press_c)
            print np.std(press_c)
            print np.min(press_c)
            print np.max(press_c)
            press+=press_c
        #print press
        #print np.mean(press)
        #print np.std(press)
        #print np.min(press)
        #print np.max(press)
        #pressure = vtk.vtkDoubleArray()
        #pressure.SetNumberOfComponents(1)
        #pressure.SetName('Pressure')
        #for k in havePoly:
            #pressure.InsertNextValue(press[k])
        #polygonPolyData.GetCellData().AddArray(pressure)
        # Add contact number (coordination) per patch
        ncon = vtk.vtkDoubleArray()
        ncon.SetNumberOfComponents(1)
        ncon.SetName('Z')
        for k in havePoly:
            ncon.InsertNextValue(len(tess.ParList[k]))
        polygonPolyData.GetCellData().AddArray(ncon)
        ## Add type
        #ptype = vtk.vtkDoubleArray()
        #ptype.SetNumberOfComponents(1)
        #ptype.SetName('Type')
        #for k in havePoly:
            #ptype.InsertNextValue(tess.conf.ptype[k])
        #polygonPolyData.GetCellData().AddArray(ptype)
        # Add density (inverse patch area; on a sphere, relative to the
        # mean area per particle, clamped for tiny patches)
        tess.ComputePatchArea()
        density = vtk.vtkDoubleArray()
        density.SetNumberOfComponents(1)
        density.SetName('Density')
        for k in havePoly:
            if tess.conf.geom.manifold=='sphere':
                N = tess.conf.N
                R = tess.conf.geom.R
                A0 = 4.0*np.pi*R**2/N
                if tess.area[k] < 0.1*A0:
                    density.InsertNextValue(10.0)
                else:
                    density.InsertNextValue(A0/tess.area[k])
            else:
                density.InsertNextValue(1.0/tess.area[k])
        polygonPolyData.GetCellData().AddArray(density)
        writer = vtk.vtkXMLPolyDataWriter()
        writer.SetFileName(outname)
        if vtk.VTK_MAJOR_VERSION <= 5:
            writer.SetInput(polygonPolyData)
        else:
            writer.SetInputData(polygonPolyData)
        #writer.SetDataModeToAscii()
        writer.SetDataModeToBinary()
        writer.SetCompressorTypeToZLib()
        writer.Write()
|
sknepneklab/SAMoS
|
utils/Writer.py
|
Python
|
gpl-3.0
| 8,769
|
[
"VTK"
] |
378a5322ef4615ee907fceada7291fb94df00bafbc7159f173f2412bb87f76b9
|
from kinko.nodes import Symbol, Tuple, String, Number, Keyword, Dict, List
from kinko.nodes import Placeholder, NodeVisitor
from kinko.errors import Errors
from kinko.parser import parser, parse as _parse, ParseError, DICT_ERROR
from kinko.parser import IMPLICIT_TUPLE_ERROR, EXPLICIT_TUPLE_ERROR
from .base import NODE_EQ_PATCHER
from .test_tokenizer import tokenize
class LocationChecker(NodeVisitor):
    # Visitor that asserts every node in a parsed tree carries source
    # location information.
    def visit(self, node):
        assert node.location
        super(LocationChecker, self).visit(node)
def parse(src):
    """Tokenize and parse *src*, asserting every node has a location."""
    tree = _parse(list(tokenize(src)))
    LocationChecker().visit(tree)
    return tree
def parse_raw(src):
    """Parse *src* with the raw (un-postprocessed) parser, checking locations."""
    tree = parser().parse(list(tokenize(src)))
    LocationChecker().visit(tree)
    return tree
def check_parse(src, node):
    """Assert that parsing *src* yields a tree equal to *node*."""
    parsed = parse(src)
    # NODE_EQ_PATCHER makes node equality structural for the comparison.
    with NODE_EQ_PATCHER:
        assert parsed == node
def check_location(src, node, start, end, fragment):
    """Assert that *node* spans offsets [start, end) of *src* and that the
    spanned text equals *fragment*."""
    location = node.location
    assert location
    assert location.start.offset == start
    assert location.end.offset == end
    assert src[start:end] == fragment
def check_error(src, msg, start, end, fragment):
    """Parse *src* expecting a ParseError, then assert the single recorded
    error starts with *msg* and spans [start, end) == *fragment*."""
    errors = Errors()
    raised = False
    try:
        _parse(list(tokenize(src)), errors)
    except ParseError:
        raised = True
    if not raised:
        raise AssertionError('Error not raised')
    # Exactly one error must have been recorded.
    error, = errors.list
    assert error.message.startswith(msg)
    span = (error.location.start.offset, error.location.end.offset)
    assert span == (start, end)
    assert src[start:end] == fragment
def test_symbol():
    """Symbol ns/rel/name attributes; dotted access desugars to (get ...)."""
    assert Symbol('foo').ns is None
    assert Symbol('foo').rel == 'foo'
    assert Symbol('foo').name == 'foo'
    assert Symbol('./foo').ns == '.'
    assert Symbol('./foo').rel == 'foo'
    assert Symbol('./foo').name == './foo'
    assert Symbol('foo/bar').ns == 'foo'
    assert Symbol('foo/bar').rel == 'bar'
    assert Symbol('foo/bar').name == 'foo/bar'
    # Each extra ".attr" wraps the previous expression in another (get ...).
    check_parse(
        'print foo foo.bar foo.bar.baz ./foo',
        List([
            Tuple([Symbol('print'),
                   Symbol('foo'),
                   Tuple([Symbol('get'),
                          Symbol('foo'), Symbol('bar')]),
                   Tuple([Symbol('get'),
                          Tuple([Symbol('get'),
                                 Symbol('foo'), Symbol('bar')]),
                          Symbol('baz')]),
                   Symbol('./foo')]),
        ]),
    )
def test_placeholder():
    """#name placeholders; dotted access desugars to nested (get ...)."""
    check_parse(
        'print #foo #foo.bar #foo.bar.baz',
        List([
            Tuple([Symbol('print'),
                   Placeholder('foo'),
                   Tuple([Symbol('get'),
                          Placeholder('foo'), Symbol('bar')]),
                   Tuple([Symbol('get'),
                          Tuple([Symbol('get'),
                                 Placeholder('foo'), Symbol('bar')]),
                          Symbol('baz')])]),
        ]),
    )
def test_implicit_tuple():
    """A bare line becomes an implicit tuple call with its arguments."""
    check_parse(
        'foo :bar 5 "baz"',
        List([
            Tuple([Symbol('foo'),
                   Keyword('bar'), Number(5), String('baz')]),
        ]),
    )
def test_explicit_tuple():
    """Parenthesized expressions parse as nested explicit tuples."""
    check_parse(
        'foo (bar 5) "baz"',
        List([
            Tuple([Symbol('foo'), Tuple([Symbol('bar'), Number(5)]),
                   String('baz')]),
        ]),
    )
def test_list():
    """Square brackets parse as a List of heterogeneous items."""
    check_parse(
        'foo [:k1 v1 1 (foo 2)]',
        List([
            Tuple([Symbol('foo'),
                   List([Keyword('k1'),
                         Symbol('v1'),
                         Number(1),
                         Tuple([Symbol('foo'), Number(2)])])]),
        ]),
    )
def test_dict():
    """Curly braces parse as a Dict of alternating keyword/value items."""
    check_parse(
        'foo {:k1 v1 :k2 (v2 3)}',
        List([
            Tuple([Symbol('foo'),
                   Dict([Keyword('k1'), Symbol('v1'),
                         Keyword('k2'), Tuple([Symbol('v2'),
                                               Number(3)])])]),
        ]),
    )
def test_indent():
    """An indented line becomes the argument of the line above; several
    indented siblings are wrapped in (join [...]).

    NOTE(review): leading whitespace inside the triple-quoted source
    literals was lost in this copy; the indentation below is reconstructed
    from the expected trees -- verify against upstream.
    """
    # Single indented child: direct argument.
    check_parse(
        """
        foo
          "bar"
        """,
        List([
            Tuple([Symbol('foo'), String('bar')]),
        ]),
    )
    # Multiple indented children: joined into one argument.
    check_parse(
        """
        foo
          "bar"
          5
          "baz"
        """,
        List([
            Tuple([Symbol('foo'),
                   Tuple([Symbol('join'),
                          List([String('bar'), Number(5),
                                String('baz')])])]),
        ]),
    )
def test_nested_indent():
    """Nested indentation builds nested tuples, joined at each level.

    NOTE(review): the indentation inside the source literal was lost in
    this copy and is reconstructed from the expected tree -- verify
    against upstream.
    """
    check_parse(
        """
        foo
          bar
            1
          baz
            2
        """,
        List([
            Tuple([Symbol('foo'),
                   Tuple([Symbol('join'),
                          List([Tuple([Symbol('bar'), Number(1)]),
                                Tuple([Symbol('baz'), Number(2)])])])]),
        ]),
    )
def test_indented_keywords():
    """Keyword arguments may continue on indented lines; a keyword's value
    may itself be an indented block.

    NOTE(review): the indentation inside the source literal was lost in
    this copy and is reconstructed from the expected tree -- verify
    against upstream.
    """
    check_parse(
        """
        foo :k1 v1
          :k2 v2
          :k3
            v3
        """,
        List([
            Tuple([Symbol('foo'),
                   Keyword('k1'), Symbol('v1'),
                   Keyword('k2'), Symbol('v2'),
                   Keyword('k3'), Tuple([Symbol('v3')])]),
        ]),
    )
def test_mixed_indented_arguments():
    """Indented keywords and indented positional lines may be mixed; the
    trailing positional lines are joined.

    NOTE(review): the indentation inside the source literal was lost in
    this copy and is reconstructed from the expected tree -- verify
    against upstream.
    """
    check_parse(
        """
        foo :k1 v1
          :k2 v2
          :k3
            v3
          v4
          v5
        """,
        List([
            Tuple([Symbol('foo'),
                   Keyword('k1'), Symbol('v1'),
                   Keyword('k2'), Symbol('v2'),
                   Keyword('k3'), Tuple([Symbol('v3')]),
                   Tuple([Symbol('join'),
                          List([Tuple([Symbol('v4')]),
                                Tuple([Symbol('v5')])])])]),
        ]),
    )
def test_symbol_location():
    """Symbols carry exact source offsets.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indent is reconstructed from the asserted offsets
    ('d' at offset 8 requires 'a b.c\\n' plus two spaces).
    """
    src = 'a b.c\n  d e.f'
    a, bc, _d = parse_raw(src).values[0].values
    d, ef = _d.values
    check_location(src, a, 0, 1, 'a')
    check_location(src, bc, 2, 5, 'b.c')
    check_location(src, d, 8, 9, 'd')
    check_location(src, ef, 10, 13, 'e.f')
def test_string_location():
    """String literals carry exact source offsets.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indent is reconstructed from the asserted offsets
    ('"d"' at 10..13 requires 'a "b"\\n' plus two spaces).
    """
    src = 'a "b"\n  c "d"'
    a, b, _c = parse_raw(src).values[0].values
    c, d = _c.values
    check_location(src, b, 2, 5, '"b"')
    check_location(src, d, 10, 13, '"d"')
def test_number_location():
    """Numbers (int and float) carry exact source offsets.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indent is reconstructed from the asserted offsets
    ('4.56' at 10..14 requires 'a 123\\n' plus two spaces).
    """
    src = 'a 123\n  c 4.56'
    a, n123, _c = parse_raw(src).values[0].values
    c, n456 = _c.values
    check_location(src, n123, 2, 5, '123')
    check_location(src, n456, 10, 14, '4.56')
def test_keyword_location():
    """Keywords carry exact source offsets.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indent is reconstructed from the asserted offsets
    (':d' at 11..13 requires 'a :b 1\\n' plus two spaces).
    """
    src = 'a :b 1\n  c :d 2'
    a, b, _, _c = parse_raw(src).values[0].values
    c, d, _ = _c.values
    check_location(src, b, 2, 4, ':b')
    check_location(src, d, 11, 13, ':d')
def test_placeholder_location():
    """Placeholders carry exact source offsets.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indent is reconstructed from the asserted offsets
    ('#d.e' at 9..13 requires 'a #b\\n' plus two spaces).
    """
    src = 'a #b\n  c #d.e'
    a, b, _c = parse_raw(src).values[0].values
    c, de = _c.values
    check_location(src, b, 2, 4, '#b')
    check_location(src, de, 9, 13, '#d.e')
def test_implicit_tuple_location():
    """Implicit tuples span from their head symbol to their last child.

    NOTE(review): spaces inside *src* and the fragments were collapsed in
    this copy; the indentation is reconstructed from the asserted offsets
    (total length 22, 'c' at 6, 'e' at 21).
    """
    src = 'a b\n  c\n    :d\n      e'
    fn_a = parse_raw(src).values[0]
    _, _, fn_c = fn_a.values
    _, _, fn_e = fn_c.values
    check_location(src, fn_a, 0, 22, 'a b\n  c\n    :d\n      e')
    check_location(src, fn_c, 6, 22, 'c\n    :d\n      e')
    check_location(src, fn_e, 21, 22, 'e')
def test_explicit_tuple_location():
    """Explicit tuples span their parentheses inclusively."""
    src = 'a (b c)'
    fn_a = parse_raw(src).values[0]
    _, fn_b = fn_a.values
    check_location(src, fn_a, 0, 7, 'a (b c)')
    check_location(src, fn_b, 2, 7, '(b c)')
def test_list_location():
    """Lists span their square brackets inclusively."""
    src = 'a [b c]'
    fn_a = parse_raw(src).values[0]
    _, l = fn_a.values
    check_location(src, fn_a, 0, 7, 'a [b c]')
    check_location(src, l, 2, 7, '[b c]')
def test_dict_location():
    """Dicts span their curly braces inclusively."""
    src = 'a {:b c}'
    fn_a = parse_raw(src).values[0]
    _, d = fn_a.values
    check_location(src, fn_a, 0, 8, 'a {:b c}')
    check_location(src, d, 2, 8, '{:b c}')
def test_join_location():
    """The synthesized (join ...) node spans the joined lines; the 'join'
    symbol itself gets a zero-width location.

    NOTE(review): spaces inside *src* were collapsed in this copy; the
    two-space indents are reconstructed from the asserted offsets
    ('c 1' at 6..9, 'd 2' at 12..15).
    """
    src = 'a b\n  c 1\n  d 2'
    fn_a = parse_raw(src).values[0]
    _, _, j_fn = fn_a.values
    j, l = j_fn.values
    c_fn, d_fn = l.values
    check_location(src, j_fn, 6, 15, 'c 1\n  d 2')
    check_location(src, j, 6, 6, '')
    check_location(src, l, 6, 15, 'c 1\n  d 2')
    check_location(src, c_fn, 6, 9, 'c 1')
    check_location(src, d_fn, 12, 15, 'd 2')
def test_invalid_indent():
    """Deeper indentation than the previous child line is an error on the
    extra indent.

    NOTE(review): spaces inside the source literal and the expected
    fragment were collapsed in this copy; reconstructed so that
    src[10:12] is the two extra indent spaces -- verify against upstream.
    """
    check_error('a\n  "b"\n    "c"',
                IMPLICIT_TUPLE_ERROR, 10, 12, '  ')
def test_implicit_tuple_error():
    """An implicit tuple may not start with a non-symbol (here: a number)."""
    check_error('foo\n1\nbar',
                IMPLICIT_TUPLE_ERROR, 4, 5, '1')
def test_explicit_tuple_error():
    """An explicit tuple may not start with a non-symbol (here: a number)."""
    check_error('foo (1 2 3)',
                EXPLICIT_TUPLE_ERROR, 5, 6, '1')
def test_dict_literal_error():
    """Dict literals require keyword keys; a bare number is rejected."""
    check_error('foo {1 2}', DICT_ERROR, 5, 6, '1')
|
vmagamedov/kinko
|
tests/test_parser.py
|
Python
|
bsd-3-clause
| 8,627
|
[
"VisIt"
] |
f3c079755b0deb5a95e314c3be802a2a3bec1fbdc720309eb153f90f6e0a6277
|
########################################################################
#
# File: cmdline.py
# Author: Alex Samuel
# Date: 2001-03-16
#
# Contents:
# QMTest command processing
#
# Copyright (c) 2001, 2002, 2003 by CodeSourcery, LLC. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
########################################################################
# Imports
########################################################################
import base
import database
import os
import qm
import qm.attachment
import qm.cmdline
import qm.platform
from qm.extension import get_extension_class_name, get_class_description
from qm.test import test
from qm.test.result import Result
from qm.test.context import *
from qm.test.execution_engine import *
from qm.test.result_stream import ResultStream
from qm.test.runnable import Runnable
from qm.test.suite import Suite
from qm.test.report import ReportGenerator
from qm.test.classes.dir_run_database import *
from qm.test.expectation_database import ExpectationDatabase
from qm.test.classes.previous_testrun import PreviousTestRun
from qm.trace import *
from qm.test.web.web import QMTestServer
import qm.structured_text
import qm.xmlutil
import Queue
import random
from result import *
import signal
import string
import sys
import xml.sax
########################################################################
# Variables
########################################################################
# Assigned by QMTest.__init__ when an instance is constructed.
_the_qmtest = None
"""The global 'QMTest' object."""
########################################################################
# Functions
########################################################################
def _make_comma_separated_string (items, conjunction):
"""Return a string consisting of the 'items', separated by commas.
'items' -- A list of strings giving the items in the list.
'conjunction' -- A string to use before the final item, if there is
more than one.
returns -- A string consisting all of the 'items', separated by
commas, and with the 'conjunction' before the final item."""
s = ""
need_comma = 0
# Go through almost all of the items, adding them to the
# comma-separated list.
for i in items[:-1]:
# Add a comma if this isn't the first item in the list.
if need_comma:
s += ", "
else:
need_comma = 1
# Add this item.
s += "'%s'" % i
# The last item is special, because we need to include the "or".
if items:
i = items[-1]
if need_comma:
s += ", %s " % conjunction
s += "'%s'" % i
return s
########################################################################
# Classes
########################################################################
class QMTest:
"""An instance of QMTest."""
__extension_kinds_string \
= _make_comma_separated_string(base.extension_kinds, "or")
"""A string listing the available extension kinds."""
db_path_environment_variable = "QMTEST_DB_PATH"
"""The environment variable specifying the test database path."""
summary_formats = ("brief", "full", "stats", "batch", "none")
"""Valid formats for result summaries."""
context_file_name = "context"
"""The default name of a context file."""
expectations_file_name = "expectations.qmr"
"""The default name of a file containing expectations."""
results_file_name = "results.qmr"
"""The default name of a file containing results."""
target_file_name = "targets"
"""The default name of a file containing targets."""
help_option_spec = (
"h",
"help",
None,
"Display usage summary."
)
version_option_spec = (
None,
"version",
None,
"Display version information."
)
db_path_option_spec = (
"D",
"tdb",
"PATH",
"Path to the test database."
)
extension_output_option_spec = (
"o",
"output",
"FILE",
"Write the extension to FILE.",
)
extension_id_option_spec = (
"i",
"id",
"NAME",
"Write the extension to the database as NAME.",
)
output_option_spec = (
"o",
"output",
"FILE",
"Write test results to FILE (- for stdout)."
)
no_output_option_spec = (
None,
"no-output",
None,
"Don't generate test results."
)
outcomes_option_spec = (
"O",
"outcomes",
"FILE",
"Use expected outcomes in FILE."
)
expectations_option_spec = (
"e",
"expectations",
"FILE",
"Use expectations in FILE."
)
context_option_spec = (
"c",
"context",
"KEY=VALUE",
"Add or override a context property."
)
context_file_spec = (
"C",
"load-context",
"FILE",
"Read context from a file (- for stdin)."
)
daemon_option_spec = (
None,
"daemon",
None,
"Run as a daemon."
)
port_option_spec = (
"P",
"port",
"PORT",
"Server port number."
)
address_option_spec = (
"A",
"address",
"ADDRESS",
"Local address."
)
log_file_option_spec = (
None,
"log-file",
"PATH",
"Log file name."
)
no_browser_option_spec = (
None,
"no-browser",
None,
"Do not open a new browser window."
)
pid_file_option_spec = (
None,
"pid-file",
"PATH",
"Process ID file name."
)
concurrent_option_spec = (
"j",
"concurrency",
"COUNT",
"Execute tests in COUNT concurrent threads."
)
targets_option_spec = (
"T",
"targets",
"FILE",
"Use FILE as the target specification file."
)
random_option_spec = (
None,
"random",
None,
"Run the tests in a random order."
)
rerun_option_spec = (
None,
"rerun",
"FILE",
"Rerun the tests that failed."
)
seed_option_spec = (
None,
"seed",
"INTEGER",
"Seed the random number generator."
)
format_option_spec = (
"f",
"format",
"FORMAT",
"Specify the summary format."
)
result_stream_spec = (
None,
"result-stream",
"CLASS-NAME",
"Specify the results file format."
)
annotation_option_spec = (
"a",
"annotate",
"NAME=VALUE",
"Set an additional annotation to be written to the result stream(s)."
)
tdb_class_option_spec = (
"c",
"class",
"CLASS-NAME",
"Specify the test database class.",
)
attribute_option_spec = (
"a",
"attribute",
"NAME",
"Get an attribute of the extension class."
)
set_attribute_option_spec = (
"a",
"attribute",
"KEY=VALUE",
"Set an attribute of the extension class."
)
extension_kind_option_spec = (
"k",
"kind",
"EXTENSION-KIND",
"Specify the kind of extension class."
)
report_output_option_spec = (
"o",
"output",
"FILE",
"Write test report to FILE (- for stdout)."
)
report_flat_option_spec = (
"f",
"flat",
None,
"""Generate a flat listing of test results, instead of reproducing the
database directory tree in the report."""
)
results_option_spec = (
"R",
"results",
"DIRECTORY",
"Read in all results (*.qmr) files from DIRECTORY."
)
list_long_option_spec = (
"l",
"long",
None,
"Use a detailed output format."
)
list_details_option_spec = (
"d",
"details",
None,
"Display details for individual items."
)
list_recursive_option_spec = (
"R",
"recursive",
None,
"Recursively list the contents of directories."
)
# Groups of options that should not be used together.
conflicting_option_specs = (
( output_option_spec, no_output_option_spec ),
( concurrent_option_spec, targets_option_spec ),
( extension_output_option_spec, extension_id_option_spec ),
( expectations_option_spec, outcomes_option_spec ),
)
global_options_spec = [
help_option_spec,
version_option_spec,
db_path_option_spec,
]
commands_spec = [
("create",
"Create (or update) an extension.",
"EXTENSION-KIND CLASS-NAME(ATTR1 = 'VAL1', ATTR2 = 'VAL2', ...)",
"""Create (or update) an extension.
The EXTENSION-KIND indicates what kind of extension to
create; it must be one of """ + __extension_kinds_string + """.
The CLASS-NAME indicates the name of the extension class, or
the name of an existing extension object. If the CLASS-NAME
is the name of a extension in the test database, then the
In the former case, it must have the form 'MODULE.CLASS'. For
a list of available extension classes use "qmtest extensions".
If the extension class takes arguments, those arguments can be
specified after the CLASS-NAME as show above. In the latter
case,
Any "--attribute" options are processed before the arguments
specified after the class name. Therefore, the "--attribute"
options can be overridden by the arguments provided after the
CLASS-NAME. If no attributes are specified, the parentheses
following the 'CLASS-NAME' can be omitted.
If the "--id" option is given, the extension is written to the
database. Otherwise, if the "--output" option is given, the
extension is written as XML to the file indicated. If neither
option is given, the extension is written as XML to the
standard output.""",
( set_attribute_option_spec,
help_option_spec,
extension_id_option_spec,
extension_output_option_spec
),
),
("create-target",
"Create (or update) a target specification.",
"NAME CLASS [ GROUP ]",
"Create (or update) a target specification.",
( set_attribute_option_spec,
help_option_spec,
targets_option_spec
)
),
("create-tdb",
"Create a new test database.",
"",
"Create a new test database.",
( help_option_spec,
tdb_class_option_spec,
set_attribute_option_spec)
),
("gui",
"Start the QMTest GUI.",
"",
"Start the QMTest graphical user interface.",
(
address_option_spec,
concurrent_option_spec,
context_file_spec,
context_option_spec,
daemon_option_spec,
help_option_spec,
log_file_option_spec,
no_browser_option_spec,
pid_file_option_spec,
port_option_spec,
outcomes_option_spec,
targets_option_spec,
results_option_spec
)
),
("extensions",
"List extension classes.",
"",
"""
List the available extension classes.
Use the '--kind' option to limit the classes displayed to test classes,
resource classes, etc. The parameter to '--kind' can be one of """ + \
__extension_kinds_string + "\n",
(
extension_kind_option_spec,
help_option_spec,
)
),
("describe",
"Describe an extension.",
"EXTENSION-KIND NAME",
"""Display details for the specified extension.""",
(
attribute_option_spec,
list_long_option_spec,
help_option_spec,
)
),
("help",
"Display usage summary.",
"",
"Display usage summary.",
()
),
("ls",
"List database contents.",
"[ NAME ... ]",
"""
List items stored in the database.
If no arguments are provided, the contents of the root
directory of the database are displayed. Otherwise, each of
the database is searched for each of the NAMEs. If the item
found is a directory then the contents of the directory are
displayed.
""",
(
help_option_spec,
list_long_option_spec,
list_details_option_spec,
list_recursive_option_spec,
),
),
("register",
"Register an extension class.",
"KIND CLASS",
"""
Register an extension class with QMTest. KIND is the kind of extension
class to register; it must be one of """ + __extension_kinds_string + """
The CLASS gives the name of the class in the form 'module.class'.
QMTest will search the available extension class directories to find the
new CLASS. QMTest looks for files whose basename is the module name and
whose extension is either '.py', '.pyc', or '.pyo'.
QMTest will then attempt to load the extension class. If the extension
class cannot be loaded, QMTest will issue an error message to help you
debug the problem. Otherwise, QMTest will update the 'classes.qmc' file
in the directory containing the module to mention your new extension class.
""",
(help_option_spec,)
),
("remote",
"Run QMTest as a remote server.",
"",
"""
Runs QMTest as a remote server. This mode is only used by QMTest
itself when distributing tests across multiple machines. Users
should not directly invoke QMTest with this option.
""",
(help_option_spec,)
),
("report",
"Generate report from one or more test results.",
"[ result [-e expected] ]+",
"""
Generates a test report. The arguments are result files each optionally
followed by '-e' and an expectation file. This command attempts to reproduce
the test database structure, and thus requires the '--tdb' option. To generate
a flat test report specify the '--flat' option.
""",
(help_option_spec,
report_output_option_spec,
report_flat_option_spec)
),
("run",
"Run one or more tests.",
"[ ID ... ]",
"""
Runs tests. Optionally, generates a summary of the test run and a
record of complete test results. You may specify test IDs and test
suite IDs to run; omit arguments to run the entire test database.
Test results are written to "results.qmr". Use the '--output' option to
specify a different output file, or '--no-output' to supress results.
Use the '--format' option to specify the output format for the summary.
Valid formats are %s.
""" % _make_comma_separated_string(summary_formats, "and"),
(
annotation_option_spec,
concurrent_option_spec,
context_file_spec,
context_option_spec,
format_option_spec,
help_option_spec,
no_output_option_spec,
outcomes_option_spec,
expectations_option_spec,
output_option_spec,
random_option_spec,
rerun_option_spec,
result_stream_spec,
seed_option_spec,
targets_option_spec,
)
),
("summarize",
"Summarize results from a test run.",
"[FILE [ ID ... ]]",
"""
Loads a test results file and summarizes the results. FILE is the path
to the results file. Optionally, specify one or more test or suite IDs
whose results are shown. If none are specified, shows all tests that
did not pass.
Use the '--format' option to specify the output format for the summary.
Valid formats are %s.
""" % _make_comma_separated_string(summary_formats, "and"),
( help_option_spec,
format_option_spec,
outcomes_option_spec,
expectations_option_spec,
output_option_spec,
result_stream_spec)
),
]
__version_output = \
("QMTest %s\n"
"Copyright (C) 2002 - 2007 CodeSourcery, Inc.\n"
"QMTest comes with ABSOLUTELY NO WARRANTY\n"
"For more information about QMTest visit http://www.qmtest.com\n")
"""The string printed when the --version option is used.
There is one fill-in, for a string, which should contain the version
number."""
def __init__(self, argument_list, path):
    """Construct a new QMTest.

    Parses the argument list but does not execute the command.

    'argument_list' -- The arguments to QMTest, not including the
    initial argv[0].

    'path' -- The path to the QMTest executable."""

    # Record this instance in the module-level global.
    global _the_qmtest
    _the_qmtest = self
    # Use the standard stdout and stderr streams to emit messages.
    self._stdout = sys.stdout
    self._stderr = sys.stderr
    # Build a trace object.
    self.__tracer = Tracer()
    # Build a command-line parser for this program.
    self.__parser = qm.cmdline.CommandParser(
        "qmtest",
        self.global_options_spec,
        self.commands_spec,
        self.conflicting_option_specs)
    # Parse the command line.
    components = self.__parser.ParseCommandLine(argument_list)
    # Unpack the results.
    ( self.__global_options,
      self.__command,
      self.__command_options,
      self.__arguments
    ) = components
    # If available, record the path to the qmtest executable.
    self.__qmtest_path = path
    # We have not yet computed the set of available targets.
    self.targets = None
    # The result stream class used for results files is the pickling
    # version.
    self.__file_result_stream_class_name \
        = "pickle_result_stream.PickleResultStream"
    # The result stream class used for textual feed back.
    self.__text_result_stream_class_name \
        = "text_result_stream.TextResultStream"
    # The expected outcomes have not yet been loaded.
    self.__expected_outcomes = None
def __del__(self):
    """Clean up global variables.

    Clears the globally registered target list held by the 'test'
    module."""

    test.set_targets([])
def HasGlobalOption(self, option):
    """Return true if 'option' was specified as a global command.

    'command' -- The long name of the option, but without the
    preceding "--".

    returns -- True if the option is present."""

    # Each entry of '__global_options' is an (option, argument) pair.
    option_names = [pair[0] for pair in self.__global_options]
    return option in option_names
def GetGlobalOption(self, option, default=None):
    """Return the value of global 'option', or 'default' if omitted."""

    # Yield the argument of every matching option; the first match (or
    # 'default' when there is none) is the answer.
    matches = (value for name, value in self.__global_options
               if name == option)
    return next(matches, default)
def HasCommandOption(self, option):
    """Return true if command 'option' was specified."""

    # Preserve the historical integer return value (1/0), not bool.
    names = [name for name, argument in self.__command_options]
    return 1 if option in names else 0
def GetCommandOption(self, option, default = None):
    """Return the value of command 'option'.

    'option' -- The long form of an command-specific option.

    'default' -- The default value to be returned if the 'option'
    was not specified.  This option should be the kind of an option
    that takes an argument.

    returns -- The value specified by the option, or 'default' if
    the option was not specified."""

    matches = (value for name, value in self.__command_options
               if name == option)
    return next(matches, default)
def Execute(self):
    """Execute the command.

    returns -- 0 if the command was executed successfully.  1 if
    there was a problem or if any tests run had unexpected outcomes."""

    # If --version was given, print the version number and exit.
    # (The GNU coding standards require that the program take no
    # further action after seeing --version.)
    if self.HasGlobalOption("version"):
        self._stdout.write(self.__version_output % qm.version)
        return 0
    # If the global help option was specified, display it and stop.
    if (self.GetGlobalOption("help") is not None
        or self.__command == "help"):
        self._stdout.write(self.__parser.GetBasicHelp())
        return 0
    # If the command help option was specified, display it and stop.
    if self.GetCommandOption("help") is not None:
        self.__WriteCommandHelp(self.__command)
        return 0
    # Make sure a command was specified.
    if self.__command == "":
        raise qm.cmdline.CommandError, qm.error("missing command")
    # Look in several places to find the test database:
    #
    # 1. The command-line.
    # 2. The QMTEST_DB_PATH environment variable.
    # 3. The current directory.
    db_path = self.GetGlobalOption("tdb")
    if not db_path:
        if os.environ.has_key(self.db_path_environment_variable):
            db_path = os.environ[self.db_path_environment_variable]
        else:
            db_path = "."
    # If the path is not already absolute, make it into an
    # absolute path at this point.
    if not os.path.isabs(db_path):
        db_path = os.path.join(os.getcwd(), db_path)
    # Normalize the path so that it is easy for the user to read
    # if it is emitted in an error message.
    self.__db_path = os.path.normpath(db_path)
    database.set_path(self.__db_path)
    # NOTE(review): 'error_occurred' is assigned here but never used in
    # this method.
    error_occurred = 0
    # Dispatch to the appropriate method.  "create-tdb" is special
    # because it needs the raw (non-normalized) db_path argument.
    if self.__command == "create-tdb":
        return self.__ExecuteCreateTdb(db_path)
    method = {
        "create" : self.__ExecuteCreate,
        "create-target" : self.__ExecuteCreateTarget,
        "describe" : self.__ExecuteDescribe,
        "extensions" : self.__ExecuteExtensions,
        "gui" : self.__ExecuteServer,
        "ls" : self.__ExecuteList,
        "register" : self.__ExecuteRegister,
        "remote" : self.__ExecuteRemote,
        "run" : self.__ExecuteRun,
        "report" : self.__ExecuteReport,
        "summarize": self.__ExecuteSummarize,
        }[self.__command]
    return method()
def GetDatabase(self):
    """Return the test database to use.

    returns -- The 'Database' to use for this execution.  Raises an
    exception if no 'Database' is available."""

    # Delegates to the module-level database registry, configured by
    # 'database.set_path' in 'Execute'.
    return database.get_database()
def GetDatabaseIfAvailable(self):
    """Return the test database to use.

    returns -- The 'Database' to use for this execution, or 'None'
    if no 'Database' is available."""

    try:
        return self.GetDatabase()
    except Exception:
        # Narrowed from a bare "except:" so that KeyboardInterrupt and
        # SystemExit propagate instead of being silently converted to a
        # missing database.
        return None
def GetTargetFileName(self):
    """Return the path to the file containing target specifications.

    returns -- The path to the file containing target specifications."""

    # An explicit "--targets" option takes precedence.
    explicit = self.GetCommandOption("targets")
    if explicit:
        return explicit
    # Otherwise fall back to the "targets" file in the database's
    # configuration directory.
    config_dir = self.GetDatabase().GetConfigurationDirectory()
    return os.path.join(config_dir, self.target_file_name)
def GetTargetsFromFile(self, file_name):
    """Return the 'Target's specified in 'file_name'.

    returns -- A list of the 'Target' objects specified in the
    target specification file 'file_name'."""

    try:
        document = qm.xmlutil.load_xml_file(file_name)
        targets_element = document.documentElement
        # The root element of a target file must be <targets>.
        if targets_element.tagName != "targets":
            raise QMException, \
                  qm.error("could not load target file",
                           file = file_name)
        targets = []
        for node in targets_element.getElementsByTagName("extension"):
            # Parse the DOM node.
            target_class, arguments \
                = (qm.extension.parse_dom_element
                   (node,
                    lambda n: get_extension_class(n, "target",
                                                  self.GetDatabase())))
            # Build the target.
            target = target_class(self.GetDatabase(), arguments)
            # Accumulate targets.
            targets.append(target)
        return targets
    # NOTE(review): catching 'Context' looks wrong -- 'Context' is the
    # test-context class imported from qm.test.context, not an obvious
    # exception type; the intent was probably to catch XML/parse
    # failures.  Confirm before changing.
    except Context:
        raise QMException, \
              qm.error("could not load target file",
                       file=file_name)
def GetTargets(self):
    """Return the 'Target' objects specified by the user.

    returns -- A sequence of 'Target' objects.  Targets are computed
    lazily: on first use the target file is loaded if it exists;
    otherwise a single local target is synthesized from the
    '--concurrency' option."""

    if not test.get_targets():
        file_name = self.GetTargetFileName()
        if os.path.exists(file_name):
            test.set_targets(self.GetTargetsFromFile(file_name))
        else:
            # The target file does not exist.
            concurrency = self.GetCommandOption("concurrency")
            if concurrency is None:
                # No concurrency specified.  Run single-threaded.
                concurrency = 1
            else:
                # Convert the concurrency to an integer.
                try:
                    concurrency = int(concurrency)
                except ValueError:
                    raise qm.cmdline.CommandError, \
                          qm.error("concurrency not integer",
                                   value=concurrency)
            # Construct the target.
            arguments = {}
            arguments["name"] = "local"
            arguments["group"] = "local"
            # Use a threaded target only when actual concurrency was
            # requested.
            if concurrency > 1:
                class_name = "thread_target.ThreadTarget"
                arguments["threads"] = concurrency
            else:
                class_name = "serial_target.SerialTarget"
            target_class = get_extension_class(class_name,
                                               'target', self.GetDatabase())
            test.set_targets([target_class(self.GetDatabase(), arguments)])
    return test.get_targets()
def GetTracer(self):
    """Return the 'Tracer' associated with this instance of QMTest.

    returns -- The 'Tracer' associated with this instance of QMTest."""

    # Created once in '__init__'.
    return self.__tracer
def MakeContext(self):
    """Construct a 'Context' object for running tests.

    returns -- A 'Context' populated from the default context file
    (only when no '--load-context' was given), then any
    '--load-context' files, then any '--context' KEY=VALUE options,
    in command-line order."""

    context = Context()
    # First, see if a context file was specified on the command
    # line.
    use_implicit_context_file = 1
    for option, argument in self.__command_options:
        if option == "load-context":
            use_implicit_context_file = 0
            break
    # If there is no context file, read the default context file.
    if (use_implicit_context_file
        and os.path.isfile(self.context_file_name)):
        context.Read(self.context_file_name)
    for option, argument in self.__command_options:
        # Look for the '--load-context' option.
        if option == "load-context":
            context.Read(argument)
        # Look for the '--context' option.
        elif option == "context":
            # Parse the argument.
            name, value = qm.common.parse_assignment(argument)
            try:
                # Insert it into the context.
                context[name] = value
            except ValueError, msg:
                # The format of the context key is invalid, but
                # raise a 'CommandError' instead.
                raise qm.cmdline.CommandError, msg
    return context
def GetExecutablePath(self):
    """Return the path to the QMTest executable.

    returns -- A string giving the path to the QMTest executable.
    This is the path that should be used to invoke QMTest
    recursively.  Returns 'None' if the path to the QMTest
    executable is unknown."""

    # Recorded from the 'path' constructor argument.
    return self.__qmtest_path
def GetFileResultStreamClass(self):
    """Return the 'ResultStream' class used for results files.

    returns -- The 'ResultStream' class used for results files
    (the pickling stream configured in '__init__')."""

    return get_extension_class(self.__file_result_stream_class_name,
                               "result_stream",
                               self.GetDatabaseIfAvailable())
def GetTextResultStreamClass(self):
    """Return the 'ResultStream' class used for textual feedback.

    returns -- the 'ResultStream' class used for textual
    feedback (the text stream configured in '__init__')."""

    return get_extension_class(self.__text_result_stream_class_name,
                               "result_stream",
                               self.GetDatabaseIfAvailable())
def __GetAttributeOptions(self, expect_value = True):
    """Return the attributes specified on the command line.

    'expect_value' -- True if the attribute is to be parsed as
    an assignment.

    returns -- A dictionary.  If expect_value is True, it
    maps attribute names (strings) to values (strings).
    Else it contains the raw attribute strings, mapping to None.
    There is an entry for each attribute specified with
    '--attribute' on the command line."""

    # Collect the arguments of all "--attribute" options first.
    raw_values = [argument
                  for option, argument in self.__command_options
                  if option == "attribute"]
    attributes = {}
    for argument in raw_values:
        if expect_value:
            # "NAME=VALUE" form: split and store the pair.
            name, value = qm.common.parse_assignment(argument)
            attributes[name] = value
        else:
            # Raw form: keep the whole string as the key.
            attributes[argument] = None
    return attributes
def __GetAnnotateOptions(self):
    """Return all annotate options.

    returns -- A dictionary containing the annotation name / value pairs."""

    # Parse every "--annotate NAME=VALUE" option; building the dict from
    # the ordered pairs lets later duplicates override earlier ones,
    # matching the historical loop.
    pairs = [qm.common.parse_assignment(argument)
             for option, argument in self.__command_options
             if option == "annotate"]
    return dict(pairs)
def __ExecuteCreate(self):
    """Create a new extension file.

    returns -- 0 on success; 2 if the wrong number of arguments was
    supplied."""

    # Check that the right number of arguments are present.
    if len(self.__arguments) != 2:
        self.__WriteCommandHelp("create")
        return 2
    # Figure out what database (if any) we are using.
    # NOTE: this local 'database' shadows the module-level 'database'
    # import within this method.
    database = self.GetDatabaseIfAvailable()
    # Get the extension kind.
    kind = self.__arguments[0]
    self.__CheckExtensionKind(kind)
    extension_id = self.GetCommandOption("id")
    if extension_id is not None:
        # Writing into the database requires a modifiable database.
        if not database:
            raise QMException, qm.error("no db specified")
        if not database.IsModifiable():
            raise QMException, qm.error("db not modifiable")
        extension_loader = database.GetExtension
    else:
        extension_loader = None
    class_loader = lambda n: get_extension_class(n, kind, database)
    # Process the descriptor.
    (extension_class, more_arguments) \
        = (qm.extension.parse_descriptor
           (self.__arguments[1], class_loader, extension_loader))
    # Validate the --attribute options.
    arguments = self.__GetAttributeOptions()
    arguments = qm.extension.validate_arguments(extension_class,
                                                arguments)
    # Override the --attribute options with the arguments provided
    # as part of the descriptor.
    arguments.update(more_arguments)
    if extension_id is not None:
        # Create the extension instance.  Objects derived from
        # Runnable require magic additional arguments.
        if issubclass(extension_class, (Runnable, Suite)):
            extras = { extension_class.EXTRA_ID : extension_id,
                       extension_class.EXTRA_DATABASE : database }
        else:
            extras = {}
        extension = extension_class(arguments, **extras)
        # Write the extension to the database.
        database.WriteExtension(extension_id, extension)
    else:
        # Figure out what file to use.
        filename = self.GetCommandOption("output")
        if filename is not None:
            file = open(filename, "w")
        else:
            file = sys.stdout
        # Write out the file.
        qm.extension.write_extension_file(extension_class, arguments,
                                          file)
    return 0
def __ExecuteCreateTdb(self, db_path):
    """Handle the command for creating a new test database.

    'db_path' -- The path at which to create the new test database.

    returns -- 0 on success; 2 if extra arguments were supplied."""

    if len(self.__arguments) != 0:
        self.__WriteCommandHelp("create-tdb")
        return 2
    # Create the directory if it does not already exist.
    if not os.path.isdir(db_path):
        os.mkdir(db_path)
    # Create the configuration directory.
    config_dir = database.get_configuration_directory(db_path)
    if not os.path.isdir(config_dir):
        os.mkdir(config_dir)
    # Reformulate this command in terms of "qmtest create".  Start by
    # adding "--output <path>".
    self.__command_options.append(("output",
                                   database.get_configuration_file(db_path)))
    # Figure out what database class to use.
    class_name \
        = self.GetCommandOption("class", "xml_database.XMLDatabase")
    # Add the extension kind and descriptor.
    self.__arguments.append("database")
    self.__arguments.append(class_name)
    # Now process this just like "qmtest create".
    # NOTE(review): the return value of '__ExecuteCreate' is discarded,
    # so a failure there is not reflected in this command's exit code.
    self.__ExecuteCreate()
    # Print a helpful message.
    self._stdout.write(qm.message("new db message", path=db_path) + "\n")
    return 0
def __ExecuteCreateTarget(self):
    """Create or update an entry in the target file.

    Command arguments: TARGET-NAME CLASS-NAME [GROUP].  Any existing
    entry for TARGET-NAME is replaced.

    returns -- 0 on success, 2 on a usage error."""
    # Make sure that the arguments are correct.
    if (len(self.__arguments) < 2 or len(self.__arguments) > 3):
        self.__WriteCommandHelp("create-target")
        return 2
    # Pull the required arguments out of the command line.
    target_name = self.__arguments[0]
    class_name = self.__arguments[1]
    if (len(self.__arguments) > 2):
        target_group = self.__arguments[2]
    else:
        target_group = ""
    # Load the database.
    database = self.GetDatabase()
    # Load the target class.
    target_class = get_extension_class(class_name, "target", database)
    # Get the dictionary of class arguments.
    field_dictionary \
        = qm.extension.get_class_arguments_as_dictionary(target_class)
    # Get the name of the target file.
    file_name = self.GetTargetFileName()
    # If the file already exists, read it in.
    if os.path.exists(file_name):
        # Load the document.
        document = qm.xmlutil.load_xml_file(file_name)
        # If there is a previous entry for this target, discard it.
        targets_element = document.documentElement
        duplicates = []
        for target_element \
            in targets_element.getElementsByTagName("extension"):
            # Find the "name" argument of each existing target and
            # compare it against the target being created.
            for attribute \
                in target_element.getElementsByTagName("argument"):
                if attribute.getAttribute("name") == "name":
                    name = field_dictionary["name"].\
                           GetValueFromDomNode(attribute.childNodes[0],
                                               None)
                    if name == target_name:
                        duplicates.append(target_element)
                    break
        # Remove duplicates outside the iteration loop; 'unlink' breaks
        # DOM reference cycles so the nodes can be collected.
        for duplicate in duplicates:
            targets_element.removeChild(duplicate)
            duplicate.unlink()
    else:
        # No existing file; start a fresh "targets" document.
        document = (qm.xmlutil.create_dom_document
                    (public_id = "QMTest/Target",
                     document_element_tag = "targets"))
        targets_element = document.documentElement
    # Get the attributes.
    attributes = self.__GetAttributeOptions()
    attributes["name"] = target_name
    attributes["group"] = target_group
    attributes = qm.extension.validate_arguments(target_class,
                                                 attributes)
    # Create the target element.
    target_element = qm.extension.make_dom_element(target_class,
                                                   attributes,
                                                   document)
    targets_element.appendChild(target_element)
    # Write out the XML file.  Fix: reuse 'file_name' (computed above)
    # and close the handle explicitly instead of leaking the file object
    # returned by open() as before.
    target_file = open(file_name, "w")
    try:
        document.writexml(target_file)
    finally:
        target_file.close()
    return 0
def __ExecuteExtensions(self):
    """List the available extension classes.

    With '--kind', restricts the listing to one extension kind;
    otherwise all kinds are listed.

    returns -- 0 on success, 2 on a usage error."""
    # Check that the right number of arguments are present.
    if len(self.__arguments) != 0:
        self.__WriteCommandHelp("extensions")
        return 2
    database = self.GetDatabaseIfAvailable()
    # Figure out what kinds of extensions we're going to list.
    kind = self.GetCommandOption("kind")
    if kind:
        self.__CheckExtensionKind(kind)
        kinds = [kind]
    else:
        kinds = base.extension_kinds
    for kind in kinds:
        # Get the available classes.
        names = qm.test.base.get_extension_class_names(kind,
                                                       database,
                                                       self.__db_path)
        # Build structured text describing the classes.
        description = "** Available %s classes **\n\n" % kind
        for n in names:
            description += " * " + n + "\n\n "
            # Try to load the class to get more information.
            try:
                extension_class = get_extension_class(n, kind, database)
                description \
                    += qm.extension.get_class_description(extension_class,
                                                          brief=1)
            # NOTE(review): bare 'except' deliberately swallows any load
            # failure so one broken class does not abort the listing.
            except:
                description += ("No description available: "
                                "could not load class.")
            description += "\n\n"
        self._stdout.write(qm.structured_text.to_text(description))
    return 0
def __ExecuteDescribe(self):
"""Describe an extension."""
# Check that the right number of arguments are present.
if len(self.__arguments) != 2:
self.__WriteCommandHelp("describe")
return 2
kind = self.__arguments[0]
long_format = self.GetCommandOption("long") != None
database = self.GetDatabaseIfAvailable()
class_ = get_extension_class(self.__arguments[1], kind, database)
attributes = (self.__GetAttributeOptions(False)
or class_._argument_dictionary)
print ""
print "class name:", get_extension_class_name(class_)
print " ", get_class_description(class_, brief=not long_format)
print ""
print "class attributes:"
tab = max([len(name) for name in attributes])
for name in attributes:
field = class_._argument_dictionary.get(name)
if not field:
self._stderr.write("Unknown attribute '%s'.\n"%name)
return 2
value = field.GetDefaultValue()
description = field.GetDescription()
if not long_format:
description = qm.structured_text.get_first(description)
print " %-*s %s"%(tab, name, description)
def __ExecuteList(self):
    """List the contents of the database.

    Without '--long', prints one ID per line.  With '--long', prints
    kind, class name and ID in aligned columns; '--details' additionally
    dumps each extension's argument values.

    returns -- 0 on success."""
    database = self.GetDatabase()
    long_format = self.HasCommandOption("long")
    details_format = self.HasCommandOption("details")
    recursive = self.HasCommandOption("recursive")
    # If no arguments are specified, list the root directory.
    args = self.__arguments or ("",)
    # Get all the extensions to list, keyed by ID.
    extensions = {}
    for arg in args:
        extension = database.GetExtension(arg)
        if not extension:
            raise QMException, qm.error("no such ID", id = arg)
        if isinstance(extension, qm.test.suite.Suite):
            # A suite is listed by its contents, either recursively or
            # only one level deep.
            if recursive:
                test_ids, suite_ids = extension.GetAllTestAndSuiteIds()
                extensions.update([(i, database.GetExtension(i))
                                   for i in test_ids + suite_ids])
            else:
                ids = extension.GetTestIds() + extension.GetSuiteIds()
                extensions.update([(i, database.GetExtension(i))
                                   for i in ids])
        else:
            extensions[arg] = extension
    # Get the labels for the extensions, in alphabetical order.
    ids = extensions.keys()
    ids.sort()
    # In the short format, just print the labels.
    if not long_format:
        for id in ids:
            print >> sys.stdout, id
        return 0
    # In the long format, print three columns: the extension kind,
    # class name, and the label.  We make two passes over the
    # extensions so that the output will be tidy.  In the first pass,
    # calculate the width required for the first two columns in the
    # output.  The actual output occurs in the second pass.
    longest_kind = 0
    longest_class = 0
    for i in (0, 1):
        for id in ids:
            extension = extensions[id]
            if isinstance(extension,
                          qm.test.directory_suite.DirectorySuite):
                # Directory suites are implicit; show them specially.
                kind = "directory"
                class_name = ""
            else:
                kind = extension.__class__.kind
                class_name = extension.GetClassName()
            if i == 0:
                # Measuring pass: track the widest kind/class strings.
                kind_len = len(kind) + 1
                if kind_len > longest_kind:
                    longest_kind = kind_len
                class_len = len(class_name) + 1
                if class_len > longest_class:
                    longest_class = class_len
            else:
                # Printing pass.
                print >> sys.stdout, \
                      "%-*s%-*s%s" % (longest_kind, kind,
                                      longest_class, class_name, id)
                if details_format:
                    tab = max([len(name)
                               for name in extension._argument_dictionary])
                    for name in extension._argument_dictionary:
                        value = str(getattr(extension, name))
                        print "  %-*s  %s"%(tab, name, value)
    return 0
def __ExecuteRegister(self):
"""Register a new extension class."""
# Make sure that the KIND and CLASS were specified.
if (len(self.__arguments) != 2):
self.__WriteCommandHelp("register")
return 2
kind = self.__arguments[0]
class_name = self.__arguments[1]
# Check that the KIND is valid.
self.__CheckExtensionKind(kind)
# Check that the CLASS_NAME is well-formed.
if class_name.count('.') != 1:
raise qm.cmdline.CommandError, \
qm.error("invalid class name",
class_name = class_name)
module, name = class_name.split('.')
# Try to load the database. It may provide additional
# directories to search.
database = self.GetDatabaseIfAvailable()
# Hunt through all of the extension class directories looking
# for an appropriately named module.
found = None
directories = get_extension_directories(kind, database,
self.__db_path)
for directory in directories:
for ext in (".py", ".pyc", ".pyo"):
file_name = os.path.join(directory, module + ext)
if os.path.exists(file_name):
found = file_name
break
if found:
break
# If we could not find the module, issue an error message.
if not found:
raise qm.QMException, \
qm.error("module does not exist",
module = module)
# Inform the user of the location in which QMTest found the
# module. (Sometimes, there might be another module with the
# same name in the path. Telling the user where we've found
# the module will help the user to deal with this situation.)
self._stdout.write(qm.structured_text.to_text
(qm.message("loading class",
class_name = name,
file_name = found)))
# We have found the module. Try loading it.
extension_class = get_extension_class_from_directory(class_name,
kind,
directory,
directories)
# Create or update the classes.qmc file.
classes_file_name = os.path.join(directory, "classes.qmc")
# Create a new DOM document for the class directory.
document = (qm.xmlutil.create_dom_document
(public_id = "Class-Directory",
document_element_tag="class-directory"))
# Copy entries from the old file to the new one.
extensions = get_extension_class_names_in_directory(directory)
for k, ns in extensions.iteritems():
for n in ns:
# Remove previous entries for the class being added.
if k == kind and n == class_name:
continue
element = document.createElement("class")
element.setAttribute("kind", k)
element.setAttribute("name", n)
document.documentElement.appendChild(element)
# Add an entry for the new element.
element = document.createElement("class")
element.setAttribute("kind", kind)
element.setAttribute("name", class_name)
document.documentElement.appendChild(element)
# Write out the file.
document.writexml(open(classes_file_name, "w"),
addindent = " ", newl = "\n")
return 0
def __ExecuteSummarize(self):
    """Read in test run results and summarize.

    Optional command arguments: RESULTS-FILE [ID ...].  When IDs are
    given (and '.' is not among them), only matching results are shown.

    returns -- 1 if any test had an outcome other than the expected
    one, otherwise 0; 2 is not used here (argument errors raise)."""
    # If no results file is specified, use a default value.
    if len(self.__arguments) == 0:
        results_path = self.results_file_name
    else:
        results_path = self.__arguments[0]
    database = self.GetDatabaseIfAvailable()
    # The remaining arguments, if any, are test and suite IDs.
    id_arguments = self.__arguments[1:]
    # Are there any?
    # '.' is an alias for <all>, and thus shadows other selectors.
    if len(id_arguments) > 0 and not '.' in id_arguments:
        ids = set()
        # Expand arguments into test/resource IDs.
        if database:
            for id in id_arguments:
                extension = database.GetExtension(id)
                if not extension:
                    raise qm.cmdline.CommandError, \
                          qm.error("no such ID", id = id)
                if extension.kind == database.SUITE:
                    # A suite selects all of the tests it contains.
                    ids.update(extension.GetAllTestAndSuiteIds()[0])
                else:
                    ids.add(id)
        else:
            # Without a database, suites cannot be expanded; take the
            # arguments literally.
            ids = set(id_arguments)
    else:
        # No IDs specified.  Show all test and resource results.
        # Don't show any results by test suite though.
        ids = None
    # Get an iterator over the results.
    try:
        results = base.load_results(results_path, database)
    except Exception, exception:
        raise QMException, \
              qm.error("invalid results file",
                       path=results_path,
                       problem=str(exception))
    any_unexpected_outcomes = 0
    # Load expectations.  '--expectations' wins over the legacy
    # '--outcomes' spelling.
    expectations = (self.GetCommandOption('expectations') or
                    self.GetCommandOption('outcomes'))
    expectations = base.load_expectations(expectations,
                                          database,
                                          results.GetAnnotations())
    # Compute the list of result streams to which output should be
    # written.
    streams = self.__CreateResultStreams(self.GetCommandOption("output"),
                                         results.GetAnnotations(),
                                         expectations)
    # Resource-setup results held back while filtering, so they can be
    # emitted next to the tests they caused to be skipped.
    resource_results = {}
    for r in results:
        if r.GetKind() != Result.TEST:
            if ids is None or r.GetId() in ids:
                for s in streams:
                    s.WriteResult(r)
            elif r.GetKind() == Result.RESOURCE_SETUP:
                resource_results[r.GetId()] = r
            continue
        # We now know that r is a test result.  If it's not one
        # that interests us, we're done.
        if ids is not None and r.GetId() not in ids:
            continue
        # If we're filtering, and this test was not run because it
        # depended on a resource that was not set up, emit the
        # resource result here.
        if (ids is not None
            and r.GetOutcome() == Result.UNTESTED
            and r.has_key(Result.RESOURCE)):
            rid = r[Result.RESOURCE]
            rres = resource_results.get(rid)
            if rres:
                del resource_results[rid]
                for s in streams:
                    s.WriteResult(rres)
        # Write out the test result.
        for s in streams:
            s.WriteResult(r)
        # NOTE(review): this compares the outcome string directly against
        # expectations.Lookup(...); elsewhere .GetOutcome() is called on
        # the lookup result -- confirm Lookup's return type here.
        if (not any_unexpected_outcomes
            and r.GetOutcome() != expectations.Lookup(r.GetId())):
            any_unexpected_outcomes = 1
    # Shut down the streams.
    for s in streams:
        s.Summarize()
    return any_unexpected_outcomes
def __ExecuteRemote(self):
    """Execute the 'remote' command.

    Acts as the child end of a remote-execution pipe: reads pickled
    commands from stdin, runs each test on a serial target, and writes
    pickled result lists back to stdout until a "Stop" command arrives.

    returns -- 0."""
    database = self.GetDatabase()
    # Get the target class.  For now, we always run in serial when
    # running remotely.
    target_class = get_extension_class("serial_target.SerialTarget",
                                       'target', database)
    # Build the target.
    target = target_class(database, { "name" : "child" })
    # Start the target.  Results are delivered through this queue.
    response_queue = Queue.Queue(0)
    target.Start(response_queue)
    # Read commands from standard input, and reply to standard
    # output.
    while 1:
        # Read the command.
        command = cPickle.load(sys.stdin)
        # If the command is just a string, it should be
        # the 'Stop' command.
        if isinstance(command, types.StringType):
            assert command == "Stop"
            target.Stop()
            break
        # Decompose command.
        method, id, context = command
        # Get the descriptor.
        descriptor = database.GetTest(id)
        # Run it.
        target.RunTest(descriptor, context)
        # There are no results yet.
        results = []
        # Read all of the results.
        while 1:
            try:
                result = response_queue.get(0)
                results.append(result)
            except Queue.Empty:
                # There are no more results.
                break
        # Pass the results back.
        cPickle.dump(results, sys.stdout)
        # The standard output stream is bufferred, but the master
        # will block waiting for a response, so we must flush
        # the buffer here.
        sys.stdout.flush()
    return 0
def __ExecuteReport(self):
    """Execute a 'report' command, generating a report from result files.

    Requires '--output' and at least one result-file argument.  With
    '--flat' the database is optional; otherwise it is required.

    returns -- 0 on success, 2 on a usage error."""
    output = self.GetCommandOption("output")
    flat = self.GetCommandOption("flat") != None
    # Check that at least one result file is present.
    if not output or len(self.__arguments) < 1:
        self.__WriteCommandHelp("report")
        return 2
    # If the database can be loaded, use it to find all
    # available tests.  The default (non-flat) format requires a database.
    if flat:
        database = self.GetDatabaseIfAvailable()
    else:
        database = self.GetDatabase()
    report_generator = ReportGenerator(output, database)
    report_generator.GenerateReport(flat, self.__arguments)
    # Fix: return an explicit success code; previously this handler
    # returned None, unlike every other command handler.
    return 0
def __ExecuteRun(self):
    """Execute a 'run' command.

    Expands the requested test IDs, filters them against expectations,
    runs them on the configured targets, and writes results to the
    configured result streams.

    returns -- 1 if any test had an unexpected outcome, otherwise 0."""
    database = self.GetDatabase()
    # Handle the 'seed' option.  First create the random number
    # generator we will use.
    seed = self.GetCommandOption("seed")
    if seed:
        # A seed was specified.  It should be an integer.
        try:
            seed = int(seed)
        except ValueError:
            raise qm.cmdline.CommandError, \
                  qm.error("seed not integer", seed=seed)
        # Use the specified seed.
        random.seed(seed)
    # Figure out what tests to run.
    if len(self.__arguments) == 0:
        # No IDs specified; run the entire test database.
        self.__arguments.append("")
    elif '.' in self.__arguments:
        # '.' is an alias for <all>, and thus shadows other selectors.
        self.__arguments = [""]
    # Expand arguments in test IDs.
    try:
        test_ids, test_suites \
            = self.GetDatabase().ExpandIds(self.__arguments)
    except (qm.test.database.NoSuchTestError,
            qm.test.database.NoSuchSuiteError), exception:
        raise qm.cmdline.CommandError, str(exception)
    except ValueError, exception:
        raise qm.cmdline.CommandError, \
              qm.error("no such ID", id=str(exception))
    # Handle the --annotate options.
    annotations = self.__GetAnnotateOptions()
    # Load expectations.  '--expectations' wins over the legacy
    # '--outcomes' spelling.
    expectations = (self.GetCommandOption('expectations') or
                    self.GetCommandOption('outcomes'))
    expectations = base.load_expectations(expectations,
                                          database,
                                          annotations)
    # Filter the set of tests to be run, eliminating any that should
    # be skipped.
    test_ids = self.__FilterTestsToRun(test_ids, expectations)
    # Figure out which targets to use.
    targets = self.GetTargets()
    # Compute the context in which the tests will be run.
    context = self.MakeContext()
    # Handle the --output option.
    if self.HasCommandOption("no-output"):
        # User specified no output.
        result_file_name = None
    else:
        result_file_name = self.GetCommandOption("output")
        if result_file_name is None:
            # By default, write results to a default file.
            result_file_name = self.results_file_name
    # Compute the list of result streams to which output should be
    # written.
    result_streams = self.__CreateResultStreams(result_file_name,
                                                annotations,
                                                expectations)
    if self.HasCommandOption("random"):
        # Randomize the order of the tests.
        random.shuffle(test_ids)
    else:
        test_ids.sort()
    # Run the tests.
    engine = ExecutionEngine(database, test_ids, context, targets,
                             result_streams,
                             expectations)
    if engine.Run():
        return 1
    return 0
def __ExecuteServer(self):
    """Process the server command: start the QMTest GUI web server.

    Honors the '--port', '--address', '--log-file', '--pid-file',
    '--results', '--outcomes', '--no-browser' and '--daemon' options.

    returns -- 0 when the server shuts down."""
    database = self.GetDatabase()
    # Get the port number specified by a command option, if any.
    # Otherwise use a default value.
    port_number = self.GetCommandOption("port", default=0)
    try:
        port_number = int(port_number)
    except ValueError:
        raise qm.cmdline.CommandError, qm.error("bad port number")
    # Get the local address specified by a command option, if any.
    # If none was specified, use the loopback address.  The loopback
    # address is used by default for security reasons; it restricts
    # access to the QMTest server to users on the local machine.
    address = self.GetCommandOption("address", default="127.0.0.1")
    # If a log file was requested, open it now.
    log_file_path = self.GetCommandOption("log-file")
    if log_file_path == "-":
        # A hyphen path name means standard output.
        log_file = sys.stdout
    elif log_file_path is None:
        # No log file.
        log_file = None
    else:
        # Otherwise, it's a file name.  Open it for append.
        log_file = open(log_file_path, "a+")
    # If a PID file was requested, create it now.
    pid_file_path = self.GetCommandOption("pid-file")
    if pid_file_path is not None:
        # If a PID file was requested, but no explicit path was
        # given, use a default value.
        if not pid_file_path:
            pid_file_path = qm.common.rc.Get("pid-file",
                                             "/var/run/qmtest.pid",
                                             "qmtest")
        try:
            pid_file = open(pid_file_path, "w")
        except IOError, e:
            raise qm.cmdline.CommandError, str(e)
    else:
        pid_file = None
    # Create a run database, if requested.
    run_db = None
    directory = self.GetCommandOption("results", default="")
    if directory:
        directory = os.path.normpath(directory)
        run_db = DirRunDatabase(directory, database)
    # Load expectations.  Only support the 'outcome' option here,
    # as 'expectations' in general are unsupported with this GUI.
    expectations = self.GetCommandOption('outcomes')
    expectations = base.load_expectations(expectations, database)
    # Make sure this is either an ExpectationDatabase or a
    # PreviousRun
    if not type(expectations) in (ExpectationDatabase, PreviousTestRun):
        raise qm.cmdline.CommandError, 'not a valid results file'
    # Figure out which targets to use.
    targets = self.GetTargets()
    # Compute the context in which the tests will be run.
    context = self.MakeContext()
    # Set up the server.
    server = QMTestServer(database,
                          port_number, address,
                          log_file, targets, context,
                          expectations,
                          run_db)
    # Ask the OS which port was actually bound (relevant when 0 was
    # requested, meaning "any free port").
    port_number = server.GetServerAddress()[1]
    # Construct the URL to the main page on the server.
    if address == "":
        url_address = qm.platform.get_host_name()
    else:
        url_address = address
    if run_db:
        url = "http://%s:%d/report/dir" % (url_address, port_number)
    else:
        url = "http://%s:%d/test/dir" % (url_address, port_number)
    if not self.HasCommandOption("no-browser"):
        # Now that the server is bound to its address, start the
        # web browser.
        qm.platform.open_in_browser(url)
    message = qm.message("server url", url=url)
    sys.stderr.write(message + "\n")
    # Become a daemon, if appropriate.
    if self.GetCommandOption("daemon") is not None:
        # Fork twice.
        if os.fork() != 0:
            os._exit(0)
        if os.fork() != 0:
            os._exit(0)
        # This process is now the grandchild of the original
        # process.
    # Write out the PID file.  The correct PID is not known until
    # after the transformation to a daemon has taken place.
    try:
        if pid_file:
            pid_file.write(str(os.getpid()))
            pid_file.close()
        # Accept requests.
        try:
            server.Run()
        except qm.platform.SignalException, se:
            if se.GetSignalNumber() == signal.SIGTERM:
                # If we receive SIGTERM, shut down.
                pass
            else:
                # Other signals propagate outwards.
                raise
        except KeyboardInterrupt:
            # If we receive a keyboard interrupt (Ctrl-C), shut down.
            pass
    finally:
        # Always remove the PID file on the way out.
        if pid_file:
            os.remove(pid_file_path)
    return 0
def __WriteCommandHelp(self, command):
    """Print usage help for 'command' on the standard error stream.

    'command' -- The name of the command for which help information
    is required."""
    help_text = self.__parser.GetCommandHelp(command)
    self._stderr.write(help_text)
def __FilterTestsToRun(self, test_ids, expectations):
    """Return those tests from 'test_ids' that should be run.

    'test_ids' -- A sequence of test ids.

    'expectations' -- An ExpectationDatabase.

    returns -- The elements of 'test_ids' that are not to be skipped,
    in their original relative order.  When '--rerun' was given, only
    tests whose recorded outcome differs from the expected one are
    kept; otherwise the sequence is returned unchanged."""
    rerun_file_name = self.GetCommandOption("rerun")
    if not rerun_file_name:
        # No --rerun option: run everything.
        return test_ids
    # Load the outcomes from the file specified and keep only the
    # tests with unexpected outcomes.
    outcomes = base.load_outcomes(rerun_file_name,
                                  self.GetDatabase())
    selected = []
    for test_id in test_ids:
        actual = outcomes.get(test_id, Result.PASS)
        expected = expectations.Lookup(test_id).GetOutcome()
        if actual != expected:
            selected.append(test_id)
    return selected
def __CheckExtensionKind(self, kind):
"""Check that 'kind' is a valid extension kind.
'kind' -- A string giving the name of an extension kind. If the
'kind' does not name a valid extension kind, an appropriate
exception is raised."""
if kind not in base.extension_kinds:
raise qm.cmdline.CommandError, \
qm.error("invalid extension kind",
kind = kind)
def __CreateResultStreams(self, output_file, annotations, expectations):
    """Return the result streams to use.

    'output_file' -- If not 'None', the name of a file to which
    the standard results file format should be written.

    'annotations' -- A dictionary with annotations for this test run.

    'expectations' -- An ExpectationDatabase.

    returns -- A list of 'ResultStream' objects, as indicated by the
    user: a textual summary stream (unless '--format none'), one stream
    per '--result-stream' option, and a file stream for 'output_file'.
    Every annotation is written to every stream before returning."""
    database = self.GetDatabaseIfAvailable()
    result_streams = []
    # Arguments shared by every stream.
    arguments = {}
    arguments['expected_outcomes'] = expectations.GetExpectedOutcomes()
    # Look up the summary format.
    format = self.GetCommandOption("format", "")
    if format and format not in self.summary_formats:
        # Invalid format.  Complain.
        valid_format_string = string.join(
            map(lambda f: '"%s"' % f, self.summary_formats), ", ")
        raise qm.cmdline.CommandError, \
              qm.error("invalid results format",
                       format=format,
                       valid_formats=valid_format_string)
    if format != "none":
        args = { "format" : format }
        args.update(arguments)
        stream = self.GetTextResultStreamClass()(args)
        result_streams.append(stream)
    # Class loader passed to parse_descriptor for "--result-stream".
    f = lambda n: get_extension_class(n, "result_stream", database)
    # Look for all of the "--result-stream" options.
    for opt, opt_arg in self.__command_options:
        if opt == "result-stream":
            ec, args = qm.extension.parse_descriptor(opt_arg, f)
            args.update(arguments)
            result_streams.append(ec(args))
    # If there is an output file, create a standard results file on
    # that file.
    if output_file is not None:
        rs = (self.GetFileResultStreamClass()
              ({ "filename" : output_file}))
        result_streams.append(rs)
    for name, value in annotations.iteritems():
        for rs in result_streams:
            rs.WriteAnnotation(name, value)
    return result_streams
########################################################################
# Functions
########################################################################
def get_qmtest():
    """Return the global 'QMTest' instance.

    returns -- The 'QMTest' object that corresponds to the currently
    executing thread.

    At present there is a single 'QMTest' object per process.  Should
    that ever change, this accessor is the single point that would
    return per-thread instances."""
    return _the_qmtest
########################################################################
# Local Variables:
# mode: python
# indent-tabs-mode: nil
# fill-column: 72
# End:
|
MentorEmbedded/qmtest
|
qm/test/cmdline.py
|
Python
|
gpl-2.0
| 68,308
|
[
"VisIt"
] |
1f24ab87349536922d2bc408adb2b46e14297300a9d5074549be3c44905bbb9b
|
""" StorageManagementDB is a front end to the Stager Database.
    There are four tables in the StorageManagementDB: Tasks, CacheReplicas, TaskReplicas and StageRequests.
The Tasks table is the place holder for the tasks that have requested files to be staged.
These can be from different systems and have different associated call back methods.
The CacheReplicas table keeps the information on all the CacheReplicas in the system.
It maps all the file information LFN, PFN, SE to an assigned ReplicaID.
The TaskReplicas table maps the TaskIDs from the Tasks table to the ReplicaID from the CacheReplicas table.
The StageRequests table contains each of the prestage request IDs for each of the replicas.
"""
__RCSID__ = "$Id$"
import inspect
import types
import sys
from DIRAC import gLogger, S_OK, S_ERROR
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities.List import intListToString, stringListToString
# Stage Requests are issued with a length of "PinLength"
# However, once Staged, the entry in the StageRequest will set a PinExpiryTime only for "PinLength" / THROTTLING_STEPS
# As PinExpiryTime arrives, StageRequest and their corresponding CacheReplicas entries are cleaned
# This allows to throttle the submission of Stage Requests up to a maximum of "DiskCacheTB" per "PinLength"
# After "PinLength" / THROTTLING_STEPS seconds, entries are removed, so new requests for the same replica will trigger
# a new Stage Request to the SE, and thus an update of the Pinning on the SE.
#
# - "PinLength" is an Option of the StageRequest Agent that defaults to THROTTLING_TIME
# - "DiskCacheTB" is an Option of the StorageElement that defaults to 1 (TB)
#
THROTTLING_TIME = 86400
THROTTLING_STEPS = 12
class StorageManagementDB( DB ):
# Schema description used by _createTables: one entry per table, each with
# 'Fields', 'Indexes' and 'PrimaryKey' (plus 'ForeignKeys' where used).
_tablesDict = {}
# Tasks table: one row per staging request submitted by a client system,
# including the callback used to notify it on completion.
_tablesDict [ 'Tasks' ] = {
                          'Fields' :
                                    {
                                     'TaskID' : 'INTEGER AUTO_INCREMENT',
                                     'Status' : 'VARCHAR(32) DEFAULT "New"',
                                     'Source' : 'VARCHAR(32) NOT NULL',
                                     'SubmitTime' : 'DATETIME NOT NULL',
                                     'LastUpdate' : 'DATETIME',
                                     'CompleteTime' : 'DATETIME',
                                     'CallBackMethod' : 'VARCHAR(255)',
                                     'SourceTaskID' : 'VARCHAR(32)'
                                    },
                          'Indexes' : { 'TaskID,Status' : [ 'TaskID', 'Status' ] },
                          'PrimaryKey' : [ 'TaskID', 'Status' ]
                          }
# TaskReplicas table: many-to-many join between Tasks and CacheReplicas.
_tablesDict[ 'TaskReplicas' ] = {
                                'Fields' :
                                          {
                                           'TaskID' : 'INTEGER(8) NOT NULL REFERENCES Tasks(TaskID)',
                                           'ReplicaID' : 'INTEGER(8) NOT NULL REFERENCES CacheReplicas(ReplicaID)'
                                          },
                                'Indexes' : { 'TaskID,ReplicaID' : [ 'TaskID', 'ReplicaID' ] },
                                'PrimaryKey' : [ 'TaskID', 'ReplicaID' ]
                                }
# FIXME: we have 2 triggers here !!  They keep CacheReplicas.Links equal to
# the number of TaskReplicas rows referencing each replica:
#CREATE TRIGGER taskreplicasAfterInsert AFTER INSERT ON TaskReplicas FOR EACH ROW UPDATE CacheReplicas SET CacheReplicas.Links=CacheReplicas.Links+1 WHERE CacheReplicas.ReplicaID=NEW.ReplicaID;
#CREATE TRIGGER taskreplicasAfterDelete AFTER DELETE ON TaskReplicas FOR EACH ROW UPDATE CacheReplicas SET CacheReplicas.Links=CacheReplicas.Links-1 WHERE CacheReplicas.ReplicaID=OLD.ReplicaID;
# CacheReplicas table: one row per physical replica known to the stager,
# keyed by an internal ReplicaID and mapping LFN/PFN/SE metadata.
_tablesDict[ 'CacheReplicas' ] = {
                                 'Fields' :
                                           {
                                            'ReplicaID' : 'INTEGER AUTO_INCREMENT',
                                            'Type' : 'VARCHAR(32) NOT NULL',
                                            'Status' : 'VARCHAR(32) DEFAULT "New"',
                                            'SE' : 'VARCHAR(32) NOT NULL',
                                            'LFN' : 'VARCHAR(255) NOT NULL',
                                            'PFN' : 'VARCHAR(255)',
                                            'Size' : 'BIGINT(60) DEFAULT 0',
                                            'FileChecksum' : 'VARCHAR(255) NOT NULL',
                                            'GUID' : 'VARCHAR(255) NOT NULL',
                                            'SubmitTime' : 'DATETIME NOT NULL',
                                            'LastUpdate' : 'DATETIME',
                                            'Reason' : 'VARCHAR(255)',
                                            'Links' : 'INTEGER DEFAULT 0'
                                           },
                                 'Indexes' : { 'ReplicaID,Status,SE' : [ 'ReplicaID', 'Status', 'SE' ] },
                                 'PrimaryKey' : [ 'ReplicaID', 'LFN', 'SE' ]
                                 }
# StageRequests table: one row per prestage request issued to an SE for a
# replica, tracking its pinning lifetime.
_tablesDict[ 'StageRequests' ] = {
                                 'Fields' :
                                           {
                                            'ReplicaID' : 'INTEGER(8) NOT NULL REFERENCES CacheReplicas(ReplicaID)',
                                            'StageStatus' : 'VARCHAR(32) DEFAULT "StageSubmitted"',
                                            'RequestID' : 'VARCHAR(64) DEFAULT ""',
                                            'StageRequestSubmitTime' : 'DATETIME NOT NULL',
                                            'StageRequestCompletedTime' : 'DATETIME',
                                            'PinLength' : 'INTEGER(8)',
                                            'PinExpiryTime' : 'DATETIME'
                                           },
                                 'Indexes' : { 'StageStatus' : [ 'StageStatus' ] },
                                 'ForeignKeys' : { 'ReplicaID' : 'CacheReplicas.ReplicaID' }
                                 }
def __init__( self, systemInstance = 'Default', maxQueueSize = 10, checkTables = False ):
    """Open the StorageManagementDB connection.

    'checkTables' -- when True, create any missing tables from
    _tablesDict, aborting the process if that fails."""
    DB.__init__( self, 'StorageManagementDB', 'StorageManagement/StorageManagementDB', maxQueueSize )
    # FIXME: substitute with self._tablesDict ( but watch out, the order will not be the same ! )
    # Column orderings used when zipping query rows into dictionaries.
    self.TASKPARAMS = ['TaskID', 'Status', 'Source', 'SubmitTime', 'LastUpdate', 'CompleteTime', 'CallBackMethod', 'SourceTaskID']
    self.REPLICAPARAMS = ['ReplicaID', 'Type', 'Status', 'SE', 'LFN', 'PFN', 'Size', 'FileChecksum', 'GUID', 'SubmitTime', 'LastUpdate', 'Reason', 'Links']
    self.STAGEPARAMS = ['ReplicaID', 'StageStatus', 'RequestID', 'StageRequestSubmitTime', 'StageRequestCompletedTime', 'PinLength', 'PinExpiryTime']
    # States used when aggregating task status per replica state.
    self.STATES = ['Failed', 'New', 'Waiting', 'Offline', 'StageSubmitted', 'Staged']
    if checkTables:
        result = self._createTables( self._tablesDict )
        if not result['OK']:
            error = 'Failed to check/create tables'
            self.log.fatal( 'StorageManagementDB: %s' % error )
            sys.exit( error )
            # NOTE(review): this 'return' is unreachable after sys.exit --
            # presumably a leftover; confirm before removing.
            return
        if result['Value']:
            self.log.info( "StorageManagementDB: created tables %s" % result['Value'] )
def __getConnection( self, connection ):
    """Return 'connection' if one was supplied; otherwise try to obtain
    a fresh MySQL connection.  On failure, log a warning and hand back
    the (falsy) value that was passed in."""
    if not connection:
        res = self._getConnection()
        if res['OK']:
            connection = res['Value']
        else:
            gLogger.warn( "Failed to get MySQL connection", res['Message'] )
    return connection
def _caller( self ):
    """Return the name of the function two frames up the call stack,
    i.e. the caller of whichever method invoked '_caller' (used to tag
    log messages)."""
    # inspect.stack() frame records are tuples; index 3 is the
    # function name.
    caller_record = inspect.stack()[2]
    return caller_record[3]
################################################################
#
# State machine management
#
def updateTaskStatus( self, taskIDs, newTaskStatus, connection = False ):
    # Public wrapper: delegates to the private update that enforces the
    # task state machine (no 'force' exposed to external callers).
    return self.__updateTaskStatus( taskIDs, newTaskStatus, connection = connection )
def __updateTaskStatus( self, taskIDs, newTaskStatus, force = False, connection = False ):
    """Move 'taskIDs' to 'newTaskStatus'.

    Unless 'force' is set, the transition is first validated against
    the task state machine via _checkTaskUpdate.  Returns S_OK with the
    list of TaskIDs that were eligible for the update."""
    connection = self.__getConnection( connection )
    if not taskIDs:
        return S_OK( taskIDs )
    if force:
        toUpdate = taskIDs
    else:
        res = self._checkTaskUpdate( taskIDs, newTaskStatus, connection = connection )
        if not res['OK']:
            return res
        toUpdate = res['Value']
    if not toUpdate:
        return S_OK( toUpdate )
    # Select the affected rows first so they can be logged after the
    # UPDATE below.
    # reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
    reqSelect = "SELECT TaskID FROM Tasks WHERE TaskID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newTaskStatus )
    resSelect = self._query( reqSelect, connection )
    if not resSelect['OK']:
        # NOTE(review): the failure is only logged; execution continues
        # and resSelect['Value'] below would raise -- confirm this path
        # cannot occur in practice.
        gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '__updateTaskStatus', reqSelect, resSelect['Message'] ) )
    req = "UPDATE Tasks SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE TaskID IN (%s) AND Status != '%s';" % ( newTaskStatus, intListToString( toUpdate ), newTaskStatus )
    res = self._update( req, connection )
    if not res['OK']:
        return res
    # Collect the IDs that actually changed, for verbose logging.
    taskIDs = []
    for record in resSelect['Value']:
        taskIDs.append( record[0] )
        gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
    if len( taskIDs ) > 0:
        # Re-read the updated rows purely for verbose logging.
        reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
        resSelect1 = self._query( reqSelect1, connection )
        if not resSelect1["OK"]:
            gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), '__updateTaskStatus', reqSelect1, resSelect1['Message'] ) )
        else:
            for record in resSelect1['Value']:
                gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), '__updateTaskStatus', record ) )
    return S_OK( toUpdate )
def _checkTaskUpdate( self, taskIDs, newTaskState, connection = False ):
    """Return the subset of 'taskIDs' that may legally move to
    'newTaskState' according to the task state machine.

    An empty predecessor list means the state is reachable from any
    state ('Failed').  Returns S_ERROR for an unknown target state."""
    connection = self.__getConnection( connection )
    if not taskIDs:
        return S_OK( taskIDs )
    # Allowed predecessor states for each target state.
    allowedPredecessors = {
        'Failed' : [],                               # * -> Failed
        'Done' : ['StageCompleting'],                # StageCompleting -> Done
        'StageCompleting' : ['StageSubmitted'],      # StageSubmitted -> StageCompleting
        'StageSubmitted' : ['Waiting', 'Offline'],   # Waiting/Offline -> StageSubmitted
        'Waiting' : ['New'],                         # New -> Waiting
        'Offline' : ['Waiting'],                     # Waiting -> Offline
    }
    if newTaskState not in allowedPredecessors:
        return S_ERROR( "Task status not recognized" )
    oldTaskState = allowedPredecessors[newTaskState]
    if not oldTaskState:
        # Reachable from anywhere: every requested task qualifies.
        return S_OK( taskIDs )
    req = "SELECT TaskID FROM Tasks WHERE Status in (%s) AND TaskID IN (%s)" % ( stringListToString( oldTaskState ), intListToString( taskIDs ) )
    res = self._query( req, connection )
    if not res['OK']:
        return res
    toUpdate = [row[0] for row in res['Value']]
    return S_OK( toUpdate )
  def updateReplicaStatus( self, replicaIDs, newReplicaStatus, connection = False ):
    """ Move the given CacheReplicas to newReplicaStatus (subject to the replica
        state machine) and then recompute the status of the affected Tasks.

        :param replicaIDs: list of ReplicaID values to update
        :param newReplicaStatus: target Status string
        :return: S_OK with the list of ReplicaIDs that were eligible for update
    """
    connection = self.__getConnection( connection )
    if not replicaIDs:
      return S_OK( replicaIDs )
    # Keep only replicas whose current state may legally transition.
    # NOTE(review): the obtained connection is not forwarded here — confirm intended.
    res = self._checkReplicaUpdate( replicaIDs, newReplicaStatus )
    if not res['OK']:
      return res
    toUpdate = res['Value']
    if not toUpdate:
      return S_OK( toUpdate )
    # Snapshot the rows about to change, for logging below.
    reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newReplicaStatus )
    resSelect = self._query( reqSelect, connection )
    if not resSelect['OK']:
      # NOTE(review): on failure here, resSelect['Value'] below would raise — confirm.
      gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaStatus', reqSelect, resSelect['Message'] ) )
    req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newReplicaStatus, intListToString( toUpdate ), newReplicaStatus )
    res = self._update( req, connection )
    if not res['OK']:
      return res
    # Rebind replicaIDs to the subset that actually changed (from the snapshot).
    replicaIDs = []
    for record in resSelect['Value']:
      replicaIDs.append( record[0] )
      gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
    if len( replicaIDs ) > 0:
      # Log the post-update rows (verbose only).
      reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
      resSelect1 = self._query( reqSelect1, connection )
      if not resSelect1['OK']:
        gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaStatus', reqSelect1, resSelect1['Message'] ) )
      else:
        for record in resSelect1['Value']:
          gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaStatus', record ) )
    # Propagate the replica state change to the owning Tasks.
    res = self._updateTasksForReplica( replicaIDs, connection = connection )
    if not res['OK']:
      return res
    return S_OK( toUpdate )
  def _updateTasksForReplica( self, replicaIDs, connection = False ):
    """ Recompute the Status of every Task linked to the given replicas.

        For each affected task, the distinct cache states of all its replicas
        are collected; the task is moved to the first matching state in
        self.STATES (assumed ordered by precedence — TODO confirm), or to
        'Failed' if any replica state is unknown or none are found.

        :return: S_OK with dict state -> list of TaskIDs that were (attempted
                 to be) moved to that state
    """
    tasksInStatus = {}
    for state in self.STATES:
      tasksInStatus[state] = []
    # All tasks touching any of the supplied replicas, with their current status.
    req = "SELECT T.TaskID,T.Status FROM Tasks AS T, TaskReplicas AS R WHERE R.ReplicaID IN ( %s ) AND R.TaskID = T.TaskID GROUP BY T.TaskID;" % intListToString( replicaIDs )
    res = self._query( req, connection )
    if not res['OK']:
      return res
    for taskId, status in res['Value']:
      # Distinct cache states of ALL replicas belonging to this task.
      subreq = "SELECT DISTINCT(C.Status) FROM TaskReplicas AS R, CacheReplicas AS C WHERE R.TaskID=%s AND R.ReplicaID = C.ReplicaID;" % taskId
      subres = self._query( subreq, connection )
      if not subres['OK']:
        return subres
      cacheStatesForTask = [row[0] for row in subres['Value']]
      if not cacheStatesForTask:
        # Task with no cache replicas at all: treat as failed.
        tasksInStatus['Failed'].append( taskId )
        continue
      wrongState = False
      for state in cacheStatesForTask:
        if state not in self.STATES:
          wrongState = True
          break
      if wrongState:
        # At least one replica is in a state outside the known state machine.
        tasksInStatus['Failed'].append( taskId )
        continue
      # First state (in self.STATES order) present among the replicas wins;
      # only record it if it actually differs from the task's current status.
      for state in self.STATES:
        if state in cacheStatesForTask:
          if status != state:
            tasksInStatus[state].append( taskId )
          break
    for newStatus in tasksInStatus.keys():
      if tasksInStatus[newStatus]:
        # Best effort: a failed task update is logged but not fatal.
        res = self.__updateTaskStatus( tasksInStatus[newStatus], newStatus, True, connection = connection )
        if not res['OK']:
          gLogger.warn( "Failed to update task associated to replicas", res['Message'] )
          # return res
    return S_OK( tasksInStatus )
def getAssociatedReplicas( self, replicaIDs ):
""" Retrieve the list of Replicas that belong to the same Tasks as the provided list
"""
res = self._getReplicaIDTasks( replicaIDs )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getAssociatedReplicas: Failed to get Tasks.', res['Message'] )
return res
taskIDs = res['Value']
return self.getCacheReplicas( {'TaskID':taskIDs} )
def _checkReplicaUpdate( self, replicaIDs, newReplicaState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newReplicaState == 'Failed':
oldReplicaState = []
# New -> Waiting
elif newReplicaState == 'Waiting':
oldReplicaState = ['New']
# Waiting -> StageSubmitted
elif newReplicaState == 'StageSubmitted':
oldReplicaState = ['Waiting', 'Offline']
# StageSubmitted -> Staged
elif newReplicaState == 'Staged':
oldReplicaState = ['StageSubmitted']
elif newReplicaState == 'Offline':
oldReplicaState = ['Waiting']
else:
return S_ERROR( "Replica status not recognized" )
if not oldReplicaState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM CacheReplicas WHERE Status IN (%s) AND ReplicaID IN (%s)" % ( stringListToString( oldReplicaState ), intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
  def __getTaskStateFromReplicaState( self, replicaState ):
    """ Map a replica state onto the corresponding task state.

        Currently the identity mapping: task states mirror replica states.
    """
    # For the moment the task state just references to the replicaState
    return replicaState
  def updateStageRequestStatus( self, replicaIDs, newStageStatus, connection = False ):
    """ Move the stage requests of the given replicas to newStageStatus and
        propagate the corresponding state to the CacheReplicas.

        :param replicaIDs: list of ReplicaID values
        :param newStageStatus: target stage status string
        :return: S_OK with the list of ReplicaIDs eligible for update
    """
    connection = self.__getConnection( connection )
    if not replicaIDs:
      return S_OK( replicaIDs )
    res = self._checkStageUpdate( replicaIDs, newStageStatus, connection = connection )
    if not res['OK']:
      return res
    toUpdate = res['Value']
    if not toUpdate:
      return S_OK( toUpdate )
    # Snapshot of the rows about to change, for logging below.
    reqSelect = "Select ReplicaID FROM CacheReplicas WHERE ReplicaID IN (%s) AND Status != '%s';" % ( intListToString( toUpdate ), newStageStatus )
    resSelect = self._query( reqSelect, connection )
    if not resSelect['OK']:
      # NOTE(review): on failure here, resSelect['Value'] below would raise — confirm.
      gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect, resSelect['Message'] ) )
    # NOTE(review): this updates CacheReplicas.Status, not StageRequests.StageStatus,
    # even though the method name refers to stage requests — confirm intended.
    req = "UPDATE CacheReplicas SET Status='%s',LastUpdate=UTC_TIMESTAMP() WHERE ReplicaID IN (%s) AND Status != '%s';" % ( newStageStatus, intListToString( toUpdate ), newStageStatus )
    res = self._update( req, connection )
    if not res['OK']:
      return res
    # Rebind replicaIDs to the subset that actually changed (from the snapshot).
    replicaIDs = []
    for record in resSelect['Value']:
      replicaIDs.append( record[0] )
      gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
    # NOTE(review): if replicaIDs is empty this builds "IN ()" — confirm reachable.
    reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
    resSelect1 = self._query( reqSelect1, connection )
    if not resSelect1['OK']:
      gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateStageRequestStatus', reqSelect1, resSelect1['Message'] ) )
    else:
      for record in resSelect1['Value']:
        gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateStageRequestStatus', record ) )
    # Now update the replicas associated to the replicaIDs
    newReplicaStatus = self.__getReplicaStateFromStageState( newStageStatus )
    res = self.updateReplicaStatus( toUpdate, newReplicaStatus, connection = connection )
    if not res['OK']:
      # Best effort: failure to propagate is logged but not fatal.
      gLogger.warn( "Failed to update cache replicas associated to stage requests", res['Message'] )
    return S_OK( toUpdate )
def _checkStageUpdate( self, replicaIDs, newStageState, connection = False ):
connection = self.__getConnection( connection )
if not replicaIDs:
return S_OK( replicaIDs )
# * -> Failed
if newStageState == 'Failed':
oldStageState = []
elif newStageState == 'Staged':
oldStageState = ['StageSubmitted']
else:
return S_ERROR( "StageRequest status not recognized" )
if not oldStageState:
toUpdate = replicaIDs
else:
req = "SELECT ReplicaID FROM StageRequests WHERE StageStatus = '%s' AND ReplicaID IN (%s)" % ( oldStageState, intListToString( replicaIDs ) )
res = self._query( req, connection )
if not res['OK']:
return res
toUpdate = [row[0] for row in res['Value']]
return S_OK( toUpdate )
  def __getReplicaStateFromStageState( self, stageState ):
    """ Map a stage-request state onto the corresponding replica state.

        Currently the identity mapping: replica states mirror stage states.
    """
    # For the moment the replica state just references to the stage state
    return stageState
#
# End of state machine management
#
################################################################
################################################################
#
# Monitoring of stage tasks
#
  def getTaskStatus( self, taskID, connection = False ):
    """ Obtain the task status from the Tasks table.

        :param taskID: task identifier(s) forwarded to getTaskInfo
        :return: S_OK with the task's Status string
    """
    connection = self.__getConnection( connection )
    res = self.getTaskInfo( taskID, connection = connection )
    if not res['OK']:
      return res
    # NOTE(review): getTaskInfo keys its result dict by SourceTaskID; this lookup
    # by taskID only works when TaskID == SourceTaskID — confirm against callers.
    taskInfo = res['Value'][taskID]
    return S_OK( taskInfo['Status'] )
  def getTaskInfo( self, taskID, connection = False ):
    """ Obtain all the information from the Tasks table for a supplied task.

        :param taskID: one or more TaskID values (passed to intListToString)
        :return: S_OK with dict keyed by SourceTaskID -> task fields, or S_ERROR
                 when no row matched
    """
    connection = self.__getConnection( connection )
    req = "SELECT TaskID,Status,Source,SubmitTime,CompleteTime,CallBackMethod,SourceTaskID from Tasks WHERE TaskID IN (%s);" % intListToString( taskID )
    res = self._query( req, connection )
    if not res['OK']:
      gLogger.error( 'StorageManagementDB.getTaskInfo: Failed to get task information.', res['Message'] )
      return res
    resDict = {}
    # NOTE(review): the loop variable shadows the taskID parameter, and the result
    # is keyed by SourceTaskID rather than TaskID (getTaskStatus indexes by
    # TaskID) — confirm this asymmetry is intended.
    for taskID, status, source, submitTime, completeTime, callBackMethod, sourceTaskID in res['Value']:
      resDict[sourceTaskID] = {'Status':status, 'Source':source, 'SubmitTime':submitTime, 'CompleteTime':completeTime, 'CallBackMethod':callBackMethod, 'SourceTaskID':sourceTaskID}
    if not resDict:
      gLogger.error( 'StorageManagementDB.getTaskInfo: The supplied task %s did not exist' % taskID )
      return S_ERROR( 'The supplied task %s did not exist' % taskID )
    return S_OK( resDict )
def _getTaskIDForJob ( self, jobID, connection = False ):
# Stager taskID is retrieved from the source DIRAC jobID
connection = self.__getConnection( connection )
req = "SELECT TaskID from Tasks WHERE SourceTaskID=%s;" % int( jobID )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_getTaskIDForJob', req, res['Message'] ) )
return S_ERROR( 'The supplied JobID does not exist!' )
taskID = [ row[0] for row in res['Value'] ]
return S_OK( taskID )
  def getTaskSummary( self, jobID, connection = False ):
    """ Obtain the task summary from the database.

        :param jobID: source DIRAC job identifier
        :return: S_OK with {'TaskInfo': ..., 'ReplicaInfo': lfn -> replica fields},
                 or an empty S_OK when no task exists for the job
    """
    connection = self.__getConnection( connection )
    res = self._getTaskIDForJob( jobID, connection = connection )
    if not res['OK']:
      return res
    if res['Value']:
      taskID = res['Value']
    else:
      # No stager task for this job: nothing to summarize.
      return S_OK()
    res = self.getTaskInfo( taskID, connection = connection )
    if not res['OK']:
      return res
    taskInfo = res['Value']
    # All cache replicas linked to the task(s) through TaskReplicas.
    req = "SELECT R.LFN,R.SE,R.PFN,R.Size,R.Status,R.LastUpdate,R.Reason FROM CacheReplicas AS R, TaskReplicas AS TR WHERE TR.TaskID in (%s) AND TR.ReplicaID=R.ReplicaID;" % intListToString( taskID )
    res = self._query( req, connection )
    if not res['OK']:
      gLogger.error( 'StorageManagementDB.getTaskSummary: Failed to get Replica summary for task.', res['Message'] )
      return res
    replicaInfo = {}
    for lfn, storageElement, pfn, fileSize, status, lastupdate, reason in res['Value']:
      replicaInfo[lfn] = {'StorageElement':storageElement, 'PFN':pfn, 'FileSize':fileSize,
                          'Status':status, 'LastUpdate':lastupdate, 'Reason':reason}
    resDict = {'TaskInfo':taskInfo, 'ReplicaInfo':replicaInfo}
    return S_OK( resDict )
def getTasks( self, condDict = {}, older = None, newer = None, timeStamp = 'SubmitTime', orderAttribute = None,
limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM Tasks" % ( intListToString( self.TASKPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'ReplicaID' ):
replicaIDs = condDict.pop( 'ReplicaID' )
if type( replicaIDs ) not in ( types.ListType, types.TupleType ):
replicaIDs = [replicaIDs]
res = self._getReplicaIDTasks( replicaIDs, connection = connection )
if not res['OK']:
return res
condDict['TaskID'] = res['Value']
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
tasks = res['Value']
resultDict = {}
for row in tasks:
resultDict[row[0]] = dict( zip( self.TASKPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = tasks
result['ParameterNames'] = self.TASKPARAMS
return result
def getCacheReplicas( self, condDict = {}, older = None, newer = None, timeStamp = 'LastUpdate', orderAttribute = None, limit = None, connection = False ):
""" Get cache replicas for the supplied selection with support for the web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM CacheReplicas" % ( intListToString( self.REPLICAPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'TaskID' ):
taskIDs = condDict.pop( 'TaskID' )
if type( taskIDs ) not in ( types.ListType, types.TupleType ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
# BUG: limit is ignored unless there is a nonempty condition dictionary OR older OR newer is nonemtpy
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
cacheReplicas = res['Value']
resultDict = {}
for row in cacheReplicas:
resultDict[row[0]] = dict( zip( self.REPLICAPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = cacheReplicas
result['ParameterNames'] = self.REPLICAPARAMS
return result
def getStageRequests( self, condDict = {}, older = None, newer = None, timeStamp = 'StageRequestSubmitTime', orderAttribute = None, limit = None, connection = False ):
""" Get stage requests for the supplied selection with support for web standard structure """
connection = self.__getConnection( connection )
req = "SELECT %s FROM StageRequests" % ( intListToString( self.STAGEPARAMS ) )
if condDict or older or newer:
if condDict.has_key( 'TaskID' ):
taskIDs = condDict.pop( 'TaskID' )
if type( taskIDs ) not in ( types.ListType, types.TupleType ):
taskIDs = [taskIDs]
res = self._getTaskReplicaIDs( taskIDs, connection = connection )
if not res['OK']:
return res
if res['Value']:
condDict['ReplicaID'] = res['Value']
else:
condDict['ReplicaID'] = [-1]
req = "%s %s" % ( req, self.buildCondition( condDict, older, newer, timeStamp, orderAttribute, limit ) )
res = self._query( req, connection )
if not res['OK']:
return res
stageRequests = res['Value']
resultDict = {}
for row in stageRequests:
resultDict[row[0]] = dict( zip( self.STAGEPARAMS[1:], row[1:] ) )
result = S_OK( resultDict )
result['Records'] = stageRequests
result['ParameterNames'] = self.STAGEPARAMS
return result
def _getTaskReplicaIDs( self, taskIDs, connection = False ):
if not taskIDs:
return S_OK( [] )
req = "SELECT DISTINCT(ReplicaID) FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._query( req, connection )
if not res['OK']:
return res
replicaIDs = [row[0] for row in res['Value']]
return S_OK( replicaIDs )
def _getReplicaIDTasks( self, replicaIDs, connection = False ):
if not replicaIDs:
return S_OK( [] )
req = "SELECT DISTINCT(TaskID) FROM TaskReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
res = self._query( req, connection )
if not res['OK']:
return res
taskIDs = [row[0] for row in res['Value']]
return S_OK( taskIDs )
#
# End of monitoring of stage tasks
#
################################################################
####################################################################
#
# Submission of stage requests
#
  def setRequest( self, lfnDict, source, callbackMethod, sourceTaskID, connection = False ):
    """ Populate the StorageManagementDB Tasks table with the requested files.

        Creates a Task row, inserts any CacheReplicas rows that do not yet exist
        for the requested (SE, LFN) pairs, links them all through TaskReplicas,
        and finally adjusts the task status when the files are already staged
        or failed. On any insertion error the partially created task is removed.

        :param lfnDict: dict SE -> LFN or list of LFNs to stage
        :param source: originator of the request
        :param callbackMethod: callback to invoke on completion
        :param sourceTaskID: identifier of the requesting task/job
        :return: S_OK with the new TaskID
    """
    connection = self.__getConnection( connection )
    if not lfnDict:
      return S_ERROR( "No files supplied in request" )
    # The first step is to create the task in the Tasks table
    res = self._createTask( source, callbackMethod, sourceTaskID, connection = connection )
    if not res['OK']:
      return res
    taskID = res['Value']
    # Get the Replicas which already exist in the CacheReplicas table
    allReplicaIDs = []
    taskStates = []
    for se, lfns in lfnDict.items():
      # Accept a single LFN as well as a list of LFNs per SE.
      if type( lfns ) in types.StringTypes:
        lfns = [lfns]
      res = self._getExistingReplicas( se, lfns, connection = connection )
      if not res['OK']:
        return res
      existingReplicas = res['Value']
      # Insert the CacheReplicas that do not already exist
      for lfn in lfns:
        if lfn in existingReplicas.keys():
          gLogger.verbose( 'StorageManagementDB.setRequest: Replica already exists in CacheReplicas table %s @ %s' % ( lfn, se ) )
          existingFileState = existingReplicas[lfn][1]
          taskState = self.__getTaskStateFromReplicaState( existingFileState )
        else:
          res = self._insertReplicaInformation( lfn, se, 'Stage', connection = connection )
          if not res['OK']:
            # Roll back the task created above before reporting the failure.
            self._cleanTask( taskID, connection = connection )
            return res
          existingReplicas[lfn] = ( res['Value'], 'New' )
          newFileState = existingReplicas[lfn][1]
          taskState = self.__getTaskStateFromReplicaState( newFileState )
        # Collect the distinct task states implied by the replica states.
        if not taskState in taskStates:
          taskStates.append( taskState )
      # existingReplicas values are ( replicaID, status ) tuples.
      allReplicaIDs.extend( existingReplicas.values() )
    # Insert all the replicas into the TaskReplicas table
    res = self._insertTaskReplicaInformation( taskID, allReplicaIDs, connection = connection )
    if not res['OK']:
      self._cleanTask( taskID, connection = connection )
      return res
    # Check whether the the task status is Done based on the existing file states
    # If all the files for a particular Task are 'Staged', update the Task
    if taskStates == ['Staged']:
      # so if the tasks are for LFNs from the lfns dictionary, which are already staged,
      # they immediately change state New->Done. Fixed it to translate such tasks to 'Staged' state
      self.__updateTaskStatus( [taskID], 'Staged', True, connection = connection )
    if 'Failed' in taskStates:
      self.__updateTaskStatus( [taskID], 'Failed', True, connection = connection )
    return S_OK( taskID )
  def _cleanTask( self, taskID, connection = False ):
    """ Remove a task and any related information.

        Best effort: return values of the removal helpers are deliberately
        ignored, as this is used to roll back a partially created task.
    """
    connection = self.__getConnection( connection )
    self.removeTasks( [taskID], connection = connection )
    self.removeUnlinkedReplicas( connection = connection )
  def _createTask( self, source, callbackMethod, sourceTaskID, connection = False ):
    """ Enter the task details into the Tasks table.

        :param source: originator of the request
        :param callbackMethod: callback to invoke on completion
        :param sourceTaskID: identifier of the requesting task/job
        :return: S_OK with the auto-generated TaskID of the inserted row
    """
    connection = self.__getConnection( connection )
    req = "INSERT INTO Tasks (Source,SubmitTime,CallBackMethod,SourceTaskID) VALUES ('%s',UTC_TIMESTAMP(),'%s','%s');" % ( source, callbackMethod, sourceTaskID )
    res = self._update( req, connection )
    if not res['OK']:
      gLogger.error( "StorageManagementDB._createTask: Failed to create task.", res['Message'] )
      return res
    # TaskID is the auto-increment key of the row just inserted.
    taskID = res['lastRowId']
    # Read the row back purely for verbose logging.
    reqSelect = "SELECT * FROM Tasks WHERE TaskID = %s;" % ( taskID )
    resSelect = self._query( reqSelect, connection )
    if not resSelect['OK']:
      gLogger.info( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_createTask', reqSelect, resSelect['Message'] ) )
    else:
      gLogger.verbose( "%s.%s_DB: inserted Tasks = %s" % ( self._caller(), '_createTask', resSelect['Value'][0] ) )
    return S_OK( taskID )
def _getExistingReplicas( self, storageElement, lfns, connection = False ):
""" Obtains the ReplicasIDs for the replicas already entered in the CacheReplicas table """
connection = self.__getConnection( connection )
req = "SELECT ReplicaID,LFN,Status FROM CacheReplicas WHERE SE = '%s' AND LFN IN (%s);" % ( storageElement, stringListToString( lfns ) )
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._getExistingReplicas: Failed to get existing replicas.', res['Message'] )
return res
existingReplicas = {}
for replicaID, lfn, status in res['Value']:
existingReplicas[lfn] = ( replicaID, status )
return S_OK( existingReplicas )
  def _insertReplicaInformation( self, lfn, storageElement, rType, connection = False ):
    """ Enter the replica into the CacheReplicas table.

        PFN, Size, FileChecksum and GUID are initialised empty/zero and filled
        in later (see updateReplicaInformation).

        :param lfn: logical file name
        :param storageElement: SE name
        :param rType: replica type stored in the Type column (e.g. 'Stage')
        :return: S_OK with the auto-generated ReplicaID of the inserted row
    """
    connection = self.__getConnection( connection )
    req = "INSERT INTO CacheReplicas (Type,SE,LFN,PFN,Size,FileChecksum,GUID,SubmitTime,LastUpdate) VALUES ('%s','%s','%s','',0,'','',UTC_TIMESTAMP(),UTC_TIMESTAMP());" % ( rType, storageElement, lfn )
    res = self._update( req, connection )
    if not res['OK']:
      gLogger.error( "_insertReplicaInformation: Failed to insert to CacheReplicas table.", res['Message'] )
      return res
    # ReplicaID is the auto-increment key of the row just inserted.
    replicaID = res['lastRowId']
    # Read the row back purely for verbose logging.
    reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID = %s;" % ( replicaID )
    resSelect = self._query( reqSelect, connection )
    if not resSelect['OK']:
      gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), '_insertReplicaInformation', reqSelect, resSelect['Message'] ) )
    else:
      gLogger.verbose( "%s.%s_DB: inserted CacheReplicas = %s" % ( self._caller(), '_insertReplicaInformation', resSelect['Value'][0] ) )
    return S_OK( replicaID )
def _insertTaskReplicaInformation( self, taskID, replicaIDs, connection = False ):
""" Enter the replicas into TaskReplicas table """
connection = self.__getConnection( connection )
req = "INSERT INTO TaskReplicas (TaskID,ReplicaID) VALUES "
for replicaID, _status in replicaIDs:
replicaString = "(%s,%s)," % ( taskID, replicaID )
req = "%s %s" % ( req, replicaString )
req = req.rstrip( ',' )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB._insertTaskReplicaInformation: Failed to insert to TaskReplicas table.', res['Message'] )
return res
# gLogger.info( "%s_DB:%s" % ('_insertTaskReplicaInformation',req))
gLogger.verbose( "StorageManagementDB._insertTaskReplicaInformation: Successfully added %s CacheReplicas to Task %s." % ( res['Value'], taskID ) )
return S_OK()
#
# End of insertion methods
#
################################################################
####################################################################
  def getStagedReplicas( self, connection = False ):
    """ Return cache replicas in 'Staged' status belonging to tasks that have at
        least one replica in 'Staged' or 'StageSubmitted'.

        NOTE(review): unlike getWaitingReplicas/getOfflineReplicas there is no
        exclusion of tasks that also contain 'New'/'Failed' replicas — confirm
        this asymmetry is intended.
    """
    connection = self.__getConnection( connection )
    # Per-task, per-status replica counts.
    req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
    res = self._query( req, connection )
    if not res['OK']:
      gLogger.error( 'StorageManagementDB.getStagedReplicas: Failed to get eligible TaskReplicas', res['Message'] )
      return res
    goodTasks = []
    for taskID, status, _count in res['Value']:
      if taskID in goodTasks:
        continue
      elif status in ( 'Staged', 'StageSubmitted' ):
        goodTasks.append( taskID )
    return self.getCacheReplicas( {'Status':'Staged', 'TaskID':goodTasks}, connection = connection )
def getWaitingReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getWaitingReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Waiting':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Waiting', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getOfflineReplicas( self, connection = False ):
connection = self.__getConnection( connection )
req = "SELECT TR.TaskID, R.Status, COUNT(*) from TaskReplicas as TR, CacheReplicas as R where TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID,R.Status;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getOfflineReplicas: Failed to get eligible TaskReplicas', res['Message'] )
return res
badTasks = []
goodTasks = []
for taskID, status, _count in res['Value']:
if taskID in badTasks:
continue
elif status in ( 'New', 'Failed' ):
badTasks.append( taskID )
elif status == 'Offline':
goodTasks.append( taskID )
return self.getCacheReplicas( {'Status':'Offline', 'TaskID':goodTasks}, connection = connection )
####################################################################
def getTasksWithStatus( self, status ):
""" This method retrieves the TaskID from the Tasks table with the supplied Status. """
req = "SELECT TaskID,Source,CallBackMethod,SourceTaskID from Tasks WHERE Status = '%s';" % status
res = self._query( req )
if not res['OK']:
return res
taskIDs = {}
for taskID, source, callback, sourceTask in res['Value']:
taskIDs[taskID] = ( source, callback, sourceTask )
return S_OK( taskIDs )
####################################################################
#
# The state transition of the CacheReplicas from *->Failed
#
  def updateReplicaFailure( self, terminalReplicaIDs ):
    """ Set the status to Failure with the failure reason for the supplied Replicas.

        :param terminalReplicaIDs: dict ReplicaID -> failure reason string
        :return: S_OK with the list of ReplicaIDs actually moved to 'Failed'
    """
    # Move the replicas through the state machine first; only those eligible
    # for the 'Failed' transition are returned.
    res = self.updateReplicaStatus( terminalReplicaIDs.keys(), 'Failed' )
    if not res['OK']:
      return res
    updated = res['Value']
    if not updated:
      return S_OK( updated )
    for replicaID in updated:
      # Record the failure reason per replica.
      req = "UPDATE CacheReplicas SET Reason = '%s' WHERE ReplicaID = %d" % ( terminalReplicaIDs[replicaID], replicaID )
      res = self._update( req )
      if not res['OK']:
        gLogger.error( 'StorageManagementDB.updateReplicaFailure: Failed to update replica fail reason.', res['Message'] )
        return res
      # Read the row back purely for verbose logging.
      reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID = %d;" % replicaID
      resSelect1 = self._query( reqSelect1 )
      if not resSelect1['OK']:
        gLogger.warn( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'updateReplicaFailure', reqSelect1, resSelect1['Message'] ) )
      else:
        for record in resSelect1['Value']:
          gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaFailure', record ) )
    return S_OK( updated )
####################################################################
#
# The state transition of the CacheReplicas from New->Waiting
#
  def updateReplicaInformation( self, replicaTuples ):
    """ Set the replica size information and PFN for the requested storage element,
        moving each replica to Status='Waiting' (unless it was 'Cancelled').

        :param replicaTuples: iterable of ( replicaID, pfn, size ) tuples
    """
    for replicaID, pfn, size in replicaTuples:
      # Snapshot the row about to change, for logging below.
      reqSelect = "SELECT ReplicaID FROM CacheReplicas WHERE ReplicaID = %s and Status != 'Cancelled';" % ( replicaID )
      resSelect = self._query( reqSelect )
      if not resSelect['OK']:
        # NOTE(review): on failure here, resSelect['Value'] below would raise — confirm.
        gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect, resSelect['Message'] ) )
      req = "UPDATE CacheReplicas SET PFN = '%s', Size = %s, Status = 'Waiting' WHERE ReplicaID = %s and Status != 'Cancelled';" % ( pfn, size, replicaID )
      res = self._update( req )
      if not res['OK']:
        # Best effort: the failure is logged and the loop continues.
        gLogger.error( 'StagerDB.updateReplicaInformation: Failed to insert replica information.', res['Message'] )
      replicaIDs = []
      for record in resSelect['Value']:
        replicaIDs.append( record[0] )
        gLogger.verbose( "%s.%s_DB: to_update CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
      # Read the rows back purely for verbose logging.
      reqSelect1 = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
      resSelect1 = self._query( reqSelect1 )
      if not resSelect1['OK']:
        gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'updateReplicaInformation', reqSelect1, resSelect1['Message'] ) )
      else:
        for record in resSelect1['Value']:
          gLogger.verbose( "%s.%s_DB: updated CacheReplicas = %s" % ( self._caller(), 'updateReplicaInformation', record ) )
      gLogger.debug( 'StagerDB.updateReplicaInformation: Successfully updated CacheReplicas record With Status=Waiting, for ReplicaID= %s' % ( replicaID ) )
    return S_OK()
####################################################################
#
# The state transition of the CacheReplicas from Waiting->StageSubmitted
#
def getSubmittedStagePins( self ):
# change the query to take into account pin expiry time
req = "SELECT SE,COUNT(*),SUM(Size) from CacheReplicas WHERE Status NOT IN ('New','Waiting','Offline','Failed') GROUP BY SE;"
# req = "SELECT SE,Count(*),SUM(Size) from CacheReplicas,StageRequests WHERE Status NOT IN ('New','Waiting','Failed') and CacheReplicas.ReplicaID=StageRequests.ReplicaID and PinExpiryTime>UTC_TIMESTAMP() GROUP BY SE;"
res = self._query( req )
if not res['OK']:
gLogger.error( 'StorageManagementDB.getSubmittedStagePins: Failed to obtain submitted requests.', res['Message'] )
return res
storageRequests = {}
for storageElement, replicas, totalSize in res['Value']:
storageRequests[storageElement] = {'Replicas':int( replicas ), 'TotalSize':int( totalSize )}
return S_OK( storageRequests )
  def insertStageRequest( self, requestDict, pinLifeTime ):
    """ Record the stage requests submitted to the storage system.

        :param requestDict: dict RequestID -> list of ReplicaIDs covered by that
                            stage request
        :param pinLifeTime: pin length in seconds stored in the PinLength column
    """
    req = "INSERT INTO StageRequests (ReplicaID,RequestID,StageRequestSubmitTime,PinLength) VALUES "
    # Build one multi-row INSERT covering every (replica, request) pair.
    for requestID, replicaIDs in requestDict.items():
      for replicaID in replicaIDs:
        replicaString = "(%s,'%s',UTC_TIMESTAMP(),%d)," % ( replicaID, requestID, pinLifeTime )
        req = "%s %s" % ( req, replicaString )
    req = req.rstrip( ',' )
    res = self._update( req )
    if not res['OK']:
      gLogger.error( 'StorageManagementDB.insertStageRequest: Failed to insert to StageRequests table.', res['Message'] )
      return res
    # Read the rows back purely for verbose logging.
    for requestID, replicaIDs in requestDict.items():
      for replicaID in replicaIDs:
        # fix, no individual queries
        reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID = %s AND RequestID = '%s';" % ( replicaID, requestID )
        resSelect = self._query( reqSelect )
        if not resSelect['OK']:
          gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'insertStageRequest', reqSelect, resSelect['Message'] ) )
        else:
          gLogger.verbose( "%s.%s_DB: inserted StageRequests = %s" % ( self._caller(), 'insertStageRequest', resSelect['Value'][0] ) )
    # NOTE(review): requestID here is whatever the last loop iteration left bound.
    gLogger.debug( "StorageManagementDB.insertStageRequest: Successfully added %s StageRequests with RequestID %s." % ( res['Value'], requestID ) )
    return S_OK()
####################################################################
#
# The state transition of the CacheReplicas from StageSubmitted->Staged
#
  def setStageComplete( self, replicaIDs ):
    """ Mark the stage requests of the given replicas as 'Staged' and set their
        completion and pin-expiry timestamps.

        :param replicaIDs: list of ReplicaID values whose staging finished
        :return: the result of the UPDATE (S_OK with affected-row count)
    """
    # Daniela: FIX wrong PinExpiryTime (84000->86400 seconds = 1 day)
    # Snapshot the rows about to change, for logging below.
    reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
    resSelect = self._query( reqSelect )
    if not resSelect['OK']:
      gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect, resSelect['Message'] ) )
      return resSelect
    # PinExpiryTime is set to PinLength scaled down by THROTTLING_STEPS —
    # presumably a module-level constant; TODO confirm the intended semantics.
    req = "UPDATE StageRequests SET StageStatus='Staged',StageRequestCompletedTime = UTC_TIMESTAMP(),PinExpiryTime = DATE_ADD(UTC_TIMESTAMP(),INTERVAL ( PinLength / %s ) SECOND) WHERE ReplicaID IN (%s);" % ( THROTTLING_STEPS, intListToString( replicaIDs ) )
    res = self._update( req )
    if not res['OK']:
      gLogger.error( "StorageManagementDB.setStageComplete: Failed to set StageRequest completed.", res['Message'] )
      return res
    for record in resSelect['Value']:
      gLogger.verbose( "%s.%s_DB: to_update StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
    # Read the rows back purely for verbose logging.
    reqSelect1 = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
    resSelect1 = self._query( reqSelect1 )
    if not resSelect1['OK']:
      # NOTE(review): on failure here, resSelect1['Value'] below would raise — confirm.
      gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setStageComplete', reqSelect1, resSelect1['Message'] ) )
    for record in resSelect1['Value']:
      gLogger.verbose( "%s.%s_DB: updated StageRequests = %s" % ( self._caller(), 'setStageComplete', record ) )
    gLogger.debug( "StorageManagementDB.setStageComplete: Successfully updated %s StageRequests table with StageStatus=Staged for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
    return res
def wakeupOldRequests( self, replicaIDs , retryInterval, connection = False ):
"""
get only StageRequests with StageRequestSubmitTime older than 1 day AND are still not staged
delete these requests
reset Replicas with corresponding ReplicaIDs to Status='New'
"""
try:
retryInterval = max( retryInterval, 2 )
retryInterval = min( retryInterval, 24 )
retryInterval = int( retryInterval )
except Exception:
errorString = 'Wrong argument type'
gLogger.exception( errorString )
return S_ERROR( errorString )
if( replicaIDs ) > 0:
req = "SELECT ReplicaID FROM StageRequests WHERE ReplicaID IN (%s) AND StageStatus='StageSubmitted' AND DATE_ADD( StageRequestSubmitTime, INTERVAL %s HOUR ) < UTC_TIMESTAMP();" % ( intListToString( replicaIDs ), retryInterval )
res = self._query( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to select old StageRequests.", res['Message'] )
return res
old_replicaIDs = [ row[0] for row in res['Value'] ]
if len( old_replicaIDs ) > 0:
req = "UPDATE CacheReplicas SET Status='New',LastUpdate = UTC_TIMESTAMP(), Reason = 'wakeupOldRequests' WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests: Failed to roll CacheReplicas back to Status=New.", res['Message'] )
return res
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( old_replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.wakeupOldRequests. Problem removing entries from StageRequests." )
return res
return S_OK()
####################################################################
#
# This code handles the finalization of stage tasks
#
# Daniela: useless method
'''
def updateStageCompletingTasks(self):
""" This will select all the Tasks in StageCompleting status and check whether all the associated files are Staged. """
req = "SELECT TR.TaskID,COUNT(if(R.Status NOT IN ('Staged'),1,NULL)) FROM Tasks AS T, TaskReplicas AS TR, CacheReplicas AS R WHERE T.Status='StageCompleting' AND T.TaskID=TR.TaskID AND TR.ReplicaID=R.ReplicaID GROUP BY TR.TaskID;"
res = self._query(req)
if not res['OK']:
return res
taskIDs = []
for taskID,count in res['Value']:
if int(count) == 0:
taskIDs.append(taskID)
if not taskIDs:
return S_OK(taskIDs)
req = "UPDATE Tasks SET Status = 'Staged' WHERE TaskID IN (%s);" % intListToString(taskIDs)
res = self._update(req)
if not res['OK']:
return res
return S_OK(taskIDs)
'''
def setTasksDone( self, taskIDs ):
""" This will update the status for a list of taskIDs to Done. """
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect, resSelect['Message'] ) )
req = "UPDATE Tasks SET Status = 'Done', CompleteTime = UTC_TIMESTAMP() WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req )
if not res['OK']:
gLogger.error( "StorageManagementDB.setTasksDone: Failed to set Tasks status to Done.", res['Message'] )
return res
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_update Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
# fix, no individual queries
reqSelect1 = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect1 = self._query( reqSelect1 )
if not resSelect1['OK']:
gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'setTasksDone', reqSelect1, resSelect1['Message'] ) )
else:
for record in resSelect1['Value']:
gLogger.verbose( "%s.%s_DB: updated Tasks = %s" % ( self._caller(), 'setTasksDone', record ) )
gLogger.debug( "StorageManagementDB.setTasksDone: Successfully updated %s Tasks with StageStatus=Done for taskIDs: %s." % ( res['Value'], taskIDs ) )
return res
def killTasksBySourceTaskID(self, sourceTaskIDs, connection = False):
""" Given SourceTaskIDs (jobs), this will cancel further staging of files for the corresponding tasks.
The "cancel" is actually removing all stager DB records for these jobs.
Care must be taken to NOT cancel staging of files that are requested also by other tasks. """
connection = self.__getConnection( connection )
# get the TaskIDs
req = "SELECT TaskID from Tasks WHERE SourceTaskID IN (%s);" % intListToString( sourceTaskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
taskIDs = [ row[0] for row in res['Value'] ]
# ! Make sure to only cancel file staging for files with no relations with other tasks (jobs) but the killed ones
req = "SELECT DISTINCT(CR.ReplicaID) FROM TaskReplicas AS TR, CacheReplicas AS CR WHERE TR.TaskID IN (%s) AND CR.Links=1 and TR.ReplicaID=CR.ReplicaID;" % intListToString( taskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
replicaIDs = [ row[0] for row in res['Value'] ]
if replicaIDs:
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
taskIDs = [ row[0] for row in res['Value'] ]
# ! Make sure to only cancel file staging for files with no relations with other tasks (jobs) but the killed ones
if taskIDs:
req = "SELECT DISTINCT(CR.ReplicaID) FROM TaskReplicas AS TR, CacheReplicas AS CR WHERE TR.TaskID IN (%s) AND CR.Links=1 and TR.ReplicaID=CR.ReplicaID;" % intListToString( taskIDs )
res = self._query( req )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem retrieving records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
replicaIDs = [ row[0] for row in res['Value'] ]
if replicaIDs:
req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
req = "DELETE FROM CacheReplicas WHERE ReplicaID in (%s) AND Links=1;" % intListToString ( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "%s.%s_DB: problem removing records: %s. %s" % ( self._caller(), 'killTasksBySourceTaskID', req, res['Message'] ) )
# Finally, remove the Task and TaskReplicas entries.
res = self.removeTasks( taskIDs, connection )
return res
def removeStageRequests( self, replicaIDs, connection = False ):
connection = self.__getConnection( connection )
req = "DELETE FROM StageRequests WHERE ReplicaID in (%s);" % intListToString( replicaIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeStageRequests. Problem removing entries from StageRequests." )
return res
return res
def removeTasks( self, taskIDs, connection = False ):
""" This will delete the entries from the TaskReplicas for the provided taskIDs. """
connection = self.__getConnection( connection )
req = "DELETE FROM TaskReplicas WHERE TaskID IN (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from TaskReplicas." )
return res
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
reqSelect = "SELECT * FROM Tasks WHERE TaskID IN (%s);" % intListToString( taskIDs )
resSelect = self._query( reqSelect )
if not resSelect['OK']:
gLogger.error( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeTasks', reqSelect, resSelect['Message'] ) )
else:
for record in resSelect['Value']:
gLogger.verbose( "%s.%s_DB: to_delete Tasks = %s" % ( self._caller(), 'removeTasks', record ) )
req = "DELETE FROM Tasks WHERE TaskID in (%s);" % intListToString( taskIDs )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.removeTasks. Problem removing entries from Tasks." )
gLogger.verbose( "%s.%s_DB: deleted Tasks" % ( self._caller(), 'removeTasks' ) )
# gLogger.info( "%s_DB:%s" % ('removeTasks',req))
return res
def setOldTasksAsFailed( self, daysOld, connection = False ):
"""
Set Tasks older than "daysOld" number of days to Failed
These tasks have already been retried every day for staging
"""
req = "UPDATE Tasks SET Status='Failed' WHERE DATE_ADD(SubmitTime, INTERVAL %s DAY ) < UTC_TIMESTAMP();" % ( daysOld )
res = self._update( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.setOldTasksAsFailed. Problem setting old Tasks to Failed." )
return res
return res
def getCacheReplicasSummary( self, connection = False ):
"""
Reports breakdown of file number/size in different staging states across storage elements
"""
connection = self.__getConnection( connection )
req = "SELECT DISTINCT(Status),SE,COUNT(*),sum(size)/(1024*1024*1024) FROM CacheReplicas GROUP BY Status,SE;"
res = self._query( req, connection )
if not res['OK']:
gLogger.error( "StorageManagementDB.getCacheReplicasSummary failed." )
return res
resSummary = {}
i = 1
for status, se, numFiles, sumFiles in res['Value']:
resSummary[i] = {'Status':status, 'SE':se, 'NumFiles':long( numFiles ), 'SumFiles':float( sumFiles )}
i += 1
return S_OK( resSummary )
  def removeUnlinkedReplicas( self, connection = False ):
    """ This will remove Replicas from the CacheReplicas that are not associated to any Task.
        If the Replica has been Staged,
        wait until StageRequest.PinExpiryTime and remove the StageRequest and CacheReplicas entries

        Three categories of unlinked (Links = 0) replicas are collected:
        staged replicas whose pin expired, Failed replicas, and replicas with
        no StageRequests entry at all; their StageRequests and CacheReplicas
        rows are then deleted.
    """
    connection = self.__getConnection( connection )
    # First, check if there is a StageRequest and PinExpiryTime has arrived
    req = "select SR.ReplicaID from CacheReplicas CR,StageRequests SR WHERE CR.Links = 0 and CR.ReplicaID=SR.ReplicaID group by SR.ReplicaID HAVING max(SR.PinExpiryTime) < UTC_TIMESTAMP();"
    # req = "SELECT ReplicaID from CacheReplicas WHERE Links = 0;"
    res = self._query( req, connection )
    if not res['OK']:
      gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
      return res
    replicaIDs = [ row[0] for row in res['Value'] ]
    # Look for Failed CacheReplicas which are not associated to any Task. These have no PinExpiryTime in StageRequests
    # as they were not staged successfully (for various reasons), even though a staging request had been submitted
    req = "SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND Status = 'Failed';"
    res = self._query( req, connection )
    if not res['OK']:
      # Best-effort: a failure here only skips the Failed replicas this pass.
      gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0 AND Status=Failed." )
    else:
      replicaIDs.extend( [ row[0] for row in res['Value'] ] )
    if replicaIDs:
      # Removed the entries from the StageRequests table that are expired
      reqSelect = "SELECT * FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
      resSelect = self._query( reqSelect )
      if not resSelect['OK']:
        gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas',
                                                                        reqSelect, resSelect['Message'] ) )
      else:
        for record in resSelect['Value']:
          gLogger.verbose( "%s.%s_DB: to_delete StageRequests = %s" % ( self._caller(), 'removeUnlinkedReplicas',
                                                                        record ) )
      req = "DELETE FROM StageRequests WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
      res = self._update( req, connection )
      if not res['OK']:
        gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem deleting from StageRequests." )
        return res
      gLogger.verbose( "%s.%s_DB: deleted StageRequests" % ( self._caller(), 'removeUnlinkedReplicas' ) )
      gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s StageRequests entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
    # Second look for CacheReplicas for which there is no entry in StageRequests
    # NOTE(review): the StageRequests rows for replicaIDs were just deleted
    # above, so this query can re-add the same IDs; duplicates are harmless in
    # the IN (...) clauses below — confirm this is intended.
    req = 'SELECT ReplicaID FROM CacheReplicas WHERE Links = 0 AND ReplicaID NOT IN ( SELECT DISTINCT( ReplicaID ) FROM StageRequests )'
    res = self._query( req, connection )
    if not res['OK']:
      gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem selecting entries from CacheReplicas where Links = 0." )
    else:
      replicaIDs.extend( [ row[0] for row in res['Value'] ] )
    if not replicaIDs:
      return S_OK()
    # Now delete all CacheReplicas
    reqSelect = "SELECT * FROM CacheReplicas WHERE ReplicaID IN (%s);" % intListToString( replicaIDs )
    resSelect = self._query( reqSelect )
    if not resSelect['OK']:
      gLogger.warn( "%s.%s_DB: problem retrieving record: %s. %s" % ( self._caller(), 'removeUnlinkedReplicas', reqSelect, resSelect['Message'] ) )
    else:
      for record in resSelect['Value']:
        gLogger.verbose( "%s.%s_DB: to_delete CacheReplicas = %s" % ( self._caller(), 'removeUnlinkedReplicas', record ) )
    req = "DELETE FROM CacheReplicas WHERE ReplicaID IN (%s) AND Links= 0;" % intListToString( replicaIDs )
    res = self._update( req, connection )
    if res['OK']:
      gLogger.verbose( "%s.%s_DB: deleted CacheReplicas" % ( self._caller(), 'removeUnlinkedReplicas' ) )
      gLogger.debug( "StorageManagementDB.removeUnlinkedReplicas: Successfully removed %s CacheReplicas entries for ReplicaIDs: %s." % ( res['Value'], replicaIDs ) )
    else:
      gLogger.error( "StorageManagementDB.removeUnlinkedReplicas. Problem removing entries from CacheReplicas." )
    return res
|
sposs/DIRAC
|
StorageManagementSystem/DB/StorageManagementDB.py
|
Python
|
gpl-3.0
| 62,782
|
[
"DIRAC"
] |
752de7ca12b454687c8e584eef4f05a03f4f35a3bc2a17ba10583cbabdc75458
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""DeepQNetwork models for molecule generation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from rdkit import Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
from tensorflow.contrib import training as contrib_training
class DeepQNetwork(object):
  """Deep Q Network.

  This class implements the network as used in the Nature
  (2015) paper.
  Human-level control through deep reinforcement learning
  https://www.nature.com/articles/nature14236
  https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
  """

  def __init__(self,
               input_shape,
               q_fn,
               learning_rate=0.001,
               learning_rate_decay_steps=10000,
               learning_rate_decay_rate=0.8,
               optimizer='Adam',
               grad_clipping=None,
               gamma=1.0,
               epsilon=0.2,
               double_q=True,
               num_bootstrap_heads=10,
               scope='dqn',
               reuse=None):
    """Creates the model function.

    Args:
      input_shape: Tuple. The shape of input.
      q_fn: A function, whose input is the observation features, and the
        output is the Q value of the observation.
      learning_rate: Float. The learning rate of the optimizer.
      learning_rate_decay_steps: Integer. The number of steps between each
        learning rate decay.
      learning_rate_decay_rate: Float. The rate of learning rate decay.
      optimizer: String. Which optimizer to use.
      grad_clipping: Boolean. Whether to clip gradient.
      gamma: Float. Discount factor.
      epsilon: Float. The probability of choosing a random action.
      double_q: Boolean. Whether to use double q learning.
      num_bootstrap_heads: Integer. The number of bootstrap heads to use.
      scope: String or VariableScope. Variable Scope.
      reuse: Boolean or None. Whether or not the variable should be reused.
    """
    self.input_shape = input_shape
    self.q_fn = q_fn
    self.learning_rate = learning_rate
    self.learning_rate_decay_steps = learning_rate_decay_steps
    self.learning_rate_decay_rate = learning_rate_decay_rate
    self.optimizer = optimizer
    self.grad_clipping = grad_clipping
    self.gamma = gamma
    self.num_bootstrap_heads = num_bootstrap_heads
    self.double_q = double_q
    self.scope = scope
    self.reuse = reuse
    self.epsilon = epsilon

  def build(self):
    """Builds the computational graph and training operations."""
    self._build_graph()
    self._build_training_ops()
    self._build_summary_ops()

  def _build_single_q_network(self, observations, head, state_t, state_tp1,
                              done_mask, reward_t, error_weight):
    """Builds the computational graph for a single Q network.

    Briefly, this part is calculating the following two quantities:
    1. q_value = q_fn(observations)
    2. td_error = q_fn(state_t) - reward_t - gamma * q_fn(state_tp1)
    The optimization target is to minimize the td_error.

    Args:
      observations: shape = [batch_size, hparams.fingerprint_length].
        The input of the Q function.
      head: shape = [1].
        The index of the head chosen for decision in bootstrap DQN.
      state_t: shape = [batch_size, hparams.fingerprint_length].
        The state at time step t.
      state_tp1: a list of tensors, with total number of batch_size,
        each has shape = [num_actions, hparams.fingerprint_length].
        Note that the num_actions can be different for each tensor.
        The state at time step t+1, tp1 is short for t plus 1.
      done_mask: shape = [batch_size, 1]
        Whether state_tp1 is the terminal state.
      reward_t: shape = [batch_size, 1]
        the reward at time step t.
      error_weight: shape = [batch_size, 1]
        weight for the loss.

    Returns:
      q_values: Tensor of [batch_size, 1]. The q values for the observations.
      td_error: Tensor of [batch_size, 1]. The TD error.
      weighted_error: Tensor of [batch_size, 1]. The TD error weighted by
        error_weight.
      q_fn_vars: List of tf.Variables. The variables of q_fn when computing
        the q_values of state_t
      q_tp1_vars: List of tf.Variables. The variables of q_fn when computing
        the q_values of state_tp1
    """
    with tf.variable_scope('q_fn'):
      # q_values has shape [batch_size, 1].
      q_values = tf.gather(self.q_fn(observations), head, axis=-1)
    # calculating q_fn(state_t)
    # The Q network shares parameters with the action graph.
    with tf.variable_scope('q_fn', reuse=True):
      q_t = self.q_fn(state_t, reuse=True)
    q_fn_vars = tf.trainable_variables(scope=tf.get_variable_scope().name +
                                       '/q_fn')
    # calculating q_fn(state_tp1)
    with tf.variable_scope('q_tp1', reuse=tf.AUTO_REUSE):
      q_tp1 = [self.q_fn(s_tp1, reuse=tf.AUTO_REUSE) for s_tp1 in state_tp1]
    q_tp1_vars = tf.trainable_variables(scope=tf.get_variable_scope().name +
                                        '/q_tp1')
    if self.double_q:
      with tf.variable_scope('q_fn', reuse=True):
        q_tp1_online = [self.q_fn(s_tp1, reuse=True) for s_tp1 in state_tp1]
      if self.num_bootstrap_heads:
        num_heads = self.num_bootstrap_heads
      else:
        num_heads = 1
      # determine the action to choose based on online Q estimator.
      q_tp1_online_idx = [
          tf.stack(
              [tf.argmax(q, axis=0),
               tf.range(num_heads, dtype=tf.int64)],
              axis=1) for q in q_tp1_online
      ]
      # use the index from max online q_values to compute the value
      # function
      v_tp1 = tf.stack(
          [tf.gather_nd(q, idx) for q, idx in zip(q_tp1, q_tp1_online_idx)],
          axis=0)
    else:
      v_tp1 = tf.stack([tf.reduce_max(q) for q in q_tp1], axis=0)
    # if s_{t+1} is the terminal state, we do not evaluate the Q value of
    # the state.
    q_tp1_masked = (1.0 - done_mask) * v_tp1
    q_t_target = reward_t + self.gamma * q_tp1_masked
    # stop gradient from flowing to the computing graph which computes
    # the Q value of s_{t+1}.
    # td_error has shape [batch_size, 1]
    td_error = q_t - tf.stop_gradient(q_t_target)
    # If use bootstrap, each head is trained with a different subset of the
    # training sample. Like the idea of dropout.
    if self.num_bootstrap_heads:
      head_mask = tf.keras.backend.random_binomial(
          shape=(1, self.num_bootstrap_heads), p=0.6)
      td_error = tf.reduce_mean(td_error * head_mask, axis=1)
    # The loss comes from a traditional trick in convex optimization:
    # http://web.stanford.edu/~boyd/cvxbook/.
    # See Chapter 6 pp. 298
    # It will makes the optimization robust.
    # Specifically, the loss will use l1 instead of l2 loss when the td error
    # gets larger than 1.0. The l2 loss has the disadvantage that it has
    # the tendency to be dominated by outliers. In terms of estimation theory,
    # the asymptotic relative efficiency of the l1 loss estimator is better
    # for heavy-tailed distributions.
    errors = tf.where(
        tf.abs(td_error) < 1.0, tf.square(td_error) * 0.5,
        1.0 * (tf.abs(td_error) - 0.5))
    weighted_error = tf.reduce_mean(error_weight * errors)
    return q_values, td_error, weighted_error, q_fn_vars, q_tp1_vars

  def _build_input_placeholder(self):
    """Creates the input placeholders.

    Input placeholders created:
      observations: shape = [batch_size, hparams.fingerprint_length].
        The input of the Q function.
      head: shape = [1].
        The index of the head chosen for decision.
      state_t: shape = [batch_size, hparams.fingerprint_length].
        The state at time step t.
      state_tp1: a list of tensors,
        each has shape = [num_actions, hparams.fingerprint_length].
        Note that the num_actions can be different for each tensor.
        The state at time step t+1.
      done_mask: shape = [batch_size, 1]
        Whether state_tp1 is the terminal state.
      error_weight: shape = [batch_size, 1]
        weight for the loss.
    """
    batch_size, fingerprint_length = self.input_shape
    with tf.variable_scope(self.scope, reuse=self.reuse):
      # Build the action graph to choose an action.
      # The observations, which are the inputs of the Q function.
      self.observations = tf.placeholder(
          tf.float32, [None, fingerprint_length], name='observations')
      # head is the index of the head we want to choose for decision.
      # See https://arxiv.org/abs/1703.07608
      self.head = tf.placeholder(tf.int32, [], name='head')
      # When sample from memory, the batch_size can be fixed, as it is
      # possible to sample any number of samples from memory.
      # state_t is the state at time step t
      self.state_t = tf.placeholder(
          tf.float32, self.input_shape, name='state_t')
      # state_tp1 is the state at time step t + 1, tp1 is short for t plus 1.
      self.state_tp1 = [
          tf.placeholder(
              tf.float32, [None, fingerprint_length], name='state_tp1_%i' % i)
          for i in range(batch_size)
      ]
      # done_mask is a {0, 1} tensor indicating whether state_tp1 is the
      # terminal state.
      self.done_mask = tf.placeholder(
          tf.float32, (batch_size, 1), name='done_mask')
      self.error_weight = tf.placeholder(
          tf.float32, (batch_size, 1), name='error_weight')

  def _build_graph(self):
    """Builds the computational graph.

    Input placeholders created:
      reward_t: shape = [batch_size, 1]
        the reward at time step t.

    Instance attributes created:
      q_values: the q values of the observations.
      q_fn_vars: the variables in q function.
      q_tp1_vars: the variables in q_tp1 function.
      td_error: the td_error.
      weighted_error: the weighted td error.
      action: the action to choose next step.
    """
    batch_size, _ = self.input_shape
    with tf.variable_scope(self.scope, reuse=self.reuse):
      self._build_input_placeholder()
      self.reward_t = tf.placeholder(
          tf.float32, (batch_size, 1), name='reward_t')
      # The Q network shares parameters with the action graph.
      # tenors start with q or v have shape [batch_size, 1] when not using
      # bootstrap. When using bootstrap, the shapes are
      # [batch_size, num_bootstrap_heads]
      (self.q_values, self.td_error, self.weighted_error,
       self.q_fn_vars, self.q_tp1_vars) = self._build_single_q_network(
           self.observations, self.head, self.state_t, self.state_tp1,
           self.done_mask, self.reward_t, self.error_weight)
      self.action = tf.argmax(self.q_values)

  def _build_training_ops(self):
    """Creates the training operations.

    Instance attributes created:
      optimization_op: the operation of optimize the loss.
      update_op: the operation to update the q network.
    """
    with tf.variable_scope(self.scope, reuse=self.reuse):
      self.optimization_op = contrib_layers.optimize_loss(
          loss=self.weighted_error,
          global_step=tf.train.get_or_create_global_step(),
          learning_rate=self.learning_rate,
          optimizer=self.optimizer,
          clip_gradients=self.grad_clipping,
          learning_rate_decay_fn=functools.partial(
              tf.train.exponential_decay,
              decay_steps=self.learning_rate_decay_steps,
              decay_rate=self.learning_rate_decay_rate),
          variables=self.q_fn_vars)
      # Copy the online network weights into the target network, pairing
      # variables by (sorted) name.
      self.update_op = []
      for var, target in zip(
          sorted(self.q_fn_vars, key=lambda v: v.name),
          sorted(self.q_tp1_vars, key=lambda v: v.name)):
        self.update_op.append(target.assign(var))
      self.update_op = tf.group(*self.update_op)

  def _build_summary_ops(self):
    """Creates the summary operations.

    Input placeholders created:
      smiles: the smiles string.
      reward: the reward.

    Instance attributes created:
      error_summary: the operation to log the summary of error.
      episode_summary: the operation to log the smiles string and reward.
    """
    with tf.variable_scope(self.scope, reuse=self.reuse):
      with tf.name_scope('summaries'):
        # The td_error here is the difference between q_t and q_t_target.
        # Without abs(), the summary of td_error is actually underestimated.
        self.error_summary = tf.summary.scalar(
            'td_error', tf.reduce_mean(tf.abs(self.td_error)))
        self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')
        self.reward = tf.placeholder(tf.float32, [], 'summary_reward')
        smiles_summary = tf.summary.text('SMILES', self.smiles)
        reward_summary = tf.summary.scalar('reward', self.reward)
        self.episode_summary = tf.summary.merge(
            [smiles_summary, reward_summary])

  def log_result(self, smiles, reward):
    """Summarizes the SMILES string and reward at the end of an episode.

    Args:
      smiles: String. The SMILES string.
      reward: Float. The reward.

    Returns:
      the summary protobuf
    """
    return tf.get_default_session().run(
        self.episode_summary,
        feed_dict={
            self.smiles: smiles,
            self.reward: reward
        })

  def _run_action_op(self, observations, head):
    """Function that runs the op calculating an action given the observations.

    Args:
      observations: np.array. shape = [num_actions, fingerprint_length].
        Observations that can be feed into the Q network.
      head: Integer. The output index to use.

    Returns:
      Integer. which action to be performed.
    """
    action = tf.get_default_session().run(
        self.action,
        feed_dict={
            self.observations: observations,
            self.head: head
        })
    # Bug fix: np.asscalar was deprecated in NumPy 1.16 and removed in 1.23;
    # ndarray.item() is the supported equivalent.
    return np.asarray(action).item()

  def get_action(self,
                 observations,
                 stochastic=True,
                 head=0,
                 update_epsilon=None):
    """Function that chooses an action given the observations.

    Args:
      observations: np.array. shape = [num_actions, fingerprint_length].
        Observations that can be feed into the Q network.
      stochastic: Boolean. If set to False all the actions are always
        deterministic (default True).
      head: Integer. The output index to use.
      update_epsilon: Float or None. update epsilon a new value, if None
        no update happens (default: no update).

    Returns:
      Integer. which action to be performed.
    """
    if update_epsilon is not None:
      self.epsilon = update_epsilon
    # Epsilon-greedy exploration: with probability epsilon take a uniformly
    # random action, otherwise the greedy action from the Q network.
    if stochastic and np.random.uniform() < self.epsilon:
      return np.random.randint(0, observations.shape[0])
    else:
      return self._run_action_op(observations, head)

  def train(self, states, rewards, next_states, done, weight, summary=True):
    """Function that takes a transition (s,a,r,s') and optimizes Bellman error.

    Args:
      states: object, a batch of observations.
      rewards: np.array, immediate reward attained after executing those actions
        dtype must be float32 and shape must be (batch_size,).
      next_states: object, observations that followed states.
      done: np.array, 1 if obs_t was the last observation in the episode and 0
        otherwise obs_tp1 gets ignored, but must be of the valid shape. dtype
        must be float32 and shape must be (batch_size,).
      weight: np.array, importance sampling weights for every element of the
        batch. dtype must be float32 and shape must be (batch_size,).
      summary: Boolean, whether to get summary.

    Returns:
      td_error: np.array. a list of differences between Q(s,a) and the
        target in Bellman's equation.
        dtype is float32 and shape is (batch_size,).
    """
    if summary:
      ops = [self.td_error, self.error_summary, self.optimization_op]
    else:
      ops = [self.td_error, self.optimization_op]
    feed_dict = {
        self.state_t: states,
        self.reward_t: rewards,
        self.done_mask: done,
        self.error_weight: weight
    }
    # state_tp1 is a list of per-example placeholders; feed them one by one.
    for i, next_state in enumerate(next_states):
      feed_dict[self.state_tp1[i]] = next_state
    return tf.get_default_session().run(ops, feed_dict=feed_dict)
class MultiObjectiveDeepQNetwork(DeepQNetwork):
"""Multi Objective Deep Q Network.
The idea is described in
Multiobjective Reinforcement Learning: A Comprehensive Overview
https://ieeexplore.ieee.org/document/6918520/
Briefly, the difference between this Multi Objective Deep Q Network and
a naive Deep Q Network is that this one uses one Q network for approximating
each of the objectives. And a weighted sum of those Q values are used for
decision making.
The loss is the summation of the losses of each Q network.
"""
def __init__(self, objective_weight, **kwargs):
"""Creates the model function.
Args:
objective_weight: np.array with shape [num_objectives, 1]. The weight
vector for the objectives.
**kwargs: arguments for the DeepQNetworks class.
"""
# Normalize the sum to 1.
self.objective_weight = objective_weight / np.sum(objective_weight)
self.num_objectives = objective_weight.shape[0]
super(MultiObjectiveDeepQNetwork, self).__init__(**kwargs)
  def _build_graph(self):
    """Builds the computational graph.

    One Q network is built per objective; the per-objective rewards are split
    out of reward_t and the final action maximizes the weighted sum of the
    per-objective Q values.

    Input placeholders created:
      observations: shape = [batch_size, hparams.fingerprint_length].
        The input of the Q function.
      head: shape = [1].
        The index of the head chosen for decision.
      objective_weight: shape = [num_objectives, 1].
        objective_weight is the weight to scalarize the objective vector:
        reward = sum (objective_weight_i * objective_i)
      state_t: shape = [batch_size, hparams.fingerprint_length].
        The state at time step t.
      state_tp1: a list of tensors,
        each has shape = [num_actions, hparams.fingerprint_length].
        Note that the num_actions can be different for each tensor.
        The state at time step t+1.
      done_mask: shape = [batch_size, 1]
        Whether state_tp1 is the terminal state.
      reward_t: shape = [batch_size, num_objectives]
        the reward at time step t.
      error weight: shape = [batch_size, 1]
        weight for the loss.

    Instance attributes created:
      q_values: List of Tensors of [batch_size, 1]. The q values for the
        observations.
      td_error: List of Tensor of [batch_size, 1]. The TD error.
      weighted_error: List of Tensor of [batch_size, 1]. The TD error weighted
        by importance sampling weight.
      q_fn_vars: List of tf.Variables. The variables of q_fn when computing
        the q_values of state_t
      q_tp1_vars: List of tf.Variables. The variables of q_fn when computing
        the q_values of state_tp1
    """
    batch_size, _ = self.input_shape
    with tf.variable_scope(self.scope, reuse=self.reuse):
      self._build_input_placeholder()
      # One reward column per objective (unlike the single-objective parent).
      self.reward_t = tf.placeholder(
          tf.float32, (batch_size, self.num_objectives), name='reward_t')
      # objective_weight is the weight to scalarize the objective vector:
      # reward = sum (objective_weight_i * objective_i)
      self.objective_weight_input = tf.placeholder(
          tf.float32, [self.num_objectives, 1], name='objective_weight')
      # split reward for each q network
      rewards_list = tf.split(self.reward_t, self.num_objectives, axis=1)
      q_values_list = []
      self.td_error = []
      # Accumulated below as the mean of the per-objective weighted errors.
      self.weighted_error = 0
      self.q_fn_vars = []
      self.q_tp1_vars = []
      # build a Q network for each objective
      for obj_idx in range(self.num_objectives):
        with tf.variable_scope('objective_%i' % obj_idx):
          (q_values, td_error, weighted_error,
           q_fn_vars, q_tp1_vars) = self._build_single_q_network(
               self.observations, self.head, self.state_t, self.state_tp1,
               self.done_mask, rewards_list[obj_idx], self.error_weight)
          q_values_list.append(tf.expand_dims(q_values, 1))
          # td error is for summary only.
          # weighted error is the optimization goal.
          self.td_error.append(td_error)
          self.weighted_error += weighted_error / self.num_objectives
          self.q_fn_vars += q_fn_vars
          self.q_tp1_vars += q_tp1_vars
      q_values = tf.concat(q_values_list, axis=1)
      # action is the one that leads to the maximum weighted reward.
      self.action = tf.argmax(
          tf.matmul(q_values, self.objective_weight_input), axis=0)
def _build_summary_ops(self):
"""Creates the summary operations.
Input placeholders created:
smiles: the smiles string.
rewards: the rewards.
weighted_reward: the weighted sum of the rewards.
Instance attributes created:
error_summary: the operation to log the summary of error.
episode_summary: the operation to log the smiles string and reward.
"""
with tf.variable_scope(self.scope, reuse=self.reuse):
with tf.name_scope('summaries'):
# The td_error here is the difference between q_t and q_t_target.
# Without abs(), the summary of td_error is actually underestimated.
error_summaries = [
tf.summary.scalar('td_error_%i' % i,
tf.reduce_mean(tf.abs(self.td_error[i])))
for i in range(self.num_objectives)
]
self.error_summary = tf.summary.merge(error_summaries)
self.smiles = tf.placeholder(tf.string, [], 'summary_smiles')
self.rewards = [
tf.placeholder(tf.float32, [], 'summary_reward_obj_%i' % i)
for i in range(self.num_objectives)
]
# Weighted sum of the rewards.
self.weighted_reward = tf.placeholder(tf.float32, [],
'summary_reward_sum')
smiles_summary = tf.summary.text('SMILES', self.smiles)
reward_summaries = [
tf.summary.scalar('reward_obj_%i' % i, self.rewards[i])
for i in range(self.num_objectives)
]
reward_summaries.append(
tf.summary.scalar('sum_reward', self.rewards[-1]))
self.episode_summary = tf.summary.merge([smiles_summary] +
reward_summaries)
def log_result(self, smiles, reward):
"""Summarizes the SMILES string and reward at the end of an episode.
Args:
smiles: String. The SMILES string.
reward: List of Float. The rewards for each objective.
Returns:
the summary protobuf.
"""
feed_dict = {
self.smiles: smiles,
}
for i, reward_value in enumerate(reward):
feed_dict[self.rewards[i]] = reward_value
# calculated the weighted sum of the rewards.
feed_dict[self.weighted_reward] = np.asscalar(
np.array([reward]).dot(self.objective_weight))
return tf.get_default_session().run(
self.episode_summary, feed_dict=feed_dict)
def _run_action_op(self, observations, head):
"""Function that runs the op calculating an action given the observations.
Args:
observations: np.array. shape = [num_actions, fingerprint_length].
Observations that can be feed into the Q network.
head: Integer. The output index to use.
Returns:
Integer. which action to be performed.
"""
return np.asscalar(tf.get_default_session().run(
self.action,
feed_dict={
self.observations: observations,
self.objective_weight_input: self.objective_weight,
self.head: head
}))
def multi_layer_model(inputs, hparams, reuse=None):
  """Multi-layer model for q learning.

  Args:
    inputs: Tensor. The input.
    hparams: tf.HParameters. The hyper-parameters.
    reuse: Boolean. Whether the parameters should be reused.

  Returns:
    Tensor. shape = [batch_size, hparams.num_bootstrap_heads]. The output.
  """
  # Resolve the activation function once; it is the same for every layer.
  activation_fn = getattr(tf.nn, hparams.activation)
  net = inputs
  for layer_idx, num_units in enumerate(hparams.dense_layers):
    net = tf.layers.dense(
        net, num_units, name='dense_%i' % layer_idx, reuse=reuse)
    net = activation_fn(net)
    if hparams.batch_norm:
      net = tf.layers.batch_normalization(
          net, fused=True, name='bn_%i' % layer_idx, reuse=reuse)
  # One output per bootstrap head, or a single scalar head when bootstrapping
  # is disabled.
  final_dim = hparams.num_bootstrap_heads if hparams.num_bootstrap_heads else 1
  return tf.layers.dense(net, final_dim, name='final', reuse=reuse)
def get_hparams(**kwargs):
  """Get the hyperparameters for the model from a json object.

  Args:
    **kwargs: Dict of parameter overrides.

  Possible keyword arguments:
    atom_types: Dict. The possible atom types in the molecule.
    max_steps_per_episode: Integer. The maximum number of steps for one episode.
    allow_removal: Boolean. Whether to allow removal of a bond.
    allow_no_modification: Boolean. If true, the valid action set will include
      doing nothing to the current molecule, i.e., the current molecule itself
      will be added to the action set.
    replay_buffer_size: Integer. The size of the replay buffer.
    learning_rate: Float. Learning rate.
    learning_rate_decay_steps: Integer. The number of steps between each
      learning rate decay.
    learning_rate_decay_rate: Float. The rate of learning rate decay.
    num_episodes: Integer. Number of episodes to run.
    batch_size: Integer. The batch size.
    learning_frequency: Integer. The number of steps between each training
      operation.
    update_frequency: Integer. The number of steps between each update of the
      target Q network
    grad_clipping: Integer. maximum value of the gradient norm.
    gamma: Float. The discount factor for the reward.
    double_q: Boolean. Whether to used double Q learning.
      See https://arxiv.org/abs/1509.06461 for detail.
    bootstrap: Integer. The number of bootstrap heads. See
      https://arxiv.org/abs/1703.07608 for detail.
    prioritized: Boolean. Whether to use prioritized replay. See
      https://arxiv.org/abs/1511.05952 for detail.
    prioritized_alpha: Float. The parameter alpha in the prioritized replay.
    prioritized_beta: Float. The parameter beta in the prioritized replay.
    prioritized_epsilon: Float. The parameter epsilon in the prioritized replay.
    fingerprint_radius: Integer. The radius of the Morgan fingerprint.
    fingerprint_length: Integer. The length of the Morgan fingerprint.
    dense_layers: List of integers. The hidden units in the dense layers.
    activation: String. The activation function to use.
    optimizer: String. The optimizer to use.
    batch_norm: Boolean. Whether to use batch normalization.
    save_frequency: Integer. The number of episodes between each saving.

  Returns:
    A HParams object containing all the hyperparameters.
  """
  # Collect the defaults in a plain dict first, then build the HParams
  # object from it; kwargs overrides are applied afterwards.
  defaults = dict(
      atom_types=['C', 'O', 'N'],
      max_steps_per_episode=40,
      allow_removal=True,
      allow_no_modification=True,
      allow_bonds_between_rings=False,
      allowed_ring_sizes=[3, 4, 5, 6],
      replay_buffer_size=1000000,
      learning_rate=1e-4,
      learning_rate_decay_steps=10000,
      learning_rate_decay_rate=0.8,
      num_episodes=5000,
      batch_size=64,
      learning_frequency=4,
      update_frequency=20,
      grad_clipping=10.0,
      gamma=0.9,
      double_q=True,
      num_bootstrap_heads=12,
      prioritized=False,
      prioritized_alpha=0.6,
      prioritized_beta=0.4,
      prioritized_epsilon=1e-6,
      fingerprint_radius=3,
      fingerprint_length=2048,
      dense_layers=[1024, 512, 128, 32],
      activation='relu',
      optimizer='Adam',
      batch_norm=False,
      save_frequency=1000,
      max_num_checkpoints=100,
      discount_factor=0.7)
  return contrib_training.HParams(**defaults).override_from_dict(kwargs)
def get_fingerprint(smiles, hparams):
  """Get Morgan Fingerprint of a specific SMILES string.

  Args:
    smiles: String. The SMILES string of the molecule.
    hparams: tf.contrib.training.HParams. Hyper parameters.

  Returns:
    np.array. shape = [hparams.fingerprint_length]. The Morgan fingerprint.
  """
  # A missing SMILES and an unparseable SMILES are both represented by an
  # all-zero fingerprint vector.
  molecule = None if smiles is None else Chem.MolFromSmiles(smiles)
  if molecule is None:
    return np.zeros((hparams.fingerprint_length,))
  bit_vector = AllChem.GetMorganFingerprintAsBitVect(
      molecule, hparams.fingerprint_radius, hparams.fingerprint_length)
  arr = np.zeros((1,))
  # ConvertToNumpyArray takes ~ 0.19 ms, while
  # np.asarray takes ~ 4.69 ms
  DataStructs.ConvertToNumpyArray(bit_vector, arr)
  return arr
def get_fingerprint_with_steps_left(smiles, steps_left, hparams):
  """Get Morgan Fingerprint of a SMILES string with number of steps left.

  If fixing the max num of steps can be taken in a MDP, the MDP is then
  a time-heterogeneous one. Therefore a time dependent policy is needed
  for optimal performance.

  Args:
    smiles: String. The SMILES string of the molecule.
    steps_left: Integer. The number of steps left in the environment.
    hparams: tf.contrib.training.HParams. Hyper parameters.

  Returns:
    np.array. shape = [hparams.fingerprint_length + 1]. The fingerprint.
  """
  base_fingerprint = get_fingerprint(smiles, hparams)
  # Tack the remaining step count onto the end of the fingerprint so the
  # Q network can condition on time.
  return np.concatenate([base_fingerprint, [steps_left]])
|
google-research/google-research
|
mol_dqn/chemgraph/dqn/deep_q_networks.py
|
Python
|
apache-2.0
| 30,250
|
[
"RDKit"
] |
40f92a411fb3dce4396ba020579b1429f5227e0f4c35ee581a1e20cb2b04c3b1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.