| text (string, 12–1.05M chars) | repo_name (string, 5–86 chars) | path (string, 4–191 chars) | language (1 class) | license (15 classes) | size (int32, 12–1.05M) | keyword (list, 1–23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""
First pass of a "Log file" generator.
Generates logfiles with the following characteristics:
* You can define a mean number of concurrent users for the majority
of the logfile
* You can define the number of sites those concurrent users are
accessing
* You define the number of desired requests in the logfile
* You can define a percentage sample size; a default sampler of
  100% is provided.
* You can change the sampling method to a custom one by implementing
  a replacement method, plus any helpers your sampling requires.
* You can set and receive cookies from clients.
Things to do:
* Add a setting for whether each client will correctly retain the
  cookie
* Modify sampling method to handle "zero" memory density biased sampling.
* Validate that the accuracy is sufficient.
* Validate assumption that memory consumption is proportional to the
  granularity of site density, not proportional to the number of users.
  (even density biased sampling suffers from this at present due to
  seen/unseen filters)
"""
#########################################################################
#
# Configuration
#
percentageSample = 100
desiredNumberOfRequests = 100000
concurrentUsers=500
numberOfSites = 100
import math
import random
RAND_MAX=2147483647
class zipf:
"""Zipf distribution generator.
* The algorithm here is directly adapted from:
* http://www.cs.hut.fi/Opinnot/T-106.290/K2004/Ohjeet/Zipf.html
N # Value range [1,N]
a # Distribution skew. Zero = uniform.
c1,c2 # Computed once for all random no.s for N and a
"""
def __init__(self,N=10000,a=1):
self.N = N
if a < 0.0001:
self.a = 0.0
self.c1 = 0.0
self.c2 = 0.0
        elif 0.9999 < a < 1.0001:
self.a = 1.0
self.c1 = math.log(N+1)
self.c2 = 0.0
else:
self.a = float(a)
self.c1 = math.exp((1-a) * math.log(N+1)) - 1
self.c2 = 1.0 / (1-a)
def __repr__(self):
return "zipf( " + str(self.N) + ", " + str(self.a) + ") : " + str(self.c1) + ", " + str(self.c2)
    def next(self):
        r = 0
        x = 0.0
        if self.a == 0:
            return random.randint(1, self.N)
        while r < 1 or r > self.N:
            x = random.random()
            if self.a == 1:
                x = math.exp(x * self.c1)
            else:
                x = math.exp(self.c2 * math.log(x * self.c1 + 1))
            r = int(x)
        return r
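# A minimal usage sketch of the zipf generator above. The demo function is
# illustrative only and is not called anywhere in this script: it draws ranks
# and tallies them, showing that low ranks dominate when a > 0.
def _zipf_demo(samples=1000):
    z = zipf(N=10, a=1)
    counts = {}
    for _ in xrange(samples):
        r = z.next()
        counts[r] = counts.get(r, 0) + 1
    # For a=1, N=10 expect roughly counts[1] > counts[2] > ... > counts[10]
    return counts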
class client:
"""
A client is classified by:
    * The number of requests they will make:
      Heavy-tailed (Inverse Gaussian) distribution, typical mean 3, standard deviation 9, mode of 1
    * Whether they can handle cookies correctly (assume 90% even)
    * What their currently set cookie is
    * How many times they come back (-1 is infinitely repeating)
    * How often they come back (this is a request delay)
    * Are allocated a pseudorandom IP number.
      * This is not guaranteed to be unique (simulates the effect of proxies)
    * They do not follow any particular request pattern, other than that generated by zipf.
"""
def __init__(self):
self._ip = "192.168.0."+str(random.randint(1,128))
self._cookie = "XXXX"
def ip(self):
return self._ip
def cookie(self):
return self._cookie
def set_cookie(self, cookie):
self._cookie = cookie
def __repr__(self):
return str(id(self))
class requestStream:
"""
A request stream is a sequence of client IP, client cookie, request (zipf number), client
In order to generate this it tracks:
* A list of clients
* Addition of new clients throughout the time period
* Deletion of expired clients
* Which clients are making a request
    The user can configure the following attributes:
    * concurrentUsers : a rough indication of the number of
      concurrent users for a long-running stream
    * sites : the number of distinct sites the requests are spread across
"""
    def __init__(self, concurrentUsers=1000, sites=10000):
        self._users = concurrentUsers * 2
        self.clients = []
        Z = zipf(N=sites)
        self._nextRequest = Z.next
def addClient(self):
C=client()
self.clients.append(C)
def removeClient(self):
c = random.randint(1,len(self.clients))-1
self.clients[c] = self.clients[len(self.clients)-1]
del self.clients[len(self.clients)-1]
def pickClient(self):
try:
c = random.randint(1,len(self.clients))-1
C = self.clients[c]
except ValueError:
C=client()
self.clients.append(C)
return C
def handleClientQueue(self):
if random.randint(1,self._users)>len(self.clients):
self.addClient()
else:
self.removeClient()
    def main(self):
        while 1:
C=self.pickClient()
self.handleClientQueue()
yield [C.ip(), C.cookie(), self._nextRequest(), C]
#
# Need to merge the new version
#
def likelihood_old(numSamples, targetSample):
"NB, this can return > 1.0"
try:
return 1/float(numSamples) + targetSample/100.
except ZeroDivisionError:
return 1
def likelihood(numSamples, target, numRequests=1):  # This can return > 1.0
    """Return a likelihood of sampling the next request, based on how far
    the current sampling rate (numSamples/numRequests) is from the target rate.
    """
    try:
        currentRate = numSamples / float(numRequests)
        # The 100 in the next line was found empirically, and is needed to
        # force the two rates to converge reliably at a reliable rate.
        result = 100 * (1 - (currentRate / target))
        return result
    except ZeroDivisionError:
        return 1
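# A rough worked example (values assumed for illustration): with a target
# rate of 0.22, 10 samples out of 100 requests gives currentRate = 0.1, so
# likelihood(10, 0.22, 100) = 100 * (1 - 0.1/0.22) ~= 54.5 -- far above 1.0,
# so the next request is certain to be sampled, pushing the observed rate up
# towards the target. Once currentRate overshoots the target the result goes
# negative and sampling pauses until the two rates re-converge.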
class userSampling:
def __init__(self, percent):
self._percent=percent
self._siteTrack = {}
self._siteSamples = {}
def dumpSampleDB(self):
print "SITE, REQ, SAMP, RATE"
sites = self._siteTrack.keys()
sites.sort()
for site in sites:
print site, ",",
print self._siteTrack[site], ",",
print self._siteSamples[site], ",",
print int((self._siteSamples[site]/float(self._siteTrack[site]))*1000)/10.
def updateTrackSites(self, request):
self._siteTrack[request[2]] = self._siteTrack.get(request[2],0)+1
def updateClient(self, request):
"Override this method to change realtime sampling method"
# Unless we match the cookies, we miss the first request
self.updateTrackSites(request)
if random.random() < likelihood(self._siteSamples.get(request[2],0), 0.22, self._siteTrack[request[2]]):
request[3].set_cookie("sample")
request[1]="sample"
    def sampling(self, request):
        self.updateClient(request)
        if request[1] == "sample":
            self._siteSamples[request[2]] = self._siteSamples.get(request[2], 0) + 1
            return True
        else:
            return False
sampler = userSampling(percentageSample)
XS=requestStream(concurrentUsers=concurrentUsers,sites=numberOfSites)
X=XS.main()
for i in xrange(desiredNumberOfRequests):
req = X.next()
if sampler.sampling(req):
#print i, req, "sample"
pass
else:
#print i, req, "reject"
pass
sampler.dumpSampleDB()
| sparkslabs/kamaelia_ | Sketches/MPS/Stats/zipf.py | Python | apache-2.0 | 8,103 | ["Gaussian"] | 5f82a8390bb5e316f99d1a70e5cbbdadaf238d93dcc2da6209a67eeca7d23b36 |
# Copyright 2012 the rootpy developers
# distributed under the terms of the GNU General Public License
from __future__ import absolute_import
from math import sqrt
import ROOT
from . import log; log = log[__name__]
from . import MIN_ROOT_VERSION
from ...extern.six import string_types
from ...memory.keepalive import keepalive
from ...base import NamedObject
from ... import asrootpy, QROOT, ROOT_VERSION
if ROOT_VERSION < MIN_ROOT_VERSION:
raise NotImplementedError(
"histfactory requires ROOT {0} but you are using {1}".format(
MIN_ROOT_VERSION, ROOT_VERSION))
HistFactory = QROOT.RooStats.HistFactory
Constraint = HistFactory.Constraint
__all__ = [
'Constraint',
'Data',
'Sample',
'HistoSys',
'HistoFactor',
'NormFactor',
'OverallSys',
'ShapeFactor',
'ShapeSys',
'Channel',
'Measurement',
]
class _Named(object):
@property
def name(self):
return self.GetName()
@name.setter
def name(self, n):
self.SetName(n)
def __str__(self):
return self.__repr__()
def __repr__(self):
return "{0}('{1}')".format(
self.__class__.__name__, self.GetName())
class _HistNamePathFile(object):
@property
def hist_name(self):
return self.GetHistoName()
@hist_name.setter
def hist_name(self, name):
self.SetHistoName(name)
@property
def hist_path(self):
return self.GetHistoPath()
@hist_path.setter
def hist_path(self, path):
self.SetHistoPath(path)
@property
def hist_file(self):
return self.GetInputFile()
@hist_file.setter
def hist_file(self, infile):
self.SetInputFile(infile)
class _SampleBase(_Named, _HistNamePathFile):
def SetHisto(self, hist):
super(_SampleBase, self).SetHisto(hist)
self.SetHistoName(hist.name)
keepalive(self, hist)
def GetHisto(self):
hist = super(_SampleBase, self).GetHisto()
# NULL pointer check
if hist == None:
return None
return asrootpy(hist)
@property
def hist(self):
return self.GetHisto()
@hist.setter
def hist(self, h):
self.SetHisto(h)
def __add__(self, other):
if self.name != other.name:
raise ValueError("attempting to add samples with different names")
hist1 = self.GetHisto()
hist2 = other.GetHisto()
sample = self.__class__(self.name)
if hist1 is not None and hist2 is not None:
hist3 = hist1 + hist2
hist3.name = '{0}_plus_{1}'.format(hist1.name, hist2.name)
sample.SetHisto(hist3)
return sample
class Data(_SampleBase, HistFactory.Data):
_ROOT = HistFactory.Data
def __init__(self, name, hist=None):
# require a name
super(Data, self).__init__()
self.name = name
if hist is not None:
self.SetHisto(hist)
def total(self, xbin1=1, xbin2=-2):
"""
Return the total yield and its associated statistical uncertainty.
"""
return self.hist.integral(xbin1=xbin1, xbin2=xbin2, error=True)
def Clone(self):
clone = Data(self.name)
hist = self.hist
if hist is not None:
clone.hist = hist.Clone(shallow=True)
return clone
class Sample(_SampleBase, HistFactory.Sample):
_ROOT = HistFactory.Sample
def __init__(self, name, hist=None):
# require a sample name
super(Sample, self).__init__(name)
if hist is not None:
self.SetHisto(hist)
def __add__(self, other):
if self.GetHistoFactorList() or other.GetHistoFactorList():
raise NotImplementedError(
"Samples cannot be summed if "
"they contain HistoFactors")
if self.GetShapeFactorList() or other.GetShapeFactorList():
raise NotImplementedError(
"Samples cannot be summed if "
"they contain ShapeFactors")
if self.GetShapeSysList() or other.GetShapeSysList():
raise NotImplementedError(
"Samples cannot be summed if "
"they contain ShapeSys")
if self.GetNormalizeByTheory() != other.GetNormalizeByTheory():
raise ValueError(
"attempting to sum samples with "
"inconsistent NormalizeByTheory")
sample = super(Sample, self).__add__(other)
sample.SetNormalizeByTheory(self.GetNormalizeByTheory())
# sum the histosys
syslist1 = self.GetHistoSysList()
syslist2 = other.GetHistoSysList()
if len(syslist1) != len(syslist2):
raise ValueError(
"attempting to sum Samples with HistoSys lists of "
"differing lengths")
for sys1, sys2 in zip(syslist1, syslist2):
sample.AddHistoSys(sys1 + sys2)
# include the overallsys
overall1 = self.GetOverallSysList()
overall2 = other.GetOverallSysList()
if len(overall1) != len(overall2):
raise ValueError(
"attempting to sum Samples with OverallSys lists of "
"differing lengths")
for o1, o2 in zip(overall1, overall2):
if o1.name != o2.name:
raise ValueError(
"attempting to sum Samples containing OverallSys "
"with differing names: {0}, {1}".format(
o1.name, o2.name))
# TODO check equality of value, low and high
sample.AddOverallSys(o1)
# include the normfactors
norms1 = self.GetNormFactorList()
norms2 = other.GetNormFactorList()
if len(norms1) != len(norms2):
raise ValueError(
"attempting to sum Samples with NormFactor lists of "
"differing lengths")
for norm1, norm2 in zip(norms1, norms2):
if norm1.name != norm2.name:
raise ValueError(
"attempting to sum Samples containing NormFactors "
"with differing names: {0}, {1}".format(
norm1.name, norm2.name))
# TODO check equality of value, low and high
sample.AddNormFactor(norm1)
return sample
def __radd__(self, other):
# support sum([list of Samples])
if other == 0:
return self
raise TypeError(
"unsupported operand type(s) for +: '{0}' and '{1}'".format(
other.__class__.__name__, self.__class__.__name__))
def __mul__(self, scale):
clone = self.Clone()
clone *= scale
return clone
def __imul__(self, scale):
hist = self.hist
if hist is not None:
hist *= scale
for hsys in self.histo_sys:
low = hsys.low
high = hsys.high
if low is not None:
low *= scale
if high is not None:
high *= scale
return self
def sys_names(self):
"""
Return a list of unique systematic names from OverallSys and HistoSys
"""
names = {}
for osys in self.overall_sys:
names[osys.name] = None
for hsys in self.histo_sys:
names[hsys.name] = None
return names.keys()
def iter_sys(self):
"""
Iterate over sys_name, overall_sys, histo_sys.
overall_sys or histo_sys may be None for any given sys_name.
"""
names = self.sys_names()
for name in names:
osys = self.GetOverallSys(name)
hsys = self.GetHistoSys(name)
yield name, osys, hsys
def sys_hist(self, name=None):
"""
Return the effective low and high histogram for a given systematic.
If this sample does not contain the named systematic then return
the nominal histogram for both low and high variations.
"""
if name is None:
low = self.hist.Clone(shallow=True)
high = self.hist.Clone(shallow=True)
return low, high
osys = self.GetOverallSys(name)
hsys = self.GetHistoSys(name)
if osys is None:
osys_high, osys_low = 1., 1.
else:
osys_high, osys_low = osys.high, osys.low
if hsys is None:
hsys_high = self.hist.Clone(shallow=True)
hsys_low = self.hist.Clone(shallow=True)
else:
hsys_high = hsys.high.Clone(shallow=True)
hsys_low = hsys.low.Clone(shallow=True)
return hsys_low * osys_low, hsys_high * osys_high
def has_sys(self, name):
return (self.GetOverallSys(name) is not None or
self.GetHistoSys(name) is not None)
def total(self, xbin1=1, xbin2=-2):
"""
Return the total yield and its associated statistical and
systematic uncertainties.
"""
integral, stat_error = self.hist.integral(
xbin1=xbin1, xbin2=xbin2, error=True)
# sum systematics in quadrature
ups = [0]
dns = [0]
for sys_name in self.sys_names():
sys_low, sys_high = self.sys_hist(sys_name)
up = sys_high.integral(xbin1=xbin1, xbin2=xbin2) - integral
dn = sys_low.integral(xbin1=xbin1, xbin2=xbin2) - integral
if up > 0:
ups.append(up**2)
else:
dns.append(up**2)
if dn > 0:
ups.append(dn**2)
else:
dns.append(dn**2)
syst_error = (sqrt(sum(ups)), sqrt(sum(dns)))
return integral, stat_error, syst_error
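    # A hedged usage sketch (the names below are assumed, not defined in this
    # module): with a nominal histogram and a single 3% OverallSys,
    #
    #     s = Sample('signal', hist=nominal_hist)
    #     s.AddOverallSys(OverallSys('lumi', low=0.97, high=1.03))
    #     n, stat, (sys_up, sys_dn) = s.total()
    #
    # sys_up/sys_dn are quadrature sums over systematics, with upward and
    # downward shifts accumulated separately to keep the errors asymmetric.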
###########################
# HistoSys
###########################
def AddHistoSys(self, *args):
super(Sample, self).AddHistoSys(*args)
if len(args) == 1:
# args is a HistoSys
keepalive(self, args[0])
def RemoveHistoSys(self, name):
histosys_vect = super(Sample, self).GetHistoSysList()
ivect = histosys_vect.begin()
for histosys in histosys_vect:
if histosys.GetName() == name:
histosys_vect.erase(ivect)
break
ivect.__preinc__()
def GetHistoSys(self, name):
histosys_vect = super(Sample, self).GetHistoSysList()
for histosys in histosys_vect:
if histosys.GetName() == name:
return asrootpy(histosys)
return None
def GetHistoSysList(self):
return [asrootpy(syst) for syst in
super(Sample, self).GetHistoSysList()]
@property
def histo_sys(self):
return self.GetHistoSysList()
###########################
# HistoFactor
###########################
def AddHistoFactor(self, *args):
super(Sample, self).AddHistoFactor(*args)
if len(args) == 1:
# args is a HistoFactor
keepalive(self, args[0])
def RemoveHistoFactor(self, name):
histofactor_vect = super(Sample, self).GetHistoFactorList()
        ivect = histofactor_vect.begin()
for histofactor in histofactor_vect:
if histofactor.GetName() == name:
histofactor_vect.erase(ivect)
break
ivect.__preinc__()
def GetHistoFactor(self, name):
histofactor_vect = super(Sample, self).GetHistoFactorList()
for histofactor in histofactor_vect:
if histofactor.GetName() == name:
return asrootpy(histofactor)
return None
def GetHistoFactorList(self):
return [asrootpy(syst) for syst in
super(Sample, self).GetHistoFactorList()]
@property
def histo_factors(self):
return self.GetHistoFactorList()
###########################
# NormFactor
###########################
def AddNormFactor(self, *args):
super(Sample, self).AddNormFactor(*args)
if len(args) == 1:
# args is a NormFactor
keepalive(self, args[0])
def RemoveNormFactor(self, name):
normfactor_vect = super(Sample, self).GetNormFactorList()
ivect = normfactor_vect.begin()
for normfactor in normfactor_vect:
if normfactor.GetName() == name:
normfactor_vect.erase(ivect)
break
ivect.__preinc__()
def GetNormFactor(self, name):
normfactor_vect = super(Sample, self).GetNormFactorList()
for normfactor in normfactor_vect:
if normfactor.GetName() == name:
return asrootpy(normfactor)
return None
def GetNormFactorList(self):
return [asrootpy(norm) for norm in
super(Sample, self).GetNormFactorList()]
@property
def norm_factors(self):
return self.GetNormFactorList()
###########################
# OverallSys
###########################
def AddOverallSys(self, *args):
super(Sample, self).AddOverallSys(*args)
if len(args) == 1:
            # args is an OverallSys
keepalive(self, args[0])
def RemoveOverallSys(self, name):
overallsys_vect = super(Sample, self).GetOverallSysList()
ivect = overallsys_vect.begin()
for overallsys in overallsys_vect:
if overallsys.GetName() == name:
overallsys_vect.erase(ivect)
break
ivect.__preinc__()
def GetOverallSys(self, name):
overallsys_vect = super(Sample, self).GetOverallSysList()
for overallsys in overallsys_vect:
if overallsys.GetName() == name:
return asrootpy(overallsys)
return None
def GetOverallSysList(self):
return [asrootpy(syst) for syst in
super(Sample, self).GetOverallSysList()]
@property
def overall_sys(self):
return self.GetOverallSysList()
###########################
# ShapeFactor
###########################
def AddShapeFactor(self, shapefactor):
super(Sample, self).AddShapeFactor(shapefactor)
if isinstance(shapefactor, ROOT.RooStats.HistFactory.ShapeFactor):
keepalive(self, shapefactor)
def RemoveShapeFactor(self, name):
shapefactor_vect = super(Sample, self).GetShapeFactorList()
ivect = shapefactor_vect.begin()
for shapefactor in shapefactor_vect:
if shapefactor.GetName() == name:
shapefactor_vect.erase(ivect)
break
ivect.__preinc__()
def GetShapeFactor(self, name):
shapefactor_vect = super(Sample, self).GetShapeFactorList()
for shapefactor in shapefactor_vect:
if shapefactor.GetName() == name:
return asrootpy(shapefactor)
return None
def GetShapeFactorList(self):
return [asrootpy(sf) for sf in
super(Sample, self).GetShapeFactorList()]
@property
def shape_factors(self):
return self.GetShapeFactorList()
###########################
# ShapeSys
###########################
def AddShapeSys(self, *args):
super(Sample, self).AddShapeSys(*args)
if len(args) == 1:
# args is a ShapeSys
keepalive(self, args[0])
def RemoveShapeSys(self, name):
shapesys_vect = super(Sample, self).GetShapeSysList()
ivect = shapesys_vect.begin()
for shapesys in shapesys_vect:
if shapesys.GetName() == name:
shapesys_vect.erase(ivect)
break
ivect.__preinc__()
def GetShapeSys(self, name):
shapesys_vect = super(Sample, self).GetShapeSysList()
for shapesys in shapesys_vect:
if shapesys.GetName() == name:
return asrootpy(shapesys)
return None
def GetShapeSysList(self):
return [asrootpy(ss) for ss in
super(Sample, self).GetShapeSysList()]
@property
def shape_sys(self):
return self.GetShapeSysList()
def Clone(self):
clone = self.__class__(self.name)
hist = self.hist
if hist is not None:
clone.hist = hist.Clone(shallow=True)
# HistoSys
for hsys in self.histo_sys:
clone.AddHistoSys(hsys.Clone())
# HistoFactor
for hfact in self.histo_factors:
clone.AddHistoFactor(hfact.Clone())
# NormFactor
for norm in self.norm_factors:
clone.AddNormFactor(norm.Clone())
# OverallSys
for osys in self.overall_sys:
clone.AddOverallSys(osys.Clone())
# ShapeFactor
for sfact in self.shape_factors:
clone.AddShapeFactor(sfact.Clone())
# ShapeSys
for ssys in self.shape_sys:
clone.AddShapeSys(ssys.Clone())
return clone
class _HistoSysBase(object):
def SetHistoHigh(self, hist):
super(_HistoSysBase, self).SetHistoHigh(hist)
self.SetHistoNameHigh(hist.name)
keepalive(self, hist)
def SetHistoLow(self, hist):
super(_HistoSysBase, self).SetHistoLow(hist)
self.SetHistoNameLow(hist.name)
keepalive(self, hist)
def GetHistoHigh(self):
hist = super(_HistoSysBase, self).GetHistoHigh()
# NULL pointer check
if hist == None:
return None
return asrootpy(hist)
def GetHistoLow(self):
hist = super(_HistoSysBase, self).GetHistoLow()
# NULL pointer check
if hist == None:
return None
return asrootpy(hist)
@property
def low(self):
return self.GetHistoLow()
@low.setter
def low(self, h):
self.SetHistoLow(h)
@property
def high(self):
return self.GetHistoHigh()
@high.setter
def high(self, h):
self.SetHistoHigh(h)
@property
def low_name(self):
return self.GetHistoNameLow()
@low_name.setter
def low_name(self, name):
self.SetHistoNameLow(name)
@property
def high_name(self):
return self.GetHistoNameHigh()
@high_name.setter
def high_name(self, name):
self.SetHistoNameHigh(name)
@property
def low_path(self):
return self.GetHistoPathLow()
@low_path.setter
def low_path(self, path):
self.SetHistoPathLow(path)
@property
def high_path(self):
return self.GetHistoPathHigh()
@high_path.setter
def high_path(self, path):
self.SetHistoPathHigh(path)
@property
def low_file(self):
return self.GetInputFileLow()
@low_file.setter
def low_file(self, infile):
self.SetInputFileLow(infile)
@property
def high_file(self):
return self.GetInputFileHigh()
@high_file.setter
def high_file(self, infile):
self.SetInputFileHigh(infile)
def Clone(self):
clone = self.__class__(self.name)
low = self.low
high = self.high
if low is not None:
clone.low = low.Clone(shallow=True)
if high is not None:
clone.high = high.Clone(shallow=True)
clone.low_name = self.low_name
clone.high_name = self.high_name
clone.low_path = self.low_path
clone.high_path = self.high_path
clone.low_file = self.low_file
clone.high_file = self.high_file
return clone
class HistoSys(_Named, _HistoSysBase, HistFactory.HistoSys):
_ROOT = HistFactory.HistoSys
def __init__(self, name, low=None, high=None):
# require a name
super(HistoSys, self).__init__(name)
if low is not None:
self.low = low
if high is not None:
self.high = high
def __add__(self, other):
if self.name != other.name:
raise ValueError("attempting to add HistoSys with different names")
histosys = HistoSys(self.name)
low = self.low + other.low
low.name = '{0}_plus_{1}'.format(self.low.name, other.low.name)
histosys.low = low
high = self.high + other.high
high.name = '{0}_plus_{1}'.format(self.high.name, other.high.name)
histosys.high = high
return histosys
class HistoFactor(_Named, _HistoSysBase,
HistFactory.HistoFactor):
_ROOT = HistFactory.HistoFactor
def __init__(self, name, low=None, high=None):
# require a name
super(HistoFactor, self).__init__(name)
if low is not None:
self.low = low
if high is not None:
self.high = high
def __add__(self, other):
raise NotImplementedError("HistoFactors cannot be summed")
class NormFactor(_Named, HistFactory.NormFactor):
_ROOT = HistFactory.NormFactor
def __init__(self, name, value=None, low=None, high=None, const=None):
super(NormFactor, self).__init__()
self.name = name
if value is not None:
self.value = value
if low is not None:
self.low = low
if high is not None:
self.high = high
if const is not None:
self.const = const
@property
def const(self):
return self.GetConst()
@const.setter
def const(self, value):
self.SetConst(value)
@property
def value(self):
return self.GetVal()
@value.setter
def value(self, value):
self.SetVal(value)
@property
def low(self):
return self.GetLow()
@low.setter
def low(self, value):
self.SetLow(value)
@property
def high(self):
return self.GetHigh()
@high.setter
def high(self, value):
self.SetHigh(value)
def Clone(self):
return NormFactor(self.name,
value=self.value,
low=self.low,
high=self.high,
const=self.const)
class OverallSys(_Named, HistFactory.OverallSys):
_ROOT = HistFactory.OverallSys
def __init__(self, name, low=None, high=None):
# require a name
super(OverallSys, self).__init__()
self.name = name
if low is not None:
self.low = low
if high is not None:
self.high = high
@property
def low(self):
return self.GetLow()
@low.setter
def low(self, value):
self.SetLow(value)
@property
def high(self):
return self.GetHigh()
@high.setter
def high(self, value):
self.SetHigh(value)
def Clone(self):
return OverallSys(self.name, low=self.low, high=self.high)
class ShapeFactor(_Named, HistFactory.ShapeFactor):
_ROOT = HistFactory.ShapeFactor
def __init__(self, name):
# require a name
super(ShapeFactor, self).__init__()
self.name = name
def Clone(self):
return ShapeFactor(self.name)
class ShapeSys(_Named, _HistNamePathFile, HistFactory.ShapeSys):
_ROOT = HistFactory.ShapeSys
def __init__(self, name):
# require a name
super(ShapeSys, self).__init__()
self.name = name
# ConstraintType not initialized correctly on C++ side
# ROOT.RooStats.HistFactory.Constraint.Gaussian
super(ShapeSys, self).SetConstraintType(Constraint.Gaussian)
def SetConstraintType(self, value):
_value = value.lower() if isinstance(value, string_types) else value
if _value in (Constraint.Gaussian, 'gauss', 'gaussian'):
super(ShapeSys, self).SetConstraintType(Constraint.Gaussian)
elif _value in (Constraint.Poisson, 'pois', 'poisson'):
super(ShapeSys, self).SetConstraintType(Constraint.Poisson)
else:
raise ValueError(
"'{0}' is not a valid constraint".format(value))
@property
def constraint(self):
return super(ShapeSys, self).GetConstraintType()
@constraint.setter
def constraint(self, value):
self.SetConstraintType(value)
def GetErrorHist(self):
hist = super(ShapeSys, self).GetErrorHist()
# NULL pointer check
if hist == None:
return None
return asrootpy(hist)
def SetErrorHist(self, hist):
super(ShapeSys, self).SetErrorHist(hist)
self.SetHistoName(hist.name)
keepalive(self, hist)
    @property
    def hist(self):
        return self.GetErrorHist()
@hist.setter
def hist(self, h):
self.SetErrorHist(h)
def Clone(self):
clone = ShapeSys(self.name)
hist = self.hist
if hist is not None:
clone.hist = hist.Clone(shallow=True)
return clone
class Channel(_Named, HistFactory.Channel):
_ROOT = HistFactory.Channel
def __init__(self, name, samples=None, data=None, inputfile=""):
# require a name
super(Channel, self).__init__(name, inputfile)
if samples is not None:
for sample in samples:
self.AddSample(sample)
if data is not None:
self.SetData(data)
def __add__(self, other):
channel = Channel('{0}_plus_{1}'.format(self.name, other.name))
channel.SetData(self.data + other.data)
samples1 = self.samples
samples2 = other.samples
if len(samples1) != len(samples2):
raise ValueError(
"attempting to add Channels containing differing numbers of "
"Samples")
for s1, s2 in zip(samples1, samples2):
# samples must be compatible
channel.AddSample(s1 + s2)
channel.SetStatErrorConfig(self.GetStatErrorConfig())
return channel
def __radd__(self, other):
# support sum([list of Channels])
if other == 0:
return self
raise TypeError(
"unsupported operand type(s) for +: '{0}' and '{1}'".format(
other.__class__.__name__, self.__class__.__name__))
def sys_names(self):
"""
Return a list of unique systematic names from OverallSys and HistoSys
"""
names = []
for sample in self.samples:
names.extend(sample.sys_names())
return list(set(names))
def sys_hist(self, name=None, where=None):
"""
Return the effective total low and high histogram for a given
systematic over samples in this channel.
If a sample does not contain the named systematic then its nominal
histogram is used for both low and high variations.
Parameters
----------
name : string, optional (default=None)
The systematic name otherwise nominal if None
where : callable, optional (default=None)
A callable taking one argument: the sample, and returns True if
this sample should be included in the total.
Returns
-------
total_low, total_high : histograms
The total low and high histograms for this systematic
"""
total_low, total_high = None, None
for sample in self.samples:
if where is not None and not where(sample):
continue
low, high = sample.sys_hist(name)
if total_low is None:
total_low = low.Clone(shallow=True)
else:
total_low += low
if total_high is None:
total_high = high.Clone(shallow=True)
else:
total_high += high
return total_low, total_high
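    # A hedged usage sketch (systematic and sample names assumed): the where
    # callable restricts the total to a subset of samples, e.g. backgrounds
    # only when building an uncertainty band,
    #
    #     low, high = channel.sys_hist('jes', where=lambda s: s.name != 'signal')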
def has_sample(self, name):
for sample in self.samples:
if sample.name == name:
return True
return False
def has_sample_where(self, func):
for sample in self.samples:
if func(sample):
return True
return False
def total(self, where=None, xbin1=1, xbin2=-2):
"""
Return the total yield and its associated statistical and
systematic uncertainties.
"""
nominal, _ = self.sys_hist(None, where=where)
integral, stat_error = nominal.integral(
xbin1=xbin1, xbin2=xbin2, error=True)
ups = [0]
dns = [0]
for sys_name in self.sys_names():
low, high = self.sys_hist(sys_name, where=where)
up = high.integral(xbin1=xbin1, xbin2=xbin2) - integral
dn = low.integral(xbin1=xbin1, xbin2=xbin2) - integral
if up > 0:
ups.append(up**2)
else:
dns.append(up**2)
if dn > 0:
ups.append(dn**2)
else:
dns.append(dn**2)
syst_error = (sqrt(sum(ups)), sqrt(sum(dns)))
return integral, stat_error, syst_error
def SetData(self, data):
super(Channel, self).SetData(data)
if isinstance(data, ROOT.TH1):
keepalive(self, data)
def GetData(self):
return asrootpy(super(Channel, self).GetData())
@property
def data(self):
return self.GetData()
@data.setter
def data(self, d):
self.SetData(d)
def AddSample(self, sample):
super(Channel, self).AddSample(sample)
keepalive(self, sample)
def RemoveSample(self, name):
sample_vect = super(Channel, self).GetSamples()
ivect = sample_vect.begin()
for sample in sample_vect:
if sample.GetName() == name:
sample_vect.erase(ivect)
break
ivect.__preinc__()
def GetSample(self, name):
samples = super(Channel, self).GetSamples()
for sample in samples:
if sample.GetName() == name:
return asrootpy(sample)
return None
def GetSamples(self):
return [asrootpy(s) for s in super(Channel, self).GetSamples()]
def AddAdditionalData(self, data):
super(Channel, self).AddAdditionalData(data)
keepalive(self, data)
def GetAdditionalData(self):
return [asrootpy(d) for d in super(Channel, self).GetAdditionalData()]
@property
def samples(self):
return self.GetSamples()
@property
def additional_data(self):
return self.GetAdditionalData()
@property
def hist_path(self):
return self.GetHistoPath()
@hist_path.setter
def hist_path(self, path):
self.SetHistoPath(path)
@property
def hist_file(self):
return self.GetInputFile()
@hist_file.setter
def hist_file(self, infile):
self.SetInputFile(infile)
def apply_snapshot(self, argset):
"""
Create a clone of this Channel where histograms are modified according
to the values of the nuisance parameters in the snapshot. This is
useful when creating post-fit distribution plots.
Parameters
----------
        argset : RooArgSet
A RooArgSet of RooRealVar nuisance parameters
Returns
-------
channel : Channel
The modified channel
"""
clone = self.Clone()
args = [var for var in argset if not (
var.name.startswith('binWidth_obs_x_') or
var.name.startswith('gamma_stat') or
var.name.startswith('nom_'))]
# handle NormFactors first
nargs = []
for var in args:
is_norm = False
name = var.name.replace('alpha_', '')
for sample in clone.samples:
if sample.GetNormFactor(name) is not None:
log.info("applying snapshot of {0} on sample {1}".format(
name, sample.name))
is_norm = True
# scale the entire sample
sample *= var.value
# add an OverallSys for the error
osys = OverallSys(name,
low=1. - var.error / var.value,
high=1. + var.error / var.value)
sample.AddOverallSys(osys)
# remove the NormFactor
sample.RemoveNormFactor(name)
if not is_norm:
nargs.append(var)
# modify the nominal shape and systematics
for sample in clone.samples:
# check that hist is not NULL
if sample.hist is None:
raise RuntimeError(
"sample {0} does not have a "
"nominal histogram".format(sample.name))
nominal = sample.hist.Clone(shallow=True)
for var in nargs:
name = var.name.replace('alpha_', '')
if not sample.has_sys(name):
continue
log.info("applying snapshot of {0} on sample {1}".format(
name, sample.name))
low, high = sample.sys_hist(name)
# modify nominal
val = var.value
if val > 0:
sample.hist += (high - nominal) * val
elif val < 0:
sample.hist += (nominal - low) * val
# TODO:
# modify OverallSys
# modify HistoSys
return clone
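    # A hedged usage sketch (the fit objects are assumed, not created here):
    #
    #     # argset: a RooArgSet of fitted nuisance parameters, e.g. from
    #     # ModelConfig::GetNuisanceParameters() after fitting
    #     postfit_channel = channel.apply_snapshot(argset)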
def Clone(self):
clone = Channel(self.name)
data = self.data
if data:
clone.data = data.Clone()
for sample in self.samples:
clone.AddSample(sample.Clone())
clone.hist_path = self.hist_path
clone.hist_file = self.hist_file
return clone
def __iter__(self):
for sample in super(Channel, self).GetSamples():
yield asrootpy(sample)
def __len__(self):
return len(super(Channel, self).GetSamples())
class Measurement(NamedObject, HistFactory.Measurement):
_ROOT = HistFactory.Measurement
def __init__(self, name, channels=None, title=""):
# require a name
super(Measurement, self).__init__(name=name, title=title)
self.SetExportOnly(True)
if channels is not None:
for channel in channels:
self.AddChannel(channel)
@property
def lumi(self):
return self.GetLumi()
@lumi.setter
def lumi(self, l):
self.SetLumi(l)
@property
def lumi_rel_error(self):
return self.GetLumiRelErr()
@lumi_rel_error.setter
def lumi_rel_error(self, err):
self.SetLumiRelErr(err)
@property
def poi(self):
return list(self.GetPOIList())
@poi.setter
def poi(self, p):
# this also adds a new POI so calling this multiple times will add
# multiple POIs
self.SetPOI(p)
def AddChannel(self, channel):
super(Measurement, self).AddChannel(channel)
keepalive(self, channel)
def RemoveChannel(self, name):
channel_vect = super(Measurement, self).GetChannels()
ivect = channel_vect.begin()
for channel in channel_vect:
if channel.GetName() == name:
channel_vect.erase(ivect)
break
ivect.__preinc__()
def GetChannel(self, name):
channels = super(Measurement, self).GetChannels()
for channel in channels:
if channel.GetName() == name:
return asrootpy(channel)
return None
def GetChannels(self):
return [asrootpy(c) for c in super(Measurement, self).GetChannels()]
@property
def channels(self):
return self.GetChannels()
def GetConstantParams(self):
return list(super(Measurement, self).GetConstantParams())
@property
def const_params(self):
return self.GetConstantParams()
def Clone(self):
        clone = Measurement(self.name, title=self.title)
clone.lumi = self.lumi
clone.lumi_rel_error = self.lumi_rel_error
for channel in self.channels:
clone.AddChannel(channel.Clone())
for poi in self.GetPOIList():
clone.AddPOI(poi)
for const_param in self.const_params:
clone.AddConstantParam(const_param)
return clone
def __iter__(self):
for channel in super(Measurement, self).GetChannels():
yield asrootpy(channel)
def __len__(self):
return len(super(Measurement, self).GetChannels())
| mverzett/rootpy | rootpy/stats/histfactory/histfactory.py | Python | gpl-3.0 | 36,348 | ["Gaussian"] | e2afe16cf13e187aaf40ca526dd7a2ccd46b4159d4786b8108fe706bd3142955 |
import urllib2
import json
from pprint import pprint
followedChannels = []
liveChannels = []
offset = 0
#Get twitch.tv username as user input
username = raw_input("Enter your twitch.tv username: ")
#Create an html file to write to and initialize headers for .html
file = open('twitchyFollowers.html', 'w')
file.write('<!DOCTYPE html>' + '<html>' + '<head>' + '<title> twitchy </title>' + '</head>' + '<body>' + '<h1>' + username + ' follows' + '</h1>' + '<ul>')
#followsChannelsUrl is a URL from the Twitch API. It returns a list of follow objects.
followsChannelsUrl = 'https://api.twitch.tv/kraken/users/' + username + '/follows/channels/'
#Cut this string from the link to get the username only
stripUrl = 'https://api.twitch.tv/kraken/users/' + username + '/follows/channels/'
#Request followsChannelsUrl and open it
req = urllib2.Request(followsChannelsUrl)
req.add_header('Accept', 'application/vnd.twitchtv.v3+json')
res = urllib2.urlopen(req)
#Take the JSON string and return as Python data
data = json.load(res)
#Grab the total number of streams the user followed
streamsFollowed = data[u'_total']
if streamsFollowed == 0:
pprint('You are not following any channels.')
else:
#Append all of your followed channels into an array which we print at the end.
while streamsFollowed > 0:
#Visit 'https://github.com/justintv/Twitch-API/blob/master/v3_resources/follows.md#get-usersuserfollowschannels' for more information about limits and offsets.
followsChannelsUrl = 'https://api.twitch.tv/kraken/users/' + username + '/follows/channels?direction=DESC&limit=100&offset=' + str(offset) + '&sortby=created_at'
#Request followsChannelsUrl and then take JSON string and return as Python data
req = urllib2.Request(followsChannelsUrl)
req.add_header('Accept', 'application/vnd.twitchtv.v3+json')
res = urllib2.urlopen(req)
data = json.load(res)
#The length of the current 'follows' array. We use this to loop through the arrays containing the usernames.
length = len(data[u'follows'])
#Loop through the array of streamers, parse the string to just use the username, and then append it to the followedChannels array.
for i in xrange(length):
streamer = data[u'follows'][i][u'_links'][u'self']
streamer = streamer.replace(stripUrl, "");
followedChannels.append(streamer)
pprint(username + ' is following ' + streamer)
        #Subtract the number of channels returned this iteration
        streamsFollowed -= length
#Add 100 to offset in order to retrieve next 100 elements in the array (since the limit is set to 100 by default).
        offset += 100
    followedChannels.sort()
for i in xrange(len(followedChannels)):
file.write('<li>' + '<a href=http://www.twitch.tv/' + str(followedChannels[i]) + '>' + str(followedChannels[i]) + '</a>' + '</li>')
file.write('</ul>')
file.write('<footer><p>Created by: James J. Lee </br>Visit his site: <a href=http://www.jamesjlee.in>jamesjlee.in</a></br>Follow him on twitter: <a href=https://www.twitter.com/jamesjlee8>@jamesjlee8</a></p></footer>')
file.write('</body>')
file.write('</html>')
file.flush()
file.close()
pprint('Done.')
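#A hedged sketch of the same limit/offset pagination as a reusable helper.
#The function below is illustrative only and is not called by this script:
def fetchAllFollows(username):
    follows = []
    offset = 0
    while True:
        url = ('https://api.twitch.tv/kraken/users/' + username +
               '/follows/channels?limit=100&offset=' + str(offset))
        req = urllib2.Request(url)
        req.add_header('Accept', 'application/vnd.twitchtv.v3+json')
        page = json.load(urllib2.urlopen(req))[u'follows']
        if not page:
            break
        follows.extend(page)
        offset += 100
    return follows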
| jamesjlee/twitchy | twitchyFollowing.py | Python | mit | 3,096 | ["VisIt"] | 0f2296ba4d59a8815e3d30ed9a35d243e5b955fdf5df25a001589bac7feb8ff5 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGenomeinfodbdata(RPackage):
"""for mapping between NCBI taxonomy ID and species. Used by functions
in the GenomeInfoDb package."""
homepage = "https://bioconductor.org/packages/GenomeInfoDbData/"
url = "https://bioconductor.org/packages/3.5/data/annotation/src/contrib/GenomeInfoDbData_0.99.0.tar.gz"
version('1.2.1', sha256='75e6d683a29b8baeec66ba5194aa59a6aa69b04fae5a9c718a105c155fb41711',
url='https://bioconductor.org/packages/3.9/data/annotation/src/contrib/GenomeInfoDbData_1.2.1.tar.gz')
version('1.1.0', sha256='6efdca22839c90d455843bdab7c0ecb5d48e3b6c2f7b4882d3210a6bbad4304c',
url='https://bioconductor.org/packages/3.7/data/annotation/src/contrib/GenomeInfoDbData_1.1.0.tar.gz')
version('0.99.0', sha256='457049804bbd70f218c1c84067a23e83bdecb7304a3e4d8b697fee0b16dc1888')
depends_on('r@3.3:', when='@0.99.0:1.1.0', type=('build', 'run'))
depends_on('r@3.5:', when='@1.2.1:', type=('build', 'run'))
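# A hedged usage note (standard Spack workflow, not part of this package file):
#
#     spack install r-genomeinfodbdata@1.2.1
#
# selects the matching version/url pair above and, via the when= constraints,
# pulls in r@3.5: as a build and run dependency.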
| iulian787/spack | var/spack/repos/builtin/packages/r-genomeinfodbdata/package.py | Python | lgpl-2.1 | 1,208 | ["Bioconductor"] | e73567977aa1aa0ebaf116b2e416921dab8a5c51e06e94d8fd29a5c21aaafee1 |
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseServerError
from django.shortcuts import render_to_response
from django.template import defaultfilters, RequestContext
from django.template.defaultfilters import truncatewords
from django.utils import simplejson
from bartender.models import Article
from letters.models import Letter,LetterForm,EditorPick
from facebookconnect.models import FacebookTemplate
from search.models import Document
from search.backends.simple import SimpleSearcher
def index(request):
"""display letters index page"""
letter_list = [ep.letter for ep in EditorPick.objects.get_top_two_published()]
template_dict = {
'letter_list':letter_list,
'all_letters':Letter.objects.all().order_by('-created'),
'letter_count':Letter.objects.all().count,
}
return render_to_response('letters/index.html', template_dict, context_instance=RequestContext(request))
def read_letter(request,letter_id):
l = Letter.objects.get(pk=letter_id)
exclude_letters = [r for r in l.replies.all()]
exclude_letters.append(l)
letters = []
    if l.article:
        for let in l.article.letter_set.order_by('created'):
            if let not in exclude_letters:
                letters.append(let)
template_dict = {
'letter': l,
'related_letters':letters
}
return render_to_response('letters/read_letter.html', template_dict, context_instance=RequestContext(request))
def search(request):
"""search letters"""
if request.method == "POST":
my = SimpleSearcher()
docs = my.query(request.POST["q"])
template_dict = {
'document_list': docs,
'q':request.POST["q"]
}
return render_to_response('letters/search_results.html', template_dict, context_instance=RequestContext(request))
else:
return HttpResponseRedirect(request.META['HTTP_REFERER'])
@login_required
def new_letter(request,article_id=None,letter_id=None):
"""add a new letter"""
my_letter = Letter(user=request.user)
#see if this is in response to an article
try:
a = Article.objects.get(pk=article_id)
my_letter.article = a
except Article.DoesNotExist:
pass
#see if this is in response to a letter
try:
l = Letter.objects.get(pk=letter_id)
my_letter.article=l.article
my_letter.letter=l
except Letter.DoesNotExist:
pass
#this is an ajax request to create a new letter
if request.method == "POST":
form = LetterForm(request.POST, instance=my_letter)
if form.is_valid():
new_letter = form.save()
#set up template data
template_data = {
'title': truncatewords(new_letter.title,20),
'body': truncatewords(new_letter.body,50),
'url': settings.ROOT_URL + new_letter.get_absolute_url()
}
if new_letter.article:
template_data.update({
"headline": truncatewords(new_letter.article.headline,20),
"article": truncatewords(new_letter.article.body,50),
})
if new_letter.letter:
template_bundle_id = FacebookTemplate.objects.get(name='letter_re_letter_re_article').template_bundle_id
template_data.update({
"original_user": new_letter.letter.user.username,
"original_title": truncatewords(new_letter.letter.title,20),
"original_body": truncatewords(new_letter.letter.body,50),
})
#get template bundle id
if new_letter.article:
if new_letter.letter:
template_bundle_id = FacebookTemplate.objects.get(name='letter_re_letter_re_article').template_bundle_id
else:
template_bundle_id = FacebookTemplate.objects.get(name='letter_re_article').template_bundle_id
else:
if new_letter.letter:
template_bundle_id = FacebookTemplate.objects.get(name='letter_re_letter').template_bundle_id
else:
template_bundle_id = FacebookTemplate.objects.get(name='letter').template_bundle_id
results = {'success':True,'template_bundle_id':template_bundle_id,'template_data':template_data}
else:
errors = 'Please give your letter a '
for field in form.errors.keys():
if field == form.errors.keys()[-1]:
if len(form.errors.keys()) > 1:
errors = errors.rstrip(', ')
errors += ' and %s.' % field
else:
errors += ' %s.' % field
else:
errors += '%s, ' % field
results = {'success':False,'errors':errors}
json = simplejson.dumps(results)
return HttpResponse(json, mimetype='application/json')
#this is a request for a form to write a new letter
else:
form = LetterForm(instance=my_letter)
template_dict = {
'letter_form': form,
'letter_template_bundle_id': FacebookTemplate.objects.get(name='letter').template_bundle_id,
'letter_re_letter_template_bundle_id': FacebookTemplate.objects.get(name='letter_re_letter').template_bundle_id,
'letter_re_article_template_bundle_id': FacebookTemplate.objects.get(name='letter_re_article').template_bundle_id,
'letter_re_letter_re_article_template_bundle_id': FacebookTemplate.objects.get(name='letter_re_letter_re_article').template_bundle_id,
}
return render_to_response('letters/new_letter.html', template_dict, context_instance=RequestContext(request))
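# For reference, the AJAX branch above answers with JSON of the form
# (values illustrative only):
#     {"success": true, "template_bundle_id": 1234,
#      "template_data": {"title": "...", "body": "...", "url": "..."}}
# or, on validation failure:
#     {"success": false, "errors": "Please give your letter a title."}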
@login_required
def flag_as_offensive(request,letter_id):
b = Letter.objects.get(pk=letter_id)
b.offensive = True
b.save()
return HttpResponseRedirect(request.META['HTTP_REFERER'])
| brianboyer/newsmixer | social/letters/views.py | Python | gpl-3.0 | 7,091 | ["Brian"] | c1fd7ac483b745d4be2657ed35a3201355f5a595dcd2d4d34c7041b48b55f1a2 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import webapp2
import jinja2
import logging
import StringIO
from markupsafe import Markup, escape # https://pypi.python.org/pypi/MarkupSafe
import parsers
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
from api import inLayer, read_file, full_path, read_schemas, read_extensions, read_examples, namespaces, DataCache
from api import Unit, GetTargets, GetSources
from api import GetComment, all_terms, GetAllTypes, GetAllProperties, GetAllEnumerationValues
from api import GetParentList, GetImmediateSubtypes, HasMultipleBaseTypes
from api import GetJsonLdContext, ShortenOnSentence, StripHtmlTags
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
SCHEMA_VERSION=2.2
FEEDBACK_FORM_BASE_URL='https://docs.google.com/a/google.com/forms/d/1krxHlWJAO3JgvHRZV9Rugkr9VYnMdrI10xbGsWt733c/viewform?entry.1174568178&entry.41124795={0}&entry.882602760={1}'
# {0}: term URL, {1} category of term.
sitemode = "mainsite" # whitespaced list for CSS tags,
# e.g. "mainsite testsite" when off expected domains
# "extensionsite" when in an extension (e.g. blue?)
releaselog = { "2.0": "2015-05-13", "2.1": "2015-08-06" }
#
silent_skip_list = [ "favicon.ico" ] # Do nothing for now
all_layers = {}
ext_re = re.compile(r'([^\w,])+')
PageCache = {}
#TODO: Modes:
# mainsite
# webschemadev
# known extension (not skiplist'd, eg. demo1 on schema.org)
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
ENABLE_JSONLD_CONTEXT = True
ENABLE_CORS = True
ENABLE_HOSTED_EXTENSIONS = True
INTESTHARNESS = False #Used to indicate we are being called from tests - use setInTestHarness() & getInTestHarness() to manage value
EXTENSION_SUFFIX = "" # e.g. "*"
#ENABLED_EXTENSIONS = [ 'admin', 'auto', 'bib' ]
ENABLED_EXTENSIONS = [ 'auto', 'bib' ]
ALL_LAYERS = [ 'core', 'auto', 'bib' ]
FORCEDEBUGGING = False
# FORCEDEBUGGING = True
def cleanPath(node):
"""Return the substring of a string matching chars approved for use in our URL paths."""
return re.sub(r'[^a-zA-Z0-9\-/,\.]', '', str(node), flags=re.DOTALL)
class HTMLOutput:
"""Used in place of http response when we're collecting HTML to pass to template engine."""
def __init__(self):
self.outputStrings = []
def write(self, str):
self.outputStrings.append(str)
def toHTML(self):
return Markup ( "".join(self.outputStrings) )
def __str__(self):
return self.toHTML()
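# A minimal usage sketch of HTMLOutput (illustrative, not executed here):
#
#     out = HTMLOutput()
#     out.write("<p>hello</p>")
#     fragment = out.toHTML()   # Markup(u'<p>hello</p>')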
# Core API: we have a single schema graph built from triples and units.
# now in api.py
class TypeHierarchyTree:
def __init__(self, prefix=""):
self.txt = ""
self.visited = {}
self.prefix = prefix
def emit(self, s):
self.txt += s + "\n"
def emit2buff(self, buff, s):
buff.write(s + "\n")
def toHTML(self):
return '%s<ul>%s</ul>' % (self.prefix, self.txt)
def toJSON(self):
return self.txt
def traverseForHTML(self, node, depth = 1, hashorslash="/", layers='core', buff=None):
"""Generate a hierarchical tree view of the types. hashorslash is used for relative link prefixing."""
log.debug("traverseForHTML: node=%s hashorslash=%s" % ( node.id, hashorslash ))
localBuff = False
if buff == None:
localBuff = True
buff = StringIO.StringIO()
urlprefix = ""
home = node.getHomeLayer()
gotOutput = False
if home in layers:
gotOutput = True
if home in ENABLED_EXTENSIONS and home != getHostExt():
urlprefix = makeUrl(home)
extclass = ""
extflag = ""
tooltip=""
if home != "core" and home != "":
extclass = "class=\"ext ext-%s\"" % home
extflag = EXTENSION_SUFFIX
tooltip = "title=\"Extended schema: %s.schema.org\" " % home
# we are a supertype of some kind
subTypes = node.GetImmediateSubtypes(layers=ALL_LAYERS)
if len(subTypes) > 0:
# and we haven't been here before
if node.id not in self.visited:
self.visited[node.id] = True # remember our visit
self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag) )
self.emit2buff(buff, ' %s<ul>' % (" " * 4 * depth))
# handle our subtypes
for item in subTypes:
subBuff = StringIO.StringIO()
got = self.traverseForHTML(item, depth + 1, hashorslash=hashorslash, layers=layers, buff=subBuff)
if got:
gotOutput = True
self.emit2buff(buff,subBuff.getvalue())
subBuff.close()
self.emit2buff(buff, ' %s</ul>' % (" " * 4 * depth))
else:
# we are a supertype but we visited this type before, e.g. saw Restaurant via Place then via Organization
seen = ' <a href="#%s">+</a> ' % node.id
self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, seen) )
# leaf nodes
if len(subTypes) == 0:
if node.id not in self.visited:
self.emit2buff(buff, '%s<li class="tleaf" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, "" ))
#else:
#self.visited[node.id] = True # never...
# we tolerate "VideoGame" appearing under both Game and SoftwareApplication
# and would only suppress it if it had its own subtypes. Seems legit.
self.emit2buff(buff, ' %s</li>' % (" " * 4 * depth) )
if localBuff:
self.emit(buff.getvalue())
buff.close()
return gotOutput
# based on http://danbri.org/2013/SchemaD3/examples/4063550/hackathon-schema.js - thanks @gregg, @sandro
def traverseForJSONLD(self, node, depth = 0, last_at_this_level = True, supertype="None", layers='core'):
emit_debug = False
if node.id in self.visited:
# self.emit("skipping %s - already visited" % node.id)
return
self.visited[node.id] = True
p1 = " " * 4 * depth
if emit_debug:
self.emit("%s# @id: %s last_at_this_level: %s" % (p1, node.id, last_at_this_level))
        global namespaces
ctx = "{}".format(""""@context": {
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"schema": "http://schema.org/",
"rdfs:subClassOf": { "@type": "@id" },
"name": "rdfs:label",
"description": "rdfs:comment",
"children": { "@reverse": "rdfs:subClassOf" }
},\n""" if last_at_this_level and depth==0 else '' )
unseen_subtypes = []
for st in node.GetImmediateSubtypes(layers=layers):
if not st.id in self.visited:
unseen_subtypes.append(st)
unvisited_subtype_count = len(unseen_subtypes)
subtype_count = len( node.GetImmediateSubtypes(layers=layers) )
supertx = "{}".format( '"rdfs:subClassOf": "schema:%s", ' % supertype.id if supertype != "None" else '' )
maybe_comma = "{}".format("," if unvisited_subtype_count > 0 else "")
comment = GetComment(node, layers).strip()
comment = comment.replace('"',"'")
comment = ShortenOnSentence(StripHtmlTags(comment),60)
self.emit('\n%s{\n%s\n%s"@type": "rdfs:Class", %s "description": "%s",\n%s"name": "%s",\n%s"@id": "schema:%s"%s'
% (p1, ctx, p1, supertx, comment, p1, node.id, p1, node.id, maybe_comma))
i = 1
if unvisited_subtype_count > 0:
self.emit('%s"children": ' % p1 )
self.emit(" %s[" % p1 )
inner_lastness = False
for t in unseen_subtypes:
if emit_debug:
self.emit("%s # In %s > %s i: %s unvisited_subtype_count: %s" %(p1, node.id, t.id, i, unvisited_subtype_count))
if i == unvisited_subtype_count:
inner_lastness = True
i = i + 1
self.traverseForJSONLD(t, depth + 1, inner_lastness, supertype=node, layers=layers)
self.emit("%s ]%s" % (p1, "{}".format( "" if not last_at_this_level else '' ) ) )
maybe_comma = "{}".format( ',' if not last_at_this_level else '' )
self.emit('\n%s}%s\n' % (p1, maybe_comma))
def GetExamples(node, layers='core'):
"""Returns the examples (if any) for some Unit node."""
return node.examples
def GetExtMappingsRDFa(node, layers='core'):
"""Self-contained chunk of RDFa HTML markup with mappings for this term."""
if (node.isClass()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
if (c.id.startswith('http')):
markup = markup + "<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id
else:
markup = markup + "<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id
return markup
if (node.isAttribute()):
equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
if len(equivs) > 0:
markup = ''
for c in equivs:
markup = markup + "<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
return markup
return "<!-- no external mappings noted for this term. -->"
class ShowUnit (webapp2.RequestHandler):
"""ShowUnit exposes schema.org terms via Web RequestHandler
(HTML/HTTP etc.).
"""
# def __init__(self):
# self.outputStrings = []
def emitCacheHeaders(self):
"""Send cache-related headers via HTTP."""
self.response.headers['Cache-Control'] = "public, max-age=43200" # 12h
self.response.headers['Vary'] = "Accept, Accept-Encoding"
def GetCachedText(self, node, layers='core'):
"""Return page text from node.id cache (if found, otherwise None)."""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
if (cachekey in PageCache):
return PageCache[cachekey]
else:
return None
def AddCachedText(self, node, textStrings, layers='core'):
"""Cache text of our page for this node via its node.id.
We can be passed a text string or an array of text strings.
"""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
outputText = "".join(textStrings)
log.debug("CACHING: %s" % node.id)
PageCache[cachekey] = outputText
return outputText
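    # A hedged sketch of the intended cache flow (handler details assumed):
    #
    #     cached = self.GetCachedText(node, layers)
    #     if cached is not None:
    #         self.response.out.write(cached)          # cache hit
    #     else:
    #         ...render the page into self.outputStrings...
    #         self.response.out.write(
    #             self.AddCachedText(node, self.outputStrings, layers))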
def write(self, str):
"""Write some text to Web server's output stream."""
self.outputStrings.append(str)
def moreInfoBlock(self, node, layer='core'):
# if we think we have more info on this term, show a bulleted list of extra items.
# defaults
bugs = ["No known open issues."]
mappings = ["No recorded schema mappings."]
items = bugs + mappings
nodetype="Misc"
if node.isEnumeration():
nodetype = "enumeration"
elif node.isDataType(layers=layer):
nodetype = "datatype"
elif node.isClass(layers=layer):
nodetype = "type"
elif node.isAttribute(layers=layer):
nodetype = "property"
elif node.isEnumerationValue(layers=layer):
nodetype = "enumeratedvalue"
feedback_url = FEEDBACK_FORM_BASE_URL.format("http://schema.org/{0}".format(node.id), nodetype)
items = [
"<a href='{0}'>Leave public feedback on this term 💬</a>".format(feedback_url),
"<a href='https://github.com/schemaorg/schemaorg/issues?q=is%3Aissue+is%3Aopen+{0}'>Check for open issues.</a>".format(node.id)
]
for l in all_terms[node.id]:
l = l.replace("#","")
if l == "core":
ext = ""
else:
ext = "extension "
if ENABLE_HOSTED_EXTENSIONS:
items.append("'{0}' is mentioned in {1}layer: <a href='{2}'>{3}</a>".format( node.id, ext, makeUrl(l,node.id), l ))
moreinfo = """<div>
<div id='infobox' style='text-align: right;'><label role="checkbox" for=morecheck><b><span style="cursor: pointer;">[more...]</span></b></label></div>
<input type='checkbox' checked="checked" style='display: none' id=morecheck><div id='infomsg' style='background-color: #EEEEEE; text-align: left; padding: 0.5em;'>
<ul>"""
for i in items:
moreinfo += "<li>%s</li>" % i
# <li>mappings to other terms.</li>
# <li>or links to open issues.</li>
moreinfo += "</ul>\n</div>\n</div>\n"
return moreinfo
def GetParentStack(self, node, layers='core'):
"""Returns a hiearchical structured used for site breadcrumbs."""
thing = Unit.GetUnit("Thing")
if (node not in self.parentStack):
self.parentStack.append(node)
if (Unit.isAttribute(node, layers=layers)):
self.parentStack.append(Unit.GetUnit("Property"))
self.parentStack.append(thing)
sc = Unit.GetUnit("rdfs:subClassOf")
if GetTargets(sc, node, layers=layers):
for p in GetTargets(sc, node, layers=layers):
self.GetParentStack(p, layers=layers)
else:
# Enumerations are classes that have no declared subclasses
sc = Unit.GetUnit("typeOf")
for p in GetTargets(sc, node, layers=layers):
self.GetParentStack(p, layers=layers)
#Put 'Thing' to the end for multiple inheritance classes
if(thing in self.parentStack):
self.parentStack.remove(thing)
self.parentStack.append(thing)
def ml(self, node, label='', title='', prop='', hashorslash='/'):
"""ml ('make link')
Returns an HTML-formatted link to the class or property URL
* label = optional anchor text label for the link
* title = optional title attribute on the link
* prop = an optional property value to apply to the A element
"""
if(node.id == "DataType"): #Special case
return "<a href=\"%s\">%s</a>" % (node.id, node.id)
if label=='':
label = node.id
if title != '':
title = " title=\"%s\"" % (title)
if prop:
prop = " property=\"%s\"" % (prop)
urlprefix = ""
home = node.getHomeLayer()
if home in ENABLED_EXTENSIONS and home != getHostExt():
port = ""
if getHostPort() != "80":
port = ":%s" % getHostPort()
urlprefix = makeUrl(home)
extclass = ""
extflag = ""
tooltip = ""
if home != "core" and home != "":
extclass = "class=\"ext ext-%s\" " % home
extflag = EXTENSION_SUFFIX
tooltip = "title=\"Extended schema: %s.schema.org\" " % home
return "<a %s %s href=\"%s%s%s\"%s%s>%s</a>%s" % (tooltip, extclass, urlprefix, hashorslash, node.id, prop, title, label, extflag)
def makeLinksFromArray(self, nodearray, tooltip=''):
"""Make a comma separate list of links via ml() function.
* tooltip - optional text to use as title of all links
"""
hyperlinks = []
for f in nodearray:
hyperlinks.append(self.ml(f, f.id, tooltip))
return (", ".join(hyperlinks))
def emitUnitHeaders(self, node, layers='core'):
"""Write out the HTML page headers for this node."""
self.write("<h1 class=\"page-title\">\n")
self.write(node.id)
self.write("</h1>")
home = node.home
if home != "core" and home != "":
self.write("Defined in the %s.schema.org extension." % home)
self.write(" (This is an initial exploratory release.)<br/>")
self.emitCanonicalURL(node)
self.BreadCrumbs(node, layers=layers)
comment = GetComment(node, layers)
self.write(" <div property=\"rdfs:comment\">%s</div>\n\n" % (comment) + "\n")
self.write(" <br/><div>Usage: %s</div>\n\n" % (node.UsageStr()) + "\n")
#was: self.write(self.moreInfoBlock(node))
if (node.isClass(layers=layers) and not node.isDataType(layers=layers) and node.id != "DataType"):
self.write("<table class=\"definition-table\">\n <thead>\n <tr><th>Property</th><th>Expected Type</th><th>Description</th> \n </tr>\n </thead>\n\n")
def emitCanonicalURL(self,node):
cURL = "http://schema.org/" + node.id
self.write(" <span class=\"canonicalUrl\">Canonical URL: <a href=\"%s\">%s</a></span>" % (cURL, cURL))
# Stacks to support multiple inheritance
crumbStacks = []
def BreadCrumbs(self, node, layers):
self.crumbStacks = []
cstack = []
self.crumbStacks.append(cstack)
self.WalkCrumbs(node,cstack,layers=layers)
if (node.isAttribute(layers=layers)):
cstack.append(Unit.GetUnit("Property"))
cstack.append(Unit.GetUnit("Thing"))
enuma = node.isEnumerationValue(layers=layers)
crumbsout = []
for row in range(len(self.crumbStacks)):
thisrow = ""
if(":" in self.crumbStacks[row][len(self.crumbStacks[row])-1].id):
continue
count = 0
while(len(self.crumbStacks[row]) > 0):
n = self.crumbStacks[row].pop()
if(count > 0):
if((len(self.crumbStacks[row]) == 0) and enuma):
thisrow += " :: "
else:
thisrow += " > "
elif n.id == "Class": # If Class is first breadcrum suppress it
continue
count += 1
thisrow += "%s" % (self.ml(n))
crumbsout.append(thisrow)
self.write("<h4>")
rowcount = 0
for crumb in sorted(crumbsout):
if rowcount > 0:
self.write("<br/>")
self.write("<span class='breadcrumbs'>%s</span>\n" % crumb)
rowcount += 1
self.write("</h4>\n")
#Walk up the stack, appending crumbs; create new stacks (duplicating crumbs already identified) if more than one parent is found
def WalkCrumbs(self, node, cstack, layers):
if "http://" in node.id or "https://" in node.id: #Suppress external class references
return
cstack.append(node)
tmpStacks = []
tmpStacks.append(cstack)
subs = []
if(node.isDataType(layers=layers)):
subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)
subs += GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
elif node.isClass(layers=layers):
subs = GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
elif(node.isAttribute(layers=layers)):
subs = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"), node, layers=layers)
else:
subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)# Enumerations are classes that have no declared subclasses
for i in range(len(subs)):
if(i > 0):
t = cstack[:]
tmpStacks.append(t)
self.crumbStacks.append(t)
x = 0
for p in subs:
self.WalkCrumbs(p,tmpStacks[x],layers=layers)
x += 1
def emitSimplePropertiesPerType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties applicable to the specified type."""
if not out:
out = self
out.write("<ul class='props4type'>")
for prop in sorted(GetSources( Unit.GetUnit("domainIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
def emitSimplePropertiesIntoType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties whose values are the specified type."""
if not out:
out = self
out.write("<ul class='props2type'>")
for prop in sorted(GetSources( Unit.GetUnit("rangeIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
def ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
"""Write out a table of properties for a per-type page."""
if not out:
out = self
propcount = 0
headerPrinted = False
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
for prop in sorted(GetSources(di, cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
supersedes = prop.supersedes(layers=layers)
olderprops = prop.supersedes_all(layers=layers)
inverseprop = prop.inverseproperty(layers=layers)
subprops = prop.subproperties(layers=layers)
superprops = prop.superproperties(layers=layers)
ranges = GetTargets(ri, prop, layers=layers)
comment = GetComment(prop, layers=layers)
if (not headerPrinted):
class_head = self.ml(cl)
if subclass:
class_head = self.ml(cl, prop="rdfs:subClassOf")
out.write("<tr class=\"supertype\">\n <th class=\"supertype-name\" colspan=\"3\">Properties from %s</th>\n \n</tr>\n\n<tbody class=\"supertype\">\n " % (class_head))
headerPrinted = True
out.write("<tr typeof=\"rdfs:Property\" resource=\"http://schema.org/%s\">\n \n <th class=\"prop-nam\" scope=\"row\">\n\n<code property=\"rdfs:label\">%s</code>\n </th>\n " % (prop.id, self.ml(prop)))
out.write("<td class=\"prop-ect\">\n")
first_range = True
for r in ranges:
if (not first_range):
out.write(" or <br/> ")
first_range = False
out.write(self.ml(r, prop='rangeIncludes'))
out.write(" ")
out.write("</td>")
out.write("<td class=\"prop-desc\" property=\"rdfs:comment\">%s" % (comment))
if (len(olderprops) > 0):
olderlinks = ", ".join([self.ml(o) for o in olderprops])
out.write(" Supersedes %s." % olderlinks )
if (inverseprop != None):
out.write("<br/> Inverse property: %s." % (self.ml(inverseprop)))
out.write("</td></tr>")
subclass = False
propcount += 1
if subclass: # in case the superclass has no defined attributes
out.write("<tr><td colspan=\"3\"><meta property=\"rdfs:subClassOf\" content=\"%s\"></td></tr>" % (cl.id))
return propcount
def emitClassExtensionSuperclasses (self, cl, layers="core", out=None):
first = True
count = 0
if not out:
out = self
buff = StringIO.StringIO()
sc = Unit.GetUnit("rdfs:subClassOf")
for p in GetTargets(sc, cl, ALL_LAYERS):
if inLayer(layers,p):
continue
if p.id == "http://www.w3.org/2000/01/rdf-schema#Class": #Special case for "DataType"
p.id = "Class"
sep = ", "
if first:
sep = "<li>"
first = False
buff.write("%s%s" % (sep,self.ml(p)))
count += 1
if(count > 0):
buff.write("</li>\n")
content = buff.getvalue()
if(len(content) > 0):
if cl.id == "DataType":
self.write("<h4>Subclass of:<h4>")
else:
self.write("<h4>Available supertypes defined in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
def emitClassExtensionProperties (self, cl, layers="core", out=None):
if not out:
out = self
buff = StringIO.StringIO()
for p in self.parentStack:
self._ClassExtensionProperties(buff, p, layers=layers)
content = buff.getvalue()
if(len(content) > 0):
self.write("<h4>Available properties in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
def _ClassExtensionProperties (self, out, cl, layers="core"):
"""Write out a list of properties not displayed as they are in extensions for a per-type page."""
di = Unit.GetUnit("domainIncludes")
first = True
count = 0
for prop in sorted(GetSources(di, cl, ALL_LAYERS), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
if inLayer(layers,prop):
continue
log.debug("ClassExtensionfFound %s " % (prop))
sep = ", "
if first:
out.write("<li>From %s: " % cl)
sep = ""
first = False
out.write("%s%s" % (sep,self.ml(prop)))
count += 1
if(count > 0):
out.write("</li>\n")
def emitClassIncomingProperties (self, cl, layers="core", out=None, hashorslash="/"):
"""Write out a table of incoming properties for a per-type page."""
if not out:
out = self
headerPrinted = False
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
for prop in sorted(GetSources(ri, cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
supersedes = prop.supersedes(layers=layers)
inverseprop = prop.inverseproperty(layers=layers)
subprops = prop.subproperties(layers=layers)
superprops = prop.superproperties(layers=layers)
ranges = GetTargets(di, prop, layers=layers)
comment = GetComment(prop, layers=layers)
if (not headerPrinted):
self.write("<br/><br/>Instances of %s may appear as values for the following properties<br/>" % (self.ml(cl)))
self.write("<table class=\"definition-table\">\n \n \n<thead>\n <tr><th>Property</th><th>On Types</th><th>Description</th> \n </tr>\n</thead>\n\n")
headerPrinted = True
self.write("<tr>\n<th class=\"prop-nam\" scope=\"row\">\n <code>%s</code>\n</th>\n " % (self.ml(prop)) + "\n")
self.write("<td class=\"prop-ect\">\n")
first_range = True
for r in ranges:
if (not first_range):
self.write(" or<br/> ")
first_range = False
self.write(self.ml(r))
self.write(" ")
self.write("</td>")
self.write("<td class=\"prop-desc\">%s " % (comment))
if (supersedes != None):
self.write(" Supersedes %s." % (self.ml(supersedes)))
if (inverseprop != None):
self.write("<br/> inverse property: %s." % (self.ml(inverseprop)) )
self.write("</td></tr>")
if (headerPrinted):
self.write("</table>\n")
def emitRangeTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of this property's expected types."""
if not out:
out = self
out.write("<ul class='attrrangesummary'>")
for rt in sorted(GetTargets(Unit.GetUnit("rangeIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, rt.id, rt.id ))
out.write("</ul>\n\n")
def emitDomainTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of types that expect this property."""
if not out:
out = self
out.write("<ul class='attrdomainsummary'>")
for dt in sorted(GetTargets(Unit.GetUnit("domainIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, dt.id, dt.id ))
out.write("</ul>\n\n")
def emitAttributeProperties(self, node, layers="core", out=None, hashorslash="/"):
"""Write out properties of this property, for a per-property page."""
if not out:
out = self
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
ranges = sorted(GetTargets(ri, node, layers=layers), key=lambda u: u.id)
domains = sorted(GetTargets(di, node, layers=layers), key=lambda u: u.id)
first_range = True
newerprop = node.supersededBy(layers=layers) # None or one. e.g. we're on 'seller'(new) page, we get 'vendor'(old)
olderprop = node.supersedes(layers=layers) # None or one
olderprops = node.supersedes_all(layers=layers) # list, e.g. 'seller' has 'vendor', 'merchant'.
inverseprop = node.inverseproperty(layers=layers)
subprops = node.subproperties(layers=layers)
superprops = node.superproperties(layers=layers)
if (inverseprop != None):
tt = "This means the same thing, but with the relationship direction reversed."
out.write("<p>Inverse-property: %s.</p>" % (self.ml(inverseprop, inverseprop.id,tt, prop=False, hashorslash=hashorslash)) )
out.write("<table class=\"definition-table\">\n")
out.write("<thead>\n <tr>\n <th>Values expected to be one of these types</th>\n </tr>\n</thead>\n\n <tr>\n <td>\n ")
for r in ranges:
if (not first_range):
out.write("<br/>")
first_range = False
tt = "The '%s' property has values that include instances of the '%s' type." % (node.id, r.id)
out.write(" <code>%s</code> " % (self.ml(r, r.id, tt, prop="rangeIncludes", hashorslash=hashorslash) +"\n"))
out.write(" </td>\n </tr>\n</table>\n\n")
first_domain = True
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Used on these types</th>\n </tr>\n</thead>\n<tr>\n <td>")
for d in domains:
if (not first_domain):
out.write("<br/>")
first_domain = False
tt = "The '%s' property is used on the '%s' type." % (node.id, d.id)
out.write("\n <code>%s</code> " % (self.ml(d, d.id, tt, prop="domainIncludes",hashorslash=hashorslash)+"\n" ))
out.write(" </td>\n </tr>\n</table>\n\n")
if (subprops != None and len(subprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Sub-properties</th>\n </tr>\n</thead>\n")
for sbp in subprops:
c = GetComment(sbp,layers=layers)
tt = "%s: ''%s''" % ( sbp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sbp, sbp.id, tt, hashorslash=hashorslash)))
out.write("\n</table>\n\n")
# Super-properties
if (superprops != None and len(superprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Super-properties</th>\n </tr>\n</thead>\n")
for spp in superprops:
c = GetComment(spp, layers=layers) # markup needs to be stripped from c, e.g. see 'logo', 'photo'
c = re.sub(r'<[^>]*>', '', c) # This is not a sanitizer, we trust our input.
tt = "%s: ''%s''" % ( spp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(spp, spp.id, tt,hashorslash)))
out.write("\n</table>\n\n")
# Supersedes
if (olderprops != None and len(olderprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Supersedes</th>\n </tr>\n</thead>\n")
for o in olderprops:
c = GetComment(o, layers=layers)
tt = "%s: ''%s''" % ( o.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(o, o.id, tt, hashorslash)))
out.write("\n</table>\n\n")
# supersededBy (at most one direct successor)
if (newerprop != None):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th><a href=\"/supersededBy\">supersededBy</a></th>\n </tr>\n</thead>\n")
tt="supersededBy: %s" % newerprop.id
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(newerprop, newerprop.id, tt,hashorslash)))
out.write("\n</table>\n\n")
def rep(self, markup):
"""Replace < and > with HTML escape chars."""
m1 = re.sub("<", "<", markup)
m2 = re.sub(">", ">", m1)
# TODO: Ampersand? Check usage with examples.
return m2
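# e.g. self.rep('<em>x</em>') returns '&lt;em&gt;x&lt;/em&gt;'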
def handleHomepage(self, node):
"""Send the homepage, or if no HTML accept header received and JSON-LD was requested, send JSON-LD context file.
typical browser accept list: ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
# e.g. curl -H "Accept: application/ld+json" http://localhost:8080/
see also http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
https://github.com/rvguha/schemaorg/issues/5
https://github.com/rvguha/schemaorg/wiki/JsonLd
"""
accept_header = self.request.headers.get('Accept').split(',')
logging.info("accepts: %s" % self.request.headers.get('Accept'))
if ENABLE_JSONLD_CONTEXT:
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
# Homepage is content-negotiated. HTML or JSON-LD.
mimereq = {}
for ah in accept_header:
ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
mimereq[ah] = 1
html_score = mimereq.get('text/html', 5)
xhtml_score = mimereq.get('application/xhtml+xml', 5)
jsonld_score = mimereq.get('application/ld+json', 10)
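# Lower score wins. Types listed in the Accept header score 1; unlisted
# types keep their defaults (5 for HTML/XHTML, 10 for JSON-LD), so JSON-LD
# is only served when explicitly requested and no HTML variant is, e.g.
# Accept: application/ld+json -> H:5 XH:5 J:1 -> context file.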
# print "accept_header: " + str(accept_header) + " mimereq: "+str(mimereq) + "Scores H:{0} XH:{1} J:{2} ".format(html_score,xhtml_score,jsonld_score)
if (ENABLE_JSONLD_CONTEXT and (jsonld_score < html_score and jsonld_score < xhtml_score)):
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
else:
# Serve a homepage from template
# the .tpl has responsibility for extension homepages
# TODO: pass in extension, base_domain etc.
sitekeyedhomepage = "homepage %s" % getSiteName()
hp = DataCache.get(sitekeyedhomepage)
if hp != None:
self.response.out.write( hp )
#log.info("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
log.debug("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
else:
template = JINJA_ENVIRONMENT.get_template('homepage.tpl')
template_values = {
'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
'SCHEMA_VERSION': SCHEMA_VERSION,
'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'myhost': getHost(),
'myport': getHostPort(),
'mybasehost': getBaseHost(),
'host_ext': getHostExt(),
'ext_contents': self.handleExtensionContents(getHostExt()),
'home_page': "True",
'debugging': getAppVar('debugging')
}
# We don't want JINJA2 doing any caching of included sub-templates.
page = template.render(template_values)
self.response.out.write( page )
log.debug("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
#log.info("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
DataCache.put(sitekeyedhomepage, page)
# self.response.out.write( open("static/index.html", 'r').read() )
return True
log.info("Warning: got here how?")
return False
def getExtendedSiteName(self, layers):
"""Returns site name (domain name), informed by the list of active layers."""
if layers==["core"]:
return "schema.org"
if len(layers)==0:
return "schema.org"
return (getHostExt() + ".schema.org")
def emitSchemaorgHeaders(self, node, is_class=False, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
"""
Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.
* entry = name of the class or property
"""
anode = True
if isinstance(node, str):
entry = node
anode = False
else:
entry = node.id
rdfs_type = 'rdfs:Property'
if is_class:
rdfs_type = 'rdfs:Class'
generated_page_id = "genericTermPageHeader-%s-%s" % ( str(entry), getSiteName() )
gtp = DataCache.get( generated_page_id )
if gtp != None:
self.response.out.write( gtp )
log.debug("Served recycled genericTermPageHeader.tpl for %s" % generated_page_id )
else:
desc = entry
if anode:
desc = self.getMetaDescription(node, layers=layers, lengthHint=200)
template = JINJA_ENVIRONMENT.get_template('genericTermPageHeader.tpl')
template_values = {
'entry': str(entry),
'desc' : desc,
'sitemode': sitemode,
'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'menu_sel': "Schemas",
'rdfs_type': rdfs_type,
'ext_mappings': ext_mappings
}
out = template.render(template_values)
DataCache.put(generated_page_id,out)
log.debug("Served and cached fresh genericTermPageHeader.tpl for %s" % generated_page_id )
self.response.write(out)
def getMetaDescription(self, node, layers="core",lengthHint=250):
ins = ""
if node.isEnumeration():
ins += " Enumeration Type"
elif node.isClass():
ins += " Type"
elif node.isAttribute():
ins += " Property"
elif node.isEnumerationValue():
ins += " Enumeration Value"
desc = "Schema.org%s: %s - " % (ins, node.id)
lengthHint -= len(desc)
comment = GetComment(node, layers)
desc += ShortenOnSentence(StripHtmlTags(comment),lengthHint)
return desc
def emitExactTermPage(self, node, layers="core"):
"""Emit a Web page that exactly matches this node."""
log.debug("EXACT PAGE: %s" % node.id)
self.outputStrings = [] # blank slate
ext_mappings = GetExtMappingsRDFa(node, layers=layers)
global sitemode #,sitename
if ("schema.org" not in self.request.host and sitemode == "mainsite"):
sitemode = "mainsite testsite"
self.emitSchemaorgHeaders(node, node.isClass(), ext_mappings, sitemode, getSiteName(), layers)
if ( ENABLE_HOSTED_EXTENSIONS and ("core" not in layers or len(layers)>1) ):
ll = " ".join(layers).replace("core","")
target=""
if inLayer("core", node):
target = node.id
s = "<p id='lli' class='layerinfo %s'><a href=\"https://github.com/schemaorg/schemaorg/wiki/ExtensionList\">extension shown</a>: %s [<a href='%s'>x</a>]</p>\n" % (ll, ll, makeUrl("",target))
self.write(s)
cached = self.GetCachedText(node, layers)
if (cached != None):
self.response.write(cached)
return
self.parentStack = []
self.GetParentStack(node, layers=layers)
self.emitUnitHeaders(node, layers=layers) # writes <h1><table>...
if (node.isEnumerationValue(layers=layers)):
self.write(self.moreInfoBlock(node))
if (node.isClass(layers=layers)):
subclass = True
self.write(self.moreInfoBlock(node))
for p in self.parentStack:
self.ClassProperties(p, p==self.parentStack[0], layers=layers)
if (not node.isDataType(layers=layers) and node.id != "DataType"):
self.write("\n\n</table>\n\n")
self.emitClassIncomingProperties(node, layers=layers)
self.emitClassExtensionSuperclasses(node,layers)
self.emitClassExtensionProperties(p,layers)
elif (Unit.isAttribute(node, layers=layers)):
self.emitAttributeProperties(node, layers=layers)
self.write(self.moreInfoBlock(node))
if (node.isClass(layers=layers)):
children = []
children = GetSources(Unit.GetUnit("rdfs:subClassOf"), node, ALL_LAYERS)# Normal subclasses
if(node.isDataType() or node.id == "DataType"):
children += GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS)# Datatypes
children = sorted(children, key=lambda u: u.id)
if (len(children) > 0):
buff = StringIO.StringIO()
extbuff = StringIO.StringIO()
firstext=True
for c in children:
if inLayer(layers, c):
buff.write("<li> %s </li>" % (self.ml(c)))
else:
sep = ", "
if firstext:
sep = ""
firstext=False
extbuff.write("%s%s" % (sep,self.ml(c)) )
if (len(buff.getvalue()) > 0):
if node.isDataType():
self.write("<br/><b>More specific DataTypes</b><ul>")
else:
self.write("<br/><b>More specific Types</b><ul>")
self.write(buff.getvalue())
self.write("</ul>")
if (len(extbuff.getvalue()) > 0):
self.write("<h4>More specific Types available in extensions</h4><ul><li>")
self.write(extbuff.getvalue())
self.write("</li></ul>")
buff.close()
extbuff.close()
if (node.isEnumeration(layers=layers)):
self.write(self.moreInfoBlock(node))
children = sorted(GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS), key=lambda u: u.id)
if (len(children) > 0):
buff = StringIO.StringIO()
extbuff = StringIO.StringIO()
firstext=True
for c in children:
if inLayer(layers, c):
buff.write("<li> %s </li>" % (self.ml(c)))
else:
sep = ","
if firstext:
sep = ""
firstext=False
extbuff.write("%s%s" % (sep,self.ml(c)) )
if (len(buff.getvalue()) > 0):
self.write("<br/><br/><b>Enumeration members</b><ul>")
self.write(buff.getvalue())
self.write("</ul>")
if (len(extbuff.getvalue()) > 0):
self.write("<h4>Enumeration members available in extensions</h4><ul><li>")
self.write(extbuff.getvalue())
self.write("</li></ul>")
buff.close()
extbuff.close()
ackorgs = GetTargets(Unit.GetUnit("dc:source"), node, layers=layers)
if (len(ackorgs) > 0):
self.write("<h4 id=\"acks\">Acknowledgements</h4>\n")
for ao in ackorgs:
acks = sorted(GetTargets(Unit.GetUnit("rdfs:comment"), ao, layers))
for ack in acks:
self.write(str(ack+"<br/>"))
examples = GetExamples(node, layers=layers)
log.debug("Rendering n=%s examples" % len(examples))
if (len(examples) > 0):
example_labels = [
('Without Markup', 'original_html', 'selected'),
('Microdata', 'microdata', ''),
('RDFa', 'rdfa', ''),
('JSON-LD', 'jsonld', ''),
]
self.write("<br/><br/><b><a id=\"examples\">Examples</a></b><br/><br/>\n\n")
for ex in examples:
if "id" in ex.egmeta:
self.write('<span id="%s"></span>' % ex.egmeta["id"])
self.write("<div class='ds-selector-tabs ds-selector'>\n")
self.write(" <div class='selectors'>\n")
for label, example_type, selected in example_labels:
self.write(" <a data-selects='%s' class='%s'>%s</a>\n"
% (example_type, selected, label))
self.write("</div>\n\n")
for label, example_type, selected in example_labels:
self.write("<pre class=\"prettyprint lang-html linenums %s %s\">%s</pre>\n\n"
% (example_type, selected, self.rep(ex.get(example_type))))
self.write("</div>\n\n")
self.write("<p class=\"version\"><b>Schema Version %s</b></p>\n\n" % SCHEMA_VERSION)
# TODO: add some version info regarding the extension
# Analytics
self.write("""<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-52672119-1', 'auto');ga('send', 'pageview');</script>""")
self.write(" \n\n</div>\n</body>\n</html>")
self.response.write(self.AddCachedText(node, self.outputStrings, layers))
def emitHTTPHeaders(self, node):
if ENABLE_CORS:
self.response.headers.add_header("Access-Control-Allow-Origin", "*") # entire site is public.
# see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
def setupExtensionLayerlist(self, node):
# Identify which extension layer(s) are requested
# TODO: add subdomain support e.g. bib.schema.org/Globe
# instead of Globe?ext=bib which is more for debugging.
# 1. get a comma list from ?ext=foo,bar URL notation
extlist = cleanPath( self.request.get("ext") )# for debugging
extlist = re.sub(ext_re, '', extlist).split(',')
log.debug("?ext= extension list: %s " % ", ".join(extlist))
# 2. Ignore ?ext=, start with 'core' only.
layerlist = [ "core"]
# 3. Use host_ext if set, e.g. 'bib' from bib.schema.org
if getHostExt() != None:
log.debug("Host: %s host_ext: %s" % ( self.request.host , getHostExt() ) )
extlist.append(getHostExt())
# Report domain-requested extensions
for x in extlist:
log.debug("Ext filter found: %s" % str(x))
if x in ["core", "localhost", ""]:
continue
layerlist.append("%s" % str(x))
layerlist = list(set(layerlist)) # dedup
log.debug("layerlist: %s" % layerlist)
return layerlist
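# Illustrative: a request to bib.schema.org/Globe?ext=auto would produce a
# layerlist like ['core', 'bib', 'auto'] (order may vary after dedup).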
def handleJSONContext(self, node):
"""Handle JSON-LD Context non-homepage requests (including refuse if not enabled)."""
if not ENABLE_JSONLD_CONTEXT:
self.error(404)
self.response.out.write('<title>404 Not Found.</title><a href="/">404 Not Found (JSON-LD Context not enabled.)</a><br/><br/>')
return True
if (node=="docs/jsonldcontext.json.txt"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "text/plain"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
if (node=="docs/jsonldcontext.json"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
return False
# see also handleHomepage for conneg'd version.
def handleSchemasPage(self, node, layerlist='core'):
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
if DataCache.get('SchemasPage'):
self.response.out.write( DataCache.get('SchemasPage') )
log.debug("Serving recycled SchemasPage.")
return True
else:
extensions = []
for ex in sorted(ENABLED_EXTENSIONS):
extensions.append("<a href=\"%s\">%s.schema.org</a>" % (makeUrl(ex,""),ex))
template = JINJA_ENVIRONMENT.get_template('schemas.tpl')
page = template.render({'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'counts': self.getCounts(),
'extensions': extensions,
'menu_sel': "Schemas"})
self.response.out.write( page )
log.debug("Serving fresh SchemasPage.")
DataCache.put("SchemasPage",page)
return True
def getCounts(self):
text = ""
text += "The core vocabulary currently consists of %s Types, " % len(GetAllTypes("core"))
text += " %s Properties, " % len(GetAllProperties("core"))
text += "and %s Enumeration values." % len(GetAllEnumerationValues("core"))
return text
def handleFullHierarchyPage(self, node, layerlist='core'):
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
if DataCache.get('FullTreePage'):
self.response.out.write( DataCache.get('FullTreePage') )
log.debug("Serving recycled FullTreePage.")
return True
else:
template = JINJA_ENVIRONMENT.get_template('full.tpl')
extlist=""
extonlylist=[]
count=0
for i in layerlist:
if i != "core":
sep = ""
if count > 0:
sep = ", "
extlist += "'%s'%s" % (i, sep)
extonlylist.append(i)
count += 1
local_button = ""
local_label = "<h3>Core plus %s extension vocabularies</h3>" % extlist
if count == 0:
local_button = "Core vocabulary"
elif count == 1:
local_button = "Core plus %s extension" % extlist
else:
local_button = "Core plus %s extensions" % extlist
ext_button = ""
if count == 1:
ext_button = "Extension %s" % extlist
elif count > 1:
ext_button = "Extensions %s" % extlist
uThing = Unit.GetUnit("Thing")
uDataType = Unit.GetUnit("DataType")
mainroot = TypeHierarchyTree(local_label)
mainroot.traverseForHTML(uThing, layers=layerlist)
thing_tree = mainroot.toHTML()
#az_enums = GetAllEnumerationValues(layerlist)
#az_enums.sort( key = lambda u: u.id)
#thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
fullmainroot = TypeHierarchyTree("<h3>Core plus all extension vocabularies</h3>")
fullmainroot.traverseForHTML(uThing, layers=ALL_LAYERS)
full_thing_tree = fullmainroot.toHTML()
#az_enums = GetAllEnumerationValues(ALL_LAYERS)
#az_enums.sort( key = lambda u: u.id)
#full_thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
ext_thing_tree = None
if len(extonlylist) > 0:
extroot = TypeHierarchyTree("<h3>Extension: %s</h3>" % extlist)
extroot.traverseForHTML(uThing, layers=extonlylist)
ext_thing_tree = extroot.toHTML()
#az_enums = GetAllEnumerationValues(extonlylist)
#az_enums.sort( key = lambda u: u.id)
#ext_thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
dtroot.traverseForHTML(uDataType, layers=layerlist)
datatype_tree = dtroot.toHTML()
full_button = "Core plus all extensions"
page = template.render({ 'thing_tree': thing_tree,
'full_thing_tree': full_thing_tree,
'ext_thing_tree': ext_thing_tree,
'datatype_tree': datatype_tree,
'local_button': local_button,
'full_button': full_button,
'ext_button': ext_button,
'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'menu_sel': "Schemas"})
self.response.out.write( page )
log.debug("Serving fresh FullTreePage.")
DataCache.put("FullTreePage",page)
return True
def handleJSONSchemaTree(self, node, layerlist='core'):
"""Handle a request for a JSON-LD tree representation of the schemas (RDFS-based)."""
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
if DataCache.get('JSONLDThingTree'):
self.response.out.write( DataCache.get('JSONLDThingTree') )
log.debug("Serving recycled JSONLDThingTree.")
return True
else:
uThing = Unit.GetUnit("Thing")
mainroot = TypeHierarchyTree()
mainroot.traverseForJSONLD(Unit.GetUnit("Thing"), layers=layerlist)
thing_tree = mainroot.toJSON()
self.response.out.write( thing_tree )
log.debug("Serving fresh JSONLDThingTree.")
DataCache.put("JSONLDThingTree",thing_tree)
return True
return False
def handleExactTermPage(self, node, layers='core'):
"""Handle with requests for specific terms like /Person, /fooBar. """
#self.outputStrings = [] # blank slate
schema_node = Unit.GetUnit(node) # e.g. "Person", "CreativeWork".
log.debug("Layers: %s",layers)
if inLayer(layers, schema_node):
self.emitExactTermPage(schema_node, layers=layers)
return True
else:
# log.info("Looking for node: %s in layers: %s" % (node.id, ",".join(all_layers.keys() )) )
if not ENABLE_HOSTED_EXTENSIONS:
return False
if schema_node is not None and schema_node.id in all_terms:# look for it in other layers
log.debug("TODO: layer toc: %s" % all_terms[schema_node.id] )
# self.response.out.write("Layers should be listed here. %s " % all_terms[node.id] )
extensions = []
for x in all_terms[schema_node.id]:
x = x.replace("#","")
ext = {}
ext['href'] = makeUrl(x,schema_node.id)
ext['text'] = x
extensions.append(ext)
#self.response.out.write("<li><a href='%s'>%s</a></li>" % (makeUrl(x,schema_node.id), x) )
template = JINJA_ENVIRONMENT.get_template('wrongExt.tpl')
page = template.render({ 'target': schema_node.id,
'extensions': extensions,
'sitename': "schema.org",
'staticPath': makeUrl("","")})
self.response.out.write( page )
log.debug("Serving fresh wrongExtPage.")
return True
return False
def handle404Failure(self, node, layers="core"):
self.error(404)
self.emitSchemaorgHeaders("404 Missing")
self.response.out.write('<h3>404 Not Found.</h3><p><br/>Page not found. Please <a href="/">try the homepage.</a><br/><br/></p>')
clean_node = cleanPath(node)
log.debug("404: clean_node: clean_node: %s node: %s" % (clean_node, node))
base_term = Unit.GetUnit( node.rsplit('/')[0] )
if base_term != None :
self.response.out.write('<div>Perhaps you meant: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_term.id, base_term.id ))
base_actionprop = Unit.GetUnit( node.rsplit('-')[0] )
if base_actionprop != None :
self.response.out.write('<div>Looking for an <a href="/Action">Action</a>-related property? Note that xyz-input and xyz-output have <a href="/docs/actions.html">special meaning</a>. See also: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_actionprop.id, base_actionprop.id ))
return True
# if (node == "version/2.0/" or node == "version/latest/" or "version/" in node) ...
def handleFullReleasePage(self, node, layerlist='core'):
"""Deal with a request for a full release summary page. Lists all terms and their descriptions inline in one long page.
version/latest/ is from current schemas, others will need to be loaded and emitted from stored HTML snapshots (for now)."""
# http://jinja.pocoo.org/docs/dev/templates/
global releaselog
clean_node = cleanPath(node)
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
requested_version = clean_node.rsplit('/')[1]
requested_format = clean_node.rsplit('/')[-1]
if len( clean_node.rsplit('/') ) == 2:
requested_format=""
log.info("Full release page for: node: '%s' cleannode: '%s' requested_version: '%s' requested_format: '%s' l: %s" % (node, clean_node, requested_version, requested_format, len(clean_node.rsplit('/')) ) )
# Full release page for: node: 'version/' cleannode: 'version/' requested_version: '' requested_format: '' l: 2
# /version/
if (clean_node=="version/" or clean_node=="version") and requested_version=="" and requested_format=="":
log.info("Table of contents should be sent instead, then succeed.")
if DataCache.get('tocVersionPage'):
self.response.out.write( DataCache.get('tocVersionPage'))
return True
else:
template = JINJA_ENVIRONMENT.get_template('tocVersionPage.tpl')
page = template.render({ "releases": releaselog.keys(),
"menu_sel": "Schemas",
"sitename": getSiteName(),
'staticPath': makeUrl("","")})
self.response.out.write( page )
log.debug("Serving fresh tocVersionPage.")
DataCache.put("tocVersionPage",page)
return True
if requested_version in releaselog:
log.info("Version '%s' was released on %s. Serving from filesystem." % ( node, releaselog[requested_version] ))
version_rdfa = "data/releases/%s/schema.rdfa" % requested_version
version_allhtml = "data/releases/%s/schema-all.html" % requested_version
version_nt = "data/releases/%s/schema.nt" % requested_version
if requested_format=="":
self.response.out.write( open(version_allhtml, 'r').read() )
return True
# log.info("Skipping filesystem for now.")
if requested_format=="schema.rdfa":
self.response.headers['Content-Type'] = "application/octet-stream" # It is HTML but ... not really.
self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.html" % requested_version
self.response.out.write( open(version_rdfa, 'r').read() )
return True
if requested_format=="schema.nt":
self.response.headers['Content-Type'] = "application/n-triples" # It is HTML but ... not really.
self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.nt" % requested_version
self.response.out.write( open(version_nt, 'r').read() )
return True
if requested_format != "":
return False # Turtle, csv etc.
else:
log.info("Unreleased version requested. We only understand requests for latest if unreleased.")
if requested_version != "latest":
return False
log.info("giving up to 404.")
else:
log.info("generating a live view of this latest release.")
if DataCache.get('FullReleasePage'):
self.response.out.write( DataCache.get('FullReleasePage') )
log.debug("Serving recycled FullReleasePage.")
return True
else:
template = JINJA_ENVIRONMENT.get_template('fullReleasePage.tpl')
mainroot = TypeHierarchyTree()
mainroot.traverseForHTML(Unit.GetUnit("Thing"), hashorslash="#term_", layers=layerlist)
thing_tree = mainroot.toHTML()
base_href = "/version/%s/" % requested_version
az_types = GetAllTypes()
az_types.sort( key=lambda u: u.id)
az_type_meta = {}
az_props = GetAllProperties()
az_props.sort( key = lambda u: u.id)
az_prop_meta = {}
#TODO: ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
# TYPES
for t in az_types:
props4type = HTMLOutput() # properties applicable for a type
props2type = HTMLOutput() # properties that go into a type
self.emitSimplePropertiesPerType(t, out=props4type, hashorslash="#term_" )
self.emitSimplePropertiesIntoType(t, out=props2type, hashorslash="#term_" )
#self.ClassProperties(t, out=typeInfo, hashorslash="#term_" )
tcmt = Markup(GetComment(t))
az_type_meta[t]={}
az_type_meta[t]['comment'] = tcmt
az_type_meta[t]['props4type'] = props4type.toHTML()
az_type_meta[t]['props2type'] = props2type.toHTML()
# PROPERTIES
for pt in az_props:
attrInfo = HTMLOutput()
rangeList = HTMLOutput()
domainList = HTMLOutput()
# self.emitAttributeProperties(pt, out=attrInfo, hashorslash="#term_" )
# self.emitSimpleAttributeProperties(pt, out=rangedomainInfo, hashorslash="#term_" )
self.emitRangeTypesForProperty(pt, out=rangeList, hashorslash="#term_" )
self.emitDomainTypesForProperty(pt, out=domainList, hashorslash="#term_" )
cmt = Markup(GetComment(pt))
az_prop_meta[pt] = {}
az_prop_meta[pt]['comment'] = cmt
az_prop_meta[pt]['attrinfo'] = attrInfo.toHTML()
az_prop_meta[pt]['rangelist'] = rangeList.toHTML()
az_prop_meta[pt]['domainlist'] = domainList.toHTML()
page = template.render({ "base_href": base_href, 'thing_tree': thing_tree,
'liveversion': SCHEMA_VERSION,
'requested_version': requested_version,
'releasedate': releaselog[str(SCHEMA_VERSION)],
'az_props': az_props, 'az_types': az_types,
'az_prop_meta': az_prop_meta, 'az_type_meta': az_type_meta,
'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'menu_sel': "Documentation"})
self.response.out.write( page )
log.debug("Serving fresh FullReleasePage.")
DataCache.put("FullReleasePage",page)
return True
def handleExtensionContents(self,ext):
if not ext in ENABLED_EXTENSIONS:
log.info("cannot list ext %s",ext)
return ""
buff = StringIO.StringIO()
az_types = GetAllTypes(ext)
az_types.sort( key=lambda u: u.id)
az_props = GetAllProperties(ext)
az_props.sort( key = lambda u: u.id)
az_enums = GetAllEnumerationValues(ext)
az_enums.sort( key = lambda u: u.id)
buff.write("<br/><h3>Terms defined or referenced in the '%s' extension.</h3>" % ext)
buff.write(self.listTerms(az_types,"<br/><strong>Types</strong> (%s)<br/>" % len(az_types)))
buff.write(self.listTerms(az_props,"<br/><br/><strong>Properties</strong> (%s)<br/>" % len(az_props)))
buff.write(self.listTerms(az_enums,"<br/><br/><strong>Enumeration values</strong> (%s)<br/>" % len(az_enums)))
ret = buff.getvalue()
buff.close()
return ret
def listTerms(self,terms,prefix=""):
buff = StringIO.StringIO()
if(len(terms) > 0):
buff.write(prefix)
first = True
sep = ""
for term in terms:
if not first:
sep = ", "
else:
first = False
buff.write("%s%s" % (sep,self.ml(term)))
ret = buff.getvalue()
buff.close()
return ret
def setupHostinfo(self, node, test=""):
hostString = test
if test == "":
hostString = self.request.host
scheme = "http" #Defalt for tests
if not getInTestHarness(): #Get the actual scheme from the request
scheme = self.request.scheme
host_ext = re.match( r'([\w\-_]+)[\.:]?', hostString).group(1)
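# e.g. 'bib.schema.org:8080' -> host_ext 'bib'; plain 'schema.org' matches
# 'schema', which is cleared below since it is not an enabled extension.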
log.info("setupHostinfo: scheme=%s hoststring=%s host_ext?=%s" % (scheme, hostString, str(host_ext) ))
setHttpScheme(scheme)
split = hostString.rsplit(':')
myhost = split[0]
mybasehost = myhost
myport = "80"
if len(split) > 1:
myport = split[1]
if host_ext != None:
# e.g. "bib"
log.debug("HOST: Found %s in %s" % ( host_ext, hostString ))
if host_ext == "www":
# www is a special case that cannot be an extension - redirect to the base host
mybasehost = mybasehost[4:]
return self.redirectToBase(node)
elif not host_ext in ENABLED_EXTENSIONS:
host_ext = ""
else:
mybasehost = mybasehost[len(host_ext) + 1:]
setHostExt(host_ext)
setBaseHost(mybasehost)
setHostPort(myport)
dcn = host_ext
if dcn is None or dcn == "" or dcn == "core":
dcn = "core"
log.debug("sdoapp.py setting current datacache to: %s " % dcn)
DataCache.setCurrent(dcn)
debugging = False
if "localhost" in hostString or "sdo-phobos.appspot.com" in hostString or FORCEDEBUGGING:
debugging = True
setAppVar('debugging',debugging)
return True
def redirectToBase(self,node=""):
uri = makeUrl("",node)
self.response = webapp2.redirect(uri, True, 301)
log.info("Redirecting [301] to: %s" % uri)
return False
def get(self, node):
"""Get a schema.org site page generated for this node/term.
Web content is written directly via self.response.
CORS enabled all URLs - we assume site entirely public.
See http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
These should give a JSON version of schema.org:
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json.txt
curl --verbose -H "Accept: application/ld+json" http://localhost:8080/
Per-term pages vary for type, property and enumeration.
Last resort is a 404 error if we do not exactly match a term's id.
See also https://webapp-improved.appspot.com/guide/request.html#guide-request
"""
if not self.setupHostinfo(node):
return
self.callCount()
self.emitHTTPHeaders(node)
if (node in silent_skip_list):
return
if ENABLE_HOSTED_EXTENSIONS:
layerlist = self.setupExtensionLayerlist(node) # e.g. ['core', 'bib']
else:
layerlist = ["core"]
setSiteName(self.getExtendedSiteName(layerlist)) # e.g. 'bib.schema.org', 'schema.org'
log.debug("EXT: set sitename to %s " % getSiteName())
if (node in ["", "/"]):
if self.handleHomepage(node):
return
else:
log.info("Error handling homepage: %s" % node)
return
if node in ["docs/jsonldcontext.json.txt", "docs/jsonldcontext.json"]:
if self.handleJSONContext(node):
return
else:
log.info("Error handling JSON-LD context: %s" % node)
return
if (node == "docs/full.html"): # DataCache.getDataCache.get
if self.handleFullHierarchyPage(node, layerlist=layerlist):
return
else:
log.info("Error handling full.html : %s " % node)
return
if (node == "docs/schemas.html"): # DataCache.getDataCache.get
if self.handleSchemasPage(node, layerlist=layerlist):
return
else:
log.info("Error handling schemas.html : %s " % node)
return
if (node == "docs/tree.jsonld" or node == "docs/tree.json"):
if self.handleJSONSchemaTree(node, layerlist=layerlist):
return
else:
log.info("Error handling JSON-LD schema tree: %s " % node)
return
if (node == "version/2.0/" or node == "version/latest/" or "version/" in node):
if self.handleFullReleasePage(node, layerlist=layerlist):
return
else:
log.info("Error handling full release page: %s " % node)
if self.handle404Failure(node):
return
else:
log.info("Error handling 404 under /version/")
return
if(node == "_siteDebug"):
self.siteDebug()
return
# Pages based on request path matching a Unit in the term graph:
if self.handleExactTermPage(node, layers=layerlist):
return
else:
log.info("Error handling exact term page. Assuming a 404: %s" % node)
# Drop through to 404 as default exit.
if self.handle404Failure(node):
return
else:
log.info("Error handling 404.")
return
def siteDebug(self):
global STATS
template = JINJA_ENVIRONMENT.get_template('siteDebug.tpl')
page = template.render({'sitename': getSiteName(),
'staticPath': makeUrl("","")})
self.response.out.write( page )
self.response.out.write("<table style=\"width: 50%; border: solid 1px #CCCCCC; border-collapse: collapse;\"><tbody>\n")
self.writeDebugRow("Setting","Value",True)
self.writeDebugRow("httpScheme",getHttpScheme())
self.writeDebugRow("host_ext",getHostExt())
self.writeDebugRow("basehost",getBaseHost())
self.writeDebugRow("hostport",getHostPort())
self.writeDebugRow("sitename",getSiteName())
self.writeDebugRow("debugging",getAppVar('debugging'))
self.writeDebugRow("intestharness",getInTestHarness())
self.writeDebugRow("Current DataCache",DataCache.getCurrent())
self.writeDebugRow("DataCaches",len(DataCache.keys()))
for c in DataCache.keys():
self.writeDebugRow("DataCache[%s] size" % c, len(DataCache.getCache(c)))
for s in STATS.keys():
self.writeDebugRow("%s" % s, STATS[s])
self.response.out.write("</tbody><table><br/>\n")
self.response.out.write( "</div>\n<body>\n</html>" )
def writeDebugRow(self,term,value,head=False):
rt = "td"
cellStyle = "border: solid 1px #CCCCCC; border-collapse: collapse;"
if head:
rt = "th"
cellStyle += " color: #FFFFFF; background: #888888;"
self.response.out.write("<tr><%s style=\"%s\">%s</%s><%s style=\"%s\">%s</%s></tr>\n" % (rt,cellStyle,term,rt,rt,cellStyle,value,rt))
def callCount(self):
statInc("total calls")
statInc(getHttpScheme() + " calls")
if getHostExt() != "":
statInc(getHostExt() + " calls")
else:
statInc("core calls")
STATS = {}
def statInc(stat):
global STATS
val = 1
if stat in STATS:
val += STATS.get(stat)
STATS[stat] = val
def setInTestHarness(val):
global INTESTHARNESS
INTESTHARNESS = val
def getInTestHarness():
global INTESTHARNESS
return INTESTHARNESS
TestAppIndex = {}
def getAppVar(index):
global TestAppIndex
reg = None
if not getInTestHarness():
app = webapp2.get_app()
reg = app.registry
else:
log.debug("getAppVar(): Using non-threadsafe session variables for test only")
reg = TestAppIndex
return reg.get(index)
def setAppVar(index,val):
global TestAppIndex
reg = None
if not getInTestHarness():
app = webapp2.get_app()
reg = app.registry
else:
log.debug("setAppVar(): Using non-threadsafe session variables for test only")
reg = TestAppIndex
reg[index] = val
def setHttpScheme(val):
setAppVar('httpScheme',val)
def getHttpScheme():
return getAppVar('httpScheme')
def setHostExt(val):
setAppVar('host_ext',val)
def getHostExt():
return getAppVar('host_ext')
def setSiteName(val):
setAppVar('sitename',val)
def getSiteName():
return getAppVar('sitename')
def setHost(val):
setAppVar('myhost',val)
def getHost():
return getAppVar('myhost')
def setBaseHost(val):
setAppVar('mybasehost',val)
def getBaseHost():
return getAppVar('mybasehost')
def setHostPort(val):
setAppVar('myport',val)
def getHostPort():
return getAppVar('myport')
def makeUrl(ext="",path=""):
port = ""
sub = ""
p = ""
if(getHostPort() != "80"):
port = ":%s" % getHostPort()
if ext != "core" and ext != "":
sub = "%s." % ext
if path != "":
if path.startswith("/"):
p = path
else:
p = "/%s" % path
url = "%s://%s%s%s%s" % (getHttpScheme(),sub,getBaseHost(),port,p)
return url
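# Illustrative (assuming scheme 'http', base host 'schema.org', port '80'):
# makeUrl("bib", "Book") -> 'http://bib.schema.org/Book'
# makeUrl("", "docs/full.html") -> 'http://schema.org/docs/full.html'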
#log.info("STARTING UP... reading schemas.")
read_schemas(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
if ENABLE_HOSTED_EXTENSIONS:
read_extensions(ENABLED_EXTENSIONS)
schemasInitialized = True
app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
| pwz3n0/schemaorg | sdoapp.py | Python | apache-2.0 | 78,319 | ["VisIt"] | cbbcca0a91579cf303b697b950c414934147218e0abd7a75032ceb4a204fa92e |
#!/usr/bin/env python
########################################################################
# File : dirac-wms-cpu-normalization
# Author : Ricardo Graciani
########################################################################
"""
Determine Normalization for current CPU
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.registerSwitch( "U", "Update", "Update dirac.cfg with the resulting value" )
Script.registerSwitch( "R:", "Reconfig=", "Update given configuration file with the resulting value" )
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ' % Script.scriptName ] ) )
Script.parseCommandLine( ignoreErrors = True )
update = False
configFile = None
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ( "U", "Update" ):
update = True
elif unprocSw[0] in ( "R", "Reconfig" ):
configFile = unprocSw[1]
if __name__ == "__main__":
from DIRAC.WorkloadManagementSystem.Client.CPUNormalization import getCPUNormalization
result = getCPUNormalization()
if not result['OK']:
DIRAC.gLogger.error( result['Message'] )
DIRAC.exit( 1 )
norm = int( ( result['Value']['NORM'] + 0.05 ) * 10 ) / 10.
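# Round the raw normalization to one decimal place, biased upwards by 0.05,
# e.g. a raw NORM of 11.97 gives int( ( 11.97 + 0.05 ) * 10 ) / 10. == 12.0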
DIRAC.gLogger.notice( 'Normalization for current CPU is %.1f %s' % ( norm, result['Value']['UNIT'] ) )
if update:
DIRAC.gConfig.setOptionValue( '/LocalSite/CPUNormalizationFactor', norm )
DIRAC.gConfig.dumpLocalCFGToFile( DIRAC.gConfig.diracConfigFilePath )
if configFile:
from DIRAC.Core.Utilities.CFG import CFG
cfg = CFG()
try:
# Attempt to open the given file
cfg.loadFromFile( configFile )
except:
# Fall back to an empty CFG if the file cannot be read
pass
# Create the section if it does not exist
if not cfg.existsKey( 'LocalSite' ):
cfg.createNewSection( 'LocalSite' )
cfg.setOption( '/LocalSite/CPUNormalizationFactor', norm )
cfg.writeToFile( configFile )
DIRAC.exit()
| avedaee/DIRAC | WorkloadManagementSystem/scripts/dirac-wms-cpu-normalization.py | Python | gpl-3.0 | 1,989 | ["DIRAC"] | 4504a8bac34739a9f1c0cc2701d224c3ee4f055c69486f25367c7cefa0eb5794 |
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from struct import unpack
from androguard.decompiler.dad.util import get_type
from androguard.decompiler.dad.opcode_ins import Op
from androguard.decompiler.dad.instruction import (
Constant, ThisParam, BinaryExpression, BaseClass, InstanceExpression,
NewInstance, Variable, BinaryCompExpression)
logger = logging.getLogger('dad.writer')
class Writer(object):
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.ind = 4
self.buffer = []
self.buffer2 = []
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.skip = False
self.need_break = True
def __str__(self):
return ''.join(self.buffer)
def str_ext(self):
return self.buffer2
def inc_ind(self, i=1):
self.ind += (4 * i)
def dec_ind(self, i=1):
self.ind -= (4 * i)
def space(self):
if self.skip:
self.skip = False
return ''
return ' ' * self.ind
def write_ind(self):
if self.skip:
self.skip = False
else:
self.write(self.space())
self.write_ext(('INDENTATION', self.space()))
def write(self, s, data=None):
self.buffer.append(s)
# old method, still used
# TODO: clean?
if data:
self.buffer2.append((data, s))
# at minimum, we have t as a tuple of the form:
# (TYPE_STR, MY_STR) such as ('THIS', 'this')
# where the 2nd field is the actual generated source code
# We can have more fields, for example:
# ('METHOD', 'sendToServer', 'this -> sendToServer', <androguard.decompiler.dad.instruction.ThisParam>)
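# Illustrative (hypothetical values): a method-name token would be passed as
# write_ext(('NAME_METHOD_PROTOTYPE', 'onCreate', method_object))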
def write_ext(self, t):
if not isinstance(t, tuple):
raise "Error in write_ext: %s not a tuple" % str(t)
self.buffer2.append(t)
def end_ins(self):
self.write(';\n')
self.write_ext(('END_INSTRUCTION', ';\n'))
def write_ind_visit_end(self, lhs, s, rhs=None, data=None):
self.write_ind()
lhs.visit(self)
self.write(s)
self.write_ext(('TODO_4343', s, data))
if rhs is not None:
rhs.visit(self)
self.end_ins()
#TODO: prefer this method over write_ind_visit_end, which should be
# deprecated eventually
def write_ind_visit_end_ext(self,
lhs,
before,
s,
after,
rhs=None,
data=None,
subsection='UNKNOWN_SUBSECTION'):
self.write_ind()
lhs.visit(self)
self.write(before + s + after)
self.write_ext(('BEFORE', before))
self.write_ext((subsection, s, data))
self.write_ext(('AFTER', after))
if rhs is not None:
rhs.visit(self)
self.end_ins()
def write_inplace_if_possible(self, lhs, rhs):
if isinstance(rhs, BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
if rhs.op in '+-' and isinstance(exp_rhs, Constant) and\
exp_rhs.get_int_value() == 1:
return self.write_ind_visit_end(lhs, rhs.op * 2, data=rhs)
return self.write_ind_visit_end(
lhs,
' %s= ' % rhs.op,
exp_rhs,
data=rhs)
return self.write_ind_visit_end(lhs, ' = ', rhs, data=rhs)
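# e.g. 'i = i + 1' is emitted as 'i++', 'x = x + n' as 'x += n'; anything
# else falls back to a plain 'lhs = rhs' assignment.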
def visit_ins(self, ins):
ins.visit(self)
def write_method(self):
acc = []
access = self.method.access
self.constructor = False
for modifier in access:
if modifier == 'constructor':
self.constructor = True
continue
acc.append(modifier)
self.write('\n%s' % self.space())
self.write_ext(('NEWLINE', '\n%s' % (self.space())))
if acc:
self.write('%s ' % ' '.join(acc))
self.write_ext(('PROTOTYPE_ACCESS', '%s ' % ' '.join(acc)))
if self.constructor:
name = get_type(self.method.cls_name).split('.')[-1]
self.write(name)
self.write_ext(('NAME_METHOD_PROTOTYPE', '%s' % name, self.method))
else:
self.write('%s %s' % (get_type(self.method.type), self.method.name))
self.write_ext(
('PROTOTYPE_TYPE', '%s' % get_type(self.method.type)))
self.write_ext(('SPACE', ' '))
self.write_ext(
('NAME_METHOD_PROTOTYPE', '%s' % self.method.name, self.method))
params = self.method.lparams
if 'static' not in access:
params = params[1:]
proto = ''
self.write_ext(('PARENTHESIS_START', '('))
if self.method.params_type:
proto = ', '.join(['%s p%s' % (get_type(p_type), param) for p_type,
param in zip(self.method.params_type, params)])
first = True
for p_type, param in zip(self.method.params_type, params):
if not first:
self.write_ext(('COMMA', ', '))
else:
first = False
self.write_ext(('ARG_TYPE', '%s' % get_type(p_type)))
self.write_ext(('SPACE', ' '))
self.write_ext(('NAME_ARG', 'p%s' % param, p_type, self.method))
self.write_ext(('PARENTHESIS_END', ')'))
self.write('(%s)' % proto)
if self.graph is None:
self.write(';\n')
self.write_ext(('METHOD_END_NO_CONTENT', ';\n'))
return
self.write('\n%s{\n' % self.space())
self.write_ext(('METHOD_START', '\n%s{\n' % self.space()))
self.inc_ind()
self.visit_node(self.graph.entry)
self.dec_ind()
self.write('%s}\n' % self.space())
self.write_ext(('METHOD_END', '%s}\n' % self.space()))
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
var.visit_decl(self)
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
follow = loop.follow['loop']
if follow is None and not loop.looptype.is_endless:
logger.error('Loop has no follow!')
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
self.write('%swhile (' % self.space())
self.write_ext(('WHILE', '%swhile (' % self.space()))
loop.visit_cond(self)
self.write(') {\n')
self.write_ext(('WHILE_START', ') {\n'))
elif loop.looptype.is_posttest:
self.write('%sdo {\n' % self.space())
self.write_ext(('DO', '%sdo {\n' % self.space()))
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
self.write('%swhile(true) {\n' % self.space())
self.write_ext(('WHILE_TRUE', '%swhile(true) {\n' % self.space()))
self.inc_ind()
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
self.dec_ind()
if loop.looptype.is_pretest:
self.write('%s}\n' % self.space())
self.write_ext(('END_PRETEST', '%s}\n' % self.space()))
elif loop.looptype.is_posttest:
self.latch_node.pop()
self.write('%s} while(' % self.space())
self.write_ext(('WHILE_POSTTEST', '%s} while(' % self.space()))
loop.latch.visit_cond(self)
self.write(');\n')
self.write_ext(('POSTTEST_END', ');\n'))
else:
self.inc_ind()
self.visit_node(loop.latch)
self.dec_ind()
self.write('%s}\n' % self.space())
self.write_ext(('END_LOOP', '%s}\n' % self.space()))
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
follow = cond.follow['if']
if cond.false is cond.true:
self.write('%s// Both branches of the condition point to the same'
' code.\n' % self.space())
self.write_ext(
('COMMENT_ERROR_MSG',
'%s// Both branches of the condition point to the same'
' code.\n' % self.space()))
self.write('%s// if (' % self.space())
self.write_ext(('COMMENT_IF', '%s// if (' % self.space()))
cond.visit_cond(self)
self.write(') {\n')
self.write_ext(('COMMENT_COND_END', ') {\n'))
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s// }\n' % self.space(), data="COMMENT_IF_COND_END")
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
self.write('%sif (' % self.space(), data="IF_2")
cond.visit_cond(self)
self.write(') {\n', data="IF_TRUE_2")
self.inc_ind()
self.write('%sbreak;\n' % self.space(), data="BREAK")
self.dec_ind()
self.write('%s}\n' % self.space(), data="IF_END_2")
self.visit_node(cond.false)
elif follow is not None:
if cond.true in (follow, self.next_case) or\
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
if cond.true: # in self.visited_nodes:
self.write('%sif (' % self.space(), data="IF")
cond.visit_cond(self)
self.write(') {\n', data="IF_TRUE")
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
is_else = follow not in (cond.true, cond.false)
if is_else and cond.false not in self.visited_nodes:
self.write('%s} else {\n' % self.space(), data="IF_FALSE")
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.if_follow.pop()
self.write('%s}\n' % self.space(), data="IF_END")
self.visit_node(follow)
else:
self.write('%sif (' % self.space(), data="IF_3")
cond.visit_cond(self)
self.write(') {\n', data="IF_COND_3")
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s} else {\n' % self.space(), data="ELSE_3")
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.write('%s}\n' % self.space(), data="IF_END_3")
def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
if nnot:
cond1.neg()
self.write('(', data="TODO24")
cond1.visit_cond(self)
self.write(') %s (' % ['||', '&&'][aand], data="TODO25")
cond2.visit_cond(self)
self.write(')', data="TODO26")
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = lins[-1]
self.write('%sswitch (' % self.space(), data="SWITCH")
self.visit_ins(switch_ins)
self.write(') {\n', data="SWITCH_END")
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
self.inc_ind()
for case in switch.node_to_case[node]:
self.write(
'%scase %d:\n' % (self.space(), case),
data="CASE_XX")
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT")
default = None
self.inc_ind()
self.visit_node(node)
if self.need_break:
self.write('%sbreak;\n' % self.space(), data="CASE_BREAK")
else:
self.need_break = True
self.dec_ind(2)
if default not in (None, follow):
self.inc_ind()
self.write('%sdefault:\n' % self.space(), data="CASE_DEFAULT_2")
self.inc_ind()
self.visit_node(default)
self.dec_ind(2)
self.write('%s}\n' % self.space(), data="CASE_END")
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.write('%sbreak;\n' % self.space(), data="BREAK_2")
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
self.write('%stry {\n' % self.space(), data="TRY_START")
self.inc_ind()
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
self.dec_ind()
self.write('%s}' % self.space(), data="TRY_START_END")
for catch in try_node.catch:
self.visit_node(catch)
self.write('\n', data="NEWLINE_END_TRY")
self.visit_node(self.try_follow.pop())
def visit_catch_node(self, catch_node):
self.write(' catch (', data="CATCH")
catch_node.visit_exception(self)
self.write(') {\n', data="CATCH_START")
self.inc_ind()
self.visit_node(catch_node.catch_start)
self.dec_ind()
self.write('%s}' % self.space(), data="CATCH_END")
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
def visit_decl(self, var):
if not var.declared:
var_type = var.get_type() or 'unknownType'
self.write('%s%s v%s' % (
self.space(), get_type(var_type), var.name),
data="DECLARATION")
self.end_ins()
def visit_constant(self, cst):
if isinstance(cst, basestring):
return self.write(string(cst), data="CONSTANT_STRING")
self.write('%r' % cst,
data="CONSTANT_INTEGER") # INTEGER or also others?
def visit_base_class(self, cls, data=None):
self.write(cls)
self.write_ext(('NAME_BASE_CLASS', cls, data))
def visit_variable(self, var):
var_type = var.get_type() or 'unknownType'
if not var.declared:
self.write('%s ' % get_type(var_type))
self.write_ext(
('VARIABLE_TYPE', '%s' % get_type(var_type), var_type))
self.write_ext(('SPACE', ' '))
var.declared = True
self.write('v%s' % var.name)
self.write_ext(('NAME_VARIABLE', 'v%s' % var.name, var, var_type))
def visit_param(self, param, data=None):
self.write('p%s' % param)
self.write_ext(('NAME_PARAM', 'p%s' % param, data))
def visit_this(self):
self.write('this', data="THIS")
def visit_super(self):
self.write('super')
def visit_assign(self, lhs, rhs):
if lhs is not None:
return self.write_inplace_if_possible(lhs, rhs)
self.write_ind()
rhs.visit(self)
if not self.skip:
self.end_ins()
def visit_move_result(self, lhs, rhs):
self.write_ind_visit_end(lhs, ' = ', rhs)
def visit_move(self, lhs, rhs):
if lhs is not rhs:
self.write_inplace_if_possible(lhs, rhs)
def visit_astore(self, array, index, rhs, data=None):
self.write_ind()
array.visit(self)
self.write('[', data=("ASTORE_START", data))
index.visit(self)
self.write('] = ', data="ASTORE_END")
rhs.visit(self)
self.end_ins()
def visit_put_static(self, cls, name, rhs):
self.write_ind()
self.write('%s.%s = ' % (cls, name), data="STATIC_PUT")
rhs.visit(self)
self.end_ins()
def visit_put_instance(self, lhs, name, rhs, data=None):
self.write_ind_visit_end_ext(
lhs,
'.',
'%s' % name,
' = ',
rhs,
data=data,
subsection='NAME_CLASS_ASSIGNMENT')
def visit_new(self, atype, data=None):
self.write('new %s' % get_type(atype))
self.write_ext(('NEW', 'new '))
self.write_ext(
('NAME_CLASS_NEW', '%s' % get_type(atype), data.type, data))
def visit_invoke(self, name, base, ptype, rtype, args, invokeInstr):
if isinstance(base, ThisParam):
if name == '<init>':
if self.constructor and len(args) == 0:
self.skip = True
return
if invokeInstr and base.type[1:-1].replace('/', '.') != invokeInstr.cls:
base.super = True
base.visit(self)
if name != '<init>':
if isinstance(base, BaseClass):
call_name = "%s -> %s" % (base.cls, name)
elif isinstance(base, InstanceExpression):
call_name = "%s -> %s" % (base.ftype, name)
elif hasattr(base, "base") and hasattr(base, "var_map"):
base2base = base
while True:
base2base = base2base.var_map[base2base.base]
if isinstance(base2base, NewInstance):
call_name = "%s -> %s" % (base2base.type, name)
break
elif (hasattr(base2base, "base") and
hasattr(base2base, "var_map")):
continue
else:
call_name = "UNKNOWN_TODO"
break
elif isinstance(base, ThisParam):
call_name = "this -> %s" % name
elif isinstance(base, Variable):
call_name = "%s -> %s" % (base.type, name)
else:
call_name = "UNKNOWN_TODO2"
self.write('.%s' % name)
self.write_ext(('INVOKE', '.'))
self.write_ext(
('NAME_METHOD_INVOKE', '%s' % name, call_name, ptype, rtype,
base, invokeInstr))
self.write('(', data="PARAM_START")
comma = False
for arg in args:
if comma:
self.write(', ', data="PARAM_SEPARATOR")
comma = True
arg.visit(self)
self.write(')', data="PARAM_END")
def visit_return_void(self):
self.write_ind()
self.write('return', data="RETURN")
self.end_ins()
def visit_return(self, arg):
self.write_ind()
self.write('return ', data="RETURN")
arg.visit(self)
self.end_ins()
def visit_nop(self):
pass
def visit_switch(self, arg):
arg.visit(self)
def visit_check_cast(self, arg, atype):
self.write('((%s) ' % atype, data="CHECKCAST")
arg.visit(self)
self.write(')')
def visit_aload(self, array, index):
array.visit(self)
self.write('[', data="ALOAD_START")
index.visit(self)
self.write(']', data="ALOAD_END")
def visit_alength(self, array):
array.visit(self)
self.write('.length', data="ARRAY_LENGTH")
def visit_new_array(self, atype, size):
self.write('new %s[' % get_type(atype[1:]), data="NEW_ARRAY")
size.visit(self)
self.write(']', data="NEW_ARRAY_END")
def visit_filled_new_array(self, atype, size, args):
self.write('new %s {' % get_type(atype), data="NEW_ARRAY_FILLED")
for idx, arg in enumerate(args):
arg.visit(self)
if idx + 1 < len(args):
self.write(', ', data="COMMA")
self.write('})', data="NEW_ARRAY_FILLED_END")
def visit_fill_array(self, array, value):
self.write_ind()
array.visit(self)
self.write(' = {', data="ARRAY_FILLED")
data = value.get_data()
tab = []
elem_size = value.element_width
if elem_size == 4:
for i in range(0, value.size * 4, 4):
tab.append('%s' % unpack('i', data[i:i + 4])[0])
else: # FIXME: other cases
for i in range(value.size):
tab.append('%s' % unpack('b', data[i])[0])
self.write(', '.join(tab), data="COMMA")
self.write('}', data="ARRAY_FILLED_END")
self.end_ins()
def visit_move_exception(self, var, data=None):
var.declared = True
var_type = var.get_type() or 'unknownType'
self.write('%s v%s' % (get_type(var_type), var.name))
self.write_ext(('EXCEPTION_TYPE', '%s' % get_type(var_type), data.type))
self.write_ext(('SPACE', ' '))
self.write_ext(
('NAME_CLASS_EXCEPTION', 'v%s' % var.value(), data.type, data))
def visit_monitor_enter(self, ref):
self.write_ind()
self.write('synchronized(', data="SYNCHRONIZED")
ref.visit(self)
self.write(') {\n', data="SYNCHRONIZED_END")
self.inc_ind()
def visit_monitor_exit(self, ref):
self.dec_ind()
self.write_ind()
self.write('}\n', data="MONITOR_EXIT")
def visit_throw(self, ref):
self.write_ind()
self.write('throw ', data="THROW")
ref.visit(self)
self.end_ins()
def visit_binary_expression(self, op, arg1, arg2):
self.write('(', data="BINARY_EXPRESSION_START")
arg1.visit(self)
self.write(' %s ' % op, data="TODO58")
arg2.visit(self)
self.write(')', data="BINARY_EXPRESSION_END")
def visit_unary_expression(self, op, arg):
self.write('(%s ' % op, data="UNARY_EXPRESSION_START")
arg.visit(self)
self.write(')', data="UNARY_EXPRESSION_END")
def visit_cast(self, op, arg):
self.write('(%s ' % op, data="CAST_START")
arg.visit(self)
self.write(')', data="CAST_END")
def visit_cond_expression(self, op, arg1, arg2):
arg1.visit(self)
self.write(' %s ' % op, data="COND_EXPRESSION")
arg2.visit(self)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
return arg.visit(self)
atype = arg.get_type()
if atype == 'Z':
if op == Op.EQUAL:
self.write('!', data="NEGATE")
arg.visit(self)
else:
arg.visit(self)
if atype in 'VBSCIJFD':
self.write(' %s 0' % op, data="TODO64")
else:
self.write(' %s null' % op, data="TODO65")
def visit_get_instance(self, arg, name, data=None):
arg.visit(self)
self.write('.%s' % name)
self.write_ext(('GET_INSTANCE', '.'))
self.write_ext(('NAME_CLASS_INSTANCE', '%s' % name, data))
def visit_get_static(self, cls, name):
self.write('%s.%s' % (cls, name), data="GET_STATIC")
def string(s):
ret = ['"']
for c in s.decode('utf8'):
if c >= ' ' and c < '\x7f':
if c == "'" or c == '"' or c == '\\':
ret.append('\\')
ret.append(c)
continue
elif c <= '\x7f':
if c in ('\r', '\n', '\t'):
ret.append(c.encode('unicode-escape'))
continue
i = ord(c)
ret.append('\\u')
ret.append('%x' % (i >> 12))
ret.append('%x' % ((i >> 8) & 0x0f))
ret.append('%x' % ((i >> 4) & 0x0f))
ret.append('%x' % (i & 0x0f))
ret.append('"')
return ''.join(ret).encode('utf8')
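# Example of the escaping performed by string() above (a sketch, not part
# of the original module):
#     string('say "hi"\tnow')  ->  '"say \\"hi\\"\\tnow"'
# Printable ASCII passes through, quotes and backslashes gain a backslash,
# \r/\n/\t keep their escape sequences, and anything else becomes \uXXXX.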
|
xtiankisutsa/MARA_Framework
|
tools/androguard/androguard/decompiler/dad/writer.py
|
Python
|
lgpl-3.0
| 25,752
|
[
"VisIt"
] |
85d6a8a2341b919b5c390427ac065f479491f82cb27f5718c59ff141b9ee399d
|
from __future__ import division
"""
Module: spectrum.py
Contains SourceSpectrum and SpectralElement class definitions and
their subclasses.
Also contains the Vega object, which is an instance of a FileSourceSpectrum
that can be imported from this file and used for Vega-related calculations.
Dependencies:
=============
pyfits, numpy
"""
import string
import re
import os
import math
import pyfits
import numpy as N
from numpy import ma as MA
import units
import observationmode
import locations
import planck
from pysynphot import __version__, __svn_version__
# Renormalization constants from synphot:
PI = 3.14159265 # Mysterious math constant
RSUN = 6.9599E10 # Radius of sun
PC = 3.085678E18 # Parsec
RADIAN = RSUN / PC /1000.
RENORM = PI * RADIAN * RADIAN # Normalize to 1 solar radius @ 1 kpc
#Single-precision epsilon value, taken from the synphot FAQ.
#This is the minimum separation in wavelength value necessary for
#synphot to read the entries as distinct single-precision numbers.
syn_epsilon=0.00032
def _computeDefaultWaveset():
minwave = 500.
maxwave = 26000.
lenwave = 10000
w1 = math.log10(minwave)
w2 = math.log10(maxwave)
result = N.zeros(shape=[lenwave,],dtype=N.float64)
for i in range(lenwave):
frac = float(i) / lenwave
result[i] = 10 ** (w1 * (1.0 - frac) + w2 * frac)
return result
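# Vectorized sketch of the loop above (hypothetical equivalent, kept as a
# comment so as not to change the module):
#     N.logspace(math.log10(minwave), math.log10(maxwave),
#                lenwave, endpoint=False)
# i.e. 10000 log-spaced samples from 500A up to (but not including) 26000A.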
# Default waveset is computed at load time, once and for all.
# Note that this is not thread safe.
global default_waveset
default_waveset = _computeDefaultWaveset()
def MergeWaveSets(waveset1, waveset2):
"""Return the union of the two wavesets, unless one or
both of them is None."""
if waveset1 is None and waveset2 is not None:
MergedWaveSet = waveset2
elif waveset2 is None and waveset1 is not None:
MergedWaveSet = waveset1
elif waveset1 is None and waveset2 is None:
MergedWaveSet = None
else:
MergedWaveSet = N.sort(N.union1d(waveset1, waveset2))
return MergedWaveSet
def trimSpectrum(sp, minw, maxw):
''' Creates a new spectrum with trimmed upper and lower ranges.
'''
wave = sp.GetWaveSet()
flux = sp(wave)
new_wave = N.compress(wave >= minw, wave)
new_flux = N.compress(wave >= minw, flux)
idx = new_wave <= maxw
new_wave = N.compress(idx, new_wave)
new_flux = N.compress(idx, new_flux)
result = TabularSourceSpectrum()
result._wavetable = new_wave
result._fluxtable = new_flux
result.waveunits = units.Units(sp.waveunits.name)
result.fluxunits = units.Units(sp.fluxunits.name)
return result
class Integrator(object):
''' Integrator engine.
'''
def trapezoidIntegration(self,x,y):
npoints = x.size
if npoints > 0:
indices = N.arange(npoints)[:-1]
deltas = x[indices+1] - x[indices]
integrand = 0.5*(y[indices+1] + y[indices])*deltas
sum = integrand.sum()
if x[-1]<x[0]:
sum*= -1.0
return sum
else:
return 0.0
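# Worked example (sketch): for x = [0, 2, 4] and y = [1, 1, 1], the two
# trapezoids each contribute 0.5*(1+1)*2 = 2, so the result is 4.0; a
# descending x array yields a negative sum that the final test flips back.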
def _columnsFromASCII(self, filename):
""" Following synphot/TABLES, ASCII files may contain blank lines,
comment lines (beginning with '#'), or terminal comments. This routine
may be called by both Spectrum and SpectralElement objects to extract
the first two columns from a file."""
wlist=[]
flist=[]
lcount=0
fs = open(filename,mode='r')
lines=fs.readlines()
fs.close()
for line in lines:
lcount+=1
cline=line.strip()
if ((len(cline) > 0) and (not cline.startswith('#'))):
try:
cols=cline.split()
if len(cols) >= 2:
wlist.append(float(cols[0]))
flist.append(float(cols[1]))
except Exception, e:
raise ValueError("Error reading %s: %s"%(filename,str(e)))
return wlist, flist
def validate_wavetable(self):
"Enforce monotonic, ascending wavelengths with no zero values"
#First check for invalid values
wave=self._wavetable
if N.any(wave <= 0):
raise ValueError('Zero wavelength occurs in wavelength array: invalid value')
#Now check for monotonicity & enforce ascending
sorted=N.sort(wave)
if not N.alltrue(sorted == wave):
if N.alltrue(sorted[::-1] == wave):
#monotonic descending is allowed
pass
else:
raise ValueError('Wavelength array is not monotonic: invalid')
#Check for duplicate values
dw=sorted[1:]-sorted[:-1]
if N.any(dw==0):
raise ValueError("Wavelength array contains duplicate entries: invalid")
def validate_fluxtable(self):
"Enforce non-negative fluxes"
if ((not self.fluxunits.isMag) #neg. magnitudes are legal
and (self._fluxtable.min() < 0)):
idx=N.where(self._fluxtable < 0)
self._fluxtable[idx]=0.0
print "Warning, %d of %d bins contained negative fluxes; they have been set to zero."%(len(idx[0]),len(self._fluxtable))
class SourceSpectrum(Integrator):
'''Base class for the Source Spectrum object.
'''
def __add__(self, other):
'''Source Spectra can be added. Delegate the work to the
CompositeSourceSpectrum class.
'''
if not isinstance(other, SourceSpectrum):
raise TypeError("Can only add two SourceSpectrum objects")
return CompositeSourceSpectrum(self, other, 'add')
def __sub__(self, other):
""" Source Spectra can be subtracted, which is just another way
of adding."""
return self.__add__(-1.0*other)
def __mul__(self, other):
'''Source Spectra can be multiplied, by constants or by
SpectralElement objects.
'''
#Multiplying by numeric constants is allowed
if isinstance(other, (int, float) ):
other = UniformTransmission(other)
#so is by SpectralElements. Otherwise, raise an exception.
if not isinstance(other, SpectralElement):
raise TypeError("SourceSpectrum objects can only be multiplied by SpectralElement objects or constants; %s type detected"%type(other))
## Delegate the work of multiplying to CompositeSourceSpectrum
return CompositeSourceSpectrum(self, other, 'multiply')
def __rmul__(self, other):
return self.__mul__(other)
def addmag(self,magval):
"""Adding a magnitude is like multiplying a flux. Only works for
numbers -- not arrays, spectrum objects, etc"""
if N.isscalar(magval):
factor = 10**(-0.4*magval)
return self*factor
else:
raise TypeError(".addmag() only takes a constant scalar argument")
def getArrays(self):
'''Returns wavelength and flux arrays as a tuple, performing
units conversion.
'''
wave = self.GetWaveSet()
flux = self(wave)
flux = units.Photlam().Convert(wave, flux, self.fluxunits.name)
wave = units.Angstrom().Convert(wave, self.waveunits.name)
return (wave, flux)
#Define properties for consistent UI
def _getWaveProp(self):
wave,flux=self.getArrays()
return wave
def _getFluxProp(self):
wave,flux=self.getArrays()
return flux
wave=property(_getWaveProp,doc="Wavelength property")
flux=property(_getFluxProp,doc="Flux property")
def validate_units(self):
"Ensure that waveunits are WaveUnits and fluxunits are FluxUnits"
if (not isinstance(self.waveunits,units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit"%self.waveunits)
if (not isinstance(self.fluxunits,units.FluxUnits)):
raise TypeError("%s is not a valid FluxUnit"%self.fluxunits)
def writefits(self, filename, clobber=True, trimzero=True,
binned=False,precision=None,hkeys=None):
"""Write the spectrum to a FITS file.
filename: name of file to write to
clobber=True: Will clobber existing file by default
trimzero=True: Will trim zero-flux elements from both ends
by default
binned=False: Will write in native waveset by default
precision: Will write in native precision by default; can be
set to "single" or "double"
hkeys: Optional dictionary of {keyword:(value,comment)}
to be added to primary FITS header
"""
if precision is None:
precision=self.flux.dtype.char
_precision=precision.lower()[0]
pcodes={'d':'D','s':'E','f':'E'}
if clobber:
try:
os.remove(filename)
except OSError:
pass
if binned:
wave=self.binwave
flux=self.binflux
else:
wave=self.wave
flux=self.flux
#Add a check for single/double precision clash, so
#that if written out in single precision, the wavelength table
#will still be sorted with no duplicates
#The value of epsilon is taken from the Synphot FAQ.
if wave.dtype == N.float64 and _precision == 's':
idx=N.where(abs(wave[1:]-wave[:-1]) > syn_epsilon)
else:
idx=N.where(wave) #=> idx=[:]
wave=wave[idx]
flux=flux[idx]
first,last=0,len(flux)
if trimzero:
#Keep one zero at each end
nz = flux.nonzero()[0]
try:
first=max(nz[0]-1,first)
last=min(nz[-1]+2,last)
except IndexError:
pass
#Construct the columns and HDUlist
cw = pyfits.Column(name='WAVELENGTH',
array=wave[first:last],
unit=self.waveunits.name,
format=pcodes[_precision])
cf = pyfits.Column(name='FLUX',
array=flux[first:last],
unit=self.fluxunits.name,
format=pcodes[_precision])
#Make the primary header
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
#User-provided keys are written to the primary header
#so are filename and origin
bkeys = dict(filename=(os.path.basename(filename),'name of file'),
origin=('pysynphot',
'Version (%s, %s)'%(__version__,__svn_version__)))
#User-values if present may override default values
if hkeys is not None:
bkeys.update(hkeys)
#Now update the primary header
for key,val in bkeys.items():
hdu.header.update(key, *val)
#Make the extension HDU
cols = pyfits.ColDefs([cw, cf])
hdu = pyfits.new_table(cols)
#There are some standard keywords that should be added
#to the extension header.
bkeys=dict(expr =(str(self),'pysyn expression'),
tdisp1 =('G15.7',),
tdisp2 =('G15.7',)
)
try:
bkeys['grftable']=(self.bandpass.obsmode.gtname,)
bkeys['cmptable']=(self.bandpass.obsmode.ctname,)
except AttributeError:
pass #Not all spectra have these
for key,val in bkeys.items():
hdu.header.update(key,*val)
#Add the header to the list, and write the file
hdulist.append(hdu)
hdulist.writeto(filename)
def integrate(self,fluxunits='photlam'):
#Extract the flux in the desired units
u=self.fluxunits
self.convert(fluxunits)
wave,flux=self.getArrays()
self.convert(u)
#then do the integration
return self.trapezoidIntegration(wave,flux)
def sample(self,wave):
"""Return a flux array, in self.fluxunits, on the provided
wavetable"""
#First use the __call__ to get it in photlam
flux=self(wave)
#Then convert to the desired units
ans=units.Photlam().Convert(wave,flux,self.fluxunits.name)
return ans
def convert(self, targetunits):
'''Convert to other units. This method actually just changes the
wavelength and flux units objects, it does not recompute the
internally kept wave and flux data; these are kept always in internal
units. Method getArrays does the actual computation.
'''
nunits = units.Units(targetunits)
if nunits.isFlux:
self.fluxunits = nunits
else:
self.waveunits = nunits
def redshift(self, z):
''' Returns a new redshifted spectrum.
'''
#By default, apply only the doppler shift.
waveunits=self.waveunits
self.convert('angstrom')
newwave=self.wave*(1.0+z)
copy = ArraySourceSpectrum(wave=newwave,
flux=self.flux,
waveunits=self.waveunits,
fluxunits=self.fluxunits,
name="%s at z=%g"%(self.name,z)
)
self.convert(waveunits)
return copy
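# Usage sketch (hypothetical): redshifting only stretches the waveset;
#     >>> shifted = sp.redshift(1.0)
# doubles every wavelength (1+z == 2) and leaves the flux values untouched.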
def setMagnitude(self, band, value):
'''Makes the magnitude of the source in the band equal to value.
band is a SpectralElement.
This method is marked for deletion once the .renorm method is
well tested.
'''
objectFlux = band.calcTotalFlux(self)
vegaFlux = band.calcVegaFlux()
magDiff = -2.5*math.log10(objectFlux/vegaFlux)
factor = 10**(-0.4*(value - magDiff))
#The object returned is a CompositeSourceSpectrum
return self * factor
def renorm(self, RNval, RNUnits, band, force=False):
"""Renormalize the spectrum to the specified value (in the specified
flux units) in the specified band.
Calls a function in another module to alleviate circular import
issues."""
from renorm import StdRenorm
return StdRenorm(self,band,RNval,RNUnits,force=force)
def effstim(self,fluxunits='photlam'):
print "?? %s"%fluxunits
raise NotImplementedError("Ticket #140: calcphot.effstim functionality")
class CompositeSourceSpectrum(SourceSpectrum):
'''Composite Source Spectrum object, handles addition, multiplication
and keeping track of the wavelength set.
'''
def __init__(self, source1, source2, operation):
self.component1 = source1
self.component2 = source2
self.operation = operation
self.name=str(self)
#Propagate warnings
self.warnings={}
self.warnings.update(source1.warnings)
self.warnings.update(source2.warnings)
# for now we keep these attributes here, in spite of the internal
# units model. There is code that still breaks down if these attributes
# are not here.
try:
self.waveunits = source1.waveunits
self.fluxunits = source1.fluxunits
except AttributeError:
self.waveunits = source2.waveunits
self.fluxunits = source2.fluxunits
self.isAnalytic = source1.isAnalytic and source2.isAnalytic
def __str__(self):
opdict = {'add':'+','multiply':'*'}
return "%s %s %s"%(str(self.component1),opdict[self.operation],str(self.component2))
def __call__(self, wavelength):
'''Add or multiply components, delegating the function calculation
to the individual objects.
'''
if self.operation == 'add':
return self.component1(wavelength) + self.component2(wavelength)
if self.operation == 'multiply':
return self.component1(wavelength) * self.component2(wavelength)
def complist(self):
ans=[]
for comp in (self.component1, self.component2):
try:
ans.extend(comp.complist())
except AttributeError:
ans.append(comp)
return ans
def GetWaveSet(self):
'''Obtain the wavelength set for the composite source by forming
the union of wavelengths from each component.
'''
waveset1 = self.component1.GetWaveSet()
waveset2 = self.component2.GetWaveSet()
return MergeWaveSets(waveset1, waveset2)
def tabulate(self):
"""Evaluate the spectrum in order to return a tabular source
spectrum"""
sp=ArraySourceSpectrum(wave=self.wave,
flux=self.flux,
waveunits=self.waveunits,
fluxunits=self.fluxunits,
name='%s (tabulated)'%self.name)
return sp
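# Usage sketch (hypothetical): spectrum arithmetic builds composites lazily.
#     >>> total = FlatSpectrum(1.0) + FlatSpectrum(2.0)
#     >>> total(total.GetWaveSet())   # evaluates 1.0 + 2.0 at each point
# Nothing is tabulated until the composite is called (or .tabulate()-d).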
class TabularSourceSpectrum(SourceSpectrum):
'''Class for a source spectrum that is read in from a table.
'''
def __init__(self, filename=None, fluxname=None, keepneg=False):
self.isAnalytic=False
self.warnings={}
if filename:
self._readSpectrumFile(filename, fluxname)
self.filename=filename
self.validate_units()
self.validate_wavetable()
if not keepneg:
self.validate_fluxtable()
self.ToInternal()
self.name=self.filename
self.isAnalytic=False
else:
self._wavetable = None
self._fluxtable = None
self.waveunits = None
self.fluxunits = None
self.filename = None
self.name=self.filename
def _reverse_wave(self):
self._wavetable = self._wavetable[::-1]
def __str__(self):
return str(self.name)
def _readSpectrumFile(self, filename, fluxname):
if filename.endswith('.fits') or filename.endswith('.fit'):
self._readFITS(filename, fluxname)
else:
self._readASCII(filename)
def _readFITS(self, filename, fluxname):
fs = pyfits.open(filename)
self._wavetable = fs[1].data.field('wavelength')
if fluxname is None:
fluxname = 'flux'
self._fluxtable = fs[1].data.field(fluxname)
self.waveunits = units.Units(fs[1].header['tunit1'].lower())
self.fluxunits = units.Units(fs[1].header['tunit2'].lower())
fs.close()
def _readASCII(self, filename):
""" Ascii files have no headers. Following synphot, this
routine will assume the first column is wavelength in Angstroms,
and the second column is flux in Flam."""
self.waveunits = units.Units('angstrom')
self.fluxunits = units.Units('flam')
wlist,flist = self._columnsFromASCII(filename)
self._wavetable=N.array(wlist,dtype=N.float64)
self._fluxtable=N.array(flist,dtype=N.float64)
def __call__(self, wavelengths):
'''This is where the flux array is actually calculated given a
wavelength array. Returns an array of flux values calculated at
the wavelength values input.
'''
if N.isscalar(wavelengths):
delta=0.0001
ww=N.array([wavelengths-delta,wavelengths,wavelengths+delta])
tmp=self.resample(ww)
return tmp._fluxtable[1]
else:
return self.resample(wavelengths)._fluxtable
def taper(self):
'''Taper the spectrum by adding zeros to each end.
'''
OutSpec = TabularSourceSpectrum()
wcopy = N.zeros(self._wavetable.size+2,dtype=N.float64)
fcopy = N.zeros(self._fluxtable.size+2,dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._fluxtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
## The wavelengths to use for the first and last points are
## calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutSpec._wavetable = wcopy
OutSpec._fluxtable = fcopy
OutSpec.waveunits = units.Units(str(self.waveunits))
OutSpec.fluxunits = units.Units(str(self.fluxunits))
return OutSpec
def resample(self, resampledWaveTab):
'''Interpolate flux given a wavelength array that is monotonically
increasing and the TabularSourceSpectrum object.
@param resampledWaveTab: new wavelength table IN ANGSTROMS
@type resampledWaveTab: ndarray
'''
##Check whether the input wavetab is in descending order
if resampledWaveTab[0]<resampledWaveTab[-1]:
newwave=resampledWaveTab
newasc = True
else:
newwave=resampledWaveTab[::-1]
newasc = False
## Use numpy interpolation function
if self._wavetable[0]<self._wavetable[-1]:
oldasc = True
ans = N.interp(newwave,self._wavetable,
self._fluxtable)
else:
oldasc = False
rev = N.interp(newwave,self._wavetable[::-1],
self._fluxtable[::-1])
ans = rev[::-1]
## If the new and old waveset don't have the same parity,
## the answer has to be flipped again
if (newasc != oldasc):
ans=ans[::-1]
## Finally, make the new object
# NB: these manipulations were done using the internal
#tables in Angstrom and photlam, so those are the units
#that must be fed to the constructor.
resampled=ArraySourceSpectrum(wave=resampledWaveTab.copy(),
waveunits = 'angstroms',
flux = ans.copy(),
fluxunits = 'photlam',
keepneg=True)
#Use the convert method to set the units desired by the user.
resampled.convert(self.waveunits)
resampled.convert(self.fluxunits)
return resampled
def GetWaveSet(self):
'''For a TabularSource Spectrum, the WaveSet is just the _wavetable
member. Return a copy so that there is no reference to the original
object.
'''
return self._wavetable.copy()
def ToInternal(self):
'''Convert to the internal representation of (angstroms, photlam).
'''
self.validate_units()
savewunits = self.waveunits
savefunits = self.fluxunits
angwave = self.waveunits.Convert(self.GetWaveSet(), 'angstrom')
phoflux = self.fluxunits.Convert(angwave, self._fluxtable, 'photlam')
self._wavetable = angwave.copy()
self._fluxtable = phoflux.copy()
self.waveunits = savewunits
self.fluxunits = savefunits
class ArraySourceSpectrum(TabularSourceSpectrum):
""" spec = ArraySpectrum(numpy array containing wavelenght table,
numpy array containing flux table, waveunits, fluxunits,
name=human-readable nickname for spectrum, keepneg=True to
override the default behavior of setting negative flux values to zero)
"""
def __init__(self, wave=None, flux=None,
waveunits='angstrom', fluxunits='photlam',
name='UnnamedArraySpectrum',
keepneg=False):
"""Create a spectrum from arrays.
@param wave: Wavelength array
@param flux: Flux array
@type wave,flux: Numpy array with numerical data
@param waveunits: Units of wave
@param fluxunits: Units of flux
@type waveunits: L{units.WaveUnits} or subclass
@type fluxunits: L{units.FluxUnits} or subclass
@param name: Description of this array
@type name: string
@param keepneg: If true, negative flux values will be retained; by default, they are forced to zero
@type keepneg: bool
"""
if len(wave)!=len(flux):
raise ValueError("wave and flux arrays must be of equal length")
self._wavetable=wave
self._fluxtable=flux
self.waveunits=units.Units(waveunits)
self.fluxunits=units.Units(fluxunits)
self.name=name
self.isAnalytic=False
self.warnings={}
self.validate_units() #must do before validate_fluxtable because it tests against unit type
self.validate_wavetable() #must do before ToInternal in case of descending
if not keepneg:
self.validate_fluxtable()
self.ToInternal()
class FileSourceSpectrum(TabularSourceSpectrum):
"""spec = FileSpectrum(filename (FITS or ASCII),
fluxname=column name containing flux (for FITS tables only),
keepneg=True to override the default behavior of setting negative
flux values to zero)"""
def __init__(self, filename, fluxname=None, keepneg=False):
"""Create a spectrum from a file.
@param filename: FITS or ASCII file containing the spectrum
@type filename: string
@param fluxname: Column name specifying the flux (FITS only)
@type fluxname: string
@param keepneg: If true, negative flux values will be retained; by default, they are forced to zero
@type keepneg: bool
"""
self.name = locations.irafconvert(filename)
self._readSpectrumFile(self.name, fluxname)
self.validate_units()
self.validate_wavetable()
if not keepneg:
self.validate_fluxtable()
self.ToInternal()
self.isAnalytic=False
self.warnings={}
def _readSpectrumFile(self, filename, fluxname):
if filename.endswith('.fits') or filename.endswith('.fit'):
self._readFITS(filename, fluxname)
else:
self._readASCII(filename)
def _readFITS(self, filename, fluxname):
fs = pyfits.open(filename)
self._wavetable = fs[1].data.field('wavelength')
if fluxname is None:
fluxname = 'flux'
self._fluxtable = fs[1].data.field(fluxname)
self.waveunits = units.Units(fs[1].header['tunit1'].lower())
self.fluxunits = units.Units(fs[1].header['tunit2'].lower())
#Retain the header information as a convenience for the user.
#If duplicate keywords exist, the value in the extension
#header will override that in the primary.
self.fheader = dict(fs[0].header)
self.fheader.update(dict(fs[1].header))
fs.close()
def _readASCII(self, filename):
""" Ascii files have no headers. Following synphot, this
routine will assume the first column is wavelength in Angstroms,
and the second column is flux in Flam."""
self.waveunits = units.Units('angstrom')
self.fluxunits = units.Units('flam')
wlist,flist = self._columnsFromASCII(filename)
self._wavetable=N.array(wlist,dtype=N.float64)
self._fluxtable=N.array(flist,dtype=N.float64)
#We don't support headers from ascii files
self.fheader = dict()
class AnalyticSpectrum(SourceSpectrum):
''' Base class for analytic functions. These are spectral forms
which are defined, by default, on top of the default synphot
waveset.
'''
def __init__(self,waveunits='angstrom',fluxunits='photlam'):
"All AnalyticSpectra must set wave & flux units; do it here."
self.waveunits = units.Units(waveunits)
self.fluxunits = units.Units(fluxunits)
self.validate_units()
self.isAnalytic=True
self.warnings={}
def GetWaveSet(self):
global default_waveset
return default_waveset.copy()
class GaussianSource(AnalyticSpectrum):
"""spec = GaussianSource(TotalFlux under Gaussian,
central wavelength of Gaussian,
FWHM of Gaussian,
waveunits, fluxunits)
"""
def __init__(self, flux, center, fwhm, waveunits='angstrom',
fluxunits='flam'):
AnalyticSpectrum.__init__(self,waveunits,fluxunits)
self.center = center
self.input_fwhm = fwhm
self.input_flux = flux
self._input_units = self.fluxunits
self.sigma = fwhm / math.sqrt(8.0 * math.log(2.0))
self.factor = flux / (math.sqrt(2.0 * math.pi) * self.sigma)
self.name ='Gaussian: mu=%g,fwhm=%g,flux=%g %s'%(self.center,self.input_fwhm,self.input_flux,self._input_units)
def __str__(self):
return self.name
def __call__(self, wavelength):
sp = TabularSourceSpectrum()
sp.waveunits = self.waveunits
sp.fluxunits = self._input_units
sp._wavetable = wavelength
sp._fluxtable = self.factor * N.exp( \
-0.5 *((wavelength - self.center)/ self.sigma)**2)
sp.ToInternal()
return sp(wavelength)
def GetWaveSet(self):
'''Return a wavelength set that describes the Gaussian.
Overrides the base class to compute 101 values, from
center - 5*sigma to center + 5*sigma, in units of
0.1*sigma
'''
increment = 0.1*self.sigma
first = self.center - 50.0*increment
last = self.center + 50.0*increment
return N.arange(first, last, increment)
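# Worked example (sketch): for center=5000 and fwhm=100, sigma is
# 100/sqrt(8*ln 2) ~= 42.47, so the waveset runs from ~4787.7 to ~5212.3
# Angstroms in steps of ~4.25.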
class FlatSpectrum(AnalyticSpectrum):
"""spec = FlatSpectrum(Flux density, waveunits, fluxunits). Defines a
flat spectrum in units of fluxunits."""
def __init__(self, fluxdensity, waveunits='angstrom', fluxunits='photlam'):
AnalyticSpectrum.__init__(self,waveunits,fluxunits)
self.wavelength = None
self._fluxdensity = fluxdensity
self._input_units = self.fluxunits
self.name="Flat spectrum of %g %s"%(self._fluxdensity,
self._input_units)
def __str__(self):
return self.name
def __call__(self, wavelength):
"""Create a TabularSourceSpectrum, then use its __call__"""
sp = TabularSourceSpectrum()
sp.waveunits = self.waveunits
sp.fluxunits = self._input_units
sp._wavetable = wavelength
sp._fluxtable = self._fluxdensity*N.ones(sp._wavetable.shape,
dtype=N.float64)
sp.ToInternal()
return sp(wavelength)
def redshift(self, z):
"""Call the parent's method, which returns a TabularSourceSpectrum,
then use its results to create a new FlatSpectrum with the correct
value. """
tmp=SourceSpectrum.redshift(self,z)
ans=FlatSpectrum(tmp.flux.max(),
fluxunits=tmp.fluxunits)
return ans
##This change produces 5 errors and 17 failures in cos_etc_test.py
## def GetWaveSet(self):
## global default_waveset
## return N.array([default_waveset[0],default_waveset[-1]])
class Powerlaw(AnalyticSpectrum):
"""spec=PowerLaw(refwave, exponent, waveunits, fluxunits).
Power law spectrum of the form (lambda/refval)**exponent,
where refval is in Angstroms.
The spectrum is normalized to a flux of 1 in "fluxunits" at "refval".
"""
def __init__(self, refwave, index, waveunits='angstrom', fluxunits='photlam'):
AnalyticSpectrum.__init__(self,waveunits,fluxunits)
self.wavelength = None
self._input_units = self.fluxunits
self._refwave = refwave
self._index = index
self.name="Power law: refwave %g, index %g"%(self._refwave,self._index)
def __str__(self):
return self.name
def __call__(self, wavelength):
sp = TabularSourceSpectrum()
sp.waveunits = self.waveunits
sp.fluxunits = self._input_units
sp._wavetable = wavelength
sp._fluxtable = (sp._wavetable / self._refwave) ** self._index
sp.ToInternal()
return sp(wavelength)
class BlackBody(AnalyticSpectrum):
"""
spec = BlackBody(T in Kelvin)
Blackbody spectrum with specified temperature, in Kelvin.
The flux of the spectrum is normalized to a star of solar radius
at a distance of 1 kpc.
"""
def __init__(self, temperature):
waveunits=units.Units('angstrom')
fluxunits=units.Units('photlam')
AnalyticSpectrum.__init__(self,waveunits,fluxunits)
self.wavelength = None
self.temperature = temperature
self.name='BB(T=%d)'%self.temperature
def __str__(self):
return self.name
def __call__(self, wavelength):
sp = TabularSourceSpectrum()
sp.waveunits = self.waveunits
sp.fluxunits = self.fluxunits
sp._wavetable = wavelength
sp._fluxtable = planck.bbfunc(wavelength, self.temperature)* RENORM
return sp(wavelength)
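# Usage sketch (hypothetical):
#     >>> bb = BlackBody(5500.)
#     >>> flux = bb(bb.GetWaveSet())   # photlam
# RENORM applies the advertised 1-solar-radius-at-1-kpc normalization.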
class SpectralElement(Integrator):
'''Base class for a Spectral Element (e.g. Filter, Detector...).
'''
def validate_units(self):
"Ensure that waveunits are WaveUnits"
if (not isinstance(self.waveunits,units.WaveUnits)):
raise TypeError("%s is not a valid WaveUnit"%self.waveunits)
def __mul__(self, other):
'''Permitted to multiply a SpectralElement by another
SpectralElement, or by a SourceSpectrum. In the former
case we return a CompositeSpectralElement, while in the
latter case a CompositeSourceSpectrum.
'''
if isinstance(other, SpectralElement):
return CompositeSpectralElement(self, other)
if isinstance(other, SourceSpectrum):
return CompositeSourceSpectrum(self, other, 'multiply')
## Multiplying by a constant is the same as multiplying by a
## UniformTransmission object
if isinstance(other, (int, float)):
return CompositeSpectralElement(self, UniformTransmission(other))
else:
print "SpectralElements can only be multiplied by other " + \
"SpectralElements or SourceSpectrum objects"
def __rmul__(self, other):
return self.__mul__(other)
def integrate(self,wave=None):
"""Integrate the throughput over the specified waveset,
if None, integrate over the full waveset."""
if wave is None:
wave=self.wave
ans=self.trapezoidIntegration(wave,self(wave))
return ans
#..................................................................
# Methods to implement bandpar functionality go here
#..................................................................
def avgwave(self):
"""Implement the equation for lambda nought as defined
in Koornneef et al 1987, p 836.
Should be equivalent to bandpar.avglam = bandpar.avgwv"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave=self.wave
thru=self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru*wave)
den = self.trapezoidIntegration(wave, thru)
if 0.0 in (num, den):
return 0.0
else:
return num/den
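# In symbols (a sketch of the formula implemented above):
#     lambda_0 = INT(lam * T(lam) dlam) / INT(T(lam) dlam)
# i.e. the throughput-weighted mean wavelength.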
def rmswidth(self, floor=0):
"""Defines the lambda sub rms from Koornneef et al 1987,
p 836; should be definition of bandpar.bandw"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave=self.wave
thru=self.throughput
self.convert(mywaveunits)
if floor != 0:
idx = N.where(thru >= floor)
wave = wave[idx]
thru = thru[idx]
integrand = (wave-self.avgwave())**2 * thru
num = self.trapezoidIntegration(wave, integrand)
den = self.trapezoidIntegration(wave, thru)
if 0.0 in (num, den):
return 0.0
else:
ans = math.sqrt(num/den)
return ans
def rectwidth(self):
"""RECTW = INT(THRU) / MAX(THRU)"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave=self.wave
thru=self.throughput
self.convert(mywaveunits)
num = self.trapezoidIntegration(wave, thru)
den = thru.max()
if 0.0 in (num, den):
return 0.0
else:
return num/den
def equivwidth(self):
""" EQUVW = INT(THRU) """
return self.integrate()
def efficiency(self):
"""QTLAM = dimensionless efficience
= INT(THRU / LAM)
"""
mywaveunits = self.waveunits.name
self.convert('angstroms')
wave=self.wave
thru=self.throughput
self.convert(mywaveunits)
ans = self.trapezoidIntegration(wave, thru/wave)
return ans
#..................................................................
def check_sig(self, other):
"""Only call this if check_overlap returns 'partial'.
Returns True if the LACK of overlap is INsignificant:
i.e., it is ok to go ahead and do whatever we are doing."""
swave=self.wave[N.where(self.throughput != 0)]
s1,s2=swave.min(),swave.max()
owave=other.wave
o1,o2=owave.min(),owave.max()
lorange=sorted([s1,o1])
hirange=sorted([s2,o2])
#Get the full throughput
total=self.integrate()
#Now get the other two pieces
#We cannot yet do
#low=self[slice(*lowrange)].integrate()
wave=self.wave
idxs=[N.searchsorted(wave, lorange, 'left'),
N.searchsorted(wave, hirange, 'left')]
excluded=0.0
for idx in idxs:
try:
excluded+=self.integrate(wave=wave[slice(*idx)])
except IndexError:
pass #If the range is zero, do nothing
if excluded/total < 0.01:
return True
else:
return False
def check_overlap(self, other):
"""Check whether the wavelength range of other is defined everywhere
that the wavelength range of self is defined.
Returns "full", "partial", "none".
Normally used for checking whether a spectrum is fully defined over
the range of a bandpass.
Note that the full overlap case is asymmetric: if the range of 'self'
extends past the limits of 'other', this will return a partial
overlap.
"""
if other.isAnalytic:
#then it's defined everywhere
return 'full'
swave=self.wave[N.where(self.throughput != 0)]
s1,s2=swave.min(),swave.max()
owave=other.wave
o1,o2=owave.min(),owave.max()
if (s1>=o1 and s2<=o2):
ans='full'
elif (s2<o1) or (o2<s1):
ans='none'
else:
ans='partial'
return ans
def convert(self, targetunits):
'''Convert to other units. This method actually just changes the
wavelength unit objects, it does not recompute the
internally kept wave data; these are kept always in internal
units. Method getWaveSet does the actual computation.'''
nunits = units.Units(targetunits)
self.waveunits = nunits
def ToInternal(self):
'''Convert wavelengths to the internal representation of angstroms.
Note: This is not yet used, but should be for safety when creating
TabularSpectralElements from files. It will also be necessary for the
ArraySpectralElement class that we want to create RSN.
'''
self.validate_units()
savewunits = self.waveunits
angwave = self.waveunits.Convert(self.GetWaveSet(), 'angstrom')
self._wavetable = angwave.copy()
self.waveunits = savewunits
def __call__(self, wavelengths):
'''This is where the throughput array is calculated for a given
input wavelength table.
@param wavelengths: an array of wavelengths in Angstroms at which the
throughput should be sampled
@type wavelengths: ndarray
'''
if N.isscalar(wavelengths):
delta=0.0001
ww=N.array([wavelengths-delta,wavelengths,wavelengths+delta])
tmp=self.resample(ww)
return tmp._throughputtable[1]
else:
return self.resample(wavelengths)._throughputtable
def sample(self, wavelengths):
"""Provide a more normal user interface to the __call__"""
return self.__call__(wavelengths)
def taper(self):
'''Taper the spectrum by adding zeros to each end.
'''
OutElement = TabularSpectralElement()
wcopy = N.zeros(self._wavetable.size+2,dtype=N.float64)
fcopy = N.zeros(self._throughputtable.size+2,dtype=N.float64)
wcopy[1:-1] = self._wavetable
fcopy[1:-1] = self._throughputtable
fcopy[0] = 0.0
fcopy[-1] = 0.0
## The wavelengths to use for the first and last points are
## calculated by using the same ratio as for the 2 interior points
wcopy[0] = wcopy[1]*wcopy[1]/wcopy[2]
wcopy[-1] = wcopy[-2]*wcopy[-2]/wcopy[-3]
OutElement._wavetable = wcopy
OutElement._throughputtable = fcopy
return OutElement
def writefits(self, filename, clobber=True, trimzero=True,
precision=None, hkeys=None):
"""Write the bandpass to a FITS file.
filename: name of file to write to
clobber=True: Will clobber existing file by default
trimzero=True: Will trim zero-flux elements from both ends
by default
precision: Will write in native precision by default; can be
set to "single" or "double"
hkeys: Optional dictionary of {keyword:(value,comment)}
to be added to primary FITS header
"""
if precision is None:
precision=self.throughput.dtype.char
_precision=precision.lower()[0]
pcodes={'d':'D','s':'E','f':'E'}
if clobber:
try:
os.remove(filename)
except OSError:
pass
wave=self.wave
thru=self.throughput
#Add a check for single/double precision clash, so
#that if written out in single precision, the wavelength table
#will still be sorted with no duplicates
#The value of epsilon is taken from the Synphot FAQ.
if wave.dtype == N.float64 and _precision == 's':
idx=N.where(abs(wave[1:]-wave[:-1]) > syn_epsilon)
else:
idx=N.where(wave) #=> idx=[:]
wave=wave[idx]
thru=thru[idx]
first,last=0,len(thru)
if trimzero:
#Keep one zero at each end
nz = thru.nonzero()[0]
try:
first=max(nz[0]-1,first)
last=min(nz[-1]+2,last)
except IndexError:
pass
#Construct the columns and HDUlist
cw = pyfits.Column(name='WAVELENGTH',
array=wave[first:last],
unit=self.waveunits.name,
format=pcodes[_precision])
cf = pyfits.Column(name='THROUGHPUT',
array=thru[first:last],
unit=' ',
format=pcodes[_precision])
#Make the primary header
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
#User-provided keys are written to the primary header;
#so are filename and origin
bkeys = dict(filename=(os.path.basename(filename),'name of file'),
origin=('pysynphot',
'Version (%s, %s)'%(__version__,__svn_version__)))
#User-values if present may override default values
if hkeys is not None:
bkeys.update(hkeys)
#Now update the primary header
for key,val in bkeys.items():
hdu.header.update(key, *val)
#Make the extension HDU
cols = pyfits.ColDefs([cw, cf])
hdu = pyfits.new_table(cols)
#There are also some keys to be written to the extension
#header
bkeys=dict(expr =(str(self),'pysyn expression'),
tdisp1=('G15.7',),
tdisp2=('G15.7',)
)
try:
bkeys['grftable']=(os.path.basename(self.obsmode.gtname),
'graph table used')
bkeys['cmptable']=(os.path.basename(self.obsmode.ctname),
'component table used')
except AttributeError:
pass #Not all bandpasses have these
for key,val in bkeys.items():
hdu.header.update(key, *val)
#Add the extension to the list, and write to file.
hdulist.append(hdu)
hdulist.writeto(filename)
def resample(self, resampledWaveTab):
'''Interpolate throughput given a wavelength array that is
monotonically increasing and the TabularSpectralElement object.'''
##Check whether the input wavetab is in descending order
if resampledWaveTab[0]<resampledWaveTab[-1]:
newwave=resampledWaveTab
newasc = True
else:
newwave=resampledWaveTab[::-1]
newasc = False
## Use numpy interpolation function
if self._wavetable[0]<self._wavetable[-1]:
oldasc = True
ans = N.interp(newwave,self._wavetable,
self._throughputtable)
else:
oldasc = False
rev = N.interp(newwave,self._wavetable[::-1],
self._throughputtable[::-1])
ans = rev[::-1]
## If the new and old waveset don't have the same parity,
## the answer has to be flipped again
if (newasc != oldasc):
ans=ans[::-1]
# Finally, make the new object.
# NB: these manipulations were done using the internal
#tables in Angstrom, so those are the units
#that must be fed to the constructor.
resampled=ArraySpectralElement(wave=resampledWaveTab.copy(),
waveunits = 'angstroms',
throughput = ans.copy())
#Use the convert method to set the units desired by the user.
resampled.convert(self.waveunits)
return resampled
def unitResponse(self):
"""Is this correct if waveunits != Angstrom?"""
wave = self.GetWaveSet()
thru = self(wave)
return 1.0 / self.trapezoidIntegration(wave,thru)
def GetWaveSet(self):
"Return the waveset in the requested units."
wave = units.Angstrom().Convert(self._wavetable, self.waveunits.name)
return wave
def GetThroughput(self):
"""Return the throughput for the internal wavetable."""
## NB: Throughput never changes units no matter what the
## wavelength does. There is an implicit assumption here that
## the units of the input waveset to the __call__ are always
## Angstroms.
self.convert('angstroms')
return self.__call__(self.wave)
wave = property(GetWaveSet, doc='Waveset for bandpass')
throughput = property(GetThroughput, doc='Throughput for bandpass')
def fwhm(self):
raise NotImplementedError("#139: Implement calcband functionality")
class CompositeSpectralElement(SpectralElement):
'''CompositeSpectralElement class, which knows how to calculate
its throughput by delegating the calculation to its components.
'''
def __init__(self, component1, component2):
if (not isinstance(component1, SpectralElement) or
not isinstance(component2, SpectralElement)):
raise TypeError("Arguments must be SpectralElements")
self.component1 = component1
self.component2 = component2
self.isAnalytic = component1.isAnalytic and component2.isAnalytic
if component1.waveunits.name == component2.waveunits.name:
self.waveunits = component1.waveunits
else:
msg="Components have different waveunits (%s and %s)"%(component1.waveunits,component2.waveunits)
raise NotImplementedError(msg)
self.throughputunits = None
self.name="(%s * %s)"%(str(self.component1),str(self.component2))
self.warnings={}
self.warnings.update(component1.warnings)
self.warnings.update(component2.warnings)
def __call__(self, wavelength):
'''This is where the throughput calculation is delegated.
'''
return self.component1(wavelength) * self.component2(wavelength)
def __str__(self):
return self.name
def complist(self):
ans=[]
for comp in (self.component1, self.component2):
try:
ans.extend(comp.complist())
except AttributeError:
ans.append(comp)
return ans
def GetWaveSet(self):
'''This method returns a wavelength set appropriate for a composite
object by forming the union of the wavelengths of the components.
'''
wave1 = self.component1.GetWaveSet()
wave2 = self.component2.GetWaveSet()
return MergeWaveSets(wave1, wave2)
wave = property(GetWaveSet,doc="wave for CompositeSpectralElement")
class UniformTransmission(SpectralElement):
'''bandpass=UniformTransmission(dimensionless throughput)
@todo: Need to add a GetWaveSet method (or just return None).
'''
def __init__(self, value, waveunits='angstrom'):
self.waveunits = units.Units(waveunits)
self.value = value
self.name=str(self)
self.isAnalytic=True
self.warnings={}
#The ._wavetable is used only by the .writefits() method at this time
#It is not for general use.
self._wavetable = N.array([default_waveset[0],default_waveset[-1]])
def __str__(self):
return "%g"%self.value
def GetWaveSet(self):
return None
## This produced 15 test failures in cos_etc_test.
## def GetWaveSet(self):
## global default_waveset
## return N.array([default_waveset[0],default_waveset[-1]])
##
## wave = property(GetWaveSet,doc="wave for UniformTransmission")
def __call__(self, wavelength):
'''__call__ returns the constant value as an array, given a
wavelength array as argument.
'''
return 0.0 * wavelength + self.value
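# Usage sketch (hypothetical): a grey 50% attenuator,
#     >>> nd = UniformTransmission(0.5)
#     >>> nd(N.array([1000., 2000.]))   # -> array([ 0.5,  0.5])
# and "0.5 * bp" wraps the constant the same way via __rmul__.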
class TabularSpectralElement(SpectralElement):
"""bandpass = FileBandpass(FITS or ASCII filename, thrucol= name of
column containing throughput values (for FITS tables only)
"""
def __init__(self, fileName=None, thrucol='throughput'):
'''__init__ takes a character string argument that contains the name
of the file with the spectral element table.
'''
self.isAnalytic=False
self.warnings={}
if fileName:
if fileName.endswith('.fits') or fileName.endswith('.fit'):
self._readFITS(fileName, thrucol)
else:
self._readASCII(fileName)
self.name = fileName
else:
self.name = None
self._wavetable = None
self._throughputtable = None
self.waveunits = None
self.throughputunits = None
def _reverse_wave(self):
self._wavetable = self._wavetable[::-1]
def __str__(self):
return str(self.name)
def ToInternal(self):
'''Convert wavelengths to the internal representation of angstroms..
'''
self.validate_units()
savewunits = self.waveunits
angwave = self.waveunits.Convert(self._wavetable, 'angstrom')
self._wavetable = angwave.copy()
self.waveunits = savewunits
def _readASCII(self,filename):
""" Ascii files have no headers. Following synphot, this
routine will assume the first column is wavelength in Angstroms,
and the second column is throughput (dimensionless)."""
self.waveunits = units.Units('angstrom')
self.throughputunits = 'none'
wlist,tlist = self._columnsFromASCII(filename)
self._wavetable=N.array(wlist,dtype=N.float64)
self._throughputtable=N.array(tlist,dtype=N.float64)
def _readFITS(self,filename,thrucol='throughput'):
fs = pyfits.open(filename)
self._wavetable = fs[1].data.field('wavelength')
self._throughputtable = fs[1].data.field(thrucol)
self.waveunits = units.Units(fs[1].header['tunit1'].lower())
self.throughputunits = 'none'
self.getHeaderKeywords(fs[1].header)
fs.close()
def getHeaderKeywords(self, header):
''' This is a placeholder for subclasses to get header keywords without
having to reopen the file again.
'''
pass
class ArraySpectralElement(TabularSpectralElement):
""" spec = ArraySpectrum(numpy array containing wavelength table,
numpy array containing throughput table, waveunits,
name=human-readable nickname for bandpass.
"""
def __init__(self, wave=None, throughput=None,
waveunits='angstrom',
name='UnnamedArrayBandpass'):
"""Create a spectrum from arrays.
@param wave: Wavelength array
@param throughput: Throughput array
@type wave,throughput: Numpy array with numerical data
@param waveunits: Units of wave
@type waveunits: L{units.WaveUnits} or subclass
@param name: Description of this spectral element
@type name: string
"""
if len(wave)!=len(throughput):
raise ValueError("wave and throughput arrays must be of equal length")
self._wavetable=wave
self._throughputtable=throughput
self.waveunits=units.Units(waveunits)
self.name=name
self.isAnalytic=False
self.warnings={}
self.validate_units() #must do before validate_fluxtable because it tests against unit type
self.validate_wavetable() #must do before ToInternal in case of descending
self.ToInternal()
class FileSpectralElement(TabularSpectralElement):
"""spec = FileSpectrum(filename (FITS or ASCII),
throughputname=column name containing throughput (for FITS tables only),
keepneg=True to override the default behavior of setting negative
throughput values to zero)"""
def __init__(self, filename, thrucol=None):
"""Create a bandpass from a file.
@param filename: FITS or ASCII file containing the bandpass
@type filename: string
@param thrucol: Column name specifying the throughput (FITS only)
@type thrucol: string
"""
self.name = locations.irafconvert(filename)
self._readThroughputFile(self.name, thrucol)
self.validate_units()
self.validate_wavetable()
self.ToInternal()
self.isAnalytic=False
self.warnings={}
def _readThroughputFile(self, filename, throughputname):
if filename.endswith('.fits') or filename.endswith('.fit'):
self._readFITS(filename, throughputname)
else:
self._readASCII(filename)
def _readFITS(self, filename, throughputname):
fs = pyfits.open(filename)
self._wavetable = fs[1].data.field('wavelength')
        if throughputname is None:
throughputname = 'throughput'
self._throughputtable = fs[1].data.field(throughputname)
self.waveunits = units.Units(fs[1].header['tunit1'].lower())
#Retain the header information as a convenience for the user.
#If duplicate keywords exist, the value in the extension
#header will override that in the primary.
self.fheader = dict(fs[0].header)
self.fheader.update(dict(fs[1].header))
fs.close()
def _readASCII(self, filename):
""" Ascii files have no headers. Following synphot, this
routine will assume the first column is wavelength in Angstroms,
        and the second column is throughput (dimensionless)."""
self.waveunits = units.Units('angstrom')
wlist,flist = self._columnsFromASCII(filename)
self._wavetable=N.array(wlist,dtype=N.float64)
self._throughputtable=N.array(flist,dtype=N.float64)
        #We don't support headers from ascii files
self.fheader = dict()
class InterpolatedSpectralElement(SpectralElement):
'''The InterpolatedSpectralElement class handles spectral elements
that are interpolated from columns stored in FITS tables
'''
def __init__(self, fileName, wavelength):
''' The file name contains a suffix with a column name specification
in between square brackets, such as [fr388n#]. The wavelength
parameter (poorly named -- it is not always a wavelength) is used to
interpolate between two columns in the file.
'''
xre=re.search('\[(?P<col>.*?)\]',fileName)
self.name = os.path.expandvars(fileName[0:(xre.start())])
colSpec = xre.group('col')
        self.isAnalytic=False
self.warnings={}
self.interpval = wavelength
fs = pyfits.open(self.name)
#The wavelength table will have to be adjusted before use
wave0 = fs[1].data.field('wavelength')
#Determine the columns that bracket the desired value
colNames = fs[1].data.names[2:]
colWaves = []
for columnName in colNames:
try:
colWaves.append(float(columnName.split('#')[1]))
except IndexError,e:
#make sure this is the case we know about
if columnName.lower().startswith('error'):
pass
else:
raise e
waves = MA.array(colWaves)
greater = MA.masked_less(waves, wavelength)
less = MA.masked_greater(waves, wavelength)
upper = MA.minimum(greater)
lower = MA.maximum(less)
if '--' in (str(upper),str(lower)):
raise NotImplementedError("%g outside of range in %s; extrapolation not yet supported"%(wavelength,fileName))
#Construct the column names
lcol = colNames[MA.argmax(less)]
ucol = colNames[MA.argmin(greater)]
#Extract the data from those columns
lthr = fs[1].data.field(lcol)
uthr = fs[1].data.field(ucol)
if upper != lower:
#Adjust the wavelength table to bracket the range
lwave = wave0 + (lower-self.interpval)
uwave = wave0 + (upper-self.interpval)
#Interpolate the columns at those ranges
lthr = N.interp(lwave, wave0, fs[1].data.field(lcol))
uthr = N.interp(uwave, wave0, fs[1].data.field(ucol))
#Then interpolate between the two columns
w = (wavelength - lower) / (upper - lower)
self._throughputtable = uthr * w + lthr * (1.0 - w)
else:
#Interpolate the matching column to the correct wave range
uwave = wave0 + (upper-self.interpval)
uthr = N.interp(uwave, wave0, fs[1].data.field(ucol))
self._throughputtable = uthr
self._wavetable = wave0
self.waveunits = units.Units(fs[1].header['tunit1'].lower())
self.throughputunits = 'none'
fs.close()
def __str__(self):
return "%s#%g"%(self.name,self.interpval)
class ThermalSpectralElement(TabularSpectralElement):
'''The ThermalSpectralElement class handles spectral elements
that have associated thermal properties read from a FITS table.
ThermalSpectralElements differ from regular SpectralElements in
that they carry thermal parameters such as temperature and beam
filling factor, but otherwise they operate just as regular
    SpectralElements. They don't know how to apply themselves to an
    existing beam, in the sense that their emissivities must be
    handled explicitly, outside the objects themselves.
'''
def __init__(self, fileName):
TabularSpectralElement.__init__(self, fileName=fileName, thrucol='emissivity')
self.warnings={}
def getHeaderKeywords(self, header):
''' Overrides base class in order to get thermal keywords.
'''
self.temperature = header['DEFT']
self.beamFillFactor = header['BEAMFILL']
class Box(SpectralElement):
"""bandpass = Box(central wavelength, width) - both in Angstroms"""
def __init__(self, center, width):
''' Both center and width are assumed to be in Angstrom
units, according to the synphot definition.
'''
self.waveunits=units.Units('angstrom') #per docstring: for now
lower = center - width / 2.0
upper = center + width / 2.0
step = 0.05 # fixed step for now (in A)
self.name='Box at %g (%g wide)'%(center,width)
nwaves = int(((upper - lower) / step)) + 2
self._wavetable = N.zeros(shape=[nwaves,], dtype=N.float64)
for i in range(nwaves):
self._wavetable[i] = lower + step * i
self._wavetable[0] = self._wavetable[1] - step
self._wavetable[-1] = self._wavetable[-2] + step
self._throughputtable = N.ones(shape=self._wavetable.shape, \
dtype=N.float64)
self._throughputtable[0] = 0.0
self._throughputtable[-1] = 0.0
self.isAnalytic=False
self.warnings={}
Vega = FileSourceSpectrum(locations.VegaFile)
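#Minimal usage sketch (not part of the original module; values inferred from the
#class definitions above, not verified against a pysynphot installation):
# box = Box(5500.0, 100.0) #rectangular bandpass: center 5500 A, width 100 A
# print box.name #'Box at 5500 (100 wide)'
# flat = UniformTransmission(0.5) #constant dimensionless throughput
# print flat(N.array([1000.0, 9000.0])) #[ 0.5  0.5] regardless of wavelength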
|
martindurant/starclassifier
|
ui/pysynphot/spectrum.py
|
Python
|
mit
| 62,915
|
[
"Gaussian"
] |
fac9c08c32a9620f98986e4e9e8b67934ffb55784e3d9bf23dde57d41013860b
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
import re
#-------------------------------------------------------------------------
#
# HasNameOf
#
#-------------------------------------------------------------------------
class RegExpName(Rule):
"""Rule that checks for full or partial name matches"""
labels = [_('Expression:')]
name = _('People matching the <regex_name>')
description = _("Matches people's names with a specified regular expression")
category = _('General filters')
def __init__(self, list):
Rule.__init__(self, list)
try:
self.match = re.compile(list[0],re.I|re.U|re.L)
except re.error:
#indicate error by matching everyone
self.match = re.compile('')
    def apply(self, db, person):
        for name in [person.get_primary_name()] + person.get_alternate_names():
            for field in [name.first_name, name.get_surname(), name.suffix,
                          name.title, name.nick, name.famnick, name.call]:
                if self.match.match(field):
                    return True
        return False
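#Behavior sketch (illustrative, not part of the original module): the except
#clause in __init__ makes a malformed expression match everyone, because the
#empty pattern matches at the start of any string.
# import re
# try:
#     match = re.compile('([unbalanced', re.I | re.U | re.L)
# except re.error:
#     match = re.compile('')
# print bool(match.match('Allingham')) #True -> the rule matches every person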
|
Forage/Gramps
|
gramps/gen/filters/rules/person/_regexpname.py
|
Python
|
gpl-2.0
| 2,430
|
[
"Brian"
] |
9b812e294f1930c8506ebb189920dacc04c2181e62f6118856bdb1f07c8b69ed
|
#!/usr/bin/env python
'''Analysis mdtraj test'''
import os
import mdtraj as md
import numpy as np
import matplotlib
import scipy.cluster.hierarchy
from pylab import *
from math import pi
from sklearn.decomposition import PCA
from itertools import combinations
import mdtraj.testing
import itertools
#loading and printing trajectories with coordinate files
#--------------------------------------------------------
traj = md.load('traj.dcd', top='coor.psf')
print traj
#printing different info about the protein such as # of atoms, residues etc
#--------------------------------------------------------------------------
print 'How many atoms? %s' % traj.n_atoms
print 'How many residues? %s' % traj.n_residues
#slicing the trajectory file into smaller pieces, saving it back to the disk in HDF5 format
#----------------------------------------------------------------------------------------------
traj[0:2].save_dcd('first-two-frames.dcd')
traj[::].save('traj.h5')
#we can load HDF5 files and use them for analysis
#------------------------------------------------
traj1 = md.load('traj.h5')
#selecting a certain part of the protein; in this case a trajectory with only alpha carbons present
#----------------------------------------------------------------------------------------------
atoms_to_keep = [a.index for a in traj.topology.atoms if a.name == 'CA']
traj_ca = traj.atom_slice(atoms_to_keep) #atom_slice returns a new trajectory
traj_ca.save('CA-only.h5')
#Root-mean-square deviation (RMSD), comparing target with the reference protein
#-------------------------------------------------------------------------------
RMSD = md.rmsd(traj, ref_prot[0:10]) #ref_prot: a previously loaded reference trajectory; RMSD is taken against its frame 0
#Calculating the average distance between two atoms
#---------------------------------------------------
traj = md.load('traj.h5')
av_dis = np.mean(np.sqrt(np.sum((traj.xyz[:, 0, :] - traj.xyz[:, 1, :])**2, axis=1))) #Change 0 and 1 to the integer indices of the atoms of interest
print "Average distance between atoms 0 and 1: %f nm" % av_dis
#Computing all pairwise rmsds between conformations
#---------------------------------------------------
distances = np.empty((traj.n_frames, traj.n_frames))
for i in range(traj.n_frames):
distances[i] = md.rmsd(traj, traj, i)
print 'Max pairwise rmsd: %f nm' % np.max(distances)
#Plotting the cluster
#---------------------
linkage = scipy.cluster.hierarchy.ward(distances)
figure()
title('RMSD Ward hierarchical clustering')
graph = scipy.cluster.hierarchy.dendrogram(linkage, no_labels=True, count_sort='descendent')
savefig('cluster.gif')
show()
#Plotting a Ramachandran plot
#--------------------------
atoms, bonds = traj1.topology.to_dataframe()
psi_indices, phi_indices = [6, 8, 14, 16], [4, 6, 8, 14] #Check these atom indices: they are taken
#directly from the tutorial and are specific to one
#protein; we need a common way to compute them for
#any protein (see the sketch at the end of this script)
angles = md.geometry.compute_dihedrals(traj1, [phi_indices, psi_indices])
figure()
title('Test Dihedral Map For Ramachandran Plot')
plot=scatter(angles[:, 0], angles[:, 1], marker='x', c=traj1.time)
cbar = colorbar()
cbar.set_label('Time [ps]')
xlabel(r'$\Phi$ Angle [radians]')
xlim(-pi, pi)
ylabel(r'$\Psi$ Angle [radians]')
ylim(-pi, pi)
savefig('ramchplot.gif')
show()
#Principal component analysis and plotting the data; should check whether the components and frames are good for any protein simulation
#-----------------------------------------------------------------------------------------------------------------------------------
pca = PCA(n_components=2)
traj.superpose(traj1, 0)
reduced_cartesian = pca.fit_transform(traj.xyz.reshape(traj.n_frames, traj.n_atoms * 3))
print reduced_cartesian.shape
#------Plotting the data -------
figure()
scatter(reduced_cartesian[:, 0], reduced_cartesian[:,1], marker='x', c=traj.time)
xlabel('PC1')
ylabel('PC2')
title('(Principal component analysis) Cartesian coordinate PCA')
cbar = colorbar()
cbar.set_label('Time [ps]')
savefig('pca1.gif')
show()
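#Hedged alternative (not part of the original script): mdtraj can locate phi/psi
#atom quadruplets for an arbitrary protein, which would replace the hard-coded
#dihedral indices used for the Ramachandran plot above.
# phi_atoms, phi = md.compute_phi(traj1) #angles in radians; phi has shape (n_frames, n_phi)
# psi_atoms, psi = md.compute_psi(traj1)
# scatter(phi[:, 0], psi[:, 0], marker='x') #Ramachandran scatter for the first phi/psi pair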
|
mkuiper/MD_workflow_py
|
Analysis/analysis_mdtraj.py
|
Python
|
bsd-3-clause
| 4,143
|
[
"MDTraj"
] |
0a6dd424cab45455044be1d04bea1dd9b6daead8236e5b462a7fbe7b33584601
|
# coding: utf-8
"""Python bindings for 0MQ."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
import atexit
import weakref
from zmq.backend import Context as ContextBase
from . import constants
from .attrsettr import AttributeSetter
from .constants import ENOTSUP, ctx_opt_names
from .socket import Socket
from zmq.error import ZMQError
from zmq.utils.interop import cast_int_addr
class Context(ContextBase, AttributeSetter):
"""Create a zmq Context
A zmq Context creates sockets via its ``ctx.socket`` method.
"""
sockopts = None
_instance = None
_shadow = False
_exiting = False
def __init__(self, io_threads=1, **kwargs):
super(Context, self).__init__(io_threads=io_threads, **kwargs)
if kwargs.get('shadow', False):
self._shadow = True
else:
self._shadow = False
self.sockopts = {}
self._exiting = False
if not self._shadow:
ctx_ref = weakref.ref(self)
def _notify_atexit():
ctx = ctx_ref()
if ctx is not None:
ctx._exiting = True
atexit.register(_notify_atexit)
def __del__(self):
"""deleting a Context should terminate it, without trying non-threadsafe destroy"""
if not self._shadow and not self._exiting:
self.term()
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.term()
@classmethod
def shadow(cls, address):
"""Shadow an existing libzmq context
address is the integer address of the libzmq context
or an FFI pointer to it.
.. versionadded:: 14.1
"""
address = cast_int_addr(address)
return cls(shadow=address)
@classmethod
def shadow_pyczmq(cls, ctx):
"""Shadow an existing pyczmq context
ctx is the FFI `zctx_t *` pointer
.. versionadded:: 14.1
"""
from pyczmq import zctx
underlying = zctx.underlying(ctx)
address = cast_int_addr(underlying)
return cls(shadow=address)
# static method copied from tornado IOLoop.instance
@classmethod
def instance(cls, io_threads=1):
"""Returns a global Context instance.
Most single-threaded applications have a single, global Context.
Use this method instead of passing around Context instances
throughout your code.
A common pattern for classes that depend on Contexts is to use
a default argument to enable programs with multiple Contexts
but not require the argument for simpler applications:
class MyClass(object):
def __init__(self, context=None):
self.context = context or Context.instance()
"""
if cls._instance is None or cls._instance.closed:
cls._instance = cls(io_threads=io_threads)
return cls._instance
#-------------------------------------------------------------------------
# Hooks for ctxopt completion
#-------------------------------------------------------------------------
def __dir__(self):
keys = dir(self.__class__)
for collection in (
ctx_opt_names,
):
keys.extend(collection)
return keys
#-------------------------------------------------------------------------
# Creating Sockets
#-------------------------------------------------------------------------
@property
def _socket_class(self):
return Socket
def socket(self, socket_type):
"""Create a Socket associated with this Context.
Parameters
----------
socket_type : int
The socket type, which can be any of the 0MQ socket types:
REQ, REP, PUB, SUB, PAIR, DEALER, ROUTER, PULL, PUSH, etc.
"""
if self.closed:
raise ZMQError(ENOTSUP)
s = self._socket_class(self, socket_type)
for opt, value in self.sockopts.items():
try:
s.setsockopt(opt, value)
except ZMQError:
# ignore ZMQErrors, which are likely for socket options
# that do not apply to a particular socket type, e.g.
# SUBSCRIBE for non-SUB sockets.
pass
return s
def setsockopt(self, opt, value):
"""set default socket options for new sockets created by this Context
.. versionadded:: 13.0
"""
self.sockopts[opt] = value
def getsockopt(self, opt):
"""get default socket options for new sockets created by this Context
.. versionadded:: 13.0
"""
return self.sockopts[opt]
def _set_attr_opt(self, name, opt, value):
"""set default sockopts as attributes"""
if name in constants.ctx_opt_names:
return self.set(opt, value)
else:
self.sockopts[opt] = value
def _get_attr_opt(self, name, opt):
"""get default sockopts as attributes"""
if name in constants.ctx_opt_names:
return self.get(opt)
else:
if opt not in self.sockopts:
raise AttributeError(name)
else:
return self.sockopts[opt]
def __delattr__(self, key):
"""delete default sockopts as attributes"""
key = key.upper()
try:
opt = getattr(constants, key)
except AttributeError:
raise AttributeError("no such socket option: %s" % key)
else:
if opt not in self.sockopts:
raise AttributeError(key)
else:
del self.sockopts[opt]
__all__ = ['Context']
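#Minimal usage sketch (assumption: not part of the upstream module):
# import zmq
# ctx = zmq.Context.instance() #shared global context
# ctx.linger = 0 #stored as a default sockopt via AttributeSetter
# s = ctx.socket(zmq.PUSH) #new socket inherits the default sockopts
# s.connect('tcp://127.0.0.1:5555') #hypothetical endpoint
# s.close()
# ctx.term()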
|
ellisonbg/pyzmq
|
zmq/sugar/context.py
|
Python
|
lgpl-3.0
| 6,210
|
[
"Brian"
] |
5f7f36cc477aab501b6eeb1ac2fdd8b1b88415dc13390c9ccd60856a5429b847
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.xtb_reader'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import StringIO
import unittest
from grit import xtb_reader
from grit import clique
from grit import grd_reader
from grit import tclib
from grit import util
class XtbReaderUnittest(unittest.TestCase):
def testParsing(self):
xtb_file = StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE translationbundle>
<translationbundle lang="fr">
<translation id="5282608565720904145">Bingo.</translation>
<translation id="2955977306445326147">Bongo longo.</translation>
<translation id="238824332917605038">Hullo</translation>
<translation id="6629135689895381486"><ph name="PROBLEM_REPORT"/> peut <ph name="START_LINK"/>utilisation excessive de majuscules<ph name="END_LINK"/>.</translation>
<translation id="7729135689895381486">Hello
this is another line
and another
and another after a blank line.</translation>
</translationbundle>''')
messages = []
def Callback(id, structure):
messages.append((id, structure))
xtb_reader.Parse(xtb_file, Callback)
self.failUnless(len(messages[0][1]) == 1)
self.failUnless(messages[3][1][0]) # PROBLEM_REPORT placeholder
self.failUnless(messages[4][0] == '7729135689895381486')
self.failUnless(messages[4][1][7][1] == 'and another after a blank line.')
def testParsingIntoMessages(self):
grd = grd_reader.Parse(StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<messages>
<message name="ID_MEGA">Fantastic!</message>
<message name="ID_HELLO_USER">Hello <ph name="USERNAME">%s<ex>Joi</ex></ph></message>
</messages>'''), dir='.', flexible_root=True)
clique_mega = grd.children[0].GetCliques()[0]
msg_mega = clique_mega.GetMessage()
clique_hello_user = grd.children[1].GetCliques()[0]
msg_hello_user = clique_hello_user.GetMessage()
xtb_file = StringIO.StringIO('''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE translationbundle>
<translationbundle lang="is">
<translation id="%s">Meirihattar!</translation>
<translation id="%s">Saelir <ph name="USERNAME"/></translation>
</translationbundle>''' % (msg_mega.GetId(), msg_hello_user.GetId()))
xtb_reader.Parse(xtb_file, grd.UberClique().GenerateXtbParserCallback('is'))
self.failUnless(clique_mega.MessageForLanguage('is').GetRealContent() ==
'Meirihattar!')
self.failUnless(clique_hello_user.MessageForLanguage('is').GetRealContent() ==
'Saelir %s')
def testParseLargeFile(self):
def Callback(id, structure):
pass
xtb = file(util.PathFromRoot('grit/testdata/generated_resources_fr.xtb'))
xtb_reader.Parse(xtb, Callback)
xtb.close()
if __name__ == '__main__':
unittest.main()
|
JoKaWare/WTL-DUI
|
tools/grit/grit/xtb_reader_unittest.py
|
Python
|
bsd-3-clause
| 3,105
|
[
"xTB"
] |
aae23d4662bfc59a2541882bed6f0b25b46c0d78074b511fe3f6d21af5c8110b
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Craig J. Anderson
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Matt Keenan (matt.keenan@gmail.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Reports/Text Reports/Descendant Report.
"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
import copy
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.get_translation().gettext
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC, PARA_ALIGN_CENTER)
from gramps.gen.plug.menu import (NumberOption, PersonOption, BooleanOption,
EnumeratedListOption)
from gramps.gen.display.name import displayer as global_name_display
from gramps.gen.errors import ReportError
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.datehandler import get_date
from gramps.gen.sort import Sort
from gramps.gen.utils.db import (get_birth_or_fallback, get_death_or_fallback,
get_marriage_or_fallback, get_divorce_or_fallback)
#------------------------------------------------------------------------
#
# PrintSimple
# Simple numbering system
#
#------------------------------------------------------------------------
class PrintSimple():
def __init__(self, showdups):
self.showdups = showdups
self.num = {0:1}
def number(self, level):
if self.showdups:
# Just show original simple numbering
to_return = "%d." % level
else:
to_return = str(level)
if level > 1:
to_return += "-" + str(self.num[level-1])
to_return += "."
self.num[level] = 1
self.num[level-1] = self.num[level-1] + 1
return to_return
#------------------------------------------------------------------------
#
# PrintVilliers
# de_Villiers_Pama numbering system
#
#------------------------------------------------------------------------
class PrintVilliers():
def __init__(self):
self.pama = 'abcdefghijklmnopqrstuvwxyz'
self.num = {0:1}
def number(self, level):
to_return = self.pama[level-1]
if level > 1:
to_return += str(self.num[level-1])
to_return += "."
self.num[level] = 1
self.num[level-1] = self.num[level-1] + 1
return to_return
#------------------------------------------------------------------------
#
# class PrintMeurgey
# Meurgey_de_Tupigny numbering system
#
#------------------------------------------------------------------------
class PrintMeurgey():
def __init__(self):
self.childnum = [""]
def number(self, level):
if level == 1:
dash = ""
else:
dash = "-"
if len(self.childnum) < level:
self.childnum.append(1)
to_return = (ReportUtils.roman(level) + dash +
str(self.childnum[level-1]) + ".")
if level > 1:
self.childnum[level-1] += 1
return to_return
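#Worked example (illustrative, not part of the original report code): numbers
#produced for a root person, then a first child and that child's sibling:
# PrintSimple(showdups=False): "1.", "2-1.", "2-2." (just "2." for both children when showdups)
# PrintVilliers(): "a.", "b1.", "b2." (letter = generation, digit = child count)
# PrintMeurgey(): "I.", "II-1.", "II-2." (Roman generation, dash, child count)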
#------------------------------------------------------------------------
#
# Printinfo
#
#------------------------------------------------------------------------
class Printinfo():
"""
A base class used to help make the individual numbering system classes.
"""
def __init__(self, doc, database, numbering, showmarriage, showdivorce,\
name_display):
#classes
self._name_display = name_display
self.doc = doc
self.database = database
self.numbering = numbering
#variables
self.showmarriage = showmarriage
self.showdivorce = showdivorce
def __date_place(self,event):
if event:
date = get_date(event)
place_handle = event.get_place_handle()
if place_handle:
place = self.database.get_place_from_handle(
place_handle).get_title()
return("%(event_abbrev)s %(date)s - %(place)s" % {
'event_abbrev': event.type.get_abbreviation(),
'date' : date,
'place' : place,
})
else:
return("%(event_abbrev)s %(date)s" % {
'event_abbrev': event.type.get_abbreviation(),
'date' : date
})
return ""
def dump_string(self, person, family=None):
string = self.__date_place(
get_birth_or_fallback(self.database, person)
)
tmp = self.__date_place(get_death_or_fallback(self.database, person))
if string and tmp:
string += ", "
string += tmp
if string:
string = " (" + string + ")"
if family and self.showmarriage:
tmp = self.__date_place(get_marriage_or_fallback(self.database,
family))
if tmp:
string += ", " + tmp
if family and self.showdivorce:
tmp = self.__date_place(get_divorce_or_fallback(self.database,
family))
if tmp:
string += ", " + tmp
self.doc.write_text(string)
def print_person(self, level, person):
display_num = self.numbering.number(level)
self.doc.start_paragraph("DR-Level%d" % min(level, 32), display_num)
mark = ReportUtils.get_person_mark(self.database, person)
self.doc.write_text(self._name_display.display(person), mark)
self.dump_string(person)
self.doc.end_paragraph()
return display_num
def print_spouse(self, level, spouse_handle, family_handle):
#Currently print_spouses is the same for all numbering systems.
if spouse_handle:
spouse = self.database.get_person_from_handle(spouse_handle)
mark = ReportUtils.get_person_mark(self.database, spouse)
self.doc.start_paragraph("DR-Spouse%d" % min(level, 32))
name = self._name_display.display(spouse)
self.doc.write_text(_("sp. %(spouse)s") % {'spouse':name}, mark)
self.dump_string(spouse, family_handle)
self.doc.end_paragraph()
else:
self.doc.start_paragraph("DR-Spouse%d" % min(level, 32))
self.doc.write_text(_("sp. %(spouse)s") % {'spouse':'Unknown'})
self.doc.end_paragraph()
def print_reference(self, level, person, display_num):
#Person and their family have already been printed so
#print reference here
if person:
mark = ReportUtils.get_person_mark(self.database, person)
self.doc.start_paragraph("DR-Spouse%d" % min(level, 32))
name = self._name_display.display(person)
self.doc.write_text(_("sp. see %(reference)s : %(spouse)s") %
{'reference':display_num, 'spouse':name}, mark)
self.doc.end_paragraph()
#------------------------------------------------------------------------
#
# RecurseDown
#
#------------------------------------------------------------------------
class RecurseDown():
"""
A simple object to recurse from a person down through their descendants
The arguments are:
max_generations: The max number of generations
database: The database object
objPrint: A Printinfo derived class that prints person
information on the report
"""
def __init__(self, max_generations, database, objPrint, showdups):
self.max_generations = max_generations
self.database = database
self.objPrint = objPrint
self.showdups = showdups
self.person_printed = {}
def recurse(self, level, person, curdepth):
person_handle = person.get_handle()
display_num = self.objPrint.print_person(level, person)
if curdepth is None:
ref_str = display_num
else:
ref_str = curdepth + " " + display_num
if person_handle not in self.person_printed:
self.person_printed[person_handle] = ref_str
for family_handle in person.get_family_handle_list():
family = self.database.get_family_from_handle(family_handle)
spouse_handle = ReportUtils.find_spouse(person, family)
if not self.showdups and spouse_handle in self.person_printed:
# Just print a reference
spouse = self.database.get_person_from_handle(spouse_handle)
self.objPrint.print_reference(level, spouse,
self.person_printed[spouse_handle])
else:
self.objPrint.print_spouse(level, spouse_handle, family)
if spouse_handle:
spouse_num = _("%s sp." % (ref_str))
self.person_printed[spouse_handle] = spouse_num
if level >= self.max_generations:
continue
childlist = family.get_child_ref_list()[:]
for child_ref in childlist:
child = self.database.get_person_from_handle(child_ref.ref)
self.recurse(level+1, child, ref_str)
#------------------------------------------------------------------------
#
# DescendantReport
#
#------------------------------------------------------------------------
class DescendantReport(Report):
def __init__(self, database, options, user):
"""
Create the DescendantReport object that produces the report.
The arguments are:
database - the GRAMPS database instance
options - instance of the Options class for this report
user - a gen.user.User() instance
This report needs the following parameters (class variables)
that come in the options class.
gen - Maximum number of generations to include.
name_format - Preferred format to display names
dups - Whether to include duplicate descendant trees
"""
Report.__init__(self, database, options, user)
menu = options.menu
self.max_generations = menu.get_option_by_name('gen').get_value()
pid = menu.get_option_by_name('pid').get_value()
self.center_person = database.get_person_from_gramps_id(pid)
        if self.center_person is None:
raise ReportError(_("Person %s is not in the Database") % pid )
sort = Sort(self.database)
#Initialize the Printinfo class
self._showdups = menu.get_option_by_name('dups').get_value()
numbering = menu.get_option_by_name('numbering').get_value()
if numbering == "Simple":
obj = PrintSimple(self._showdups)
elif numbering == "de Villiers/Pama":
obj = PrintVilliers()
elif numbering == "Meurgey de Tupigny":
obj = PrintMeurgey()
else:
raise AttributeError("no such numbering: '%s'" % self.numbering)
marrs = menu.get_option_by_name('marrs').get_value()
divs = menu.get_option_by_name('divs').get_value()
# Copy the global NameDisplay so that we don't change application defaults.
self._name_display = copy.deepcopy(global_name_display)
name_format = menu.get_option_by_name("name_format").get_value()
if name_format != 0:
self._name_display.set_default_format(name_format)
self.objPrint = Printinfo(self.doc, database, obj, marrs, divs,
self._name_display)
def write_report(self):
self.doc.start_paragraph("DR-Title")
name = self._name_display.display(self.center_person)
# feature request 2356: avoid genitive form
title = _("Descendants of %s") % name
mark = IndexMark(title, INDEX_TYPE_TOC, 1)
self.doc.write_text(title, mark)
self.doc.end_paragraph()
recurse = RecurseDown(self.max_generations, self.database,
self.objPrint, self._showdups)
recurse.recurse(1, self.center_person, None)
#------------------------------------------------------------------------
#
# DescendantOptions
#
#------------------------------------------------------------------------
class DescendantOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
category_name = _("Report Options")
pid = PersonOption(_("Center Person"))
pid.set_help(_("The center person for the report"))
menu.add_option(category_name, "pid", pid)
# We must figure out the value of the first option before we can
# create the EnumeratedListOption
fmt_list = global_name_display.get_name_format()
name_format = EnumeratedListOption(_("Name format"), 0)
name_format.add_item(0, _("Default"))
for num, name, fmt_str, act in fmt_list:
name_format.add_item(num, name)
name_format.set_help(_("Select the format to display names"))
menu.add_option(category_name, "name_format", name_format)
numbering = EnumeratedListOption(_("Numbering system"), "Simple")
numbering.set_items([
("Simple", _("Simple numbering")),
("de Villiers/Pama", _("de Villiers/Pama numbering")),
("Meurgey de Tupigny", _("Meurgey de Tupigny numbering"))])
numbering.set_help(_("The numbering system to be used"))
menu.add_option(category_name, "numbering", numbering)
gen = NumberOption(_("Generations"), 10, 1, 15)
gen.set_help(_("The number of generations to include in the report"))
menu.add_option(category_name, "gen", gen)
marrs = BooleanOption(_('Show marriage info'), False)
marrs.set_help(_("Whether to show marriage information in the report."))
menu.add_option(category_name, "marrs", marrs)
divs = BooleanOption(_('Show divorce info'), False)
divs.set_help(_("Whether to show divorce information in the report."))
menu.add_option(category_name, "divs", divs)
dups = BooleanOption(_('Show duplicate trees'), True)
dups.set_help(_("Whether to show duplicate family trees in the report."))
menu.add_option(category_name, "dups", dups)
def make_default_style(self, default_style):
"""Make the default output style for the Descendant Report."""
f = FontStyle()
f.set_size(12)
f.set_type_face(FONT_SANS_SERIF)
f.set_bold(1)
p = ParagraphStyle()
p.set_header_level(1)
p.set_bottom_border(1)
p.set_top_margin(ReportUtils.pt2cm(3))
p.set_bottom_margin(ReportUtils.pt2cm(3))
p.set_font(f)
p.set_alignment(PARA_ALIGN_CENTER)
p.set_description(_("The style used for the title of the page."))
default_style.add_paragraph_style("DR-Title", p)
f = FontStyle()
f.set_size(10)
for i in range(1, 33):
p = ParagraphStyle()
p.set_font(f)
p.set_top_margin(ReportUtils.pt2cm(f.get_size()*0.125))
p.set_bottom_margin(ReportUtils.pt2cm(f.get_size()*0.125))
p.set_first_indent(-0.5)
p.set_left_margin(min(10.0, float(i-0.5)))
p.set_description(_("The style used for the "
"level %d display.") % i)
default_style.add_paragraph_style("DR-Level%d" % min(i, 32), p)
p = ParagraphStyle()
p.set_font(f)
p.set_top_margin(ReportUtils.pt2cm(f.get_size()*0.125))
p.set_bottom_margin(ReportUtils.pt2cm(f.get_size()*0.125))
p.set_left_margin(min(10.0, float(i-0.5)))
p.set_description(_("The style used for the "
"spouse level %d display.") % i)
default_style.add_paragraph_style("DR-Spouse%d" % min(i, 32), p)
|
Forage/Gramps
|
gramps/plugins/textreport/descendreport.py
|
Python
|
gpl-2.0
| 17,656
|
[
"Brian"
] |
a998f6fb6a30b94f3a5a23f60cbb76c7a2a3a5ef46c4226f9b67b8c020d39a7b
|
#!/usr/bin/python
from contacts.models.interactions import Message, PhoneCall, Note
from contacts.models.visit import Visit, ScheduledPhoneCall
from contacts.models.misc import Connection, Practitioner, EventLog
from contacts.models.contact import Contact, StatusChange, get_contact_model
|
I-TECH-UW/mwachx
|
contacts/models/__init__.py
|
Python
|
apache-2.0
| 290
|
[
"VisIt"
] |
a8fa88d584d6af429fb4458920921f7fb93b7367825e220b965cfca510b5f622
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 18 14:57:46 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
from statsmodels.genmod.generalized_estimating_equations import GEE, GEEMargins
from statsmodels.genmod.families import Gaussian, Binomial, Poisson
from statsmodels.genmod.dependence_structures import (Exchangeable,
Independence, GlobalOddsRatio, Autoregressive, Nested)
from statsmodels.genmod.tests import gee_gaussian_simulation_check as gees
da,va = gees.gen_gendat_ar0(0.6)()
ga = Gaussian()
lhs = np.array([[0., 1, 1, 0, 0],])
rhs = np.r_[0.,]
example = []
if 'constraint' in example:
md = GEE(da.endog, da.exog, da.group, da.time, ga, va,
constraint=(lhs, rhs))
mdf = md.fit()
print(mdf.summary())
md2 = GEE(da.endog, da.exog, da.group, da.time, ga, va,
constraint=None)
mdf2 = md2.fit()
print('\n\n')
print(mdf2.summary())
mdf2.use_t = False
mdf2.df_resid = np.diff(mdf2.model.exog.shape)
tt2 = mdf2.t_test(np.eye(len(mdf2.params)))
# need master to get wald_test
#print mdf2.wald_test(np.eye(len(mdf2.params))[1:])
'''
>>> mdf2.predict(da.exog.mean(0))
Traceback (most recent call last):
File "<pyshell#11>", line 1, in <module>
mdf2.predict(da.exog.mean(0))
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\base\model.py", line 963, in predict
return self.model.predict(self.params, exog, *args, **kwargs)
File "e:\josef\eclipsegworkspace\statsmodels-git\statsmodels-all-new2_py27\statsmodels\statsmodels\genmod\generalized_estimating_equations.py", line 621, in predict
fitted = offset + np.dot(exog, params)
TypeError: unsupported operand type(s) for +: 'NoneType' and 'numpy.float64'
'''
mdf2.predict(da.exog.mean(0), offset=0)
# -0.10867809062890971
marg2 = GEEMargins(mdf2, ())
print(marg2.summary())
mdf_nc = md2.fit(cov_type='naive')
mdf_bc = md2.fit(cov_type='bias_reduced')
mdf_nc.use_t = False
mdf_nc.df_resid = np.diff(mdf2.model.exog.shape)
mdf_bc.use_t = False
mdf_bc.df_resid = np.diff(mdf2.model.exog.shape)
tt_nc = mdf_nc.t_test(np.eye(len(mdf2.params)))
tt_bc = mdf_bc.t_test(np.eye(len(mdf2.params)))
print('\nttest robust')
print(tt2)
print('\nttest naive')
print(tt_nc)
print('\nttest bias corrected')
print(tt_bc)
print("\nbse after fit option ")
bse = np.column_stack((mdf2.bse, mdf2.bse, mdf_nc.bse, mdf_bc.bse))
print(bse)
print("\nimplemented `standard_errors`")
bse2 = np.column_stack((mdf2.bse, mdf2.standard_errors(),
mdf2.standard_errors(covariance_type='naive'),
mdf2.standard_errors(covariance_type='bias_reduced')))
print(bse2)
print("bse and `standard_errors` agree:", np.allclose(bse, bse2))
print("\nimplied standard errors in t_test")
bse1 = np.column_stack((mdf2.bse, tt2.sd, tt_nc.sd, tt_bc.sd))
print(bse1)
print("t_test uses correct cov_params:", np.allclose(bse1, bse2))
|
rgommers/statsmodels
|
statsmodels/examples/try_gee.py
|
Python
|
bsd-3-clause
| 2,980
|
[
"Gaussian"
] |
c3e24679684c5acce6f61dfabfa1b08bd5af9c93dc9dd7ec0328c9fa8c0a7cc5
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
try:
question = orm.SurveyQuestion.objects.get(label='Wait Time')
except orm.SurveyQuestion.DoesNotExist:
pass
else:
question.last_negative = True
question.save()
def backwards(self, orm):
"Write your backwards methods here."
try:
question = orm.SurveyQuestion.objects.get(label='Wait Time')
except orm.SurveyQuestion.DoesNotExist:
pass
else:
question.last_negative = False
question.save()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'satisfied': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_completed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'survey_started': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'survey.displaylabel': {
'Meta': {'object_name': 'DisplayLabel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'display_label': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.DisplayLabel']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'last_negative': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('visit', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'display_on_dashboard': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.TextField', [], {}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey']
symmetrical = True
|
myvoice-nigeria/myvoice
|
myvoice/survey/migrations/0012_wait_time.py
|
Python
|
bsd-2-clause
| 13,101
|
[
"VisIt"
] |
df96056d032dd8a1019bf55c1d38337934da9302a28ee00baa557d9b4df7063d
|
# -*- coding: utf-8 -*-
import os
import swc2vtk
import swcfilelist
# input_dir = '/home/nebula/git/LAL-VPCmapping/converted_swc'
input_dir = './swc'
output_dir = '/home/nebula/work/paraview/standardbrain20170131/'
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
filelist = swcfilelist.filelist_lalvpc
for i, filename in enumerate(filelist):
vtkgen = swc2vtk.VtkGenerator()
vtkgen.add_swc(os.path.join(input_dir, filename + '.swc'))
vtkgen.write_vtk(os.path.join(output_dir, filename + '.vtk'), radius_data=True, normalize_diam=True)
for i, filename in enumerate(filelist):
vtkgen = swc2vtk.VtkGenerator()
vtkgen.add_swc(os.path.join(input_dir, filename + '.swc'), inv_x=True, shift_x=1024.0)
vtkgen.write_vtk(os.path.join(output_dir, filename + '_flip.vtk'), radius_data=True, normalize_diam=True)
|
DaisukeMiyamoto/swc2vtk
|
examples/convert_allswc.py
|
Python
|
apache-2.0
| 840
|
[
"ParaView",
"VTK"
] |
0fc5086f4908a25a01873eb6478f5f4ebcd845d2b3c42c6d4bde32843e06d422
|
from unittest import TestCase
import sys
from os import path
from PyContact.core.ContactAnalyzer import *
from PyContact.exampleData.datafiles import DCD, PSF, TPR, XTC
import MDAnalysis as mda
import multiprocessing
multiprocessing.log_to_stderr()
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
class PsfDcdReadingTest(TestCase):
def setUp(self):
self.dcdfile = DCD
self.psffile = PSF
self.tpr = TPR
self.xtc = XTC
def tearDown(self):
del self.dcdfile
del self.psffile
def test_import_dcd_file(self):
mda.Universe(self.psffile, self.dcdfile)
def test_import_xtc_file(self):
# seg_0_Protein_chain_U
# seg_1_Protein_chain_R
mda.Universe(self.tpr, self.xtc)
def test_singleCore_analysis(self):
analyzer = Analyzer(self.psffile, self.dcdfile, 5.0, 2.5, 120, "segid RN11", "segid UBQ")
analyzer.runFrameScan(1)
self.assertEqual(len(analyzer.contactResults), 50)
map1 = [0, 0, 1, 1, 0]
map2 = [0, 0, 1, 1, 0]
analyzer.runContactAnalysis(map1, map2, 1)
self.assertEqual(len(analyzer.finalAccumulatedContacts), 148)
hbond_sum = 0
for c in analyzer.finalAccumulatedContacts:
hbond_sum += c.hbond_percentage()
self.assertEqual(hbond_sum, 676.0)
def test_selfInteraction_analysis(self):
analyzer = Analyzer(self.psffile, self.dcdfile, 5.0, 2.5, 120, "segid RN11", "self")
analyzer.runFrameScan(1)
self.assertEqual(len(analyzer.contactResults), 50)
map1 = [0, 0, 1, 1, 0]
map2 = [0, 0, 1, 1, 0]
analyzer.runContactAnalysis(map1, map2, 1)
def test_zero_atomselection(self):
analyzer = Analyzer(self.psffile, self.dcdfile, 5.0, 2.5, 120, "segid A", "resid 100")
try:
analyzer.runFrameScan(1)
except:
print("Error in atom selection caught.")
try:
analyzer.runFrameScan(4)
except:
print("Error in atom selection (multicore) caught.")
def test_selfInteraction_analysis_parallel(self):
analyzer = Analyzer(self.psffile, self.dcdfile, 5.0, 2.5, 120, "segid RN11", "self")
analyzer.runFrameScan(2)
self.assertEqual(len(analyzer.contactResults), 50)
map1 = [0, 0, 1, 1, 0]
map2 = [0, 0, 1, 1, 0]
analyzer.runContactAnalysis(map1, map2, 1)
def test_multiCore_analysis(self):
analyzer = Analyzer(self.psffile, self.dcdfile, 5.0, 2.5, 120, "segid RN11", "segid UBQ")
analyzer.runFrameScan(2)
self.assertEqual(len(analyzer.contactResults), 50)
map1 = [0, 0, 1, 1, 0]
map2 = [0, 0, 1, 1, 0]
analyzer.runContactAnalysis(map1, map2, 2)
self.assertEqual(len(analyzer.finalAccumulatedContacts), 148)
hbond_sum = 0
for c in analyzer.finalAccumulatedContacts:
hbond_sum += c.hbond_percentage()
self.assertEqual(hbond_sum, 676.0)
def test_around_selection_patch(self):
univ = mda.Universe(self.psffile, self.dcdfile)
aroundText = "segid UBQ and around 5 segid RN11"
sel = univ.select_atoms(aroundText)
self.assertEqual(len(sel), 261)
|
maxscheurer/pycontact
|
tests/test_basic.py
|
Python
|
gpl-3.0
| 3,256
|
[
"MDAnalysis"
] |
4393e0f8aaab06584467666e8d3f5b5be33fd6cd67aa187b764907e0b5bfbefe
|
""" Entry points into the antlr4 parser.
"""
import sys
from antlr4 import FileStream, InputStream, CommonTokenStream
from ..antlr4.PLambdaLexer import PLambdaLexer
from ..antlr4.PLambdaParser import PLambdaParser
from .Visitor import Visitor
def main():
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} <plambda file>')
else:
codelist = parseFromFile(sys.argv[1])
for c in codelist:
print(str(c))
print(repr(c))
return 0
def parseFromFile(filename):
return parseFromStream(FileStream(filename), filename)
def parseFromString(string):
return parseFromStream(InputStream(string), "stdin")
def parseFromStream(stream, source):
lexer = PLambdaLexer(stream)
stream = CommonTokenStream(lexer)
parser = PLambdaParser(stream)
tree = parser.unit()
visitor = Visitor(source)
return visitor.visit(tree)
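#Usage sketch (illustrative; the PLambda source text below is hypothetical and
#may not be valid PLambda syntax):
# from plambda.visitor.Parser import parseFromString
# for sexp in parseFromString('(seq (apply print "hello"))'):
#     print(repr(sexp))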
|
SRI-CSL/PLambda
|
plambda/visitor/Parser.py
|
Python
|
mit
| 897
|
[
"VisIt"
] |
e7ffff993db19b8ce30ee503b81d80729b0bf1dc12bb10537cc8c8529bcf832b
|
# (c) 2013-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import base64
import datetime
import imp
import json
import os
import shlex
import zipfile
import random
import re
from io import BytesIO
from ansible.release import __version__, __author__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins import module_utils_loader
from ansible.plugins.shell.powershell import async_watchdog, async_wrapper, become_wrapper, leaf_exec
# Must import strategy and use write_locks from there
# If we import write_locks directly then we end up binding a
# variable to the object and then it never gets updated.
from ansible.executor import action_write_locks
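# Editor's addition: an illustrative sketch of the binding pitfall described
# above. "from m import x" copies the binding that exists at import time, so a
# later rebinding of m.x is invisible to the importer, while attribute access
# through the module object always sees the current value:
#
#   import types
#   m = types.ModuleType('m')
#   m.x = 1
#   x = m.x            # snapshot, like "from m import x"
#   m.x = 2
#   assert x == 1      # stale copy
#   assert m.x == 2    # lookup through the module sees the update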
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
REPLACER = b"#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_VERSION = b"\"<<ANSIBLE_VERSION>>\""
REPLACER_COMPLEX = b"\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = b"# POWERSHELL_COMMON"
REPLACER_JSONARGS = b"<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_SELINUX = b"<<SELINUX_SPECIAL_FILESYSTEMS>>"
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = u'# -*- coding: utf-8 -*-'
# module_common is relative to module_utils, so fix the path
_MODULE_UTILS_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# ******************************************************************************
ANSIBALLZ_TEMPLATE = u'''%(shebang)s
%(coding)s
ANSIBALLZ_WRAPPER = True # For test-module script to tell this is a ANSIBALLZ_WRAPPER
# This code is part of Ansible, but is an independent component.
# The code in this particular templatable string, and this templatable string
# only, is BSD licensed. Modules which end up using this snippet, which is
# dynamically combined together by Ansible still belong to the author of the
# module, and they may assign their own license to the complete work.
#
# Copyright (c), James Cammarata, 2016
# Copyright (c), Toshio Kuratomi, 2016
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import os.path
import sys
import __main__
# For some distros and python versions we pick up this script in the temporary
# directory. This leads to problems when the ansible module masks a python
# library that another import needs. We have not figured out what about the
# specific distros and python versions causes this to behave differently.
#
# Tested distros:
# Fedora23 with python3.4 Works
# Ubuntu15.10 with python2.7 Works
# Ubuntu15.10 with python3.4 Fails without this
# Ubuntu16.04.1 with python3.5 Fails without this
# To test on another platform:
# * use the copy module (since this shadows the stdlib copy module)
# * Turn off pipelining
# * Make sure that the destination file does not exist
# * ansible ubuntu16-test -m copy -a 'src=/etc/motd dest=/var/tmp/m'
# This will traceback in shutil. Looking at the complete traceback will show
# that shutil is importing copy which finds the ansible module instead of the
# stdlib module
scriptdir = None
try:
scriptdir = os.path.dirname(os.path.abspath(__main__.__file__))
except (AttributeError, OSError):
# Some platforms don't set __file__ when reading from stdin
# OSX raises OSError if using abspath() in a directory we don't have
# permission to read.
pass
if scriptdir is not None:
sys.path = [p for p in sys.path if p != scriptdir]
import base64
import shutil
import zipfile
import tempfile
import subprocess
if sys.version_info < (3,):
bytes = str
PY3 = False
else:
unicode = str
PY3 = True
try:
# Python-2.6+
from io import BytesIO as IOStream
except ImportError:
# Python < 2.6
from StringIO import StringIO as IOStream
ZIPDATA = """%(zipdata)s"""
def invoke_module(module, modlib_path, json_params):
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((modlib_path, pythonpath))
else:
os.environ['PYTHONPATH'] = modlib_path
p = subprocess.Popen([%(interpreter)s, module], env=os.environ, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate(json_params)
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
def debug(command, zipped_mod, json_params):
# The code here normally doesn't run. It's only used for debugging on the
# remote machine.
#
# The subcommands in this function make it easier to debug ansiballz
# modules. Here's the basic steps:
#
# Run ansible with the environment variable: ANSIBLE_KEEP_REMOTE_FILES=1 and -vvv
# to save the module file remotely::
# $ ANSIBLE_KEEP_REMOTE_FILES=1 ansible host1 -m ping -a 'data=october' -vvv
#
# Part of the verbose output will tell you where on the remote machine the
# module was written to::
# [...]
# <host1> SSH: EXEC ssh -C -q -o ControlMaster=auto -o ControlPersist=60s -o KbdInteractiveAuthentication=no -o
# PreferredAuthentications=gssapi-with-mic,gssapi-keyex,hostbased,publickey -o PasswordAuthentication=no -o ConnectTimeout=10 -o
# ControlPath=/home/badger/.ansible/cp/ansible-ssh-%%h-%%p-%%r -tt rhel7 '/bin/sh -c '"'"'LANG=en_US.UTF-8 LC_ALL=en_US.UTF-8
# LC_MESSAGES=en_US.UTF-8 /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping'"'"''
# [...]
#
# Log in to the remote machine and run the module file from the previous
# step with the explode subcommand to extract the module payload into
# source files::
# $ ssh host1
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping explode
# Module expanded into:
# /home/badger/.ansible/tmp/ansible-tmp-1461173408.08-279692652635227/ansible
#
# You can now edit the source files to instrument the code or experiment with
# different parameter values. When you're ready to run the code you've modified
# (instead of the code from the actual zipped module), use the execute subcommand like this::
# $ /usr/bin/python /home/badger/.ansible/tmp/ansible-tmp-1461173013.93-9076457629738/ping execute
# Okay to use __file__ here because we're running from a kept file
basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug_dir')
args_path = os.path.join(basedir, 'args')
script_path = os.path.join(basedir, 'ansible_module_%(ansible_module)s.py')
if command == 'explode':
# transform the ZIPDATA into an exploded directory of code and then
# print the path to the code. This is an easy way for people to look
# at the code on the remote machine for debugging it in that
# environment
z = zipfile.ZipFile(zipped_mod)
for filename in z.namelist():
if filename.startswith('/'):
raise Exception('Something wrong with this module zip file: should not contain absolute paths')
dest_filename = os.path.join(basedir, filename)
if dest_filename.endswith(os.path.sep) and not os.path.exists(dest_filename):
os.makedirs(dest_filename)
else:
directory = os.path.dirname(dest_filename)
if not os.path.exists(directory):
os.makedirs(directory)
f = open(dest_filename, 'wb')
f.write(z.read(filename))
f.close()
# write the args file
f = open(args_path, 'wb')
f.write(json_params)
f.close()
print('Module expanded into:')
print('%%s' %% basedir)
exitcode = 0
elif command == 'execute':
# Execute the exploded code instead of executing the module from the
# embedded ZIPDATA. This allows people to easily run their modified
# code on the remote machine to see how changes will affect it.
# This differs slightly from default Ansible execution of Python modules
# as it passes the arguments to the module via a file instead of stdin.
# Set pythonpath to the debug dir
pythonpath = os.environ.get('PYTHONPATH')
if pythonpath:
os.environ['PYTHONPATH'] = ':'.join((basedir, pythonpath))
else:
os.environ['PYTHONPATH'] = basedir
p = subprocess.Popen([%(interpreter)s, script_path, args_path],
env=os.environ, shell=False, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if not isinstance(stderr, (bytes, unicode)):
stderr = stderr.read()
if not isinstance(stdout, (bytes, unicode)):
stdout = stdout.read()
if PY3:
sys.stderr.buffer.write(stderr)
sys.stdout.buffer.write(stdout)
else:
sys.stderr.write(stderr)
sys.stdout.write(stdout)
return p.returncode
elif command == 'excommunicate':
# This attempts to run the module in-process (by importing a main
# function and then calling it). It is not the way ansible generally
# invokes the module so it won't work in every case. It is here to
# aid certain debuggers which work better when the code doesn't change
# from one process to another but there may be problems that occur
# when using this that are only artifacts of how we're invoking here,
# not actual bugs (as they don't affect the real way that we invoke
# ansible modules)
# stub the args and python path
sys.argv = ['%(ansible_module)s', args_path]
sys.path.insert(0, basedir)
from ansible_module_%(ansible_module)s import main
main()
print('WARNING: Module returned to wrapper instead of exiting')
sys.exit(1)
else:
print('WARNING: Unknown debug command. Doing nothing.')
exitcode = 0
return exitcode
if __name__ == '__main__':
#
# See comments in the debug() method for information on debugging
#
ANSIBALLZ_PARAMS = %(params)s
if PY3:
ANSIBALLZ_PARAMS = ANSIBALLZ_PARAMS.encode('utf-8')
try:
# There's a race condition with the controller removing the
# remote_tmpdir and this module executing under async. So we cannot
# store this in remote_tmpdir (use system tempdir instead)
temp_path = tempfile.mkdtemp(prefix='ansible_')
zipped_mod = os.path.join(temp_path, 'ansible_modlib.zip')
modlib = open(zipped_mod, 'wb')
modlib.write(base64.b64decode(ZIPDATA))
modlib.close()
if len(sys.argv) == 2:
exitcode = debug(sys.argv[1], zipped_mod, ANSIBALLZ_PARAMS)
else:
z = zipfile.ZipFile(zipped_mod, mode='r')
module = os.path.join(temp_path, 'ansible_module_%(ansible_module)s.py')
f = open(module, 'wb')
f.write(z.read('ansible_module_%(ansible_module)s.py'))
f.close()
# When installed via setuptools (including python setup.py install),
# ansible may be installed with an easy-install.pth file. That file
# may load the system-wide install of ansible rather than the one in
# the module. sitecustomize is the only way to override that setting.
z = zipfile.ZipFile(zipped_mod, mode='a')
# py3: zipped_mod will be text, py2: it's bytes. Need bytes at the end
sitecustomize = u'import sys\\nsys.path.insert(0,"%%s")\\n' %% zipped_mod
sitecustomize = sitecustomize.encode('utf-8')
# Use a ZipInfo to work around zipfile limitation on hosts with
# clocks set to a pre-1980 year (for instance, Raspberry Pi)
zinfo = zipfile.ZipInfo()
zinfo.filename = 'sitecustomize.py'
zinfo.date_time = ( %(year)i, %(month)i, %(day)i, %(hour)i, %(minute)i, %(second)i)
z.writestr(zinfo, sitecustomize)
z.close()
exitcode = invoke_module(module, zipped_mod, ANSIBALLZ_PARAMS)
finally:
try:
shutil.rmtree(temp_path)
except OSError:
# tempdir creation probably failed
pass
sys.exit(exitcode)
'''
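# Editor's addition: an illustrative sketch of how the template above is
# rendered. It uses old-style "%" mapping substitution, which is why literal
# percent signs inside the template are doubled ("%%"):
#
#   tmpl = u'%(shebang)s\n# built %(year)i-%(month)i; progress: 100%%\n'
#   print(tmpl % dict(shebang=u'#!/usr/bin/python', year=2017, month=1))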
def _strip_comments(source):
# Strip comments and blank lines from the wrapper
buf = []
for line in source.splitlines():
l = line.strip()
if not l or l.startswith(u'#'):
continue
buf.append(line)
return u'\n'.join(buf)
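# Editor's addition: an illustrative sketch of _strip_comments. Blank lines and
# lines whose first non-space character is '#' are dropped; trailing comments
# survive. Note that the template's first line is the placeholder "%(shebang)s",
# not a literal "#!" line, so the shebang is substituted in *after* stripping:
#
#   src = u"# a comment\n\nx = 1\ny = 2  # kept: not a comment line\n"
#   assert _strip_comments(src) == u"x = 1\ny = 2  # kept: not a comment line"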
if C.DEFAULT_KEEP_REMOTE_FILES:
# Keep comments when KEEP_REMOTE_FILES is set. That way users will see
# the comments with some nice usage instructions
ACTIVE_ANSIBALLZ_TEMPLATE = ANSIBALLZ_TEMPLATE
else:
# ANSIBALLZ_TEMPLATE stripped of comments for smaller over the wire size
ACTIVE_ANSIBALLZ_TEMPLATE = _strip_comments(ANSIBALLZ_TEMPLATE)
class ModuleDepFinder(ast.NodeVisitor):
# Caveats:
# This code currently does not handle:
# * relative imports from py2.6+ from . import urls
IMPORT_PREFIX_SIZE = len('ansible.module_utils.')
def __init__(self, *args, **kwargs):
"""
Walk the ast tree for the python module.
Save submodule[.submoduleN][.identifier] into self.submodules
self.submodules will end up with tuples like:
- ('basic',)
- ('urls', 'fetch_url')
- ('database', 'postgres')
- ('database', 'postgres', 'quote')
        It's up to calling code to determine whether the final element of each
        dotted string is a module name or something else (a function, class, or
        variable name)
"""
super(ModuleDepFinder, self).__init__(*args, **kwargs)
self.submodules = set()
def visit_Import(self, node):
# import ansible.module_utils.MODLIB[.MODLIBn] [as asname]
for alias in (a for a in node.names if a.name.startswith('ansible.module_utils.')):
py_mod = alias.name[self.IMPORT_PREFIX_SIZE:]
py_mod = tuple(py_mod.split('.'))
self.submodules.add(py_mod)
self.generic_visit(node)
def visit_ImportFrom(self, node):
        # Special case: six has unusual import logic
if node.names[0].name == '_six':
self.submodules.add(('_six',))
elif node.module.startswith('ansible.module_utils'):
where_from = node.module[self.IMPORT_PREFIX_SIZE:]
if where_from:
# from ansible.module_utils.MODULE1[.MODULEn] import IDENTIFIER [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [as asname]
# from ansible.module_utils.MODULE1[.MODULEn] import MODULEn+1 [,IDENTIFIER] [as asname]
py_mod = tuple(where_from.split('.'))
for alias in node.names:
self.submodules.add(py_mod + (alias.name,))
else:
# from ansible.module_utils import MODLIB [,MODLIB2] [as asname]
for alias in node.names:
self.submodules.add((alias.name,))
self.generic_visit(node)
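# Editor's addition: an illustrative sketch of ModuleDepFinder on a tiny
# module, yielding the tuples described in its docstring:
#
#   source = ('import ansible.module_utils.basic\n'
#             'from ansible.module_utils.urls import fetch_url\n')
#   finder = ModuleDepFinder()
#   finder.visit(ast.parse(source))
#   assert finder.submodules == set([('basic',), ('urls', 'fetch_url')])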
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % os.path.abspath(path))
fd = open(path, 'rb')
data = fd.read()
fd.close()
return data
def _get_shebang(interpreter, task_vars, args=tuple()):
"""
    Note (not a stellar API):
Returns None instead of always returning a shebang line. Doing it this
way allows the caller to decide to use the shebang it read from the
file rather than trust that we reformatted what they already have
correctly.
"""
interpreter_config = u'ansible_%s_interpreter' % os.path.basename(interpreter).strip()
if interpreter_config not in task_vars:
return (None, interpreter)
interpreter = task_vars[interpreter_config].strip()
shebang = u'#!' + interpreter
if args:
shebang = shebang + u' ' + u' '.join(args)
return (shebang, interpreter)
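# Editor's addition: an illustrative sketch of _get_shebang. With no per-host
# interpreter configured it returns (None, interpreter) so the caller can keep
# the shebang already present in the module file:
#
#   assert _get_shebang(u'/usr/bin/python', {}) == (None, u'/usr/bin/python')
#   shebang, interp = _get_shebang(
#       u'/usr/bin/python', {u'ansible_python_interpreter': u'/opt/py/bin/python'})
#   assert shebang == u'#!/opt/py/bin/python' and interp == u'/opt/py/bin/python'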
def recursive_finder(name, data, py_module_names, py_module_cache, zf):
"""
    Using ModuleDepFinder, make sure we have all of the module_utils files that
    the module and its module_utils files need.
"""
# Parse the module and find the imports of ansible.module_utils
tree = ast.parse(data)
finder = ModuleDepFinder()
finder.visit(tree)
#
    # Determine which of the imports we've found are modules (vs class, function,
    # variable names) for packages
#
normalized_modules = set()
# Loop through the imports that we've found to normalize them
# Exclude paths that match with paths we've already processed
# (Have to exclude them a second time once the paths are processed)
module_utils_paths = [p for p in module_utils_loader._get_paths(subdirs=False) if os.path.isdir(p)]
module_utils_paths.append(_MODULE_UTILS_PATH)
for py_module_name in finder.submodules.difference(py_module_names):
module_info = None
if py_module_name[0] == 'six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('six', module_utils_paths)
py_module_name = ('six',)
idx = 0
elif py_module_name[0] == '_six':
# Special case the python six library because it messes up the
# import process in an incompatible way
module_info = imp.find_module('_six', [os.path.join(p, 'six') for p in module_utils_paths])
py_module_name = ('six', '_six')
idx = 0
else:
# Check whether either the last or the second to last identifier is
# a module name
for idx in (1, 2):
if len(py_module_name) < idx:
break
try:
module_info = imp.find_module(py_module_name[-idx],
[os.path.join(p, *py_module_name[:-idx]) for p in module_utils_paths])
break
except ImportError:
continue
# Could not find the module. Construct a helpful error message.
if module_info is None:
msg = ['Could not find imported module support code for %s. Looked for' % (name,)]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
# Found a byte compiled file rather than source. We cannot send byte
# compiled over the wire as the python version might be different.
# imp.find_module seems to prefer to return source packages so we just
# error out if imp.find_module returns byte compiled files (This is
# fragile as it depends on undocumented imp.find_module behaviour)
if module_info[2][2] not in (imp.PY_SOURCE, imp.PKG_DIRECTORY):
msg = ['Could not find python source for imported module support code for %s. Looked for' % name]
if idx == 2:
msg.append('either %s.py or %s.py' % (py_module_name[-1], py_module_name[-2]))
else:
msg.append(py_module_name[-1])
raise AnsibleError(' '.join(msg))
if idx == 2:
# We've determined that the last portion was an identifier and
# thus, not part of the module name
py_module_name = py_module_name[:-1]
# If not already processed then we've got work to do
if py_module_name not in py_module_names:
# If not in the cache, then read the file into the cache
# We already have a file handle for the module open so it makes
# sense to read it now
if py_module_name not in py_module_cache:
if module_info[2][2] == imp.PKG_DIRECTORY:
# Read the __init__.py instead of the module file as this is
# a python package
normalized_name = py_module_name + ('__init__',)
normalized_path = os.path.join(os.path.join(module_info[1], '__init__.py'))
normalized_data = _slurp(normalized_path)
else:
normalized_name = py_module_name
normalized_path = module_info[1]
normalized_data = module_info[0].read()
module_info[0].close()
py_module_cache[normalized_name] = (normalized_data, normalized_path)
normalized_modules.add(normalized_name)
# Make sure that all the packages that this module is a part of
# are also added
for i in range(1, len(py_module_name)):
py_pkg_name = py_module_name[:-i] + ('__init__',)
if py_pkg_name not in py_module_names:
pkg_dir_info = imp.find_module(py_pkg_name[-1],
[os.path.join(p, *py_pkg_name[:-1]) for p in module_utils_paths])
normalized_modules.add(py_pkg_name)
py_module_cache[py_pkg_name] = (_slurp(pkg_dir_info[1]), pkg_dir_info[1])
#
# iterate through all of the ansible.module_utils* imports that we haven't
# already checked for new imports
#
# set of modules that we haven't added to the zipfile
unprocessed_py_module_names = normalized_modules.difference(py_module_names)
for py_module_name in unprocessed_py_module_names:
py_module_path = os.path.join(*py_module_name)
py_module_file_name = '%s.py' % py_module_path
zf.writestr(os.path.join("ansible/module_utils",
py_module_file_name), py_module_cache[py_module_name][0])
display.vvv("Using module_utils file %s" % py_module_cache[py_module_name][1])
# Add the names of the files we're scheduling to examine in the loop to
# py_module_names so that we don't re-examine them in the next pass
# through recursive_finder()
py_module_names.update(unprocessed_py_module_names)
for py_module_file in unprocessed_py_module_names:
recursive_finder(py_module_file, py_module_cache[py_module_file][0], py_module_names, py_module_cache, zf)
# Save memory; the file won't have to be read again for this ansible module.
del py_module_cache[py_module_file]
def _is_binary(b_module_data):
textchars = bytearray(set([7, 8, 9, 10, 12, 13, 27]) | set(range(0x20, 0x100)) - set([0x7f]))
start = b_module_data[:1024]
return bool(start.translate(None, textchars))
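# Editor's addition: an illustrative sketch of the _is_binary heuristic. The
# first kilobyte counts as text when it contains only common control bytes and
# printable ranges; anything left over after deleting those (e.g. a NUL or DEL
# byte) marks the data as binary:
#
#   assert not _is_binary(b'#!/usr/bin/python\nprint("hi")\n')
#   assert _is_binary(b'\x7fELF\x02\x01\x01\x00')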
def _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression):
"""
Given the source of the module, convert it to a Jinja2 template to insert
module code and return whether it's a new or old style module.
"""
module_substyle = module_style = 'old'
# module_style is something important to calling code (ActionBase). It
# determines how arguments are formatted (json vs k=v) and whether
# a separate arguments file needs to be sent over the wire.
# module_substyle is extra information that's useful internally. It tells
# us what we have to look to substitute in the module files and whether
# we're using module replacer or ansiballz to format the module itself.
if _is_binary(b_module_data):
module_substyle = module_style = 'binary'
elif REPLACER in b_module_data:
        # Do REPLACER before from ansible.module_utils because we need to make
        # sure we substitute "from ansible.module_utils.basic import *" for REPLACER
module_style = 'new'
module_substyle = 'python'
b_module_data = b_module_data.replace(REPLACER, b'from ansible.module_utils.basic import *')
elif b'from ansible.module_utils.' in b_module_data:
module_style = 'new'
module_substyle = 'python'
elif REPLACER_WINDOWS in b_module_data or b'#Requires -Module' in b_module_data:
module_style = 'new'
module_substyle = 'powershell'
elif REPLACER_JSONARGS in b_module_data:
module_style = 'new'
module_substyle = 'jsonargs'
elif b'WANT_JSON' in b_module_data:
module_substyle = module_style = 'non_native_want_json'
shebang = None
# Neither old-style, non_native_want_json nor binary modules should be modified
# except for the shebang line (Done by modify_module)
if module_style in ('old', 'non_native_want_json', 'binary'):
return b_module_data, module_style, shebang
output = BytesIO()
py_module_names = set()
if module_substyle == 'python':
params = dict(ANSIBLE_MODULE_ARGS=module_args,)
python_repred_params = repr(json.dumps(params))
try:
compression_method = getattr(zipfile, module_compression)
except AttributeError:
display.warning(u'Bad module compression string specified: %s. Using ZIP_STORED (no compression)' % module_compression)
compression_method = zipfile.ZIP_STORED
lookup_path = os.path.join(C.DEFAULT_LOCAL_TMP, 'ansiballz_cache')
cached_module_filename = os.path.join(lookup_path, "%s-%s" % (module_name, module_compression))
zipdata = None
# Optimization -- don't lock if the module has already been cached
if os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: using cached module: %s' % cached_module_filename)
zipdata = open(cached_module_filename, 'rb').read()
else:
if module_name in action_write_locks.action_write_locks:
display.debug('ANSIBALLZ: Using lock for %s' % module_name)
lock = action_write_locks.action_write_locks[module_name]
else:
# If the action plugin directly invokes the module (instead of
# going through a strategy) then we don't have a cross-process
# Lock specifically for this module. Use the "unexpected
# module" lock instead
display.debug('ANSIBALLZ: Using generic lock for %s' % module_name)
lock = action_write_locks.action_write_locks[None]
display.debug('ANSIBALLZ: Acquiring lock')
with lock:
display.debug('ANSIBALLZ: Lock acquired: %s' % id(lock))
# Check that no other process has created this while we were
# waiting for the lock
if not os.path.exists(cached_module_filename):
display.debug('ANSIBALLZ: Creating module')
# Create the module zip data
zipoutput = BytesIO()
zf = zipfile.ZipFile(zipoutput, mode='w', compression=compression_method)
# Note: If we need to import from release.py first,
# remember to catch all exceptions: https://github.com/ansible/ansible/issues/16523
zf.writestr('ansible/__init__.py',
b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n__version__="' +
to_bytes(__version__) + b'"\n__author__="' +
to_bytes(__author__) + b'"\n')
zf.writestr('ansible/module_utils/__init__.py', b'from pkgutil import extend_path\n__path__=extend_path(__path__,__name__)\n')
zf.writestr('ansible_module_%s.py' % module_name, b_module_data)
py_module_cache = { ('__init__',): (b'', '[builtin]') }
recursive_finder(module_name, b_module_data, py_module_names, py_module_cache, zf)
zf.close()
zipdata = base64.b64encode(zipoutput.getvalue())
# Write the assembled module to a temp file (write to temp
# so that no one looking for the file reads a partially
# written file)
if not os.path.exists(lookup_path):
# Note -- if we have a global function to setup, that would
# be a better place to run this
os.makedirs(lookup_path)
display.debug('ANSIBALLZ: Writing module')
with open(cached_module_filename + '-part', 'wb') as f:
f.write(zipdata)
# Rename the file into its final position in the cache so
# future users of this module can read it off the
# filesystem instead of constructing from scratch.
display.debug('ANSIBALLZ: Renaming module')
os.rename(cached_module_filename + '-part', cached_module_filename)
display.debug('ANSIBALLZ: Done creating module')
if zipdata is None:
display.debug('ANSIBALLZ: Reading module after lock')
# Another process wrote the file while we were waiting for
# the write lock. Go ahead and read the data from disk
# instead of re-creating it.
try:
zipdata = open(cached_module_filename, 'rb').read()
except IOError:
raise AnsibleError('A different worker process failed to create module file.'
' Look at traceback for that process for debugging information.')
zipdata = to_text(zipdata, errors='surrogate_or_strict')
shebang, interpreter = _get_shebang(u'/usr/bin/python', task_vars)
if shebang is None:
shebang = u'#!/usr/bin/python'
# Enclose the parts of the interpreter in quotes because we're
# substituting it into the template as a Python string
interpreter_parts = interpreter.split(u' ')
interpreter = u"'{0}'".format(u"', '".join(interpreter_parts))
now=datetime.datetime.utcnow()
output.write(to_bytes(ACTIVE_ANSIBALLZ_TEMPLATE % dict(
zipdata=zipdata,
ansible_module=module_name,
params=python_repred_params,
shebang=shebang,
interpreter=interpreter,
coding=ENCODING_STRING,
year=now.year,
month=now.month,
day=now.day,
hour=now.hour,
minute=now.minute,
second=now.second,
)))
b_module_data = output.getvalue()
elif module_substyle == 'powershell':
# Powershell/winrm don't actually make use of shebang so we can
# safely set this here. If we let the fallback code handle this
# it can fail in the presence of the UTF8 BOM commonly added by
# Windows text editors
shebang = u'#!powershell'
# powershell wrapper build is currently handled in build_windows_module_payload, called in action
# _configure_module after this function returns.
elif module_substyle == 'jsonargs':
module_args_json = to_bytes(json.dumps(module_args))
# these strings could be included in a third-party module but
# officially they were included in the 'basic' snippet for new-style
# python modules (which has been replaced with something else in
# ansiballz) If we remove them from jsonargs-style module replacer
# then we can remove them everywhere.
python_repred_args = to_bytes(repr(module_args_json))
b_module_data = b_module_data.replace(REPLACER_VERSION, to_bytes(repr(__version__)))
b_module_data = b_module_data.replace(REPLACER_COMPLEX, python_repred_args)
b_module_data = b_module_data.replace(REPLACER_SELINUX, to_bytes(','.join(C.DEFAULT_SELINUX_SPECIAL_FS)))
# The main event -- substitute the JSON args string into the module
b_module_data = b_module_data.replace(REPLACER_JSONARGS, module_args_json)
facility = b'syslog.' + to_bytes(task_vars.get('ansible_syslog_facility', C.DEFAULT_SYSLOG_FACILITY), errors='surrogate_or_strict')
b_module_data = b_module_data.replace(b'syslog.LOG_USER', facility)
return (b_module_data, module_style, shebang)
def modify_module(module_name, module_path, module_args, task_vars=dict(), module_compression='ZIP_STORED'):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
For powershell, this code effectively no-ops, as the exec wrapper requires access to a number of
properties not available here.
"""
with open(module_path, 'rb') as f:
# read in the module source
b_module_data = f.read()
(b_module_data, module_style, shebang) = _find_module_utils(module_name, b_module_data, module_path, module_args, task_vars, module_compression)
if module_style == 'binary':
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
elif shebang is None:
lines = b_module_data.split(b"\n", 1)
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter = to_bytes(interpreter)
new_shebang = to_bytes(_get_shebang(interpreter, task_vars, args[1:])[0], errors='surrogate_or_strict', nonstring='passthru')
if new_shebang:
lines[0] = shebang = new_shebang
if os.path.basename(interpreter).startswith(b'python'):
lines.insert(1, to_bytes(ENCODING_STRING))
else:
# No shebang, assume a binary module?
pass
b_module_data = b"\n".join(lines)
else:
shebang = to_bytes(shebang, errors='surrogate_or_strict')
return (b_module_data, module_style, to_text(shebang, nonstring='passthru'))
def build_windows_module_payload(module_name, module_path, b_module_data, module_args, task_vars, task, play_context, environment):
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
module_args=module_args,
actions=['exec'],
environment=environment
)
exec_manifest['exec'] = to_text(base64.b64encode(to_bytes(leaf_exec)))
if task.async > 0:
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["async_watchdog"] = base64.b64encode(to_bytes(async_watchdog))
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_wrapper"] = base64.b64encode(to_bytes(async_wrapper))
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = task.async
if play_context.become and play_context.become_method=='runas':
exec_manifest["actions"].insert(0, 'become')
exec_manifest["become_user"] = play_context.become_user
exec_manifest["become_password"] = play_context.become_pass
exec_manifest["become"] = base64.b64encode(to_bytes(become_wrapper))
lines = b_module_data.split(b'\n')
module_names = set()
requires_module_list = re.compile(r'(?i)^#requires \-module(?:s?) (.+)')
for line in lines:
# legacy, equivalent to #Requires -Modules powershell
if REPLACER_WINDOWS in line:
module_names.add(b'powershell')
# TODO: add #Requires checks for Ansible.ModuleUtils.X
for m in module_names:
m = to_text(m)
exec_manifest["powershell_modules"][m] = to_text(
base64.b64encode(
to_bytes(
_slurp(os.path.join(_MODULE_UTILS_PATH, m + ".ps1"))
)
)
)
# FUTURE: smuggle this back as a dict instead of serializing here; the connection plugin may need to modify it
b_module_data = json.dumps(exec_manifest)
return b_module_data
|
kevclarx/ansible
|
lib/ansible/executor/module_common.py
|
Python
|
gpl-3.0
| 39,218
|
[
"VisIt"
] |
d42f08a5e13516de1c699d756723b413a7ebbf646c9a9585e300b5968b606972
|
"""
This module contains code for generating toy examples
"""
#############################################################################################
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, see http://www.gnu.org/licenses #
# or write to the Free Software Foundation, Inc., 51 Franklin Street, #
# Fifth Floor, Boston, MA 02110-1301 USA #
# #
#############################################################################################
import sys
import parse
import random
from numpy.random import randn
from numpy import ones, concatenate, array, transpose
from esvm.mldata import DatasetFileFASTA, init_datasetfile
from esvm.mldata_arff import DatasetFileARFF
class MotifDataDef(object):
motif = ''
numseq = 0
seqlenmin = 0
seqlenmax = 0
posstart = 0
posend = 0
mutrate = 0.0
################################################################################
# data generation functions
def motifgen(motif, numseq, seqlenmin, seqlenmax, posstart, posend, mutrate):
"""Generate sequences with a particular motif at a particular location.
Also allow a possible mutation rate of the motif.
"""
metadata = 'motifgen(%s,%d,%d,%d,%d,%d,%1.2f)' % (motif, numseq, seqlenmin, seqlenmax, posstart, posend, mutrate)
acgt='acgt'
seqlist = []
    for i in xrange(0, numseq):
        chars = []
        seqlen = random.randint(seqlenmin, seqlenmax)
        for l in xrange(0, seqlen):
            chars.append(acgt[random.randint(0, 3)])
        # plant the motif base by base, skipping mutated or out-of-range positions
        pos = random.randint(posstart, posend)
        for l in xrange(0, len(motif)):
            if (random.random() >= mutrate) and (pos + l < seqlen) and (pos + l >= 0):
                chars[pos + l] = motif[l]
        seqlist.append(''.join(chars).upper())
return metadata, seqlist
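# Editor's addition: a hedged usage sketch of motifgen, not part of the
# original module. Plant the motif "tata" in ten random sequences of length
# 20-30, starting between positions 5 and 10, with a 5% per-base mutation rate:
#
#   metadata, seqs = motifgen('tata', 10, 20, 30, 5, 10, 0.05)
#   print metadata            # motifgen(tata,10,20,30,5,10,0.05)
#   print seqs[0]             # random ACGT with TATA embedded near the start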
def cloudgen(numpoint, numfeat, fracpos, width):
"""Generate two Gaussian point clouds, centered around one and minus one."""
numpos = int(round(fracpos*numpoint))
numneg = numpoint - numpos
metadata = 'cloudgen(%d,%d,%d,%3.2f)' % (numpos, numneg, numfeat, width)
datapos = ones((numfeat, numpos)) + width*randn(numfeat, numpos)
dataneg = -ones((numfeat, numneg)) + width*randn(numfeat, numneg)
pointcloud = concatenate((datapos,dataneg),axis=1)
labels = concatenate((ones(numpos),-ones(numneg)))
return metadata, pointcloud, labels
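# Editor's addition: a hedged usage sketch of cloudgen. Two Gaussian clouds of
# 50 points each in two dimensions, labelled +1/-1, centred at +1 and -1 with
# unit-width noise:
#
#   metadata, X, y = cloudgen(100, 2, 0.5, 1.0)
#   print X.shape             # (2, 100): features x points
#   print y[0], y[-1]         # 1.0 -1.0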
################################################################################
# ARFF functions
def arffwrite_real(filename, numpoint, numfeat, fracpos=0.5, width=1.0):
"""Write an ARFF file containing a vectorial dataset"""
#import arff
(metadata, pointcloud, labels) = cloudgen(numpoint, numfeat, fracpos, width)
fp = init_datasetfile(filename,'vec')
fp.comment = metadata
fp.dataname = 'pointcloud'
fp.writelines(pointcloud,labels)
def arffwrite_sequence(filename,p, n):
"""Write an ARFF file containing a sequence dataset"""
#import arff
(metadatapos,seqlistpos) = motifgen(p.motif, p.numseq, p.seqlenmin, p.seqlenmax, p.posstart, p.posend, p.mutrate)
(metadataneg,seqlistneg) = motifgen(n.motif, n.numseq, n.seqlenmin, n.seqlenmax, n.posstart, n.posend, n.mutrate)
labels = concatenate((ones(len(seqlistpos)),-ones(len(seqlistneg))))
seqlist = seqlistpos + seqlistneg
fp = init_datasetfile(filename,'seq')
fp.comment = metadatapos+' '+metadataneg
fp.dataname = 'motif'
fp.writelines(seqlist,labels)
def arffread(kernelname,datafilename):
"""Decide based on kernelname whether to read a sequence or vectorial file"""
    if kernelname in ('gauss', 'linear', 'poly', None):
        fp = init_datasetfile(datafilename, 'vec')
    elif kernelname in ('wd', 'localalign', 'localimprove', 'spec', 'cumspec'):
        fp = init_datasetfile(datafilename, 'seq')
    elif kernelname in ('spec2', 'cumspec2'):
        fp = init_datasetfile(datafilename, 'mseq')
    else:
        # previously fell through with fp unbound (NameError); fail loudly instead
        raise ValueError('Unknown kernel %s in arffread' % kernelname)
    return fp.readlines()
################################################################################
# fasta functions
def fastawrite_sequence(filename,p):
"""Write a FASTA file containing a sequence dataset"""
    #import arff
(metadata,seqlist) = motifgen(p.motif, p.numseq, p.seqlenmin, p.seqlenmax, p.posstart, p.posend, p.mutrate)
labels = ones(len(seqlist))
fp = init_datasetfile(filename,'seq')
fp.writelines(seqlist,labels)
def fastaread(fnamepos,fnameneg=None):
"""Read two fasta files, the first positive, the second negative"""
fpos = init_datasetfile(fnamepos,'seq')
(fa1,lab1) = fpos.readlines()
if fnameneg is not None:
fneg = init_datasetfile(fnameneg,'seq')
(fa2,lab2) = fneg.readlines()
print 'positive: %d, negative %d' % (len(fa1),len(fa2))
all_labels = concatenate((ones(len(fa1)),-ones(len(fa2))))
all_examples = fa1 + fa2
else:
all_examples = fa1
all_labels = ones(len(fa1))
return all_examples, all_labels
|
AzamYahya/shogun
|
applications/easysvm/esvm/datafuncs.py
|
Python
|
gpl-3.0
| 6,419
|
[
"Gaussian"
] |
c0b76b743f502425f84ad106350909f07a06eed10ae2732425f673d900ab0fa2
|
from ..base import TestBase
from ..mocking.community import MockCommunity
from ...community import _DEFAULT_ADDRESSES
from ...peerdiscovery.discovery import RandomWalk
class TestRandomWalk(TestBase):
def setUp(self):
super(TestRandomWalk, self).setUp()
while _DEFAULT_ADDRESSES:
_DEFAULT_ADDRESSES.pop()
node_count = 3
self.overlays = [MockCommunity() for _ in range(node_count)]
self.strategies = [RandomWalk(self.overlays[i], reset_chance=0) for i in range(node_count)]
async def tearDown(self):
for overlay in self.overlays:
await overlay.unload()
return await super(TestRandomWalk, self).tearDown()
async def test_take_step(self):
"""
Check if we will walk to a random other node.
Unit test network layout:
NODE0 <-> NODE1 <-> NODE2
"""
self.overlays[0].network.add_verified_peer(self.overlays[1].my_peer)
self.overlays[0].network.discover_services(self.overlays[1].my_peer, [self.overlays[1].master_peer.mid, ])
self.overlays[1].network.add_verified_peer(self.overlays[2].my_peer)
self.overlays[1].network.discover_services(self.overlays[2].my_peer, [self.overlays[2].master_peer.mid, ])
# We expect NODE1 to introduce NODE0 to NODE2
self.strategies[0].take_step()
await self.deliver_messages()
self.strategies[0].take_step()
await self.deliver_messages()
self.assertEqual(len(self.overlays[0].network.verified_peers), 2)
async def test_take_step_into(self):
"""
Check if we will walk to an introduced node.
Unit test network layout:
NODE0 <-> (NODE1) <-> NODE2
NODE0 -> NODE2
"""
self.overlays[0].network.add_verified_peer(self.overlays[1].my_peer)
self.overlays[0].network.discover_address(self.overlays[1].my_peer, self.overlays[2].endpoint.wan_address,
MockCommunity.master_peer.mid)
self.overlays[0].network.discover_services(self.overlays[1].my_peer, [self.overlays[1].master_peer.mid, ])
# We expect NODE0 to visit NODE2
self.strategies[0].take_step()
await self.deliver_messages()
self.strategies[0].take_step()
await self.deliver_messages()
self.assertEqual(len(self.overlays[0].network.verified_peers), 2)
async def test_fail_step_into(self):
"""
Check if we drop an unreachable introduced node.
Unit test network layout:
NODE0 <-> (NODE1) <-> NODE2
NODE0 -> NODE2
"""
self.overlays[0].network.add_verified_peer(self.overlays[1].my_peer)
self.overlays[0].network.discover_address(self.overlays[1].my_peer, self.overlays[2].endpoint.wan_address,
MockCommunity.master_peer.mid)
self.overlays[0].network.discover_services(self.overlays[1].my_peer, [self.overlays[1].master_peer.mid, ])
# Fail immediately when unreachable
self.strategies[0].node_timeout = 0.0
# NODE0 attempts to reach NODE2
self.overlays[2].endpoint.close()
self.strategies[0].take_step()
# At this point the unreachable node should not have been removed yet
self.assertEqual(len(self.overlays[0].network.get_walkable_addresses()), 1)
await self.deliver_messages()
# We expect NODE0 to clean unreachable NODE2
self.strategies[0].take_step()
self.assertEqual(len(self.overlays[0].network.get_walkable_addresses()), 0)
self.assertEqual(len(self.overlays[0].network.verified_peers), 1)
async def test_retry_step_into(self):
"""
Check if we don't drop an introduced node immediately.
Unit test network layout:
NODE0 <-> (NODE1) <-> NODE2
NODE0 -> NODE2
"""
self.overlays[0].network.add_verified_peer(self.overlays[1].my_peer)
self.overlays[0].network.discover_address(self.overlays[1].my_peer, self.overlays[2].endpoint.wan_address,
MockCommunity.master_peer.mid)
self.overlays[0].network.discover_services(self.overlays[1].my_peer, [self.overlays[1].master_peer.mid, ])
self.strategies[0].node_timeout = 100000.0
# NODE0 attempts to reach NODE2
self.overlays[2].endpoint.close()
self.strategies[0].take_step()
await self.deliver_messages()
# NODE2 is still within its timeout and should not have been cleaned yet
self.strategies[0].take_step()
self.assertEqual(len(self.overlays[0].network.get_walkable_addresses()), 1)
self.assertEqual(len(self.overlays[0].network.verified_peers), 1)
|
qstokkink/py-ipv8
|
ipv8/test/peerdiscovery/test_random_discovery.py
|
Python
|
lgpl-3.0
| 4,806
|
[
"VisIt"
] |
2f9a19ce9715f052024efbea40390dbc3914d89d7aacfc71b2b4c89ef2684b30
|
# -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS Instructor Dashboard.
"""
from nose.plugins.attrib import attr
from ..helpers import UniqueCourseTest, get_modal_alert, EventsTestMixin
from ...pages.common.logout import LogoutPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.instructor_dashboard import InstructorDashboardPage
from ...fixtures.course import CourseFixture
class BaseInstructorDashboardTest(EventsTestMixin, UniqueCourseTest):
"""
Mixin class for testing the instructor dashboard.
"""
def log_in_as_instructor(self):
"""
Logs in as an instructor and returns the id.
"""
username = "test_instructor_{uuid}".format(uuid=self.unique_id[0:6])
auto_auth_page = AutoAuthPage(self.browser, username=username, course_id=self.course_id, staff=True)
return username, auto_auth_page.visit().get_user_id()
def visit_instructor_dashboard(self):
"""
Visits the instructor dashboard.
"""
instructor_dashboard_page = InstructorDashboardPage(self.browser, self.course_id)
instructor_dashboard_page.visit()
return instructor_dashboard_page
@attr('shard_5')
class AutoEnrollmentWithCSVTest(BaseInstructorDashboardTest):
"""
End-to-end tests for Auto-Registration and enrollment functionality via CSV file.
"""
def setUp(self):
super(AutoEnrollmentWithCSVTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.auto_enroll_section = instructor_dashboard_page.select_membership().select_auto_enroll_section()
def test_browse_and_upload_buttons_are_visible(self):
"""
Scenario: On the Membership tab of the Instructor Dashboard, Auto-Enroll Browse and Upload buttons are visible.
Given that I am on the Membership tab on the Instructor Dashboard
Then I see the 'REGISTER/ENROLL STUDENTS' section on the page with the 'Browse' and 'Upload' buttons
"""
self.assertTrue(self.auto_enroll_section.is_file_attachment_browse_button_visible())
self.assertTrue(self.auto_enroll_section.is_upload_button_visible())
def test_clicking_file_upload_button_without_file_shows_error(self):
"""
Scenario: Clicking on the upload button without specifying a CSV file results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I click the Upload Button without specifying a CSV file
Then I should be shown an Error Notification
And The Notification message should read 'File is not attached.'
"""
self.auto_enroll_section.click_upload_file_button()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "File is not attached.")
def test_uploading_correct_csv_file_results_in_success(self):
"""
Scenario: Uploading a CSV with correct data results in Success.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with correct data and click the Upload Button
Then I should be shown a Success Notification.
"""
self.auto_enroll_section.upload_correct_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_SUCCESS))
def test_uploading_csv_file_with_bad_data_results_in_errors_and_warnings(self):
"""
Scenario: Uploading a CSV with incorrect data results in error and warnings.
Given that I am on the Membership tab on the Instructor Dashboard
When I select a csv file with incorrect data and click the Upload Button
Then I should be shown an Error Notification
And a corresponding Error Message.
And I should be shown a Warning Notification
And a corresponding Warning Message.
"""
self.auto_enroll_section.upload_csv_file_with_errors_warnings()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Data in row #2 must have exactly four columns: email, username, full name, and country")
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_WARNING))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_WARNING), "ename (d@a.com): (An account with email d@a.com exists but the provided username ename is different. Enrolling anyway with d@a.com.)")
def test_uploading_non_csv_file_results_in_error(self):
"""
Scenario: Uploading an image file for auto-enrollment results in error.
Given that I am on the Membership tab on the Instructor Dashboard
When I select an image file (a non-csv file) and click the Upload Button
Then I should be shown an Error Notification
        And The Notification message should read 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.'
"""
self.auto_enroll_section.upload_non_csv_file()
self.assertTrue(self.auto_enroll_section.is_notification_displayed(section_type=self.auto_enroll_section.NOTIFICATION_ERROR))
self.assertEqual(self.auto_enroll_section.first_notification_message(section_type=self.auto_enroll_section.NOTIFICATION_ERROR), "Make sure that the file you upload is in CSV format with no extraneous characters or rows.")
@attr('shard_5')
class EntranceExamGradeTest(BaseInstructorDashboardTest):
"""
Tests for Entrance exam specific student grading tasks.
"""
def setUp(self):
super(EntranceExamGradeTest, self).setUp()
self.course_info.update({"settings": {"entrance_exam_enabled": "true"}})
CourseFixture(**self.course_info).install()
self.student_identifier = "johndoe_saee@example.com"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username="johndoe_saee",
email=self.student_identifier,
course_id=self.course_id,
staff=False
).visit()
LogoutPage(self.browser).visit()
# go to the student admin page on the instructor dashboard
self.log_in_as_instructor()
self.student_admin_section = self.visit_instructor_dashboard().select_student_admin()
def test_input_text_and_buttons_are_visible(self):
"""
Scenario: On the Student admin tab of the Instructor Dashboard, Student Email input box,
Reset Student Attempt, Rescore Student Submission, Delete Student State for entrance exam
and Show Background Task History for Student buttons are visible
Given that I am on the Student Admin tab on the Instructor Dashboard
Then I see Student Email input box, Reset Student Attempt, Rescore Student Submission,
Delete Student State for entrance exam and Show Background Task History for Student buttons
"""
self.assertTrue(self.student_admin_section.is_student_email_input_visible())
self.assertTrue(self.student_admin_section.is_reset_attempts_button_visible())
self.assertTrue(self.student_admin_section.is_rescore_submission_button_visible())
self.assertTrue(self.student_admin_section.is_delete_student_state_button_visible())
self.assertTrue(self.student_admin_section.is_background_task_history_button_visible())
def test_clicking_reset_student_attempts_button_without_email_shows_error(self):
"""
        Scenario: Clicking on the Reset Student Attempts button without entering a student email
        address or username results in an error.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Reset Student Attempts Button under Entrance Exam Grade
Adjustment without enter an email address
Then I should be shown an Error Notification
And The Notification message should read 'Please enter a student email address or username.'
"""
self.student_admin_section.click_reset_attempts_button()
self.assertEqual(
'Please enter a student email address or username.',
self.student_admin_section.top_notification.text[0]
)
def test_clicking_reset_student_attempts_button_with_success(self):
"""
Scenario: Clicking on the Reset Student Attempts button with valid student email
address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Reset Student Attempts Button under Entrance Exam Grade
Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_reset_attempts_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_reset_student_attempts_button_with_error(self):
"""
        Scenario: Clicking on the Reset Student Attempts button with the email address or
        username of a non-existent student should result in an error message.
        Given that I am on the Student Admin tab on the Instructor Dashboard
        When I click the Reset Student Attempts Button under Entrance Exam Grade
        Adjustment after entering a non-existent student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_reset_attempts_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_rescore_submission_button_with_success(self):
"""
Scenario: Clicking on the Rescore Student Submission button with valid student email
address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Rescore Student Submission Button under Entrance Exam Grade
Adjustment after entering a valid student email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_rescore_submissions_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_rescore_submission_button_with_error(self):
"""
        Scenario: Clicking on the Rescore Student Submission button with the email address or
        username of a non-existent student should result in an error message.
        Given that I am on the Student Admin tab on the Instructor Dashboard
        When I click the Rescore Student Submission Button under Entrance Exam Grade
        Adjustment after entering a non-existent student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_rescore_submissions_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_skip_entrance_exam_button_with_success(self):
"""
Scenario: Clicking on the Let Student Skip Entrance Exam button with
valid student email address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Let Student Skip Entrance Exam Button under
Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_skip_entrance_exam_button()
#first we have window.confirm
alert = get_modal_alert(self.student_admin_section.browser)
alert.accept()
# then we have alert confirming action
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_skip_entrance_exam_button_with_error(self):
"""
        Scenario: Clicking on the Let Student Skip Entrance Exam button with the
        email address or username of a non-existent student should result in an error message.
        Given that I am on the Student Admin tab on the Instructor Dashboard
        When I click the Let Student Skip Entrance Exam Button under
        Entrance Exam Grade Adjustment after entering a non-existent
        student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_skip_entrance_exam_button()
#first we have window.confirm
alert = get_modal_alert(self.student_admin_section.browser)
alert.accept()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_delete_student_attempts_button_with_success(self):
"""
Scenario: Clicking on the Delete Student State for entrance exam button
with valid student email address or username should result in success prompt.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Delete Student State for entrance exam Button
under Entrance Exam Grade Adjustment after entering a valid student
email address or username
Then I should be shown an alert with success message
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_delete_student_state_button()
alert = get_modal_alert(self.student_admin_section.browser)
alert.dismiss()
def test_clicking_delete_student_attempts_button_with_error(self):
"""
        Scenario: Clicking on the Delete Student State for entrance exam button
        with the email address or username of a non-existent student should result
        in an error message.
        Given that I am on the Student Admin tab on the Instructor Dashboard
        When I click the Delete Student State for entrance exam Button
        under Entrance Exam Grade Adjustment after entering a non-existent
        student email address or username
Then I should be shown an error message
"""
self.student_admin_section.set_student_email('non_existing@example.com')
self.student_admin_section.click_delete_student_state_button()
self.student_admin_section.wait_for_ajax()
self.assertGreater(len(self.student_admin_section.top_notification.text[0]), 0)
def test_clicking_task_history_button_with_success(self):
"""
Scenario: Clicking on the Show Background Task History for Student
with valid student email address or username should result in table of tasks.
Given that I am on the Student Admin tab on the Instructor Dashboard
When I click the Show Background Task History for Student Button
under Entrance Exam Grade Adjustment after entering a valid student
email address or username
        Then I should be shown a table listing all background tasks
"""
self.student_admin_section.set_student_email(self.student_identifier)
self.student_admin_section.click_task_history_button()
self.assertTrue(self.student_admin_section.is_background_task_history_table_visible())
class DataDownloadsTest(BaseInstructorDashboardTest):
"""
Bok Choy tests for the "Data Downloads" tab.
"""
def setUp(self):
super(DataDownloadsTest, self).setUp()
self.course_fixture = CourseFixture(**self.course_info).install()
self.instructor_username, self.instructor_id = self.log_in_as_instructor()
instructor_dashboard_page = self.visit_instructor_dashboard()
self.data_download_section = instructor_dashboard_page.select_data_download()
def verify_report_requested_event(self, report_type):
"""
Verifies that the correct event is emitted when a report is requested.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.requested', 'report_type': report_type}
)
def verify_report_downloaded_event(self, report_url):
"""
Verifies that the correct event is emitted when a report is downloaded.
"""
self.assert_matching_events_were_emitted(
event_filter={'name': u'edx.instructor.report.downloaded', 'report_url': report_url}
)
def verify_report_download(self, report_name):
"""
Verifies that a report can be downloaded and an event fired.
"""
download_links = self.data_download_section.report_download_links
self.assertEquals(len(download_links), 1)
download_links[0].click()
expected_url = download_links.attrs('href')[0]
self.assertIn(report_name, expected_url)
self.verify_report_downloaded_event(expected_url)
def test_student_profiles_report_download(self):
"""
Scenario: Verify that an instructor can download a student profiles report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Download profile information as a CSV" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"student_profile_info"
self.data_download_section.generate_student_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"grade_report"
self.data_download_section.generate_grade_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
def test_problem_grade_report_download(self):
"""
Scenario: Verify that an instructor can download a problem grade report
Given that I am an instructor
And I visit the instructor dashboard's "Data Downloads" tab
And I click on the "Generate Problem Grade Report" button
Then a report should be generated
And a report requested event should be emitted
When I click on the report
Then a report downloaded event should be emitted
"""
report_name = u"problem_grade_report"
self.data_download_section.generate_problem_report_button.click()
self.data_download_section.wait_for_available_report()
self.verify_report_requested_event(report_name)
self.verify_report_download(report_name)
|
shubhdev/openedx
|
common/test/acceptance/tests/lms/test_lms_instructor_dashboard.py
|
Python
|
agpl-3.0
| 20,765
|
[
"VisIt"
] |
346454e146d8ace70e61b8e28346cf9f9d19f94aab93fd448a7a6f7cba493c99
|
# -*- coding: utf-8 -*-
#
# This file is part of PyGaze - the open-source toolbox for eye tracking
#
# PyGazeAnalyser is a Python module for easily analysing eye-tracking data
# Copyright (C) 2014 Edwin S. Dalmaijer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
# Gaze Plotter
#
# Produces different kinds of plots that are generally used in eye movement
# research, e.g. heatmaps, scanpaths, and fixation locations as overlays of
# images.
#
# version 2 (02 Jul 2014)
__author__ = "Edwin Dalmaijer"
# native
import os
# external
import numpy
import matplotlib
from matplotlib import pyplot, image
# # # # #
# LOOK
# COLOURS
# all colours are from the Tango colourmap, see:
# http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines#Color_Palette
COLS = { "butter": [ '#fce94f',
'#edd400',
'#c4a000'],
"orange": [ '#fcaf3e',
'#f57900',
'#ce5c00'],
"chocolate": [ '#e9b96e',
'#c17d11',
'#8f5902'],
"chameleon": [ '#8ae234',
'#73d216',
'#4e9a06'],
"skyblue": [ '#729fcf',
'#3465a4',
'#204a87'],
"plum": [ '#ad7fa8',
'#75507b',
'#5c3566'],
"scarletred":[ '#ef2929',
'#cc0000',
'#a40000'],
"aluminium": [ '#eeeeec',
'#d3d7cf',
'#babdb6',
'#888a85',
'#555753',
'#2e3436'],
}
# FONT
FONT = { 'family': 'Ubuntu',
'size': 12}
matplotlib.rc('font', **FONT)
# # # # #
# FUNCTIONS
def draw_fixations(fixations, dispsize, imagefile=None, durationsize=True, durationcolour=True, alpha=0.5, savefilename=None):
"""Draws circles on the fixation locations, optionally on top of an image,
with optional weigthing of the duration for circle size and colour
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
durationsize - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
size; longer duration = bigger (default = True)
durationcolour - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the circle
colour; longer duration = hotter (default = True)
alpha - float between 0 and 1, indicating the transparency of
the plotted fixations, where 0 is completely transparent
and 1 is completely opaque (default = 0.5)
savefilename - full path to the file in which the figure should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
fixations
"""
# FIXATIONS
fix = parse_fixations(fixations)
# IMAGE
fig, ax = draw_display(dispsize, imagefile=imagefile)
# CIRCLES
# duration weights
if durationsize:
siz = 1 * (fix['dur']/30.0)
else:
siz = 1 * numpy.median(fix['dur']/30.0)
if durationcolour:
col = fix['dur']
else:
col = COLS['chameleon'][2]
# draw circles
ax.scatter(fix['x'],fix['y'], s=siz, c=col, marker='o', cmap='jet', alpha=alpha, edgecolors='none')
# FINISH PLOT
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
if savefilename is not None:
fig.savefig(savefilename)
return fig
def draw_heatmap(fixations, dispsize, imagefile=None, durationweight=True, alpha=0.5, savefilename=None):
"""Draws a heatmap of the provided fixations, optionally drawn over an
image, and optionally allocating more weight to fixations with a higher
duration.
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
durationweight - Boolean indicating whether the fixation duration is
to be taken into account as a weight for the heatmap
intensity; longer duration = hotter (default = True)
alpha - float between 0 and 1, indicating the transparency of
the heatmap, where 0 is completely transparent and 1
is completely opaque (default = 0.5)
savefilename - full path to the file in which the heatmap should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
heatmap
"""
# FIXATIONS
fix = parse_fixations(fixations)
# IMAGE
fig, ax = draw_display(dispsize, imagefile=imagefile)
# HEATMAP
# Gaussian kernel: gwh x gwh pixels, with a standard deviation of gwh/6
# so that the kernel spans roughly three SDs on each side of its centre
gwh = 200
gsdwh = gwh/6.0
gaus = gaussian(gwh,gsdwh)
# matrix of zeros, padded by half the kernel size on every edge so that
# kernels centred near the display borders still fit
strt = gwh//2
heatmapsize = dispsize[1] + 2*strt, dispsize[0] + 2*strt
heatmap = numpy.zeros(heatmapsize, dtype=float)
# create heatmap
for i in range(0,len(fix['dur'])):
# get x and y coordinates
#x and y - indexes of heatmap array. must be integers
x = strt + int(fix['x'][i]) - int(gwh/2)
y = strt + int(fix['y'][i]) - int(gwh/2)
# correct Gaussian size if either coordinate falls outside of
# display boundaries
if (not 0 < x < dispsize[0]) or (not 0 < y < dispsize[1]):
hadj = [0, gwh]
vadj = [0, gwh]
if 0 > x:
hadj[0] = abs(x)
x = 0
elif dispsize[0] < x:
hadj[1] = gwh - int(x-dispsize[0])
if 0 > y:
vadj[0] = abs(y)
y = 0
elif dispsize[1] < y:
vadj[1] = gwh - int(y-dispsize[1])
# add adjusted Gaussian to the current heatmap
try:
heatmap[y:y+vadj[1],x:x+hadj[1]] += gaus[vadj[0]:vadj[1],hadj[0]:hadj[1]] * fix['dur'][i]
except Exception:
# fixation was probably outside of display
pass
else:
# add Gaussian to the current heatmap
heatmap[y:y+gwh,x:x+gwh] += gaus * fix['dur'][i]
# resize heatmap
heatmap = heatmap[strt:dispsize[1]+strt,strt:dispsize[0]+strt]
# threshold low values: everything below the mean of the non-zero values
# is set to NaN, so that imshow renders it as transparent
lowbound = numpy.mean(heatmap[heatmap>0])
heatmap[heatmap<lowbound] = numpy.NaN
# draw heatmap on top of image
ax.imshow(heatmap, cmap='jet', alpha=alpha)
# FINISH PLOT
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
if savefilename is not None:
fig.savefig(savefilename)
return fig
def draw_raw(x, y, dispsize, imagefile=None, savefilename=None):
"""Draws the raw x and y data
arguments
x - a list of x coordinates of all samples that are to
be plotted
y - a list of y coordinates of all samples that are to
be plotted
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
savefilename - full path to the file in which the figure should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
raw data points
"""
# image
fig, ax = draw_display(dispsize, imagefile=imagefile)
# plot raw data points
ax.plot(x, y, 'o', color=COLS['aluminium'][0], markeredgecolor=COLS['aluminium'][5])
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
if savefilename is not None:
fig.savefig(savefilename)
return fig
def draw_scanpath(fixations, saccades, dispsize, imagefile=None, alpha=0.5, savefilename=None):
"""Draws a scanpath: a series of arrows between numbered fixations,
optionally drawn over an image
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
saccades - a list of saccade ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Esac']
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
alpha - float between 0 and 1, indicating the transparency of
the fixations and saccades, where 0 is completely transparent
and 1 is completely opaque (default = 0.5)
savefilename - full path to the file in which the figure should be
saved, or None to not save the file (default = None)
returns
fig - a matplotlib.pyplot Figure instance, containing the
scanpath
"""
# image
fig, ax = draw_display(dispsize, imagefile=imagefile)
# FIXATIONS
# parse fixations
fix = parse_fixations(fixations)
# draw fixations
ax.scatter(fix['x'],fix['y'], s=(1 * fix['dur'] / 30.0), c=COLS['chameleon'][2], marker='o', cmap='jet', alpha=alpha, edgecolors='none')
# draw annotations (fixation numbers)
for i in range(len(fixations)):
ax.annotate(str(i+1), (fix['x'][i],fix['y'][i]), color=COLS['aluminium'][5], alpha=1, horizontalalignment='center', verticalalignment='center', multialignment='center')
# SACCADES
if saccades:
# loop through all saccades
for st, et, dur, sx, sy, ex, ey in saccades:
# draw an arrow between every saccade start and ending
ax.arrow(sx, sy, ex-sx, ey-sy, alpha=alpha, fc=COLS['aluminium'][0], ec=COLS['aluminium'][5], fill=True, shape='full', width=10, head_width=20, head_starts_at_zero=False, overhang=0)
# invert the y axis, as (0,0) is top left on a display
ax.invert_yaxis()
# save the figure if a file name was provided
if savefilename is not None:
fig.savefig(savefilename)
return fig
# # # # #
# HELPER FUNCTIONS
def draw_display(dispsize, imagefile=None):
"""Returns a matplotlib.pyplot Figure and its axes, with a size of
dispsize, a black background colour, and optionally with an image drawn
onto it
arguments
dispsize - tuple or list indicating the size of the display,
e.g. (1024,768)
keyword arguments
imagefile - full path to an image file over which the heatmap
is to be laid, or None for no image; NOTE: the image
may be smaller than the display size, the function
assumes that the image was presented at the centre of
the display (default = None)
returns
fig, ax - matplotlib.pyplot Figure and its axes: field of zeros
with a size of dispsize, and an image drawn onto it
if an imagefile was passed
"""
# construct screen (black background)
# infer the colour data type from the image file (if any): matplotlib
# loads PNG images as float arrays in [0,1], other formats as uint8;
# guard the extension check so that imagefile=None does not crash here
data_type = 'uint8'
if imagefile is not None:
_, ext = os.path.splitext(imagefile)
data_type = 'float32' if ext.lower() == '.png' else 'uint8'
screen = numpy.zeros((dispsize[1],dispsize[0],3), dtype=data_type)
# if an image location has been passed, draw the image
if imagefile is not None:
# check if the path to the image exists
if not os.path.isfile(imagefile):
raise Exception("ERROR in draw_display: imagefile not found at '%s'" % imagefile)
# load image
img = image.imread(imagefile)
# flip image over the horizontal axis
# (do not do so on Windows, as the image appears to be loaded with
# the correct side up there; what's up with that? :/)
if not os.name == 'nt':
img = numpy.flipud(img)
# width and height of the image
w, h = len(img[0]), len(img)
# x and y position of the image on the display
x = dispsize[0]//2 - w//2
y = dispsize[1]//2 - h//2
# draw the image on the screen
screen[y:y+h,x:x+w,:] += img
# dots per inch
dpi = 100.0
# determine the figure size in inches
figsize = (dispsize[0]/dpi, dispsize[1]/dpi)
# create a figure
fig = pyplot.figure(figsize=figsize, dpi=dpi, frameon=False)
ax = pyplot.Axes(fig, [0,0,1,1])
ax.set_axis_off()
fig.add_axes(ax)
# plot display
ax.axis([0,dispsize[0],0,dispsize[1]])
ax.imshow(screen)#, origin='upper')
return fig, ax
def gaussian(x, sx, y=None, sy=None):
"""Returns an array of numpy arrays (a matrix) containing values between
1 and 0 in a 2D Gaussian distribution
arguments
x -- width in pixels
sx -- width standard deviation
keyword arguments
y -- height in pixels (default = x)
sy -- height standard deviation (default = sx)
"""
# square Gaussian if only x values are passed
if y is None:
y = x
if sy is None:
sy = sx
# centers
xo = x/2
yo = y/2
# matrix of zeros
M = numpy.zeros([y,x],dtype=float)
# gaussian matrix
for i in range(x):
for j in range(y):
M[j,i] = numpy.exp(-1.0 * (((float(i)-xo)**2/(2*sx*sx)) + ((float(j)-yo)**2/(2*sy*sy)) ) )
return M
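# For example (illustrative, not part of the original module): gaussian(4, 1.0)
# returns a 4x4 matrix whose peak value of 1.0 sits at index [2, 2] (the
# centre xo = yo = 2), decaying symmetrically towards the edges.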
def parse_fixations(fixations):
"""Returns all relevant data from a list of fixation ending events
arguments
fixations - a list of fixation ending events from a single trial,
as produced by edfreader.read_edf, e.g.
edfdata[trialnr]['events']['Efix']
returns
fix - a dict with three keys: 'x', 'y', and 'dur' (each contain
a numpy array) for the x and y coordinates and duration of
each fixation
"""
# empty arrays to contain fixation coordinates
fix = { 'x':numpy.zeros(len(fixations)),
'y':numpy.zeros(len(fixations)),
'dur':numpy.zeros(len(fixations))}
# get all fixation coordinates
for fixnr in range(len( fixations)):
stime, etime, dur, ex, ey = fixations[fixnr]
fix['x'][fixnr] = ex
fix['y'][fixnr] = ey
fix['dur'][fixnr] = dur
return fix
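# A minimal usage sketch (not part of the original module): it fabricates a
# few Efix-style tuples (starttime, endtime, duration, x, y) instead of
# reading a real EDF file, assumes no background image, and writes the
# figures to hypothetical file names in the working directory.
if __name__ == '__main__':
    # three synthetic fixations on a 1024x768 display
    demo_fixations = [
        (0, 250, 250, 312.0, 240.5),
        (300, 720, 420, 500.3, 380.1),
        (800, 1000, 200, 700.8, 120.9),
    ]
    # circles sized and coloured by fixation duration
    draw_fixations(demo_fixations, (1024, 768),
                   savefilename='demo_fixations.png')
    # duration-weighted heatmap of the same fixations
    draw_heatmap(demo_fixations, (1024, 768),
                 savefilename='demo_heatmap.png')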
|
esdalmaijer/PyGazeAnalyser
|
pygazeanalyser/gazeplotter.py
|
Python
|
gpl-3.0
| 14,526
|
[
"Gaussian"
] |
f9caa5bef67b98139fe4f1a65cf1e521db252dab073a50bf6d38fa48d71af884
|
#!/usr/bin/env python
"""
reid14_rotcurve.py
Utilities involving the Universal Rotation Curve (Persic+1996) from
Reid+2014.
Copyright(C) 2017-2020 by
Trey V. Wenger; tvwenger@gmail.com
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
2017-04-12 Trey V. Wenger
2020-02-19 Trey V. Wenger updates for v2.0
"""
import numpy as np
from kd import kd_utils
#
# Reid+2014 rotation curve parameters and uncertainties
#
__a1 = 241. # km/s V(R_opt)
__a1_err = 8.
__a2 = 0.90 # R_opt/ R0
__a2_err = 0.06
__a3 = 1.46 # 1.5*(L/L*)^0.2
__a3_err = 0.16
__R0 = 8.34 # kpc
__R0_err = 0.16
def nominal_params():
"""
Return a dictionary containing the nominal rotation curve
parameters.
Parameters: Nothing
Returns: params
params :: dictionary
params['a1'], etc. : scalar
The nominal rotation curve parameter
"""
params = {
'a1': __a1, 'a2': __a2, 'a3': __a3, 'R0': __R0}
return params
def resample_params(size=None):
"""
Resample the Reid+2014 rotation curve parameters within their
uncertainties assuming Gaussian probabilities.
Parameters:
size :: integer
The number of random samples to generate. If None, generate
only one sample and return a scalar.
Returns: params
params :: dictionary
params['a1'], etc. : scalar or array of scalars
The re-sampled parameters
"""
params = {
'a1': np.random.normal(loc=__a1, scale=__a1_err, size=size),
'a2': np.random.normal(loc=__a2, scale=__a2_err, size=size),
'a3': np.random.normal(loc=__a3, scale=__a3_err, size=size),
'R0': np.random.normal(loc=__R0, scale=__R0_err, size=size)}
return params
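# Example (an illustrative sketch, not part of the original module): the
# returned dictionary can be unpacked straight into the curve functions to
# propagate the parameter uncertainties by Monte Carlo, e.g.
#     pars = resample_params(size=1000)
#     theta0 = calc_theta(np.full(1000, __R0), **pars)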
def calc_theta(R, a1=__a1, a2=__a2, a3=__a3, R0=__R0):
"""
Return circular orbit speed at a given Galactocentric radius.
Parameters:
R :: scalar or array of scalars
Galactocentric radius (kpc)
a1, a2, a3 :: scalars (optional)
Reid+2014 rotation curve parameters
R0 :: scalar (optional)
Solar Galactocentric radius (kpc)
Returns: theta
theta :: scalar or array of scalars
circular orbit speed at R (km/s)
"""
input_scalar = np.isscalar(R)
R = np.atleast_1d(R)
#
# Equations 8, 9, 10, 11a, 11b in Persic+1996
#
x = R/(a2 * R0)
LLstar = (a3/1.5)**5.
beta = 0.72 + 0.44*np.log10(LLstar)
# Disk component Vd^2 / V(R_opt)^2
Vd2 = beta * 1.97 * x**1.22 / (x**2. + 0.78**2.)**1.43
# Halo component Vh^2 / V(R_opt)^2
Vh2 = (1.-beta)*(1.+a3**2.)*x**2./(x**2. + a3**2.)
#
# Catch non-physical case where Vd2 + Vh2 < 0
#
Vtot = Vd2 + Vh2
Vtot[Vtot < 0.] = np.nan
#
# Circular velocity
#
theta = a1 * np.sqrt(Vtot)
if input_scalar:
return theta[0]
return theta
def calc_vlsr(glong, glat, dist, a1=__a1, a2=__a2, a3=__a3, R0=__R0):
"""
Return the LSR velocity at a given Galactic longitude and
line-of-sight distance.
Parameters:
glong, glat :: scalar or array of scalars
Galactic longitude and latitude (deg).
dist :: scalar or array of scalars
line-of-sight distance (kpc).
a1, a2, a3 :: scalars (optional)
Reid+2014 rotation curve parameters
R0 :: scalar (optional)
Solar Galactocentric radius (kpc)
Returns: vlsr
vlsr :: scalar or array of scalars
LSR velocity (km/s).
"""
input_scalar = np.isscalar(glong) and np.isscalar(glat) and np.isscalar(dist)
glong, glat, dist = np.atleast_1d(glong, glat, dist)
#
# Convert distance to Galactocentric radius, catch small Rgal
#
Rgal = kd_utils.calc_Rgal(glong, glat, dist, R0=R0)
Rgal[Rgal < 1.e-6] = 1.e-6
#
# Rotation curve circular velocity
#
theta = calc_theta(
Rgal, a1=a1, a2=a2, a3=a3, R0=R0)
theta0 = calc_theta(R0, a1=a1, a2=a2, a3=a3, R0=R0)
#
# Now take circular velocity and convert to LSR velocity
#
vlsr = R0 * np.sin(np.deg2rad(glong))
vlsr = vlsr * ((theta/Rgal) - (theta0/R0))
if input_scalar:
return vlsr[0]
return vlsr
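# A small, hypothetical smoke test (not part of the original module); the
# radii, direction, and distance below are illustrative only.
if __name__ == '__main__':
    # nominal circular speed at a few Galactocentric radii
    print('theta [km/s]:', calc_theta(np.array([4.0, 8.34, 12.0])))
    # LSR velocity toward glong=30 deg, glat=0 deg at a distance of 3 kpc
    # (uses kd.kd_utils, imported at the top of this module)
    print('vlsr [km/s]: %.1f' % calc_vlsr(30.0, 0.0, 3.0))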
|
tvwenger/kd
|
kd/reid14_rotcurve.py
|
Python
|
gpl-3.0
| 4,782
|
[
"Gaussian"
] |
e344518cc2fb31eaf1e33957c73d6c9ed9bba8942b082f7b4bca94f617ff9fd9
|
import sys
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
return request.param
class TestSeed(object):
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937(4294967295))
assert_equal(s.integers(1000), 324)
def test_array(self):
s = Generator(MT19937(range(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937(np.arange(10)))
assert_equal(s.integers(1000), 465)
s = Generator(MT19937([0]))
assert_equal(s.integers(1000), 479)
s = Generator(MT19937([4294967295]))
assert_equal(s.integers(1000), 324)
def test_seedsequence(self):
s = MT19937(SeedSequence(0))
assert_equal(s.random_raw(1), 2058676884)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, MT19937, -0.5)
assert_raises(ValueError, MT19937, -1)
def test_invalid_array(self):
# seed must be an unsigned integer
assert_raises(TypeError, MT19937, [-0.5])
assert_raises(ValueError, MT19937, [-1])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
def test_noninstantized_bitgen(self):
assert_raises(ValueError, Generator, MT19937)
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.integers(-5, -1) < -1)
x = random.integers(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, random.multinomial, 1, p,
float(1))
def test_invalid_prob(self):
assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
def test_invalid_n(self):
assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
def test_p_non_contiguous(self):
p = np.arange(15.)
p /= np.sum(p[1::3])
pvals = p[1::3]
random = Generator(MT19937(1432985819))
non_contig = random.multinomial(100, pvals=pvals)
random = Generator(MT19937(1432985819))
contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
assert_array_equal(non_contig, contig)
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
self.bit_generator = self.rg.bit_generator
self.state = self.bit_generator.state
self.legacy_state = (self.state['bit_generator'],
self.state['state']['key'],
self.state['state']['pos'])
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.rg.standard_normal(size=3)
self.bit_generator.state = self.state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.rg.standard_normal()
state = self.bit_generator.state
old = self.rg.standard_normal(size=3)
self.bit_generator.state = state
new = self.rg.standard_normal(size=3)
assert_(np.all(old == new))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.rg.negative_binomial(0.5, 0.5)
class TestIntegers(object):
rfunc = random.integers
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self, endpoint):
assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
def test_bounds_checking(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, [0],
endpoint=endpoint, dtype=dt)
def test_bounds_checking_array(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [lbnd] * 2,
[ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
endpoint=endpoint, dtype=dt)
assert_raises(ValueError, self.rfunc, [1] * 2, 0,
endpoint=endpoint, dtype=dt)
def test_rng_zero_and_extremes(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
is_open = not endpoint
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
endpoint=endpoint, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
endpoint=endpoint, dtype=dt), tgt)
assert_equal(self.rfunc([tgt], [tgt + is_open],
size=1000, endpoint=endpoint, dtype=dt),
tgt)
def test_rng_zero_and_extremes_array(self, endpoint):
size = 1000
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
tgt = ubnd - 1
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
tgt = (lbnd + ubnd) // 2
assert_equal(self.rfunc([tgt], [tgt + 1],
size=size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
assert_equal(self.rfunc(
[tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
def test_full_range(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_full_range_array(self, endpoint):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
try:
self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self, endpoint):
# Don't use fixed seed
random = Generator(MT19937())
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
endpoint=endpoint, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
dtype=bool)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_scalar_array_equiv(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
size = 1000
random = Generator(MT19937(1234))
scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
scalar_array = random.integers([lbnd], [ubnd], size=size,
endpoint=endpoint, dtype=dt)
random = Generator(MT19937(1234))
array = random.integers([lbnd] * size, [ubnd] *
size, size=size, endpoint=endpoint, dtype=dt)
assert_array_equal(scalar, scalar_array)
assert_array_equal(scalar, array)
def test_repeatability(self, endpoint):
import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
'int16': '39624ead49ad67e37545744024d2648b',
'int32': '5c4810373f979336c6c0c999996e47a1',
'int64': 'ab126c15edff26f55c50d2b7e37391ac',
'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
'uint16': '39624ead49ad67e37545744024d2648b',
'uint32': '5c4810373f979336c6c0c999996e47a1',
'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
for dt in self.itype[1:]:
random = Generator(MT19937(1234))
# view as little endian for hash
if sys.byteorder == 'little':
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt)
else:
val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
random = Generator(MT19937(1234))
val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
ubnd = 2 if dt in (
np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
random = Generator(MT19937(1234))
val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
dtype=dt)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
dtype=dt)
assert_array_equal(val, val_bc)
random = Generator(MT19937(1234))
val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
endpoint=endpoint, dtype=dt)
assert_array_equal(val, val_bc)
def test_int64_uint64_broadcast_exceptions(self, endpoint):
configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
(-2**63-1, -2**63-1))}
for dtype in configs:
for config in configs[dtype]:
low, high = config
high = high - endpoint
low_a = np.array([[low]*10])
high_a = np.array([high] * 10)
assert_raises(ValueError, random.integers, low, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_a,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
low_o = np.array([[low]*10], dtype=np.object)
high_o = np.array([high] * 10, dtype=np.object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low_o, high_o,
endpoint=endpoint, dtype=dtype)
def test_int64_uint64_corner_case(self, endpoint):
# When stored in Numpy arrays, `lbnd` is cast
# as np.int64, and `ubnd` is cast as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it to be rounded down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
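# Illustration (not part of the original test): under these casting
# rules, np.int64(2**63 - 1) == np.uint64(2**63) evaluates to True,
# because 2**63 - 1 is not representable in np.float64 and rounds
# up to 2.0**63, matching the other operand exactly.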
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
# None of these function calls should
# generate a ValueError now.
actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert not hasattr(sample, 'dtype')
assert_equal(type(sample), dt)
def test_respect_dtype_array(self, endpoint):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
dt = np.bool_ if dt is bool else dt
sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
dtype=dt)
assert_equal(sample.dtype, dt)
def test_zero_size(self, endpoint):
# See gh-7203
for dt in self.itype:
sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
assert sample.shape == (3, 0, 4)
assert sample.dtype == dt
assert self.rfunc(0, -10, 0, endpoint=endpoint,
dtype=dt).shape == (0,)
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
(3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
def test_error_byteorder(self):
other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
with pytest.raises(ValueError):
random.integers(0, 200, size=10, dtype=other_byteord_dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_integers(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2))
desired = np.array([[-80, -56], [41, 37], [-83, -16]])
assert_array_equal(actual, desired)
def test_integers_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
random = Generator(MT19937(self.seed))
actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
assert_array_equal(actual, desired)
def test_integers_closed(self):
random = Generator(MT19937(self.seed))
actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
assert_array_equal(actual, desired)
def test_integers_max_int(self):
# Tests whether integers with closed=True can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
endpoint=True)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.096999199829214, 0.707517457682192],
[0.084364834598269, 0.767731206553125],
[0.665069021359413, 0.715487190596693]])
assert_array_almost_equal(actual, desired, decimal=15)
random = Generator(MT19937(self.seed))
actual = random.random()
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_random_float(self):
random = Generator(MT19937(self.seed))
actual = random.random((3, 2))
desired = np.array([[0.0969992 , 0.70751746],
[0.08436483, 0.76773121],
[0.66506902, 0.71548719]])
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_float_scalar(self):
random = Generator(MT19937(self.seed))
actual = random.random(dtype=np.float32)
desired = 0.0969992
assert_array_almost_equal(actual, desired, decimal=7)
def test_random_unsupported_type(self):
assert_raises(TypeError, random.random, dtype='int32')
def test_choice_uniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4)
desired = np.array([0, 0, 2, 2], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([0, 1, 0, 1], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
desired = np.array([2, 0, 3], dtype=np.int64)
assert_array_equal(actual, desired)
actual = random.choice(4, 4, replace=False, shuffle=False)
desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([0, 2, 3], dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
random = Generator(MT19937(self.seed))
actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['a', 'a', 'c', 'c'])
assert_array_equal(actual, desired)
def test_choice_multidimensional_default_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
desired = np.array([[0, 1], [0, 1], [4, 5]])
assert_array_equal(actual, desired)
def test_choice_multidimensional_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
desired = np.array([[0], [2], [4], [6]])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
# gh-13087
assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(random.choice(2, replace=True)))
assert_(np.isscalar(random.choice(2, replace=False)))
assert_(np.isscalar(random.choice(2, replace=True, p=p)))
assert_(np.isscalar(random.choice(2, replace=False, p=p)))
assert_(np.isscalar(random.choice([1, 2], replace=True)))
assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(random.choice(2, s, replace=True)))
assert_(not np.isscalar(random.choice(2, s, replace=False)))
assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(random.choice(6, s, replace=True).shape, s)
assert_equal(random.choice(6, s, replace=False).shape, s)
assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
assert_equal(random.integers(0, -10, size=0).shape, (0,))
assert_equal(random.integers(10, 10, size=0).shape, (0,))
assert_equal(random.choice(0, size=0).shape, (0,))
assert_equal(random.choice([], size=(0,)).shape, (0,))
assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
assert_raises(ValueError, random.choice, a, p=p)
def test_choice_p_non_contiguous(self):
p = np.ones(10) / 5
p[1::2] = 3.0
random = Generator(MT19937(self.seed))
non_contig = random.choice(5, 3, p=p[::2])
random = Generator(MT19937(self.seed))
contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
assert_array_equal(non_contig, contig)
def test_choice_return_type(self):
# gh 9867
p = np.ones(4) / 4.
actual = random.choice(4, 2)
assert actual.dtype == np.int64
actual = random.choice(4, 2, replace=False)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p)
assert actual.dtype == np.int64
actual = random.choice(4, 2, p=p, replace=False)
assert actual.dtype == np.int64
def test_choice_large_sample(self):
import hashlib
choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
actual = actual.byteswap()
res = hashlib.md5(actual.view(np.int8)).hexdigest()
assert_(choice_hash == res)
def test_bytes(self):
random = Generator(MT19937(self.seed))
actual = random.bytes(10)
desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-11442
lambda x: (np.asarray([(i, i) for i in x],
[("a", int), ("b", int)])
.view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, (1,)),
("b", np.int32, (1,))])]:
random = Generator(MT19937(self.seed))
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
random.shuffle(alist)
actual = alist
desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=1)
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = np.arange(16).reshape((4, 4))
random.shuffle(actual, axis=-1)
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_shuffle_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.shuffle, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.shuffle, arr, 3)
assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
arr = [[1, 2, 3], [4, 5, 6]]
assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
random = Generator(MT19937(self.seed))
alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
actual = random.permutation(alist)
desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
actual = random.permutation(arr_2d)
assert_array_equal(actual, np.atleast_2d(desired).T)
bad_x_str = "abcd"
assert_raises(np.AxisError, random.permutation, bad_x_str)
bad_x_float = 1.2
assert_raises(np.AxisError, random.permutation, bad_x_float)
random = Generator(MT19937(self.seed))
integer_val = 10
desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
actual = random.permutation(integer_val)
assert_array_equal(actual, desired)
def test_permutation_custom_axis(self):
a = np.arange(16).reshape((4, 4))
desired = np.array([[ 0, 3, 1, 2],
[ 4, 7, 5, 6],
[ 8, 11, 9, 10],
[12, 15, 13, 14]])
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=1)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.permutation(a, axis=-1)
assert_array_equal(actual, desired)
def test_permutation_exceptions(self):
random = Generator(MT19937(self.seed))
arr = np.arange(10)
assert_raises(np.AxisError, random.permutation, arr, 1)
arr = np.arange(9).reshape((3, 3))
assert_raises(np.AxisError, random.permutation, arr, 3)
assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
random = Generator(MT19937(self.seed))
actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.083029353267698e-10, 2.449965303168024e-11],
[2.397085162969853e-02, 3.590779671820755e-08],
[2.830254190078299e-04, 1.744709918330393e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[42, 41],
[42, 48],
[44, 50]])
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.binomial(100.123, .456)
desired = 42
assert_array_equal(actual, desired)
def test_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.chisquare(50, size=(3, 2))
desired = np.array([[32.9850547060149, 39.0219480493301],
[56.2006134779419, 57.3474165711485],
[55.4243733880198, 55.4209797925213]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.5439892869558927, 0.45601071304410745],
[0.5588917345860708, 0.4411082654139292 ]],
[[0.5632074165063435, 0.43679258349365657],
[0.54862581112627, 0.45137418887373015]],
[[0.49961831357047226, 0.5003816864295278 ],
[0.52374806183482, 0.47625193816517997]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, bad_alpha)
random = Generator(MT19937(self.seed))
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = random.dirichlet(alpha)
assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, random.dirichlet, alpha)
def test_dirichlet_alpha_non_contiguous(self):
a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
alpha = a[::2]
random = Generator(MT19937(self.seed))
non_contig = random.dirichlet(alpha, size=(3, 2))
random = Generator(MT19937(self.seed))
contig = random.dirichlet(np.ascontiguousarray(alpha),
size=(3, 2))
assert_array_almost_equal(non_contig, contig)
def test_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.exponential(1.1234, size=(3, 2))
desired = np.array([[0.098845481066258, 1.560752510746964],
[0.075730916041636, 1.769098974710777],
[1.488602544592235, 2.49684815275751 ]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(random.exponential(scale=0), 0)
assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
random = Generator(MT19937(self.seed))
actual = random.f(12, 77, size=(3, 2))
desired = np.array([[0.461720027077085, 1.100441958872451],
[1.100337455217484, 0.91421736740018 ],
[0.500811891303113, 0.826802454552058]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.gamma(5, 3, size=(3, 2))
desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
[18.73983605132985, 19.57961681699238],
[18.17897755150825, 18.17653912505234]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
random = Generator(MT19937(self.seed))
actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[ 1, 10],
[ 1, 12],
[ 9, 10]])
assert_array_equal(actual, desired)
def test_geometric_exceptions(self):
assert_raises(ValueError, random.geometric, 1.1)
assert_raises(ValueError, random.geometric, [1.1] * 10)
assert_raises(ValueError, random.geometric, -0.1)
assert_raises(ValueError, random.geometric, [-0.1] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.geometric, np.nan)
assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
random = Generator(MT19937(self.seed))
actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[ 4.688397515056245, -0.289514845417841],
[ 4.981176042584683, -0.633224272589149],
[-0.055915275687488, -0.333962478257953]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(random.gumbel(scale=0), 0)
assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[ 9, 9],
[ 9, 9],
[10, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
random = Generator(MT19937(self.seed))
actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.156353949272393, 1.195863024830054],
[-3.435458081645966, 1.656882398925444],
[ 0.924824032467446, 1.251116432209336]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(random.laplace(scale=0), 0)
assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
random = Generator(MT19937(self.seed))
actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-4.338584631510999, 1.890171436749954],
[-4.64547787337966 , 2.514545562919217],
[ 1.495389489198666, 1.967827627577474]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[ 0.0268252166335, 13.9534486483053],
[ 0.1204014788936, 2.2422077497792],
[ 4.2484199496128, 12.0093343977523]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(random.lognormal(sigma=0), 1)
assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
random = Generator(MT19937(self.seed))
actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[14, 17],
[3, 18],
[5, 1]])
assert_array_equal(actual, desired)
def test_logseries_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.logseries, np.nan)
assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[1, 5, 1, 6, 4, 3],
[4, 2, 6, 2, 4, 2]],
[[5, 3, 2, 6, 3, 1],
[4, 4, 0, 2, 3, 7]],
[[6, 3, 1, 5, 3, 2],
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = random.multivariate_normal(mean, cov, size)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
[ 0.8901349653255224, 8.873825399642492 ]],
[[ 0.7130260107430003, 9.551628690083056 ],
[ 0.7127098726541128, 11.991709234143173 ]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = random.multivariate_normal(mean, cov)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
random.multivariate_normal(mean, cov)
w = sup.record(RuntimeWarning)
assert len(w) == 0
mu = np.zeros(2)
cov = np.eye(2)
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='other')
assert_raises(ValueError, random.multivariate_normal,
np.zeros((2, 1, 1)), cov)
assert_raises(ValueError, random.multivariate_normal,
mu, np.empty((3, 2)))
assert_raises(ValueError, random.multivariate_normal,
mu, np.eye(3))
def test_negative_binomial(self):
random = Generator(MT19937(self.seed))
actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[543, 727],
[775, 760],
[600, 674]])
assert_array_equal(actual, desired)
def test_negative_binomial_exceptions(self):
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.negative_binomial, 100, np.nan)
assert_raises(ValueError, random.negative_binomial, 100,
[np.nan] * 10)
def test_noncentral_chisquare(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[ 1.70561552362133, 15.97378184942111],
[13.71483425173724, 20.17859633310629],
[11.3615477156643 , 3.67891108738029]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
[1.14554372041263e+00, 1.38187755933435e-03],
[1.90659181905387e+00, 1.21772577941822e+00]])
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[0.82947954590419, 1.80139670767078],
[6.58720057417794, 7.00491463609814],
[6.31101879073157, 6.30982307753005]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[0.060310671139 , 0.23866058175939],
[0.86860246709073, 0.2668510459738 ],
[0.23375780078364, 1.88922102885943]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f_nan(self):
random = Generator(MT19937(self.seed))
actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
assert np.isnan(actual)
def test_normal(self):
random = Generator(MT19937(self.seed))
actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[-3.618412914693162, 2.635726692647081],
[-2.116923463013243, 0.807460983059643],
[ 1.446547137248593, 2.485684213886024]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(random.normal(scale=0), 0)
assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
random = Generator(MT19937(self.seed))
actual = random.pareto(a=.123456789, size=(3, 2))
desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
[7.2640150889064703e-01, 3.4650454783825594e+05],
[4.5852344481994740e+04, 6.5851383009539105e+07]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
random = Generator(MT19937(self.seed))
actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[0, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('int64').max
lamneg = -1
assert_raises(ValueError, random.poisson, lamneg)
assert_raises(ValueError, random.poisson, [lamneg] * 10)
assert_raises(ValueError, random.poisson, lambig)
assert_raises(ValueError, random.poisson, [lambig] * 10)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, random.poisson, np.nan)
assert_raises(ValueError, random.poisson, [np.nan] * 10)
def test_power(self):
random = Generator(MT19937(self.seed))
actual = random.power(a=.123456789, size=(3, 2))
desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
[2.482442984543471e-10, 1.527108843266079e-01],
[8.188283434244285e-02, 3.950547209346948e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
[ 4.19850651287094, 17.08718809823704],
[14.7907457708776 , 15.85545333419775]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(random.rayleigh(scale=0), 0)
assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
random = Generator(MT19937(self.seed))
actual = random.standard_cauchy(size=(3, 2))
desired = np.array([[-1.489437778266206, -3.275389641569784],
[ 0.560102864910406, -0.680780916282552],
[-1.314912905226277, 0.295852965660225]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
random = Generator(MT19937(self.seed))
actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.102031839440643, 1.229350298474972],
[0.088137284693098, 1.459859985522667],
[1.093830802293668, 1.256977002164613]])
assert_array_almost_equal(actual, desired, decimal=15)
    def test_standard_exponential_type_error(self):
assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
def test_standard_gamma(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[0.62970724056362, 1.22379851271008],
[3.899412530884 , 4.12479964250139],
[3.74994102464584, 3.74929307690815]])
assert_array_almost_equal(actual, desired, decimal=14)
    def test_standard_gamma_scalar_float(self):
random = Generator(MT19937(self.seed))
actual = random.standard_gamma(3, dtype=np.float32)
desired = 2.9242148399353027
assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
random = Generator(MT19937(self.seed))
        actual = random.standard_gamma(shape=3, size=(3, 2), dtype=np.float32)
desired = np.array([[0.62971, 1.2238 ],
[3.89941, 4.1248 ],
[3.74994, 3.74929]])
assert_array_almost_equal(actual, desired, decimal=5)
    def test_standard_gamma_float_out(self):
actual = np.zeros((3, 2), dtype=np.float32)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, dtype=np.float32)
desired = np.array([[10.14987, 7.87012],
[ 9.46284, 12.56832],
[13.82495, 7.81533]], dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
random = Generator(MT19937(self.seed))
random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gamma_unknown_type(self):
assert_raises(TypeError, random.standard_gamma, 1.,
dtype='int32')
def test_out_size_mismatch(self):
out = np.zeros(10)
assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
out=out)
assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
out=out)
def test_standard_gamma_0(self):
assert_equal(random.standard_gamma(shape=0), 0)
assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
random = Generator(MT19937(self.seed))
actual = random.standard_normal(size=(3, 2))
desired = np.array([[-1.870934851846581, 1.25613495182354 ],
[-1.120190126006621, 0.342002097029821],
[ 0.661545174124296, 1.181113712443012]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_normal_unsupported_type(self):
assert_raises(TypeError, random.standard_normal, dtype=np.int32)
def test_standard_t(self):
random = Generator(MT19937(self.seed))
actual = random.standard_t(df=10, size=(3, 2))
desired = np.array([[-1.484666193042647, 0.30597891831161 ],
[ 1.056684299648085, -0.407312602088507],
[ 0.130704414281157, -2.038053410490321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
random = Generator(MT19937(self.seed))
actual = random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
[ 7.68152445215983, 14.36169131136546],
[13.16105603911429, 13.72341621856971]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
random = Generator(MT19937(self.seed))
actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[2.13306255040998 , 7.816987531021207],
[2.015436610109887, 8.377577533009589],
[7.421792588856135, 7.891185744455209]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, random.uniform, throwing_float,
throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[ 1.107972248690106, 2.841536476232361],
[ 1.832602376042457, 1.945511926976032],
[-0.260147475776542, 2.058047492231698]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
assert_(np.isfinite(r).all())
def test_vonmises_nan(self):
random = Generator(MT19937(self.seed))
r = random.vonmises(mu=0., kappa=np.nan)
assert_(np.isnan(r))
def test_wald(self):
random = Generator(MT19937(self.seed))
actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[0.26871721804551, 3.2233942732115 ],
[2.20328374987066, 2.40958405189353],
[2.07093587449261, 0.73073890064369]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
random = Generator(MT19937(self.seed))
actual = random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.138613914769468, 1.306463419753191],
[0.111623365934763, 1.446570494646721],
[1.257145775276011, 1.914247725027957]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
random = Generator(MT19937(self.seed))
assert_equal(random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
random = Generator(MT19937(self.seed))
actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[ 1, 1],
[ 10, 867],
[354, 2]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
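    # Note: the parameters below are Python lists, so expressions like
    # `low * 3` are list repetition (e.g. [0, 0, 0]), producing a length-3
    # argument that is broadcast against its length-1 counterpart.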
def setup(self):
self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
        actual = random.lognormal(mean, sigma * 3)
        assert_array_almost_equal(actual, desired, decimal=14)
        assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(Generator(MT19937(s)), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
        # the same sequences, generated serially
for s, o in zip(self.seeds, out2):
function(Generator(MT19937(s)), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
low = np.array([0])
for dt in itype:
out = func(low, high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low[0], high, endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
out = func(low, high[0], endpoint=endpoint, dtype=dt)
assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
|
MSeifert04/numpy
|
numpy/random/tests/test_generator_mt19937.py
|
Python
|
bsd-3-clause
| 86,873
|
[
"Gaussian"
] |
550e9386874e28ac40902a8ba49f1d4d7355f78788bfd59b53f29dd48b85ea03
|
# coding=utf-8
# Copyright 2020 The SimCLR Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data preprocessing and augmentation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl import flags
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
CROP_PROPORTION = 0.875 # Standard for ImageNet.
def random_apply(func, p, x):
"""Randomly apply function func to x with probability p."""
return tf.cond(
tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32),
tf.cast(p, tf.float32)),
lambda: func(x),
lambda: x)
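# Hedged usage sketch (not part of the original module): assuming `img` is a
# float image tensor in [0, 1], flip it horizontally half of the time:
#   img = random_apply(tf.image.flip_left_right, p=0.5, x=img)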
def random_brightness(image, max_delta, impl='simclrv2'):
"""A multiplicative vs additive change of brightness."""
if impl == 'simclrv2':
factor = tf.random_uniform(
[], tf.maximum(1.0 - max_delta, 0), 1.0 + max_delta)
image = image * factor
elif impl == 'simclrv1':
image = tf.image.random_brightness(image, max_delta=max_delta)
else:
raise ValueError('Unknown impl {} for random brightness.'.format(impl))
return image
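# For intuition: with max_delta=0.5, 'simclrv2' multiplies the image by a
# factor drawn from [0.5, 1.5] (pure black stays black), whereas 'simclrv1'
# adds a single offset drawn from [-0.5, 0.5] to every pixel (pure black can
# become grey).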
def to_grayscale(image, keep_channels=True):
image = tf.image.rgb_to_grayscale(image)
if keep_channels:
image = tf.tile(image, [1, 1, 3])
return image
def color_jitter(image, strength, random_order=True, impl='simclrv2'):
"""Distorts the color of the image.
Args:
image: The input image tensor.
strength: the floating number for the strength of the color augmentation.
random_order: A bool, specifying whether to randomize the jittering order.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
brightness = 0.8 * strength
contrast = 0.8 * strength
saturation = 0.8 * strength
hue = 0.2 * strength
if random_order:
return color_jitter_rand(
image, brightness, contrast, saturation, hue, impl=impl)
else:
return color_jitter_nonrand(
image, brightness, contrast, saturation, hue, impl=impl)
def color_jitter_nonrand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is fixed).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x, brightness, contrast, saturation, hue):
"""Apply the i-th transformation."""
if brightness != 0 and i == 0:
x = random_brightness(x, max_delta=brightness, impl=impl)
elif contrast != 0 and i == 1:
x = tf.image.random_contrast(
x, lower=1-contrast, upper=1+contrast)
elif saturation != 0 and i == 2:
x = tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
elif hue != 0:
x = tf.image.random_hue(x, max_delta=hue)
return x
for i in range(4):
image = apply_transform(i, image, brightness, contrast, saturation, hue)
image = tf.clip_by_value(image, 0., 1.)
return image
def color_jitter_rand(image,
brightness=0,
contrast=0,
saturation=0,
hue=0,
impl='simclrv2'):
"""Distorts the color of the image (jittering order is random).
Args:
image: The input image tensor.
brightness: A float, specifying the brightness for color jitter.
contrast: A float, specifying the contrast for color jitter.
saturation: A float, specifying the saturation for color jitter.
hue: A float, specifying the hue for color jitter.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
The distorted image tensor.
"""
with tf.name_scope('distort_color'):
def apply_transform(i, x):
"""Apply the i-th transformation."""
def brightness_foo():
if brightness == 0:
return x
else:
return random_brightness(x, max_delta=brightness, impl=impl)
def contrast_foo():
if contrast == 0:
return x
else:
return tf.image.random_contrast(x, lower=1-contrast, upper=1+contrast)
def saturation_foo():
if saturation == 0:
return x
else:
return tf.image.random_saturation(
x, lower=1-saturation, upper=1+saturation)
def hue_foo():
if hue == 0:
return x
else:
return tf.image.random_hue(x, max_delta=hue)
x = tf.cond(tf.less(i, 2),
lambda: tf.cond(tf.less(i, 1), brightness_foo, contrast_foo),
lambda: tf.cond(tf.less(i, 3), saturation_foo, hue_foo))
return x
perm = tf.random_shuffle(tf.range(4))
for i in range(4):
image = apply_transform(perm[i], image)
image = tf.clip_by_value(image, 0., 1.)
return image
def _compute_crop_shape(
image_height, image_width, aspect_ratio, crop_proportion):
"""Compute aspect ratio-preserving shape for central crop.
The resulting shape retains `crop_proportion` along one side and a proportion
less than or equal to `crop_proportion` along the other side.
Args:
image_height: Height of image to be cropped.
image_width: Width of image to be cropped.
aspect_ratio: Desired aspect ratio (width / height) of output.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
crop_height: Height of image after cropping.
crop_width: Width of image after cropping.
"""
image_width_float = tf.cast(image_width, tf.float32)
image_height_float = tf.cast(image_height, tf.float32)
def _requested_aspect_ratio_wider_than_image():
crop_height = tf.cast(tf.rint(
crop_proportion / aspect_ratio * image_width_float), tf.int32)
crop_width = tf.cast(tf.rint(
crop_proportion * image_width_float), tf.int32)
return crop_height, crop_width
def _image_wider_than_requested_aspect_ratio():
crop_height = tf.cast(
tf.rint(crop_proportion * image_height_float), tf.int32)
crop_width = tf.cast(tf.rint(
crop_proportion * aspect_ratio *
image_height_float), tf.int32)
return crop_height, crop_width
return tf.cond(
aspect_ratio > image_width_float / image_height_float,
_requested_aspect_ratio_wider_than_image,
_image_wider_than_requested_aspect_ratio)
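# Worked example: for a 480x640 (H x W) image with aspect_ratio=1.0 and
# crop_proportion=0.875, the image (W/H = 1.33) is wider than the requested
# ratio, so crop_height = rint(0.875 * 480) = 420 and
# crop_width = rint(0.875 * 1.0 * 480) = 420; the full 0.875 proportion is
# retained along the height and a smaller proportion (420/640) along the width.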
def center_crop(image, height, width, crop_proportion):
"""Crops to center of image and rescales to desired size.
Args:
image: Image Tensor to crop.
height: Height of image to be cropped.
width: Width of image to be cropped.
crop_proportion: Proportion of image to retain along the less-cropped side.
Returns:
A `height` x `width` x channels Tensor holding a central crop of `image`.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
crop_height, crop_width = _compute_crop_shape(
image_height, image_width, width / height, crop_proportion)
offset_height = ((image_height - crop_height) + 1) // 2
offset_width = ((image_width - crop_width) + 1) // 2
image = tf.image.crop_to_bounding_box(
image, offset_height, offset_width, crop_height, crop_width)
image = tf.image.resize_bicubic([image], [height, width])[0]
return image
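# Hedged usage sketch: center_crop(image, 224, 224, crop_proportion=0.875)
# approximates the standard ImageNet evaluation transform (keep the central
# 87.5% along the shorter side, then resize to 224x224).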
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: `Tensor` of image data.
bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`
where each coordinate is [0, 1) and the coordinates are arranged
as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding
box supplied.
aspect_ratio_range: An optional list of `float`s. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `float`s. The cropped area of the image
      must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional `str` for name scope.
Returns:
(cropped image `Tensor`, distorted bbox `Tensor`).
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
shape = tf.shape(image)
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
shape,
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
image = tf.image.crop_to_bounding_box(
image, offset_y, offset_x, target_height, target_width)
return image
def crop_and_resize(image, height, width):
"""Make a random crop and resize it to height `height` and width `width`.
Args:
image: Tensor representing the image.
height: Desired image height.
width: Desired image width.
Returns:
A `height` x `width` x channels Tensor holding a random crop of `image`.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
aspect_ratio = width / height
image = distorted_bounding_box_crop(
image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3. / 4 * aspect_ratio, 4. / 3. * aspect_ratio),
area_range=(0.08, 1.0),
max_attempts=100,
scope=None)
return tf.image.resize_bicubic([image], [height, width])[0]
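# Note: the ranges above (crop area between 8% and 100% of the image, aspect
# ratio within 3/4 to 4/3 of the target) match the Inception-style random
# resized crop widely used for ImageNet training.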
def gaussian_blur(image, kernel_size, sigma, padding='SAME'):
"""Blurs the given image with separable convolution.
Args:
image: Tensor of shape [height, width, channels] and dtype float to blur.
    kernel_size: Integer Tensor for the size of the blur kernel. This should
be an odd number. If it is an even number, the actual kernel size will be
size + 1.
sigma: Sigma value for gaussian operator.
padding: Padding to use for the convolution. Typically 'SAME' or 'VALID'.
Returns:
A Tensor representing the blurred image.
"""
radius = tf.to_int32(kernel_size / 2)
kernel_size = radius * 2 + 1
x = tf.to_float(tf.range(-radius, radius + 1))
blur_filter = tf.exp(
-tf.pow(x, 2.0) / (2.0 * tf.pow(tf.to_float(sigma), 2.0)))
blur_filter /= tf.reduce_sum(blur_filter)
# One vertical and one horizontal filter.
blur_v = tf.reshape(blur_filter, [kernel_size, 1, 1, 1])
blur_h = tf.reshape(blur_filter, [1, kernel_size, 1, 1])
num_channels = tf.shape(image)[-1]
blur_h = tf.tile(blur_h, [1, 1, num_channels, 1])
blur_v = tf.tile(blur_v, [1, 1, num_channels, 1])
expand_batch_dim = image.shape.ndims == 3
if expand_batch_dim:
# Tensorflow requires batched input to convolutions, which we can fake with
# an extra dimension.
image = tf.expand_dims(image, axis=0)
blurred = tf.nn.depthwise_conv2d(
image, blur_h, strides=[1, 1, 1, 1], padding=padding)
blurred = tf.nn.depthwise_conv2d(
blurred, blur_v, strides=[1, 1, 1, 1], padding=padding)
if expand_batch_dim:
blurred = tf.squeeze(blurred, axis=0)
return blurred
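# Why separable convolution works here: a 2D Gaussian kernel factors into the
# outer product of two 1D Gaussians, so a 1 x k horizontal pass followed by a
# k x 1 vertical pass costs O(2k) multiplies per pixel instead of O(k^2).
# Hedged NumPy-style sketch of the equivalent dense kernel (names are
# illustrative only):
#   g = np.exp(-np.arange(-radius, radius + 1) ** 2 / (2.0 * sigma ** 2))
#   g /= g.sum()
#   kernel_2d = np.outer(g, g)  # what the two depthwise passes implement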
def random_crop_with_resize(image, height, width, p=1.0):
"""Randomly crop and resize an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: Probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
def _transform(image): # pylint: disable=missing-docstring
image = crop_and_resize(image, height, width)
return image
return random_apply(_transform, p=p, x=image)
def random_color_jitter(image, p=1.0, impl='simclrv2'):
def _transform(image):
color_jitter_t = functools.partial(
color_jitter, strength=FLAGS.color_jitter_strength, impl=impl)
image = random_apply(color_jitter_t, p=0.8, x=image)
return random_apply(to_grayscale, p=0.2, x=image)
return random_apply(_transform, p=p, x=image)
def random_blur(image, height, width, p=1.0):
"""Randomly blur an image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
p: probability of applying this transformation.
Returns:
A preprocessed image `Tensor`.
"""
del width
def _transform(image):
sigma = tf.random.uniform([], 0.1, 2.0, dtype=tf.float32)
return gaussian_blur(
image, kernel_size=height//10, sigma=sigma, padding='SAME')
return random_apply(_transform, p=p, x=image)
def batch_random_blur(images_list, height, width, blur_probability=0.5):
"""Apply efficient batch data transformations.
Args:
images_list: a list of image tensors.
height: the height of image.
width: the width of image.
    blur_probability: the probability of applying the blur operator.
Returns:
Preprocessed feature list.
"""
def generate_selector(p, bsz):
shape = [bsz, 1, 1, 1]
selector = tf.cast(
tf.less(tf.random_uniform(shape, 0, 1, dtype=tf.float32), p),
tf.float32)
return selector
new_images_list = []
for images in images_list:
images_new = random_blur(images, height, width, p=1.)
selector = generate_selector(blur_probability, tf.shape(images)[0])
images = images_new * selector + images * (1 - selector)
images = tf.clip_by_value(images, 0., 1.)
new_images_list.append(images)
return new_images_list
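# The selector trick above blurs every image in the batch unconditionally and
# then mixes blurred and original images with a per-image 0/1 mask of shape
# [bsz, 1, 1, 1] that broadcasts over height, width and channels; this keeps
# the operation batched instead of branching image by image.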
def preprocess_for_train(image,
height,
width,
color_distort=True,
crop=True,
flip=True,
impl='simclrv2'):
"""Preprocesses the given image for training.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
color_distort: Whether to apply the color distortion.
crop: Whether to crop the image.
flip: Whether or not to flip left and right of an image.
impl: 'simclrv1' or 'simclrv2'. Whether to use simclrv1 or simclrv2's
version of random brightness.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = random_crop_with_resize(image, height, width)
if flip:
image = tf.image.random_flip_left_right(image)
if color_distort:
image = random_color_jitter(image, impl=impl)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_for_eval(image, height, width, crop=True):
"""Preprocesses the given image for evaluation.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
crop: Whether or not to (center) crop the test images.
Returns:
A preprocessed image `Tensor`.
"""
if crop:
image = center_crop(image, height, width, crop_proportion=CROP_PROPORTION)
image = tf.reshape(image, [height, width, 3])
image = tf.clip_by_value(image, 0., 1.)
return image
def preprocess_image(image, height, width, is_training=False,
color_distort=True, test_crop=True):
"""Preprocesses the given image.
Args:
image: `Tensor` representing an image of arbitrary size.
height: Height of output image.
width: Width of output image.
is_training: `bool` for whether the preprocessing is for training.
color_distort: whether to apply the color distortion.
test_crop: whether or not to extract a central crop of the images
(as for standard ImageNet evaluation) during the evaluation.
Returns:
A preprocessed image `Tensor` of range [0, 1].
"""
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if is_training:
return preprocess_for_train(image, height, width, color_distort)
else:
return preprocess_for_eval(image, height, width, test_crop)
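# Hedged usage sketch (not part of the original module): inside an input
# pipeline, assuming `image` is a decoded uint8 image tensor, one might write:
#   train_img = preprocess_image(image, 224, 224, is_training=True)
#   eval_img = preprocess_image(image, 224, 224, is_training=False)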
|
google-research/simclr
|
data_util.py
|
Python
|
apache-2.0
| 18,042
|
[
"Gaussian"
] |
798d9c22fa98748b606c073f77d3285738c2faba1a58f15c8581f9997edcc71c
|
from brian import *
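# Five leaky integrate-and-fire neurons with membrane resistances spaced
# evenly from Rmin to Rmax, all driven by the same half-wave-rectified 50 Hz
# sinusoidal current delivered through a TimedArray; the plots show the
# injected current I and the membrane potential V over time.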
N = 5
duration = 100 * ms
Vr = -60 * mV
Vt = -50 * mV
tau = 10 * ms
Rmin = 1 * Mohm
Rmax = 10 * Mohm
freq = 50 * Hz
k = 10 * nA
eqs = '''
dV/dt = (-(V-Vr)+R*I)/tau : volt
R : ohm
I : amp
'''
G = NeuronGroup(N, eqs, reset='V=Vr', threshold='V>Vt')
G.R = linspace(Rmin, Rmax, N)
t = linspace(0 * second, duration, int(duration / defaultclock.dt))
I = clip(k * sin(2 * pi * freq * t), 0, Inf)
G.I = TimedArray(I)
M = MultiStateMonitor(G, record=True)
run(duration)
subplot(211)
M['I'].plot()
ylabel('I (amp)')
subplot(212)
M['V'].plot()
ylabel('V (volt)')
show()
|
mac389/mayer
|
timed-array.py
|
Python
|
gpl-2.0
| 586
|
[
"Brian"
] |
5662f1a78df181f924c85fa6e89e8eeaaa373ccd01e6787cd02d9f33416f794c
|
from setuptools import setup, find_packages
from codecs import open
from os import path
__version__ = '0.0.9'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if x.startswith('git+')]
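# Note: requirement lines starting with 'git+' are routed to dependency_links
# (setuptools' legacy mechanism for VCS dependencies), while all other lines
# become ordinary install_requires entries.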
setup(
name='GPoFM',
version=__version__,
description='GPoFM: Gaussian Process Training with Optimized Feature Maps for Shift-Invariant Kernels',
long_description=long_description,
url='https://github.com/MaxInGaussian/GPoFM',
download_url='https://github.com/MaxInGaussian/GPoFM/tarball/' + __version__,
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
],
keywords='',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='Max W. Y. Lam',
install_requires=install_requires,
dependency_links=dependency_links,
author_email='maxingaussian@gmail.com'
)
|
MaxInGaussian/GPoFM
|
setup.py
|
Python
|
bsd-3-clause
| 1,363
|
[
"Gaussian"
] |
f0e75696d4ada27dcf40d8e697bf1181e6c8e335d2dbea0a641b82203f0d1ee3
|
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
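# Compatibility shim: prefer PyQt4, aliasing its pyqtSignal/pyqtSlot to the
# PySide-style Signal/Slot names; fall back to PySide, aliasing QString to
# str; raise only if neither Qt binding is available.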
import vtk
import time
from ExodusResult import ExodusResult
import glob, math
from ContourChoices import *
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
pathname = os.path.dirname(os.path.realpath(sys.argv[0]))
pathname = os.path.abspath(pathname)
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class ExodusResultRenderWidget(QtGui.QWidget):
def __init__(self, input_file_widget, execution_widget, qt_app, application):
QtGui.QWidget.__init__(self)
self.input_file_widget = input_file_widget
self.qt_app = qt_app
self.application = application
self.plane = vtk.vtkPlane()
self.plane.SetOrigin(-1000, 0, 0)
self.plane.SetNormal(1, 0, 0)
self.exodus_result = None
        # The multiple results (from adaptivity)
self.exodus_results = []
self.timestep_to_exodus_result = {}
self.file_name = None
self.setupLuts()
# The multiple (from adaptivity) file names we know of
self.file_names = []
self.current_max_timestep = 0
# Whether or not there is new data to read
self.new_stuff_to_read = False
self.timer = QtCore.QTimer()
self.timer.stop()
self.timer.setInterval(100)
self.timer.timeout.connect(self._updateData)
self.execution_widget = execution_widget
self.execution_widget.run_started.connect(self._runStarted)
self.execution_widget.run_stopped.connect(self._runStopped)
self.execution_widget.timestep_begin.connect(self._timestepBegin)
self.execution_widget.timestep_end.connect(self._timestepEnd)
self.main_layout = QtGui.QHBoxLayout()
# self.main_layout.setSpacing(0)
self.right_layout = QtGui.QVBoxLayout()
self.left_layout = QtGui.QVBoxLayout()
self.left_widget = QtGui.QWidget()
self.left_widget.setMaximumWidth(1)
self.left_widget.setLayout(self.left_layout)
self.left_layout.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.main_layout.addWidget(self.left_widget)
self.right_layout.setStretchFactor(self.left_layout, 0.01)
self.main_layout.addLayout(self.right_layout)
# self.setMinimumWidth(700)
self.setLayout(self.main_layout)
self.vtkwidget = QVTKRenderWindowInteractor(self)
# self.vtkwidget.setMinimumHeight(300)
# Create background, default to the gradient look
self.renderer = vtk.vtkRenderer()
self._showBlackBackgroundChanged(0)
self.renderer.ResetCamera()
self.right_layout.addWidget(self.vtkwidget)
self.right_layout.setStretchFactor(self.vtkwidget, 100)
self.vtkwidget.show()
self.vtkwidget.GetRenderWindow().AddRenderer(self.renderer)
self.interactor = self.vtkwidget.GetRenderWindow().GetInteractor()
self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
self.show()
self.interactor.Initialize()
self.first = True
self.exodus_result = None
self.has_displacements = False
self.current_displacement_magnitude = 1.0
self.current_scale_x_magnitude = 1.0
self.current_scale_y_magnitude = 1.0
self.current_scale_z_magnitude = 1.0
self.current_variable = None
self.current_component = None
# Holds a mapping of variable name to contour choices so they can be restored when variables are selected
self.contour_choices = {}
# If we are currently restoring contours then don't save the intermediate ones
self.currently_restoring_contours = False
self.setupControls()
self.modifyUI()
    ''' This will be called after the interface is completely set up to allow an application to modify this tab '''
def modifyUI(self):
pass
''' Return the name to use for this tab '''
def name(self):
return 'Visualize'
def setupControls(self):
self.controls_widget = QtGui.QWidget()
self.controls_layout = QtGui.QVBoxLayout()
self.bottom_controls_layout = QtGui.QHBoxLayout()
self.left_layout.addLayout(self.controls_layout)
self.main_layout.setStretchFactor(self.left_layout, 0.1)
# self.main_layout.addLayout(self.bottom_controls_layout)
self.leftest_controls_layout = QtGui.QVBoxLayout()
self.left_controls_layout = QtGui.QVBoxLayout()
self.right_controls_layout = QtGui.QVBoxLayout()
### Output selection controls ###
# Create the box, layout, and control
self.output_control_group_box = QtGui.QGroupBox("Select Output") # adds a box for storing widget
self.output_control_layout = QtGui.QVBoxLayout() # creates a layout
self.output_control = QtGui.QComboBox() # adds the actual dropdown menu
# Set-up the control
self.output_control.setToolTip('Select output file to view') # sets menu tooltip
self.updateOutputControl() # populate the list of outputs
self.output_control.activated[str].connect(self._outputChanged) # set the callback function
# Add the control to the GUI
self.output_control_layout.addWidget(self.output_control) # add the dropdown widget to the layout
self.output_control_group_box.setLayout(self.output_control_layout) # add the layout to the box
self.leftest_controls_layout.addWidget(self.output_control_group_box) # add the box to the gui control layout
self.block_view_group_box = QtGui.QGroupBox('Show Blocks')
# self.block_view_group_box.setMaximumWidth(200)
# self.block_view_group_box.setMaximumHeight(200)
self.block_view_layout = QtGui.QVBoxLayout()
self.block_view_list = QtGui.QListView()
self.block_view_model = QtGui.QStandardItemModel()
self.block_view_model.itemChanged.connect(self._blockViewItemChanged)
self.block_view_list.setModel(self.block_view_model)
self.block_view_layout.addWidget(self.block_view_list)
self.block_view_group_box.setLayout(self.block_view_layout)
self.leftest_controls_layout.addWidget(self.block_view_group_box)
self.controls_layout.addLayout(self.leftest_controls_layout)
self.controls_layout.addLayout(self.left_controls_layout)
self.controls_layout.addLayout(self.right_controls_layout)
self.controls_layout.setStretchFactor(self.leftest_controls_layout,1.0)
self.controls_layout.setStretchFactor(self.left_controls_layout,1.5)
self.controls_layout.setStretchFactor(self.right_controls_layout,4.0)
self.automatic_update_checkbox = QtGui.QCheckBox("Automatically Update")
        self.automatic_update_checkbox.setToolTip('Toggle automatically reading new timesteps as they finish computing')
self.automatic_update_checkbox.setCheckState(QtCore.Qt.Checked)
self.automatically_update = True
self.automatic_update_checkbox.stateChanged[int].connect(self._automaticUpdateChanged)
# self.left_controls_layout.addWidget(self.automatic_update_checkbox)
# Create Group for viewer controls
# Create the View Mesh toggle
self.toggle_groupbox = QtGui.QGroupBox("View")
self.toggle_groupbox.setMaximumHeight(70)
self.toggle_layout = QtGui.QHBoxLayout()
self.toggle_groupbox.setMaximumHeight(70)
self.draw_edges_checkbox = QtGui.QCheckBox("View Mesh")
self.draw_edges_checkbox.setToolTip('Show mesh elements')
self.draw_edges_checkbox.stateChanged[int].connect(self._drawEdgesChanged)
self.toggle_layout.addWidget(self.draw_edges_checkbox, alignment=QtCore.Qt.AlignHCenter)
# Add a button for toggling the scalebar legend
self.hide_scalebar_checkbox = QtGui.QCheckBox("Scalebar")
self.hide_scalebar_checkbox.setToolTip('Toggle visibility of colorbar')
self.hide_scalebar_checkbox.setCheckState(QtCore.Qt.Checked)
self.hide_scalebar_checkbox.stateChanged[int].connect(self._hideScalebarChanged)
self.toggle_layout.addWidget(self.hide_scalebar_checkbox, alignment=QtCore.Qt.AlignHCenter)
# Add a button for toggling background to black
self.show_black_background_checkbox = QtGui.QCheckBox("Black")
self.show_black_background_checkbox.setToolTip('Toggle a black/gradient background')
self.show_black_background_checkbox.stateChanged[int].connect(self._showBlackBackgroundChanged)
self.toggle_layout.addWidget(self.show_black_background_checkbox, alignment=QtCore.Qt.AlignHCenter)
# Create a vertical layout and add the toggles
self.toggle_groupbox.setLayout(self.toggle_layout)
self.reset_layout = QtGui.QVBoxLayout()
self.reset_layout.addWidget(self.toggle_groupbox)
self.displace_groupbox = QtGui.QGroupBox("Displace")
self.displace_groupbox.setCheckable(True)
self.displace_groupbox.setChecked(True)
self.displace_groupbox.setDisabled(True)
self.displace_groupbox.setMaximumHeight(70)
self.displace_groupbox.toggled[bool].connect(self._displaceToggled)
self.displace_layout = QtGui.QHBoxLayout()
self.displace_layout.setSpacing(0)
self.displace_groupbox.setLayout(self.displace_layout)
self.displace_magnitude_label = QtGui.QLabel("Multiplier: ")
self.displace_magnitude_text = QtGui.QLineEdit("1.0")
self.displace_magnitude_text.setMaximumWidth(50)
self.displace_magnitude_text.setMinimumWidth(10)
self.displace_magnitude_text.returnPressed.connect(self._displaceMagnitudeTextReturn)
self.displace_layout.addWidget(self.displace_magnitude_label, alignment=QtCore.Qt.AlignRight)
self.displace_layout.addWidget(self.displace_magnitude_text, alignment=QtCore.Qt.AlignLeft)
self.reset_layout.addWidget(self.displace_groupbox)
self.scale_groupbox = QtGui.QGroupBox("Scale")
self.scale_groupbox.setCheckable(True)
self.scale_groupbox.setChecked(False)
self.scale_groupbox.setDisabled(False)
self.scale_groupbox.setMaximumHeight(70)
self.scale_groupbox.toggled[bool].connect(self._scaleToggled)
self.scale_layout = QtGui.QHBoxLayout()
self.scale_layout.setSpacing(0)
self.scale_groupbox.setLayout(self.scale_layout)
self.scale_x_label = QtGui.QLabel("x: ")
self.scale_x_text = QtGui.QLineEdit("1.0")
self.scale_x_text.setMinimumWidth(10)
self.scale_x_text.setMaximumWidth(50)
self.scale_y_label = QtGui.QLabel("y: ")
self.scale_y_text = QtGui.QLineEdit("1.0")
self.scale_y_text.setMinimumWidth(10)
self.scale_y_text.setMaximumWidth(50)
self.scale_z_label = QtGui.QLabel("z: ")
self.scale_z_text = QtGui.QLineEdit("1.0")
self.scale_z_text.setMinimumWidth(10)
self.scale_z_text.setMaximumWidth(50)
self.scale_x_text.returnPressed.connect(self._scaleMagnitudeTextReturn)
self.scale_y_text.returnPressed.connect(self._scaleMagnitudeTextReturn)
self.scale_z_text.returnPressed.connect(self._scaleMagnitudeTextReturn)
self.scale_layout.addWidget(self.scale_x_label, alignment=QtCore.Qt.AlignRight)
self.scale_layout.addWidget(self.scale_x_text, alignment=QtCore.Qt.AlignLeft)
self.scale_layout.addWidget(self.scale_y_label, alignment=QtCore.Qt.AlignRight)
self.scale_layout.addWidget(self.scale_y_text, alignment=QtCore.Qt.AlignLeft)
self.scale_layout.addWidget(self.scale_z_label, alignment=QtCore.Qt.AlignRight)
self.scale_layout.addWidget(self.scale_z_text, alignment=QtCore.Qt.AlignLeft)
self.reset_layout.addWidget(self.scale_groupbox)
self.clip_groupbox = QtGui.QGroupBox("Clip")
self.clip_groupbox.setToolTip('Toggle clipping mode where the solution can be sliced open')
self.clip_groupbox.setCheckable(True)
self.clip_groupbox.setChecked(False)
self.clip_groupbox.setMaximumHeight(70)
self.clip_groupbox.toggled[bool].connect(self._clippingToggled)
clip_layout = QtGui.QHBoxLayout()
self.clip_plane_combobox = QtGui.QComboBox()
self.clip_plane_combobox.setToolTip('Direction of the normal for the clip plane')
self.clip_plane_combobox.addItem('x')
self.clip_plane_combobox.addItem('y')
self.clip_plane_combobox.addItem('z')
self.clip_plane_combobox.currentIndexChanged[str].connect(self._clipNormalChanged)
clip_layout.addWidget(self.clip_plane_combobox)
self.clip_plane_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.clip_plane_slider.setToolTip('Slide to change plane position')
self.clip_plane_slider.setRange(0, 100)
self.clip_plane_slider.setSliderPosition(50)
self.clip_plane_slider.sliderReleased.connect(self._clipSliderReleased)
self.clip_plane_slider.sliderMoved[int].connect(self._clipSliderMoved)
clip_layout.addWidget(self.clip_plane_slider)
# vbox->addStretch(1);
self.clip_groupbox.setLayout(clip_layout)
self.reset_layout.addWidget(self.clip_groupbox)
self.view_layout = QtGui.QHBoxLayout()
self.open_button = QtGui.QPushButton('Open')
self.open_button.setMaximumWidth(100)
self.open_button.setToolTip('Open an existing result')
self.open_button.clicked.connect(self._clickedOpen)
self.view_layout.addWidget(self.open_button, alignment=QtCore.Qt.AlignHCenter)
self.save_button = QtGui.QPushButton('Save')
self.save_button.setMaximumWidth(100)
self.save_button.setToolTip('Save the current view to a file')
self.save_button.clicked.connect(self._saveView)
self.view_layout.addWidget(self.save_button, alignment=QtCore.Qt.AlignHCenter)
self.reset_button = QtGui.QPushButton('Reset')
self.reset_button.setMaximumWidth(100)
self.reset_button.setToolTip('Recenter the camera on the current result')
self.reset_button.clicked.connect(self._resetView)
self.view_layout.addWidget(self.reset_button, alignment=QtCore.Qt.AlignHCenter)
self.reset_layout.addLayout(self.view_layout)
self.right_controls_layout.addLayout(self.reset_layout)
self.contour_groupbox = QtGui.QGroupBox("Contour")
# self.contour_groupbox.setMaximumHeight(10)
# self.contour_groupbox.setMaximumHeight(70)
# contour_groupbox_policy = QtGui.QSizePolicy()
self.contour_groupbox.setSizePolicy(QtGui.QSizePolicy.Fixed,QtGui.QSizePolicy.Fixed)
self.contour_layout = QtGui.QVBoxLayout()
self.contour_groupbox.setLayout(self.contour_layout)
self.variable_contour_layout = QtGui.QHBoxLayout()
self.contour_layout.addLayout(self.variable_contour_layout)
self.contour_label = QtGui.QLabel("Contour:")
self.variable_contour = QtGui.QComboBox()
self.variable_contour_is_nodal = {}
self.variable_contour.setToolTip('Which variable to color by')
self.variable_contour.currentIndexChanged[int].connect(self._contourVariableSelected)
# self.variable_contour_layout.addWidget(self.contour_label, alignment=QtCore.Qt.AlignRight)
self.variable_contour_layout.addWidget(self.variable_contour, alignment=QtCore.Qt.AlignHCenter)
# self.component_layout = QtGui.QHBoxLayout()
self.component_label = QtGui.QLabel("Component:")
self.variable_component = QtGui.QComboBox()
self.variable_component.setToolTip('If the variable is a vector this selects what component of that vector (or the Magnitude) to color by')
self.variable_component.currentIndexChanged[str].connect(self._variableComponentSelected)
# self.component_layout.addWidget(self.component_label, alignment=QtCore.Qt.AlignRight)
# self.component_layout.addWidget(self.variable_component, alignment=QtCore.Qt.AlignLeft)
# self.variable_contour_layout.addLayout(self.component_layout)
self.variable_contour_layout.addWidget(self.variable_component, alignment=QtCore.Qt.AlignHCenter)
self.minmax_contour_layout = QtGui.QVBoxLayout()
self.contour_layout.addLayout(self.minmax_contour_layout)
self.min_groupbox = QtGui.QGroupBox("Min")
self.min_layout = QtGui.QHBoxLayout()
self.min_groupbox.setLayout(self.min_layout)
self.min_radio_layout = QtGui.QVBoxLayout()
self.min_current_radio = QtGui.QRadioButton('Current')
self.min_current_radio.setChecked(QtCore.Qt.Checked)
self.min_current_radio.toggled.connect(self._updateContours)
self.min_global_radio = QtGui.QRadioButton('Global')
self.min_global_radio.toggled.connect(self._updateContours)
self.min_radio_layout.addWidget(self.min_current_radio)
# self.min_radio_layout.addWidget(self.min_global_radio)
self.min_custom_layout = QtGui.QHBoxLayout()
self.min_custom_layout.setSpacing(0)
self.min_custom_radio = QtGui.QRadioButton()
self.min_custom_radio.toggled.connect(self._updateContours)
self.min_custom_text = QtGui.QLineEdit()
self.min_custom_text.returnPressed.connect(self._updateContours)
self.min_custom_text.setDisabled(True)
self.min_custom_text.setMaximumWidth(100)
self.min_custom_layout.addWidget(self.min_custom_radio, alignment=QtCore.Qt.AlignLeft)
self.min_custom_layout.addWidget(self.min_custom_text, alignment=QtCore.Qt.AlignLeft)
self.min_custom_layout.addStretch()
self.min_layout.addLayout(self.min_radio_layout)
self.min_layout.addLayout(self.min_custom_layout)
self.minmax_contour_layout.addWidget(self.min_groupbox)
self.max_groupbox = QtGui.QGroupBox("Max")
self.max_layout = QtGui.QHBoxLayout()
self.max_groupbox.setLayout(self.max_layout)
self.max_radio_layout = QtGui.QVBoxLayout()
self.max_current_radio = QtGui.QRadioButton('Current')
self.max_current_radio.setChecked(QtCore.Qt.Checked)
self.max_current_radio.toggled.connect(self._updateContours)
self.max_global_radio = QtGui.QRadioButton('Global')
self.max_global_radio.toggled.connect(self._updateContours)
self.max_radio_layout.addWidget(self.max_current_radio)
# self.max_radio_layout.addWidget(self.max_global_radio)
self.max_custom_layout = QtGui.QHBoxLayout()
self.max_custom_layout.setSpacing(0)
self.max_custom_radio = QtGui.QRadioButton()
self.max_custom_radio.toggled.connect(self._updateContours)
self.max_custom_text = QtGui.QLineEdit()
self.max_custom_text.returnPressed.connect(self._updateContours)
self.max_custom_text.setDisabled(True)
self.max_custom_text.setMaximumWidth(100)
self.max_custom_layout.addWidget(self.max_custom_radio, alignment=QtCore.Qt.AlignLeft)
self.max_custom_layout.addWidget(self.max_custom_text, alignment=QtCore.Qt.AlignLeft)
self.max_custom_layout.addStretch()
self.max_layout.addLayout(self.max_radio_layout)
self.max_layout.addLayout(self.max_custom_layout)
self.minmax_contour_layout.addWidget(self.max_groupbox)
# self.component_layout = QtGui.QHBoxLayout()
self.color_scheme_label = QtGui.QLabel("Color Scheme:")
self.color_scheme_component = QtGui.QComboBox()
self.color_scheme_component.addItem('HSV (Cool to Warm)')
self.color_scheme_component.addItem('Diverging (Blue to Red)')
self.color_scheme_component.addItem('Shock')
self.color_scheme_component.setToolTip('The color scheme used by the render view')
self.color_scheme_component.currentIndexChanged[str].connect(self._colorSchemeSelected)
# self.component_layout.addWidget(self.component_label, alignment=QtCore.Qt.AlignRight)
# self.component_layout.addWidget(self.variable_component, alignment=QtCore.Qt.AlignLeft)
# self.variable_contour_layout.addLayout(self.component_layout)
self.minmax_contour_layout.addWidget(self.color_scheme_component)
self.left_controls_layout.addWidget(self.contour_groupbox)
self.beginning_button = QtGui.QToolButton()
self.beginning_button.setToolTip('Go to first timestep')
self.beginning_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrFirst32.png'))
self.beginning_button.clicked.connect(self._beginningClicked)
self.back_button = QtGui.QToolButton()
self.back_button.setToolTip('Previous timestep')
self.back_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrBack32.png'))
self.back_button.clicked.connect(self._backClicked)
self.play_button = QtGui.QToolButton()
self.play_button.setToolTip('Play through the currently computed timesteps')
self.play_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrPlay32.png'))
self.play_button.clicked.connect(self._playClicked)
self.pause_button = QtGui.QToolButton()
self.pause_button.setToolTip('If playing this will pause playback')
self.pause_button.setDisabled(True)
self.pause_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrPause32.png'))
self.pause_button.clicked.connect(self._pauseClicked)
self.forward_button = QtGui.QToolButton()
self.forward_button.setToolTip('Next timestep')
self.forward_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrForward32.png'))
self.forward_button.clicked.connect(self._forwardClicked)
self.last_button = QtGui.QToolButton()
self.last_button.setToolTip('Go to last timestep')
self.last_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrLast32.png'))
self.last_button.clicked.connect(self._lastClicked)
self.loop_button = QtGui.QToolButton()
self.loop_button.setToolTip('Toggle replaying all timesteps when the end is reached during playback. Note that as new timesteps finish they will automatically be picked up and added to the end of the sequence.')
self.loop_button.setCheckable(True)
self.loop_button.setIcon(QtGui.QIcon(pathname + '/resources/from_paraview/pqVcrLoop24.png'))
self.loop_button.toggled.connect(self._loopClicked)
self.currently_looping = False
self.time_slider_label = QtGui.QLabel("Timestep:")
self.time_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.time_slider.setToolTip('Slide to select a timestep to display')
# self.time_slider.setMaximumWidth(600)
self.time_slider.valueChanged.connect(self._timeSliderChanged)
self.time_slider.sliderReleased.connect(self._timeSliderReleased)
self.time_slider_textbox = QtGui.QLineEdit()
self.time_slider_textbox.setToolTip('Enter a number and press Enter to go to that timestep')
self.time_slider_textbox.setMaximumWidth(30)
self.time_slider_textbox.setMinimumWidth(30)
self.time_slider_textbox.returnPressed.connect(self._sliderTextboxReturn)
self.time_groupbox = QtGui.QGroupBox("Time")
self.time_groupbox.setMaximumHeight(70)
self.time_layout = QtGui.QHBoxLayout()
self.time_layout.addWidget(self.beginning_button)
self.time_layout.addWidget(self.back_button)
self.time_layout.addWidget(self.play_button)
self.time_layout.addWidget(self.pause_button)
self.time_layout.addWidget(self.forward_button)
self.time_layout.addWidget(self.last_button)
self.time_layout.addWidget(self.loop_button)
self.time_layout.addWidget(self.time_slider_label, alignment=QtCore.Qt.AlignRight)
self.time_layout.addWidget(self.time_slider)
self.time_layout.addWidget(self.time_slider_textbox, alignment=QtCore.Qt.AlignLeft)
self.time_groupbox.setLayout(self.time_layout)
self.right_layout.addWidget(self.time_groupbox)
def _updateControls(self):
self.old_contour = self.variable_contour.currentText()
self.variable_contour.clear()
self.has_displacements = False
for variable in self.exodus_result.current_nodal_variables:
if 'ObjectId' not in variable:
self.variable_contour.addItem(variable)
item_num = self.variable_contour.count()-1
self.variable_contour.setItemIcon(item_num,QtGui.QIcon(pathname + '/resources/from_paraview/pqNodalData16.png'))
self.variable_contour_is_nodal[item_num] = True
if 'disp' in variable:
self.has_displacements = True
for variable in self.exodus_result.current_elemental_variables:
if 'ObjectId' not in variable:
self.variable_contour.addItem(variable)
item_num = self.variable_contour.count()-1
self.variable_contour.setItemIcon(item_num,QtGui.QIcon(pathname + '/resources/from_paraview/pqElemData16.png'))
self.variable_contour_is_nodal[item_num] = False
if 'disp' in variable:
self.has_displacements = True
if self.has_displacements:
self.displace_groupbox.setDisabled(False)
self.block_view_model.clear()
for block in self.exodus_result.blocks:
block_display_name = str(block)
if block in self.exodus_result.block_to_name:
block_display_name += ' : ' + self.exodus_result.block_to_name[block]
item = QtGui.QStandardItem(str(block_display_name))
item.exodus_block = block
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Checked)
self.block_view_model.appendRow(item)
# Try to restore back to the view of the variable we were looking at
found_index = self.variable_contour.findText(self.old_contour)
if found_index != -1:
self.variable_contour.setCurrentIndex(found_index)
else: # If this variable doesn't exist then we are probably running a new simulation... try to reset the camera
self._resetView()
self.time_slider.setMinimum(0)
self.time_slider.setMaximum(self.current_max_timestep)
##
# Updates the list of available output file names
def updateOutputControl(self):
# Get the current item selected
name = self.output_control.currentText()
# Clear the existing list
self.output_control.clear()
# Update the list block names and store the filename as data
data = self.input_file_widget.getOutputFileAndBlockNames()
if data != None:
for i in range(len(data[0])):
self.output_control.addItem(data[1][i], data[0][i])
# Restore the selected name, if it is available
idx = self.output_control.findText(name)
if idx != -1:
self.output_control.setCurrentIndex(idx)
##
# Executes when the user selects an item from the output selection dropdown box
def _outputChanged(self):
idx = self.output_control.currentIndex()
name = self.output_control.itemData(idx)
if hasattr(QtCore, 'QVariant') and isinstance(name, QtCore.QVariant):
name = str(name.toString())
self._openFile(name, False)
def setupLuts(self):
self.luts = []
# HSV (Blue to Red) default
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667, 0.0)
lut.SetNumberOfColors(256)
lut.Build()
self.luts.append(lut)
# Diverging (Cool to Warm) color scheme
ctf = vtk.vtkColorTransferFunction()
ctf.SetColorSpaceToDiverging()
ctf.AddRGBPoint(0.0, 0.230, 0.299, 0.754)
ctf.AddRGBPoint(1.0, 0.706, 0.016, 0.150)
cc = list()
for i in xrange(256):
cc.append(ctf.GetColor(float(i) / 255.0))
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
for i, item in enumerate(cc):
lut.SetTableValue(i, item[0], item[1], item[2], 1.0)
lut.Build()
self.luts.append(lut)
# Shock
ctf = vtk.vtkColorTransferFunction()
min = 93698.4
max = 230532
ctf.AddRGBPoint(self._normalize(min, max, 93698.4), 0.0, 0.0, 1.0)
ctf.AddRGBPoint(self._normalize(min, max, 115592.0), 0.0, 0.905882, 1.0)
ctf.AddRGBPoint(self._normalize(min, max, 138853.0), 0.0941176, 0.733333, 0.027451)
ctf.AddRGBPoint(self._normalize(min, max, 159378.0), 1.0, 0.913725, 0.00784314)
ctf.AddRGBPoint(self._normalize(min, max, 181272.0), 1.0, 0.180392, 0.239216)
ctf.AddRGBPoint(self._normalize(min, max, 203165.0), 1.0, 0.701961, 0.960784)
ctf.AddRGBPoint(self._normalize(min, max, 230532.0), 1.0, 1.0, 1.0)
cc = list()
for i in xrange(256):
cc.append(ctf.GetColor(float(i) / 255.0))
lut = vtk.vtkLookupTable()
lut.SetNumberOfColors(256)
for i, item in enumerate(cc):
lut.SetTableValue(i, item[0], item[1], item[2], 1.0)
lut.Build()
self.luts.append(lut)
self.current_lut = self.luts[0]
def _normalize(self, min, max, value):
return (value - min) / (max - min)
def _blockViewItemChanged(self, item):
if item.checkState() == QtCore.Qt.Checked:
self.exodus_result.showBlock(item.exodus_block)
self.exodus_result.reader.Update()
self.exodus_result.geom.Update()
self.current_bounds = self.exodus_result.actor.GetBounds()
self._updateContours()
else:
self.exodus_result.hideBlock(item.exodus_block)
self.exodus_result.reader.Update()
self.exodus_result.geom.Update()
self.current_bounds = self.exodus_result.actor.GetBounds()
self._updateContours()
def _displaceToggled(self, value):
self._timeSliderReleased()
def _scaleToggled(self, value):
self._timeSliderReleased()
def _displaceMagnitudeTextReturn(self):
self.current_displacement_magnitude = float(self.displace_magnitude_text.text())
self._timeSliderReleased()
def _scaleMagnitudeTextReturn(self):
self.current_scale_x_magnitude = float(self.scale_x_text.text())
self.current_scale_y_magnitude = float(self.scale_y_text.text())
self.current_scale_z_magnitude = float(self.scale_z_text.text())
self._timeSliderReleased()
def _drawEdgesChanged(self, value):
if value == QtCore.Qt.Checked:
self.exodus_result.actor.GetProperty().EdgeVisibilityOn()
self.exodus_result.clip_actor.GetProperty().EdgeVisibilityOn()
else:
self.exodus_result.actor.GetProperty().EdgeVisibilityOff()
self.exodus_result.clip_actor.GetProperty().EdgeVisibilityOff()
self.vtkwidget.repaint()
##
# A method for toggling visibility of the scale bar legend; it is controlled
# by the 'Scalebar' toggle on the Visualize tab
# @param value The integer value from the checkbox (1=checked)
def _hideScalebarChanged(self, value):
# Show when checked
if value == QtCore.Qt.Checked:
self.exodus_result.scalar_bar.VisibilityOn()
# Hide when unchecked
else:
self.exodus_result.scalar_bar.VisibilityOff()
# Update the GUI
self.vtkwidget.repaint()
##
# A method for toggling between a black background and a gradient background;
# it is controlled by the 'Black' toggle on the Visualize tab
# @param value The integer value from the checkbox (1=checked)
def _showBlackBackgroundChanged(self, value):
# Black when checked
if value == QtCore.Qt.Checked:
self.renderer.SetBackground(0,0,0)
self.renderer.SetGradientBackground(0)
#self.renderer.ResetCamera()
# Gradient when unchecked
else:
self.renderer.SetBackground(0.2,0.2,0.2)
self.renderer.SetBackground2(1,1,1)
self.renderer.SetGradientBackground(1)
#self.renderer.ResetCamera()
# Update the GUI
self.vtkwidget.repaint()
def _fillComponentCombo(self, variable_name, components):
self.variable_component.clear()
self.variable_component.addItem('Magnitude')
num_components = components[variable_name]
if num_components > 1 and self.exodus_result.current_dim >= 2:
self.variable_component.setDisabled(False)
self.variable_component.addItem('X')
self.variable_component.addItem('Y')
else:
self.variable_component.setDisabled(True)
if num_components > 1 and self.exodus_result.current_dim == 3:
self.variable_component.addItem('Z')
def _contourVariableSelected(self, index):
value_string = str(self.variable_contour.itemText(index))
self.current_variable = value_string
self.current_variable_index = index
if index in self.variable_contour_is_nodal:
self.current_variable_is_nodal = self.variable_contour_is_nodal[index]
else:
self.current_variable_is_nodal = True
self.currently_restoring_contours = True
# Maybe results haven't been written yet...
if not self.exodus_result.data.GetPointData().GetVectors(value_string) and not self.exodus_result.data.GetCellData().GetVectors(value_string):
return
if value_string in self.exodus_result.current_nodal_components:
self._fillComponentCombo(value_string, self.exodus_result.current_nodal_components)
elif value_string in self.exodus_result.current_elemental_components:
self._fillComponentCombo(value_string, self.exodus_result.current_elemental_components)
if self.current_variable not in self.contour_choices:
self.contour_choices[self.current_variable] = ContourChoices()
self.contour_choices[self.current_variable].restore(self)
self.currently_restoring_contours = False
def _variableComponentSelected(self, value):
value_string = str(value)
self.current_component = value_string
if value_string == 'Magnitude':
self.component_index = -1
elif value_string == 'X':
self.component_index = 0
elif value_string == 'Y':
self.component_index = 1
elif value_string == 'Z':
self.component_index = 2
self._updateContours()
def _updateContours(self):
self.exodus_result.setColorScheme(self.current_lut)
if self.component_index == -1:
self.exodus_result.lut.SetVectorModeToMagnitude()
elif self.component_index == 0:
self.exodus_result.lut.SetVectorModeToComponent()
self.exodus_result.lut.SetVectorComponent(0)
elif self.component_index == 1:
self.exodus_result.lut.SetVectorModeToComponent()
self.exodus_result.lut.SetVectorComponent(1)
elif self.component_index == 2:
self.exodus_result.lut.SetVectorModeToComponent()
self.exodus_result.lut.SetVectorComponent(2)
if self.clip_groupbox.isChecked():
self.exodus_result.clipper.Modified()
self.exodus_result.clipper.Update()
self.exodus_result.clip_geom.Update()
self.exodus_result.clip_mapper.Update()
data = None
if self.current_variable_is_nodal and self.current_variable in self.exodus_result.current_nodal_components:
data = self.exodus_result.data.GetPointData().GetVectors(self.current_variable)
self.exodus_result.mapper.SetScalarModeToUsePointFieldData()
self.exodus_result.clip_mapper.SetScalarModeToUsePointFieldData()
elif self.current_variable in self.exodus_result.current_elemental_components:
data = self.exodus_result.data.GetCellData().GetVectors(self.current_variable)
self.exodus_result.mapper.SetScalarModeToUseCellFieldData()
self.exodus_result.clip_mapper.SetScalarModeToUseCellFieldData()
self.exodus_result.mapper.SelectColorArray(self.current_variable)
self.exodus_result.clip_mapper.SelectColorArray(self.current_variable)
if data:
self.current_range = data.GetRange(self.component_index)
if self.min_current_radio.isChecked():
self.min_custom_text.setText(str(self.current_range[0]))
self.min_custom_text.setCursorPosition(0)
if self.max_current_radio.isChecked():
self.max_custom_text.setText(str(self.current_range[1]))
self.max_custom_text.setCursorPosition(0)
if self.min_custom_radio.isChecked():
self.min_custom_text.setDisabled(False)
else:
self.min_custom_text.setDisabled(True)
if self.max_custom_radio.isChecked():
self.max_custom_text.setDisabled(False)
else:
self.max_custom_text.setDisabled(True)
min = 0.0
try:
min = float(self.min_custom_text.displayText())
except:
min = 0.0
max = 0.0
try:
max = float(self.max_custom_text.displayText())
except:
max = 0.0
if self.current_variable not in self.contour_choices:
self.contour_choices[self.current_variable] = ContourChoices()
if not self.currently_restoring_contours:
self.contour_choices[self.current_variable].save(self)
the_range = (min, max)
if min <= max:
self.exodus_result.mapper.SetScalarRange(the_range)
self.exodus_result.clip_mapper.SetScalarRange(the_range)
self.exodus_result.scalar_bar.SetTitle(self.current_variable)
self.renderer.AddActor2D(self.exodus_result.scalar_bar)
self.vtkwidget.repaint()
def _colorSchemeSelected(self, value):
self.current_lut = self.luts[self.color_scheme_component.currentIndex()]
self._updateContours()
def _openFile(self, file_name, reset=True):
self._clear()
self.base_stamp = os.path.getmtime(file_name)
self.file_name = str(file_name)
self.new_stuff_to_read = True
self._updateData()
self._updateData() # Call it again to read any adaptive results
self._lastClicked() # Go to the last timestep
if reset:
self._resetView() # Reset the camera
def _clickedOpen(self):
file_name = QtGui.QFileDialog.getOpenFileName(self, "Open Result", "~/", "Input Files (*.e)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name:
self._openFile(file_name)
def _resetView(self):
self.renderer.ResetCamera()
fp = self.renderer.GetActiveCamera().GetFocalPoint()
p = self.renderer.GetActiveCamera().GetPosition()
dist = math.sqrt( (p[0]-fp[0])**2 + (p[1]-fp[1])**2 + (p[2]-fp[2])**2 )
self.renderer.GetActiveCamera().SetPosition(fp[0], fp[1], fp[2]+dist)
self.renderer.GetActiveCamera().SetViewUp(0.0, 1.0, 0.0)
self.vtkwidget.repaint()
def _saveView(self):
file_name = QtGui.QFileDialog.getSaveFileName(self, "Image File Name", "~/", "Image Files (*.png)")
if isinstance(file_name, QtCore.QString):
file_name = str(file_name)
if not isinstance(file_name, basestring): # This happens when using pyside
file_name = file_name[0]
if file_name != '':
w2i = vtk.vtkWindowToImageFilter()
writer = vtk.vtkPNGWriter()
w2i.SetInput(self.vtkwidget.GetRenderWindow())
w2i.Update()
writer.SetInputConnection(w2i.GetOutputPort())
writer.SetFileName(str(file_name))
self.vtkwidget.GetRenderWindow().Render()
writer.Write()
def _automaticUpdateChanged(self, value):
if value == QtCore.Qt.Checked:
self.automatically_update = True
else:
self.automatically_update = False
def _beginningClicked(self):
self.time_slider.setSliderPosition(0)
self._timeSliderReleased()
def _backClicked(self):
self.time_slider.setSliderPosition(self.time_slider.sliderPosition()-1)
self._timeSliderReleased()
def _playClicked(self):
self.play_button.setDisabled(True)
self.pause_button.setDisabled(False)
self.currently_playing = True
first = True
while((first or self.currently_looping) and self.currently_playing):
first = False
# If the slider is at the end then start over
self.qt_app.processEvents()
time.sleep(0.02)
self.qt_app.processEvents()
if self.time_slider.sliderPosition() == self.time_slider.maximum():
self.time_slider.setSliderPosition(0)
while self.time_slider.sliderPosition() < self.time_slider.maximum():
self.time_slider.setSliderPosition(self.time_slider.sliderPosition()+1)
self.qt_app.processEvents()
self._timeSliderReleased()
time.sleep(0.02)
self.qt_app.processEvents()
if not self.currently_playing:
break
self.play_button.setDisabled(False)
self.pause_button.setDisabled(True)
def _pauseClicked(self):
self.play_button.setDisabled(False)
self.pause_button.setDisabled(True)
self.currently_playing = False
def _forwardClicked(self):
self.time_slider.setSliderPosition(self.time_slider.sliderPosition()+1)
self._timeSliderReleased()
def _lastClicked(self):
self.time_slider.setSliderPosition(self.time_slider.maximum())
self._timeSliderReleased()
def _loopClicked(self, state):
if state:
self.currently_looping = True
else:
self.currently_looping = False
def _timeSliderChanged(self):
self.time_slider_textbox.setText(str(self.time_slider.sliderPosition()))
def _timeSliderReleased(self):
textbox_string = self.time_slider_textbox.text()
if textbox_string == '':
textbox_string = str(self.exodus_result.min_timestep)
if int(textbox_string) in self.timestep_to_exodus_result:
for actor in self.exodus_result.current_actors:
self.renderer.RemoveActor(actor)
self.exodus_result = self.timestep_to_exodus_result[int(textbox_string)]
if self.clip_groupbox.isChecked():
self.renderer.AddActor(self.exodus_result.clip_actor)
if self.draw_edges_checkbox.checkState() == QtCore.Qt.Checked:
self.exodus_result.clip_actor.GetProperty().EdgeVisibilityOn()
else:
self.exodus_result.clip_actor.GetProperty().EdgeVisibilityOff()
else:
self.renderer.AddActor(self.exodus_result.actor)
if self.draw_edges_checkbox.checkState() == QtCore.Qt.Checked:
self.exodus_result.actor.GetProperty().EdgeVisibilityOn()
else:
self.exodus_result.actor.GetProperty().EdgeVisibilityOff()
num_block_view_items = self.block_view_model.rowCount()
for i in xrange(num_block_view_items):
item = self.block_view_model.item(i)
if item.checkState() == QtCore.Qt.Checked:
self.exodus_result.showBlock(item.exodus_block)
else:
self.exodus_result.hideBlock(item.exodus_block)
if self.has_displacements and self.displace_groupbox.isChecked():
self.exodus_result.reader.SetApplyDisplacements(1)
self.exodus_result.reader.SetDisplacementMagnitude(float(self.current_displacement_magnitude))
else:
self.exodus_result.reader.SetApplyDisplacements(0)
if self.scale_groupbox.isChecked():
self.exodus_result.actor.SetScale(self.current_scale_x_magnitude, self.current_scale_y_magnitude, self.current_scale_z_magnitude)
else:
self.exodus_result.actor.SetScale(1.0, 1.0, 1.0)
if self.exodus_results and self.exodus_result.reader:
self.exodus_result.reader.SetTimeStep(self.timestep_to_timestep[int(textbox_string)])
self.exodus_result.reader.Update()
self.exodus_result.geom.Update()
self.current_bounds = self.exodus_result.actor.GetBounds()
self._updateContours()
def _sliderTextboxReturn(self):
self.time_slider.setSliderPosition(int(self.time_slider_textbox.text()))
self._timeSliderReleased()
def _associateResultsWithTimesteps(self):
self.timestep_to_exodus_result = {}
self.timestep_to_timestep = {}
self.current_max_timestep = -1
for result in self.exodus_results:
result.reader.UpdateTimeInformation()
min = result.reader.GetTimeStepRange()[0]
max = result.reader.GetTimeStepRange()[1]
for timestep in xrange(min, max+1):
self.current_max_timestep += 1
self.timestep_to_exodus_result[self.current_max_timestep] = result
self.timestep_to_timestep[self.current_max_timestep] = timestep
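# Illustrative example (our annotation, values made up): if two result files
# expose local timestep ranges 0..2 and 0..1, the maps built above become
#   timestep_to_exodus_result = {0: r0, 1: r0, 2: r0, 3: r1, 4: r1}
#   timestep_to_timestep      = {0: 0,  1: 1,  2: 2,  3: 0,  4: 1}
# so the global time slider can address every step across adapted output files.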
def _updateData(self):
# Check to see if there are new exodus files with adapted timesteps in them.
if self.file_name and self.exodus_result:
for file_name in sorted(glob.glob(self.file_name + '-s*')):
file_stamp = os.path.getmtime(file_name)
if int(file_stamp) >= int(self.base_stamp) and int(file_stamp) <= int(time.time() - 1) and file_name not in self.file_names:
self.file_names.append(file_name)
exodus_result = ExodusResult(self, self.plane)
exodus_result.setFileName(file_name, self.current_lut)
self.exodus_results.append(exodus_result)
self.new_stuff_to_read = True
if not self.exodus_result:
# If the file_name is not set in the object, set it from the dropdown selection, otherwise use the stored name
if not self.file_name: # Might have been set by opening a file or from drop-down
idx = self.output_control.currentIndex()
file_name = self.output_control.itemData(idx)
if hasattr(QtCore, 'QVariant') and isinstance(file_name, QtCore.QVariant):
file_name = str(file_name.toString())
else:
file_name = self.file_name
if os.path.exists(file_name):
file_stamp = os.path.getmtime(file_name)
if int(file_stamp) >= int(self.base_stamp) and int(file_stamp) <= int(time.time() - 1) and file_name not in self.file_names:
self.file_name = file_name
self.exodus_result = ExodusResult(self, self.plane)
self.exodus_result.setFileName(file_name, self.current_lut)
self.exodus_results.append(self.exodus_result)
self.current_max_timestep = self.exodus_result.max_timestep
self.renderer.AddActor(self.exodus_result.actor)
self._drawEdgesChanged(self.draw_edges_checkbox.checkState())
if self.first:
self.first = False
self.renderer.ResetCamera()
# Avoid z-buffer fighting
vtk.vtkPolyDataMapper().SetResolveCoincidentTopologyToPolygonOffset()
if self.clip_groupbox.isChecked():
self._clippingToggled(True)
self.vtkwidget.repaint()
self._updateControls()
self.time_slider.setSliderPosition(self.current_max_timestep)
if self.new_stuff_to_read and self.exodus_result and self.automatically_update:
self._associateResultsWithTimesteps()
# self.exodus_result.reader.UpdateTimeInformation()
# range = self.exodus_result.reader.GetTimeStepRange()
# self.exodus_result.min_timestep = range[0]
# self.exodus_result.max_timestep = range[1]
self.time_slider.setMinimum(0)
# Only automatically move forward if they're on the current step
if self.time_slider.sliderPosition() == self.time_slider.maximum():
self.time_slider.setMaximum(self.current_max_timestep)
self.time_slider.setSliderPosition(self.current_max_timestep)
self._timeSliderReleased()
if self.clip_groupbox.isChecked():
self._clipSliderReleased()
self.vtkwidget.repaint()
else:
self.time_slider.setMaximum(self.current_max_timestep)
self.new_stuff_to_read = False
def _timestepBegin(self):
self.new_stuff_to_read = True
def _timestepEnd(self):
pass
def _clear(self):
self.application.addExodusResultActors(self.renderer)
self.file_name = None
self.file_names = []
if not self.exodus_result:
return
for actor in self.exodus_result.current_actors:
self.renderer.RemoveActor(actor)
del self.exodus_result.current_actors[:]
self.exodus_result = None
self.exodus_results = []
self.timestep_to_exodus_result = {}
def _runStarted(self):
# Set the base time
self.base_stamp = time.time()
self._clear()
self.timer.start()
def _finalRead(self):
self.new_stuff_to_read = True # Set this to true so we get one more update
# Do it twice in case of adapted results
self._updateData()
self._updateData()
def _runStopped(self):
self.timer.stop()
self.run_stopped_timer = QtCore.QTimer()
self.run_stopped_timer.setInterval(1000) # Wait a second before updating the plots one last time
self.run_stopped_timer.setSingleShot(True)
self.run_stopped_timer.timeout.connect(self._finalRead)
self.run_stopped_timer.start()
def _clippingToggled(self, value):
if value:
self.renderer.RemoveActor(self.exodus_result.current_actor)
self.renderer.AddActor(self.exodus_result.clip_actor)
self.exodus_result.current_actor = self.exodus_result.clip_actor
self.clip_plane_slider.setSliderPosition(50)
self._clipSliderMoved(50)
self._clipSliderReleased()
else:
self.renderer.RemoveActor(self.exodus_result.current_actor)
self.renderer.AddActor(self.exodus_result.actor)
self.exodus_result.current_actor = self.exodus_result.actor
self.vtkwidget.repaint()
def _clipNormalChanged(self, value):
self.plane.SetOrigin(self.current_bounds[0],
self.current_bounds[2],
self.current_bounds[4])
if value == 'x':
self.plane.SetNormal(1, 0, 0)
elif value == 'y':
self.plane.SetNormal(0, 1, 0)
else:
self.plane.SetNormal(0, 0, 1)
self.clip_plane_slider.setSliderPosition(50)
self._clipSliderMoved(50)
self.vtkwidget.repaint()
def _clipSliderReleased(self):
self._updateContours()
self.vtkwidget.repaint()
def _clipSliderMoved(self, value):
direction = str(self.clip_plane_combobox.currentText())
min = 0
max = 0
if direction == 'x':
min = self.current_bounds[0]
max = self.current_bounds[1]
elif direction == 'y':
min = self.current_bounds[2]
max = self.current_bounds[3]
elif direction == 'z':
min = self.current_bounds[4]
max = self.current_bounds[5]
step_size = (max - min)/100.0
steps = value
distance = float(steps)*step_size
position = min + distance
old = self.plane.GetOrigin()
self.plane.SetOrigin(position if direction == 'x' else old[0],
position if direction == 'y' else old[1],
position if direction == 'z' else old[2])
self._updateContours()
self.vtkwidget.repaint()
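# A minimal standalone sketch (ours, not part of the original widget) of the
# technique setupLuts() above uses: sample a vtkColorTransferFunction at evenly
# spaced points on [0, 1] to build a vtkLookupTable. The helper name
# sample_ctf_to_lut is hypothetical; it relies on the module's existing vtk import.
def sample_ctf_to_lut(ctf, n_colors=256):
    lut = vtk.vtkLookupTable()
    lut.SetNumberOfColors(n_colors)
    for i in xrange(n_colors):
        # Evaluate the transfer function and copy the color into the table
        r, g, b = ctf.GetColor(float(i) / (n_colors - 1))
        lut.SetTableValue(i, r, g, b, 1.0)
    lut.Build()
    return lut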
|
gleicher27/Tardigrade
|
moose/gui/vtk/ExodusResultRenderWidget.py
|
Python
|
lgpl-2.1
| 49,289
|
[
"VTK"
] |
7477eb73b6bc2e60849eff6437b103fa09e1b752a027ed20491bfff456fbcad0
|
import os, logging, threading, time
from Queue import Queue, Empty
from galaxy import model
from paste.deploy.converters import asbool
import pkg_resources
try:
pkg_resources.require( "DRMAA_python" )
DRMAA = __import__( "DRMAA" )
except:
DRMAA = None
log = logging.getLogger( __name__ )
if DRMAA is not None:
DRMAA_state = {
DRMAA.Session.UNDETERMINED: 'process status cannot be determined',
DRMAA.Session.QUEUED_ACTIVE: 'job is queued and waiting to be scheduled',
DRMAA.Session.SYSTEM_ON_HOLD: 'job is queued and in system hold',
DRMAA.Session.USER_ON_HOLD: 'job is queued and in user hold',
DRMAA.Session.USER_SYSTEM_ON_HOLD: 'job is queued and in user and system hold',
DRMAA.Session.RUNNING: 'job is running',
DRMAA.Session.SYSTEM_SUSPENDED: 'job is system suspended',
DRMAA.Session.USER_SUSPENDED: 'job is user suspended',
DRMAA.Session.DONE: 'job finished normally',
DRMAA.Session.FAILED: 'job finished, but failed',
}
sge_template = """#!/bin/sh
#$ -S /bin/sh
GALAXY_LIB="%s"
if [ "$GALAXY_LIB" != "None" ]; then
if [ -n "$PYTHONPATH" ]; then
PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"
else
PYTHONPATH="$GALAXY_LIB"
fi
export PYTHONPATH
fi
cd %s
%s
"""
class SGEJobState( object ):
def __init__( self ):
"""
Encapsulates state related to a job that is being run via SGE and
that we need to monitor.
"""
self.job_wrapper = None
self.job_id = None
self.old_state = None
self.running = False
self.job_file = None
self.ofile = None
self.efile = None
self.runner_url = None
class SGEJobRunner( object ):
"""
Job runner that submits Galaxy jobs to Sun Grid Engine via DRMAA and
tracks their state with a single monitor thread.
"""
STOP_SIGNAL = object()
def __init__( self, app ):
"""Initialize this job runner and start the monitor thread"""
# Check if SGE was importable, fail if not
if DRMAA is None:
raise Exception( "SGEJobRunner requires DRMAA_python which was not found" )
self.app = app
# 'watched' and 'queue' are both used to keep track of jobs to watch.
# 'queue' is used to add new watched jobs, and can be called from
# any thread (usually by the 'queue_job' method). 'watched' must only
# be modified by the monitor thread, which will move items from 'queue'
# to 'watched' and then manage the watched jobs.
self.watched = []
self.queue = Queue()
self.default_cell = self.determine_sge_cell( self.app.config.default_cluster_job_runner )
self.ds = DRMAA.Session()
self.ds.init( self.default_cell )
self.monitor_thread = threading.Thread( target=self.monitor )
self.monitor_thread.start()
log.debug( "ready" )
def determine_sge_cell( self, url ):
"""Determine what SGE cell we are using"""
url_split = url.split("/")
if url_split[0] == 'sge:':
return url_split[2]
# this could happen if sge is started, but is not the default runner
else:
return ''
def determine_sge_queue( self, url ):
"""Determine what SGE queue we are submitting to"""
url_split = url.split("/")
queue = url_split[3]
if queue == "":
# None == server's default queue
queue = None
return queue
def queue_job( self, job_wrapper ):
"""Create SGE script for a job and submit it to the SGE queue"""
try:
job_wrapper.prepare()
command_line = job_wrapper.get_command_line()
except:
job_wrapper.fail( "failure preparing job", exception=True )
log.exception("failure running job %d" % job_wrapper.job_id)
return
runner_url = job_wrapper.tool.job_runner
# This is silly, why would we queue a job with no command line?
if not command_line:
job_wrapper.finish( '', '' )
return
# Check for deletion before we change state
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
job_wrapper.cleanup()
return
# Change to queued state immediately
job_wrapper.change_state( model.Job.states.QUEUED )
if self.determine_sge_cell( runner_url ) != self.default_cell:
# TODO: support multiple cells
log.warning( "(%s) Using multiple SGE cells is not supported. This job will be submitted to the default cell." % job_wrapper.job_id )
sge_queue_name = self.determine_sge_queue( runner_url )
# define job attributes
ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job_wrapper.job_id)
efile = "%s/database/pbs/%s.e" % (os.getcwd(), job_wrapper.job_id)
jt = self.ds.createJobTemplate()
jt.remoteCommand = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job_wrapper.job_id)
jt.outputPath = ":%s" % ofile
jt.errorPath = ":%s" % efile
if sge_queue_name is not None:
jt.setNativeSpecification( "-q %s" % sge_queue_name )
script = sge_template % (job_wrapper.galaxy_lib_dir, os.getcwd(), command_line)
fh = file( jt.remoteCommand, "w" )
fh.write( script )
fh.close()
os.chmod( jt.remoteCommand, 0750 )
# job was deleted while we were preparing it
if job_wrapper.get_state() == model.Job.states.DELETED:
log.debug( "Job %s deleted by user before it entered the SGE queue" % job_wrapper.job_id )
self.cleanup( ( ofile, efile, jt.remoteCommand ) )
job_wrapper.cleanup()
return
galaxy_job_id = job_wrapper.job_id
log.debug("(%s) submitting file %s" % ( galaxy_job_id, jt.remoteCommand ) )
log.debug("(%s) command is: %s" % ( galaxy_job_id, command_line ) )
# runJob will raise if there's a submit problem
job_id = self.ds.runJob(jt)
if sge_queue_name is None:
log.debug("(%s) queued in default queue as %s" % (galaxy_job_id, job_id) )
else:
log.debug("(%s) queued in %s queue as %s" % (galaxy_job_id, sge_queue_name, job_id) )
# store runner information for tracking if Galaxy restarts
job_wrapper.set_runner( runner_url, job_id )
# Store SGE related state information for job
sge_job_state = SGEJobState()
sge_job_state.job_wrapper = job_wrapper
sge_job_state.job_id = job_id
sge_job_state.ofile = ofile
sge_job_state.efile = efile
sge_job_state.job_file = jt.remoteCommand
sge_job_state.old_state = 'new'
sge_job_state.running = False
sge_job_state.runner_url = runner_url
# delete the job template
self.ds.deleteJobTemplate( jt )
# Add to our 'queue' of jobs to monitor
self.queue.put( sge_job_state )
def monitor( self ):
"""
Watches jobs currently in the SGE queue and deals with state changes
(queued to running) and job completion
"""
while 1:
# Take any new watched jobs and put them on the monitor list
try:
while 1:
sge_job_state = self.queue.get_nowait()
if sge_job_state is self.STOP_SIGNAL:
# TODO: This is where any cleanup would occur
self.ds.exit()
return
self.watched.append( sge_job_state )
except Empty:
pass
# Iterate over the list of watched jobs and check state
self.check_watched_items()
# Sleep a bit before the next state check
time.sleep( 1 )
def check_watched_items( self ):
"""
Called by the monitor thread to look at each watched job and deal
with state changes.
"""
new_watched = []
for sge_job_state in self.watched:
job_id = sge_job_state.job_id
galaxy_job_id = sge_job_state.job_wrapper.job_id
old_state = sge_job_state.old_state
try:
state = self.ds.getJobProgramStatus( job_id )
except DRMAA.InvalidJobError:
# we should only get here if an orphaned job was put into the queue at app startup
log.debug("(%s/%s) job left SGE queue" % ( galaxy_job_id, job_id ) )
self.finish_job( sge_job_state )
continue
except Exception, e:
# so we don't kill the monitor thread
log.exception("(%s/%s) Unable to check job status" % ( galaxy_job_id, job_id ) )
log.warning("(%s/%s) job will now be errored" % ( galaxy_job_id, job_id ) )
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
continue
if state != old_state:
log.debug("(%s/%s) state change: %s" % ( galaxy_job_id, job_id, DRMAA_state[state] ) )
if state == DRMAA.Session.RUNNING and not sge_job_state.running:
sge_job_state.running = True
sge_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
if state == DRMAA.Session.DONE:
self.finish_job( sge_job_state )
continue
if state == DRMAA.Session.FAILED:
sge_job_state.job_wrapper.fail( "Cluster could not complete job" )
sge_job_state.job_wrapper.cleanup()
continue
sge_job_state.old_state = state
new_watched.append( sge_job_state )
# Replace the watch list with the updated version
self.watched = new_watched
def finish_job( self, sge_job_state ):
"""
Get the output/error for a finished job, pass to `job_wrapper.finish`
and cleanup all the SGE temporary files.
"""
ofile = sge_job_state.ofile
efile = sge_job_state.efile
job_file = sge_job_state.job_file
# collect the output
try:
ofh = file(ofile, "r")
efh = file(efile, "r")
stdout = ofh.read()
stderr = efh.read()
except:
stdout = ''
stderr = 'Job output not returned from cluster'
log.debug(stderr)
try:
sge_job_state.job_wrapper.finish( stdout, stderr )
except:
log.exception("Job wrapper finish method failed")
# clean up the sge files
self.cleanup( ( ofile, efile, job_file ) )
def cleanup( self, files ):
if not asbool( self.app.config.get( 'debug', False ) ):
for file in files:
if os.access( file, os.R_OK ):
os.unlink( file )
def put( self, job_wrapper ):
"""Add a job to the queue (by job identifier)"""
self.queue_job( job_wrapper )
def shutdown( self ):
"""Attempts to gracefully shut down the monitor thread"""
log.info( "sending stop signal to worker threads" )
self.queue.put( self.STOP_SIGNAL )
log.info( "sge job runner stopped" )
def stop_job( self, job ):
"""Attempts to delete a job from the SGE queue"""
try:
self.ds.control( job.job_runner_external_id, DRMAA.Session.TERMINATE )
log.debug( "(%s/%s) Removed from SGE queue at user's request" % ( job.id, job.job_runner_external_id ) )
except DRMAA.InvalidJobError:
log.debug( "(%s/%s) User killed running job, but it was already dead" ( job.id, job.job_runner_external_id ) )
def recover( self, job, job_wrapper ):
"""Recovers jobs stuck in the queued/running state when Galaxy started"""
sge_job_state = SGEJobState()
sge_job_state.ofile = "%s/database/pbs/%s.o" % (os.getcwd(), job.id)
sge_job_state.efile = "%s/database/pbs/%s.e" % (os.getcwd(), job.id)
sge_job_state.job_file = "%s/database/pbs/galaxy_%s.sh" % (os.getcwd(), job.id)
sge_job_state.job_id = str( job.job_runner_external_id )
sge_job_state.runner_url = job_wrapper.tool.job_runner
job_wrapper.command_line = job.command_line
sge_job_state.job_wrapper = job_wrapper
if job.state == model.Job.states.RUNNING:
log.debug( "(%s/%s) is still in running state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.RUNNING
sge_job_state.running = True
self.queue.put( sge_job_state )
elif job.state == model.Job.states.QUEUED:
log.debug( "(%s/%s) is still in SGE queued state, adding to the SGE queue" % ( job.id, job.job_runner_external_id ) )
sge_job_state.old_state = DRMAA.Session.QUEUED
sge_job_state.running = False
self.queue.put( sge_job_state )
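# A minimal standalone sketch (ours, not part of Galaxy) of the runner-URL
# convention that determine_sge_cell()/determine_sge_queue() above parse:
# 'sge://<cell>/<queue>', where either part may be empty. The example URL
# below is made up.
def _parse_sge_url(url):
    parts = url.split("/")
    cell = parts[2] if parts[0] == "sge:" else ""
    queue = parts[3] if len(parts) > 3 and parts[3] else None
    return cell, queue

# _parse_sge_url("sge://default/all.q") -> ("default", "all.q")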
|
dbcls/dbcls-galaxy
|
lib/galaxy/jobs/runners/sge.py
|
Python
|
mit
| 13,180
|
[
"Galaxy"
] |
eeba1243cb9d20f0f6a50255e60259ef5dc40e958a651aa2b3dabb04893f98a9
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This tests the scafacos p2nfft dipolar calculations by matching against
# reference data from direct summation. In 2d, reference data from the mdlc
# test case is used
import numpy as np
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.magnetostatics
import tests_common
@utx.skipIfMissingFeatures(["SCAFACOS_DIPOLES"])
class Scafacos1d2d(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.time_step = 0.01
system.cell_system.skin = 0.5
system.periodicity = [1, 1, 1]
def tearDown(self):
self.system.part.clear()
self.system.actors.clear()
self.system.periodicity = [1, 1, 1]
def vector_error(self, a, b):
return np.sum(np.linalg.norm(a - b, axis=1)) / np.sqrt(a.shape[0])
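# Our annotation: this is an RMS-style aggregate of the per-particle errors,
#     vector_error(a, b) = sum_i ||a_i - b_i|| / sqrt(N)
# e.g. for a = np.zeros((4, 3)) and b = np.ones((4, 3)) every row norm is
# sqrt(3), so it returns 4 * sqrt(3) / sqrt(4) = 2 * sqrt(3).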
def test_scafacos(self):
s = self.system
rho = 0.3
# This is only for box size calculation. The actual particle number is
# lower, because particles are removed from the mdlc gap region
n_particle = 100
particle_radius = 0.5
box_l = np.cbrt(4 * n_particle * np.pi / (3 * rho)) * particle_radius
s.box_l = 3 * [box_l]
for dim in (2, 1):
print("Dimension", dim)
# Read reference data
if dim == 2:
file_prefix = "data/mdlc"
s.periodicity = [1, 1, 0]
else:
s.periodicity = [1, 0, 0]
file_prefix = "data/scafacos_dipoles_1d"
ref_E_path = tests_common.abspath(
file_prefix + "_reference_data_energy.dat")
ref_E = float(np.genfromtxt(ref_E_path))
# Particles
data = np.genfromtxt(tests_common.abspath(
file_prefix + "_reference_data_forces_torques.dat"))
s.part.add(pos=data[:, 1:4], dip=data[:, 4:7])
s.part.all().rotation = 3 * [True]
if dim == 2:
scafacos = espressomd.magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 0,
"pnfft_N": "80,80,160",
"pnfft_window_name": "bspline",
"pnfft_m": "4",
"p2nfft_ignore_tolerance": "1",
"pnfft_diff_ik": "0",
"p2nfft_r_cut": "6",
"p2nfft_alpha": "0.8",
"p2nfft_epsB": "0.05"})
s.actors.add(scafacos)
# change box geometry in x,y direction to ensure that
# scafacos survives it
s.box_l = np.array((1, 1, 1.3)) * box_l
else:
# 1d periodic in x
scafacos = espressomd.magnetostatics.Scafacos(
prefactor=1,
method_name="p2nfft",
method_params={
"p2nfft_verbose_tuning": 1,
"pnfft_N": "32,128,128",
"pnfft_direct": 0,
"p2nfft_r_cut": 2.855,
"p2nfft_alpha": "1.5",
"p2nfft_intpol_order": "-1",
"p2nfft_reg_kernel_name": "ewald",
"p2nfft_p": "16",
"p2nfft_ignore_tolerance": "1",
"pnfft_window_name": "bspline",
"pnfft_m": "8",
"pnfft_diff_ik": "1",
"p2nfft_epsB": "0.125"})
s.box_l = np.array((1, 1, 1)) * box_l
s.actors.add(scafacos)
s.integrator.run(0)
# Calculate errors
err_f = self.vector_error(s.part.all().f, data[:, 7:10])
err_t = self.vector_error(s.part.all().torque_lab, data[:, 10:13])
err_e = s.analysis.energy()["dipolar"] - ref_E
tol_f = 2E-3
tol_t = 2E-3
tol_e = 1E-3
self.assertLessEqual(
abs(err_e), tol_e, "Energy difference too large")
self.assertLessEqual(
abs(err_t), tol_t, "Torque difference too large")
self.assertLessEqual(
abs(err_f), tol_f, "Force difference too large")
s.part.clear()
s.actors.clear()
if __name__ == "__main__":
ut.main()
|
espressomd/espresso
|
testsuite/python/scafacos_dipoles_1d_2d.py
|
Python
|
gpl-3.0
| 5,192
|
[
"ESPResSo"
] |
b20fbfca6b245c1e2b818c4a6e133586a99419358fe3695ec8430197a409da6f
|
from ase.atoms import Atoms
from ase.parallel import paropen
def read_xyz(fileobj, index=-1):
if isinstance(fileobj, str):
fileobj = open(fileobj)
lines = fileobj.readlines()
L1 = lines[0].split()
if len(L1) == 1:
del lines[:2]
natoms = int(L1[0])
else:
natoms = len(lines)
images = []
while len(lines) >= natoms:
positions = []
symbols = []
for line in lines[:natoms]:
symbol, x, y, z = line.split()[:4]
symbol = symbol.lower().capitalize()
symbols.append(symbol)
positions.append([float(x), float(y), float(z)])
images.append(Atoms(symbols=symbols, positions=positions))
del lines[:natoms + 2]
return images[index]
def write_xyz(fileobj, images):
if isinstance(fileobj, str):
fileobj = paropen(fileobj, 'w')
if not isinstance(images, (list, tuple)):
images = [images]
symbols = images[0].get_chemical_symbols()
natoms = len(symbols)
for atoms in images:
fileobj.write('%d\n\n' % natoms)
for s, (x, y, z) in zip(symbols, atoms.get_positions()):
fileobj.write('%-2s %22.15f %22.15f %22.15f\n' % (s, x, y, z))
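# Minimal round-trip sketch (ours, not part of ASE); 'example.xyz' is an
# illustrative path.
if __name__ == '__main__':
    atoms = Atoms('H2', positions=[(0.0, 0.0, 0.0), (0.0, 0.0, 0.74)])
    write_xyz('example.xyz', atoms)
    print read_xyz('example.xyz')  # -> the H2 Atoms object read back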
|
JConwayAWT/PGSS14CC
|
lib/python/multimetallics/ase/io/xyz.py
|
Python
|
gpl-2.0
| 1,231
|
[
"ASE"
] |
4297e469f86df67c1195abb0493dc45c113af225d3910971bc4536b1353ae297
|
from ase import Atoms
from gpaw import GPAW
from gpaw.tddft import TDDFT
from gpaw.tddft.ehrenfest import EhrenfestVelocityVerlet
from gpaw.test import equal
d = 4.0
atoms = Atoms('NaCl', [(0,0,0),(0,0,d)])
atoms.center(vacuum=4.5)
gs_calc = GPAW(nbands=4, eigensolver='cg', gpts=(32, 32, 44), xc='LDA',
setups={'Na': '1'})
atoms.set_calculator(gs_calc)
atoms.get_potential_energy()
gs_calc.write('nacl_gs.gpw', 'all')
td_calc = TDDFT('nacl_gs.gpw', propagator='EFSICN')
evv = EhrenfestVelocityVerlet(td_calc, 0.001)
i=0
evv.get_energy()
r = evv.x[1][2] - evv.x[0][2]
# print 'E = ', [i, r, evv.Etot, evv.Ekin, evv.Epot]
for i in range(5):
evv.propagate(1.0)
evv.get_energy()
r = evv.x[1][2] - evv.x[0][2]
print 'E = ', [i+1, r, evv.Etot, evv.Ekin, evv.Epot]
equal(r, 7.558883144, 1e-7)
equal(evv.Etot, -0.1036763317, 1e-7)
|
robwarm/gpaw-symm
|
gpaw/test/ehrenfest_nacl.py
|
Python
|
gpl-3.0
| 866
|
[
"ASE",
"GPAW"
] |
aa81226106bb9949b93d8ee1797b6bcf99a453f8d0ea8c68b57b6d947daba25a
|
#! /usr/bin/env python2.7
"""
This script post-processes amplicon sequenced DBLa-tags basecalled using Multipass.
"""
import os, sys, time, copy
from optparse import OptionParser
import subprocess as Sp
from shlex import split as cmdsplit
# Constants
USEARCH = "/usr/local/bin/usearch_v5.2.32_i86osx32"
HMMSEARCH = "/usr/local/bin/hmmsearch_v3.1"
BLASTN = "/usr/local/bin/blastn_v2.2.25"
RESOURCES = "./resources"
# Script version
VERSION = "1.0.0"
#############################################################################################
#############################################################################################
# Command options
def build_parser():
""" Builds command line parser """
vers = "%%prog %s" %VERSION
use = """usage: %prog [options] <basecalls_file>
<basecalls_file> File with '.basecalls'-extension produced by Multipass."""
parser = OptionParser(usage=use, version=vers)
parser.set_description("This script post-processes amplicon sequenced DBLa-tags basecalled using Multipass.")
parser.add_option("-v", dest="v", action="store_true",
help="print verbose information [False]",
default=False)
parser.add_option("-R", type="string", dest="R", metavar="RESULT_DIR",
help="directory for result files [<basecall_file>.postprocess]",
default="")
parser.add_option("-i", dest="i", action="store_true",
help="remove 3D7 sequences using BLASTN [False]",
default=False)
parser.add_option("-m", type="int", dest="m", metavar="METHOD",
help="basecalling method. 0=Multipass, 1=Multipass_FRF [1]",
default=1)
return parser
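# Illustrative usage (ours; the file name is made up):
#   python <this_script>.py -v -m 1 run42.basecalls
# gives options.v == True, options.m == 1 and args == ['run42.basecalls'].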
#############################################################################################
#############################################################################################
### Main action
def main():
""" Main method """
#########################
global options
### Parse commandline
parser = build_parser()
if len(sys.argv) < 2:
parser.print_help(); sys.exit()
(options, args) = parser.parse_args()
if not os.path.isfile(args[0]):
raise SystemExit("Error, could not find file: %s"% args[0])
if not os.path.isdir(RESOURCES):
raise SystemExit("Error, could not find directory with resources: %s"% RESOURCES)
    fn = os.path.split(args[0])[1]
    if fn.endswith(".basecalls"):  # str.strip(".basecalls") would drop characters, not the suffix
        fn = fn[:-len(".basecalls")]
#########################
### Create resultdir
resultdir = options.R if options.R else args[0]+".postprocess"
if not os.path.isdir(resultdir):
printv("Creating directory for results: %s"% resultdir)
os.mkdir(resultdir)
else: printv("Result directory: %s"% resultdir)
resultPathFn = os.path.join(resultdir, fn)
### Create fasta with most likely sequence for each flowgram cluster
bcFastaFile = resultPathFn+'.bc.fas'
with open(bcFastaFile, 'w') as fh:
for (sid, seq) in getMLSeqs(args[0], fn, options.m):
fh.write(">%s\n%s\n"%(sid, seq))
### Identity clustering with seeds as output
clusterSeedsFastaFile = resultPathFn+'.clu.fas'
cmd = "%s --usersort --cluster %s --nofastalign --id 0.96 --seedsout %s --uc %s.clu.uc --sizein --sizeout"%(USEARCH, bcFastaFile, clusterSeedsFastaFile, resultPathFn)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout, serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
### Sort by size
sortedSeedsFastaFile = resultPathFn+'.clus.fas'
sortSize(clusterSeedsFastaFile, sortedSeedsFastaFile)
### Chimera detection de-novo mode
nonChimerasDenovo = resultPathFn+'.clus.nc.fas'
cmd = "%s --uchime %s --uchimeout %s.clus.uchimeout --uchimealns %s.clus.uchimealns --chimeras %s.clus.ch.fas --nonchimeras %s"% \
(USEARCH, sortedSeedsFastaFile, resultPathFn, resultPathFn, resultPathFn, nonChimerasDenovo)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout, serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
### Chimera detection database mode
dbFile = resultPathFn+'.clus.nc.db'
    # Use clusters of at least size 2 as self (i.e. don't allow single-sequence clusters to remove larger clusters)
sortSize(nonChimerasDenovo, dbFile, mins=2)
nonChimerasSelf = resultPathFn+'.clus.ncself.fas'
# Remove chimeras by search against self - increase sensitivity by setting --minh 0.2
cmd = "%s --uchime %s --db %s --minh 0.2 --self --uchimeout %s.clus.uchimeoutself --uchimealns %s.clus.uchimealnsself --chimeras %s.clus.chself.fas --nonchimeras %s"% \
(USEARCH, nonChimerasDenovo, dbFile, resultPathFn, resultPathFn, resultPathFn, nonChimerasSelf)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout, serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
### Remove least supported sequences
trimmedFastaFile = resultPathFn+'.clust.fas'
sortSize(nonChimerasSelf, trimmedFastaFile, minc=3)
### Identify non-DBLa
(sdic, slis) = fastaRead(trimmedFastaFile)
nondbla = copy.deepcopy(slis)
nonatagFastaFile = resultPathFn+'.nonatag.fas'
cmd = "%s --cut_ga --domtblout %s.atag.hmmsearch -o /dev/null %s/atag.hmm %s"%(HMMSEARCH, resultPathFn, RESOURCES, trimmedFastaFile)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout, serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
with open(resultPathFn+".atag.hmmsearch") as fh:
found = set([])
for line in fh:
if line.startswith("#"): continue
ssl = line.strip().split()
if ssl and (ssl[0] not in found):
nondbla.remove(ssl[0])
found.add(ssl[0])
with open(nonatagFastaFile, 'w') as fh:
for sid in nondbla:
fh.write(">%s\n%s\n"%(sid, sdic[sid]))
### Identify DBLb
dblb = []
btagFastaFile = resultPathFn+'.btag.fas'
cmd = "%s --cut_ga --domtblout %s.btag.hmmsearch -o /dev/null %s/btag.hmm %s"%(HMMSEARCH, resultPathFn, RESOURCES, trimmedFastaFile)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout, serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
with open(resultPathFn+".btag.hmmsearch") as fh:
found = set([])
for line in fh:
if line.startswith("#"): continue
ssl = line.strip().split()
if ssl and (ssl[0] not in found):
dblb.append(ssl[0])
found.add(ssl[0])
with open(btagFastaFile, 'w') as fh:
for sid in dblb:
fh.write(">%s\n%s\n"%(sid, sdic[sid]))
### Blast 3D7
b3d7 = []
if options.i:
### Blast sequences against 3d7 DBLa-tags
cmd = "%s -query %s -out /dev/stdout -db %s/3D7_dblatag/3D7dblatags.fas -evalue 1e-150 -perc_identity 96 -num_threads 2 -max_target_seqs 1 -outfmt '6 qseqid' "% \
(BLASTN, trimmedFastaFile, RESOURCES)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout,serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
with open(resultPathFn+'.3d7atags.fas', 'w') as bfh:
for line in sout.splitlines():
sid = line.strip()
if not sid: continue
b3d7.append(sid)
bfh.write(">%s\n%s\n"%(sid, sdic[sid]))
### Blast sequences against 3d7 remaining genome
cmd = "%s -strand 'plus' -query %s -out /dev/stdout -db %s/3D7_nondblatag/PlasmoDB-9.3_Pfalciparum3D7_Genome_mar2013.allfwd.fas -evalue 1e-50 -perc_identity 96 -num_threads 2 -max_target_seqs 1 -outfmt '6 qseqid' "% \
(BLASTN, trimmedFastaFile, RESOURCES)
printv("Executing command: %s"%cmd); sys.stdout.flush()
P = Sp.Popen(cmdsplit(cmd), stdout=Sp.PIPE, stdin=Sp.PIPE, stderr=Sp.PIPE)
(sout,serr) = P.communicate()
if P.wait() != 0: raise SystemExit("Problem executing: %s\n%s"%(cmd, serr))
with open(resultPathFn+'.3d7nonatags.fas', 'w') as bfh:
for line in sout.splitlines():
sid = line.strip()
if not sid: continue
b3d7.append(sid)
bfh.write(">%s\n%s\n"%(sid, sdic[sid]))
### Write clean DBLa-tags
atagFastaFile = resultPathFn+'.clean.fas'
with open(atagFastaFile, 'w') as fh:
for sid in slis:
if sid not in (dblb+nondbla+b3d7):
fh.write(">%s\n%s\n"%(sid, sdic[sid]))
#############################################################################################
#############################################################################################
### Sort fasta file by cluster size
def sortSize(inFile, outFile, minc=1, mins=1):
""" Sort fasta file by cluster size """
(sdic, slis) = fastaRead(inFile)
lol = []
for sid in slis:
remain = sid
(v, remain) = remain.rsplit("_c",1)
(c, s) = remain.rsplit(";size=")
if (int(c) >= minc) and (int(s) >= mins):
lol.append((int(s), int(c), sid))
lol.sort(reverse = True)
with open(outFile, 'w') as outFileHand:
for (s, c, sid) in lol:
outFileHand.write(">%s\n%s\n"%(sid, sdic[sid]))
#############################################################################################
#############################################################################################
### Read fasta file
def fastaRead(fasFile):
""" Read fasta file """
seqDict = {}
seqList = []
with open(fasFile) as fasFileHand:
for line in fasFileHand:
sline=line.strip()
if sline=='' or sline[0]=='#': continue
elif sline[0]=='>':
sid=sline[1:]
seqDict[sid]=""
seqList.append(sid)
else:
seqDict[sid] += sline.upper()
return(seqDict, seqList)
#############################################################################################
#############################################################################################
### Get most likely sequence from each flowgram cluster
def getMLSeqs(mpFileName, fn, meth):
""" Get most likely sequence from each flowgram cluster """
mLSeqs = []
for line in open(mpFileName):
if line.startswith(">Cluster"):
if mLSeqs:
if len(mLSeqs) < 10: raise SystemExit("Sorry, this method requires at least 10 most likely sequences per cluster.")
mLSeq = max(mLSeqs[:10])
sid = "%s_%s_c%s;size=%s"%(fn, clusterInfo[2].replace("_","."), cSize, cSize)
yield(sid, mLSeq[1])
mLSeqs = []
clusterInfo = line.strip().split()
cSize = int(clusterInfo[3])
readML = False
elif line.startswith(">>>"):
readML = True
elif readML:
ssline = line.strip().split()
if len(ssline) == 3:
if (meth == 0) or (cSize == 1): mLSeqs.append([float(ssline[1]), ssline[2]])
elif (meth == 1): mLSeqs.append([float(ssline[1]) * Pcbs_frf(ssline[2]), ssline[2]])
else: raise SystemExit("Illegal method value: %s"%(meth))
if mLSeqs:
mLSeq = max(mLSeqs)
sid = "%s_%s_c%s;size=%s"%(fn, clusterInfo[2].replace("_","."), cSize, cSize)
yield(sid, mLSeq[1])
#############################################################################################
#############################################################################################
### Probability that the sequence is correctly basecalled given the presence/absence
### of a full length reading frame
def Pcbs_frf(seq):
if findORF(seq): return(0.489)
else: return(2.58e-4)
#############################################################################################
#############################################################################################
### Find ORF in forward reading frame 1, 2, and 3
stdDnaCode = {
    '---':'-',
    'TTT':'F','TTC':'F','TTA':'L','TTG':'L','TCT':'S','TCC':'S','TCA':'S','TCG':'S',
    'TAT':'Y','TAC':'Y','TAA':'*','TAG':'*','TGT':'C','TGC':'C','TGA':'*','TGG':'W',
    'CTT':'L','CTC':'L','CTA':'L','CTG':'L','CCT':'P','CCC':'P','CCA':'P','CCG':'P',
    'CAT':'H','CAC':'H','CAA':'Q','CAG':'Q','CGT':'R','CGC':'R','CGA':'R','CGG':'R',
    'ATT':'I','ATC':'I','ATA':'I','ATG':'M','ACT':'T','ACC':'T','ACA':'T','ACG':'T',
    'AAT':'N','AAC':'N','AAA':'K','AAG':'K','AGT':'S','AGC':'S','AGA':'R','AGG':'R',
    'GTT':'V','GTC':'V','GTA':'V','GTG':'V','GCT':'A','GCC':'A','GCA':'A','GCG':'A',
    'GAT':'D','GAC':'D','GAA':'E','GAG':'E','GGT':'G','GGC':'G','GGA':'G','GGG':'G',
}
stopCodon = ("TAG", "TGA", "TAA")
def findORF(s):
for k in range(3):
orf = []
for i in range(k, len(s), 3):
cod = s[i:i+3].upper()
if (len(cod) != 3): continue
if cod in stopCodon: break
else: orf.append(stdDnaCode.get(cod,'X'))
else:
return("".join(orf))
#############################################################################################
#############################################################################################
### Print verbose
def printv(txt):
if options.v: print(txt)
#############################################################################################
#############################################################################################
### Main
if __name__ == "__main__":
main()
|
454data/postprocess
|
postprocess_basecalls.py
|
Python
|
mit
| 13,075
|
[
"BLAST"
] |
47d978e7a341eca96867304d699abac405c921e3f7c179a0e35cc265337810d2
|
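The reading-frame scan in `findORF` above can be illustrated standalone; a toy sketch using the same stop-codon set (not part of the original script):
# Toy demonstration of scanning the three forward reading frames for a
# stop-free stretch, mirroring the logic of findORF above.
STOP = ("TAG", "TGA", "TAA")

def has_stop_free_frame(seq):
    for k in range(3):  # reading frames 1, 2 and 3
        codons = [seq[i:i + 3] for i in range(k, len(seq) - 2, 3)]
        if not any(codon in STOP for codon in codons):
            return True
    return False

print(has_stop_free_frame("ATGAAACCC"))     # True: frame 1 is stop-free
print(has_stop_free_frame("TAGCTAGCTAGC"))  # False: every frame hits a stop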
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Python modules repository
:author: Thomas Calmant
:license: Apache Software License 2.0
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Standard library
import ast
import imp
import logging
import os
# ######### added by: Bassem D.
import json
# #########
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Provides, Property, \
Invalidate, Validate
from pelix.utilities import is_string
# Repository beans
import cohorte
import cohorte.repositories
from cohorte.repositories.beans import Artifact, Version
# ------------------------------------------------------------------------------
# Bundle version
import cohorte.version
__version__ = cohorte.version.__version__
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
class Module(Artifact):
"""
Represents a bundle
"""
def __init__(self, name, version, imports, filename):
"""
Sets up the bundle details
:param name: Name of the module
:param version: Version of the module (as a string)
:param imports: List of names of imported modules
:param filename: Path to the .py file
:raise ValueError: Invalid argument
"""
Artifact.__init__(self, "python", name, version, filename)
# Store information
self.all_imports = imports
def imports(self, artifact):
"""
Tests if this module might import the given artifact
:param artifact: Another artifact
:return: True if this module imports the given one
"""
if artifact.language != self.language:
# No inter-language imports
return False
return artifact.name in self.all_imports
# ------------------------------------------------------------------------------
class AstVisitor(ast.NodeVisitor):
"""
AST visitor to extract imports and version
"""
# pylint: disable=invalid-name
def __init__(self, module_name, is_package):
"""
Sets up the visitor
:param module_name: The module name
:param is_package: Whether the name is a package name
"""
ast.NodeVisitor.__init__(self)
self.imports = set()
self.version = None
self.module_parts = module_name.split(".")
# Drop module name, keeping only packages' names
if not is_package:
self.module_parts = self.module_parts[:-1]
self.module_name = module_name
def generic_visit(self, node):
"""
        Custom default visit method that avoids visiting deeper than the
        module level.
"""
if type(node) is ast.Module:
ast.NodeVisitor.generic_visit(self, node)
def resolve_relative_import_from(self, node):
"""
Converts a relative import (import .module) into an absolute one
:param node: An ImportFrom AST node
:return: The absolute module name
"""
if node.level > 0:
# Relative import
if node.level == 1:
parent = '.'.join(self.module_parts)
else:
parent = '.'.join(self.module_parts[:-node.level + 1])
if node.module:
# from .module import ...
return '.'.join((parent, node.module))
else:
# from . import ...
return parent
else:
# Absolute import
return node.module
def visit_Import(self, node):
"""
Found an "import"
"""
for alias in node.names:
self.imports.add(alias.name)
def visit_ImportFrom(self, node):
"""
Found a "from ... import ..."
"""
imported = self.resolve_relative_import_from(node)
self.imports.add(imported)
def visit_Assign(self, node):
"""
Found an assignment
"""
field = getattr(node.targets[0], 'id', None)
if not self.version \
and field in ('__version__', '__version_info__'):
try:
version_parsed = ast.literal_eval(node.value)
if isinstance(version_parsed, (tuple, list)):
self.version = ".".join(str(version_parsed))
else:
self.version = str(version_parsed)
except ValueError:
# Ignore errors
pass
def _extract_module_info(filename, module_name, is_package):
"""
Extract the version and the imports from the given Python file
:param filename: Path to the file to parse
:param module_name: The fully-qualified module name
:param is_package: Whether the name is a package name
:return: A (version, [imports]) tuple
:raise ValueError: Unreadable file
"""
try:
with open(filename) as filep:
source = filep.read()
except (OSError, IOError) as ex:
raise ValueError("Error reading {0}: {1}".format(filename, ex))
visitor = AstVisitor(module_name, is_package)
try:
module = ast.parse(source, filename, 'exec')
except (ValueError, SyntaxError, TypeError) as ex:
raise ValueError("Error parsing {0}: {1}".format(filename, ex))
visitor.visit(module)
return visitor.version, visitor.imports
# ------------------------------------------------------------------------------
@ComponentFactory("cohorte-repository-artifacts-python-factory")
@Provides(cohorte.repositories.SERVICE_REPOSITORY_ARTIFACTS)
@Property('_language', cohorte.repositories.PROP_REPOSITORY_LANGUAGE, "python")
class PythonModuleRepository(object):
"""
Represents a repository
"""
def __init__(self):
"""
Sets up the repository
"""
self._language = "python"
# Name -> [Modules]
self._modules = {}
# Directory name -> Package name
self._directory_package = {}
# File -> Module
self._files = {}
def __contains__(self, item):
"""
Tests if the given item is in the repository
:param item: Item to be tested
:return: True if the item is in the repository
"""
if isinstance(item, Artifact):
# Test artifact language
if item.language != "python":
return False
# Test if the name is in the modules
return item.name in self._modules
elif item in self._modules:
# Item matches a module name
return True
else:
# Test the file name
for name in (item, os.path.realpath(item)):
if name in self._files:
return True
# No match
return False
def __len__(self):
"""
Length of a repository <=> number of individual artifacts
"""
return sum((len(modules) for modules in self._modules.values()))
def __add_module(self, module, registry=None):
"""
Adds a module to the registry
:param module: A Module object
:param registry: Registry where to store the module
"""
if registry is None:
registry = self._modules
# Add the module to the registry
modules_list = registry.setdefault(module.name, [])
if module not in modules_list:
modules_list.append(module)
modules_list.sort(reverse=True)
# Associate the file name with the module
self._files[module.file] = module
@staticmethod
def __compute_name(root, filename):
"""
Computes the module name of the given file by looking for '__init__.py'
files in its parent directories
        :param root: Path to the directory the module name is computed against
        :param filename: Path of the module file
:return: The Python name of the module, and a boolean indicating
whether the name is a package name
:raise ValueError: Invalid directory name
"""
# Subtract the root part
filename = os.path.relpath(filename, root)
# Drop extension
filename = os.path.splitext(filename)[0]
name_parts = filename.split(os.path.sep)
        is_package = name_parts[-1] == "__init__"
if is_package:
name_parts = name_parts[:-1]
return ".".join(name_parts), is_package
@staticmethod
def __test_import(name):
"""
Tries to import the given module, using imp.find_module().
:param name: A module name
:return: True if the module can be imported
"""
try:
# find_module() uses a path-like name, not a dotted one
path_name = name.replace('.', os.sep)
result = imp.find_module(path_name)
except ImportError:
# Module not found
return False
else:
# Module found: close the file opened by find_module(), if any
if result[0] is not None:
result[0].close()
return True
def add_file(self, root, filename):
"""
Adds a Python file to the repository
:param root: Path to the python package base of the added file
:param filename: A Python full-path file name
:raise ValueError: Unreadable file
"""
# Compute the real name of the Python file
realfile = os.path.realpath(filename)
if realfile in self._files:
# Already read it: ignore
return
if os.path.basename(filename).startswith('.'):
# Hidden file: ignore
return
# Compute the complete module name
name, is_package = self.__compute_name(root, filename)
# Parse the file
version, imports = _extract_module_info(realfile, name, is_package)
# Store the module
self.__add_module(Module(name, version, imports, realfile))
@staticmethod
def __is_module(dirname):
"""
Class method testing whether a directory, given its name, contains a
valid python package.
        :param dirname: The directory's name
:return: True if the directory contains a valid python package.
False otherwise.
"""
init_file = os.path.join(dirname, "__init__.py")
return os.path.exists(init_file)
def add_directory(self, dirname):
"""
Recursively adds all .py modules found in the given directory into the
repository
:param dirname: A path to a directory
"""
for root, dirnames, filenames in os.walk(dirname, followlinks=True):
            # Check if the current directory, i.e. root, is either the base
# directory or a valid python package.
# Otherwise, do not walk through sub-directories.
if not os.path.samefile(dirname, root) \
and not self.__is_module(root):
continue
for filename in filenames:
if os.path.splitext(filename)[1] == '.py':
fullname = os.path.join(root, filename)
try:
self.add_file(dirname, fullname)
except ValueError as ex:
_logger.warning("Error analyzing %s: %s", fullname, ex)
def clear(self):
"""
Clears the repository content
"""
self._modules.clear()
self._files.clear()
self._directory_package.clear()
def get_artifact(self, name=None, version=None, filename=None,
registry=None):
"""
Retrieves a module from the repository
:param name: The module name (mutually exclusive with filename)
:param version: The module version (None or '0.0.0' for any), ignored
if filename is used
:param filename: The module file name (mutually exclusive with name)
:param registry: Registry where to look for the module
:return: The first matching module
:raise ValueError: If the module can't be found
"""
if registry is None:
registry = self._modules
if filename:
# Use the file name (direct search)
module = self._files.get(filename)
if module:
# Found it
return module
for bundle_file in self._files:
# Search by file base name
if os.path.basename(bundle_file) == filename:
return self._files[bundle_file]
if not name:
# Not found by file name, and no name to look for
raise ValueError("Module file not found: {0}".format(filename))
if isinstance(name, Module):
# Got a module
module = name
if module in registry:
return module
else:
# Use the module name and version
name = module.name
version = module.version
matching = registry.get(name, None)
if not matching:
raise ValueError('Module {0} not found.'.format(name))
for module in matching:
if module.version.matches(version):
return module
raise ValueError('Module {0} not found for version {1}'
.format(name, version))
def get_language(self):
"""
Retrieves the language of the artifacts stored in this repository
"""
return self._language
def resolve_installation(self, artifacts, system_artifacts=None):
"""
Returns all the artifacts that must be installed in order to have the
given modules resolved.
        :param artifacts: A list of modules to be installed
:param system_artifacts: Modules considered as available
:return: A tuple: (modules, dependencies, missing artifacts, [])
"""
# Name -> Module for this resolution
local_modules = {}
# Module -> [Modules]
dependencies = {}
# Missing elements
missing_modules = set()
# Consider system modules already installed
if system_artifacts:
for module in system_artifacts:
if is_string(module):
if module in self._modules:
module = self._modules[module]
else:
module = Module(str(module), None, None, None)
if isinstance(module, Module):
# Only accept modules
self.__add_module(module, local_modules)
# Resolution loop
to_install = [self.get_artifact(name) for name in artifacts]
i = 0
while i < len(to_install):
# Loop control
module = to_install[i]
i += 1
# Add the current module
self.__add_module(module, local_modules)
dependencies[module] = []
# Resolve import ...
for imported in module.all_imports:
# Find the module
registry = None
provider = None
for registry in (local_modules, self._modules):
try:
provider = self.get_artifact(imported, None, None,
registry)
# Found one
break
except ValueError:
# Try next
pass
else:
# No provider found, try to import the file
if not self.__test_import(imported):
# Totally unknown module
missing_modules.add(imported)
# Resolve next import
continue
                # Store the module we found
                if registry is self._modules:
                    # The provider was found in the global registry, store it
                    self.__add_module(provider, local_modules)
                # Store the dependency (only once per import)
                dependencies[module].append(provider)
# The new module will be resolved later
if provider not in to_install:
# We'll have to resolve it
to_install.append(provider)
return to_install, dependencies, missing_modules, []
def walk(self):
"""
        Walk through the known artifacts
"""
for modules in self._modules.values():
for module in modules:
yield module
# ######### added by: Bassem D.
def load_cache(self):
"""
Loads the cache from system file to memory
"""
use_cache = os.environ.get('COHORTE_USE_CACHE')
if use_cache and use_cache.lower() == "true":
try:
with open('cache.js') as input_file:
cache = json.load(input_file)
if cache:
_logger.info("loading repository from cache...")
# load modules
for module in cache["modules"]:
name = module["name"]
version = Version(module["version"])
filename = module["filename"]
module_bean = Module(name, version, [], filename)
self.__add_module(module_bean, self._modules)
for directory in cache["directories"]:
self._directory_package[directory["dir_name"]] \
= directory["pkg_name"]
return True
except (IOError, ValueError):
# Error reading/parsing cache file
return False
# No cache
return False
def save_cache(self):
"""
Saves the cache from memory to system file
"""
use_cache = os.environ.get('COHORTE_USE_CACHE')
if use_cache and use_cache.lower() == "true":
# dump modules
_logger.info("Dumping cache info...")
# Name -> [Modules]
cache_modules = [
{"name": module.name, "version": str(module.version),
"language": module.language, "filename": module.file}
for name, modules in self._modules.items()
for module in modules]
# Directory name -> Package name
cache_directories = [
{"dir_name": dir_name,
"pkg_name": self._directory_package[dir_name]}
for dir_name in self._directory_package]
# Write cache
cache = {"modules": cache_modules,
"directories": cache_directories}
with open('cache.js', 'w') as outfile:
json.dump(cache, outfile, indent=4)
# #########
@Validate
def validate(self, context):
"""
Component validated
"""
# ######### added by: Bassem D.
# check if there is a cache file, load it if so
# visit repo files and check if the modification date is changed
# if so, load the file and update the cached entry
# if there were no cache file, we create it at the end of the parsing
status = self.load_cache()
if not status:
_logger.info("Loading repository from file system...")
# #########
# Load repositories in another thread
# Home/Base repository
for key in (cohorte.PROP_BASE, cohorte.PROP_HOME):
repository = os.path.join(context.get_property(key), "repo")
self.add_directory(repository)
# Python path directories
python_path = os.getenv("PYTHONPATH", None)
if python_path:
for path in python_path.split(os.pathsep):
self.add_directory(path)
# ######### added by: Bassem D.
self.save_cache()
# #########
@Invalidate
def invalidate(self, context):
"""
Component invalidated
"""
self.clear()
|
isandlaTech/cohorte-devtools
|
qualifier/deploy/cohorte-home/repo/cohorte/repositories/python/modules.py
|
Python
|
apache-2.0
| 21,139
|
[
"VisIt"
] |
03527569ccaa82d2cc400de22398fd9e5707fff1d6cf7f93fa501c710c6b681a
|
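The AST-based import extraction above can be exercised in isolation; a minimal self-contained sketch of the same `ast.NodeVisitor` pattern (relative imports ignored for brevity):
# Minimal standalone sketch of import extraction with ast.NodeVisitor.
import ast

class ImportCollector(ast.NodeVisitor):
    def __init__(self):
        self.imports = set()
    def visit_Import(self, node):
        for alias in node.names:
            self.imports.add(alias.name)
    def visit_ImportFrom(self, node):
        if node.module:  # "from x import y"; relative levels skipped in this sketch
            self.imports.add(node.module)

source = "import os\nfrom json import dumps\n"
collector = ImportCollector()
collector.visit(ast.parse(source))
print(sorted(collector.imports))  # ['json', 'os']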
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import _validate
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def gini_index(data, method='rectangles'):
"""Calculate the Gini index.
The Gini index is defined as
.. math::
G=\\frac{A}{A+B}
where :math:`A` is the area between :math:`y=x` and the Lorenz curve and
:math:`B` is the area under the Lorenz curve. Simplifies to :math:`1-2B`
since :math:`A+B=0.5`.
Parameters
----------
data : 1-D array_like
Vector of counts, abundances, proportions, etc. All entries must be
non-negative.
method : {'rectangles', 'trapezoids'}
Method for calculating the area under the Lorenz curve. If
``'rectangles'``, connects the Lorenz curve points by lines parallel to
the x axis. This is the correct method (in our opinion) though
``'trapezoids'`` might be desirable in some circumstances. If
``'trapezoids'``, connects the Lorenz curve points by linear segments
between them. Basically assumes that the given sampling is accurate and
that more features of given data would fall on linear gradients between
the values of this data.
Returns
-------
double
Gini index.
Raises
------
ValueError
If `method` isn't one of the supported methods for calculating the area
under the curve.
Notes
-----
The Gini index was introduced in [1]_. The formula for
``method='rectangles'`` is
.. math::
dx\\sum_{i=1}^n h_i
The formula for ``method='trapezoids'`` is
.. math::
        dx(\\frac{h_0+h_n}{2}+\\sum_{i=1}^{n-1} h_i)
References
----------
.. [1] Gini, C. (1912). "Variability and Mutability", C. Cuppini, Bologna,
156 pages. Reprinted in Memorie di metodologica statistica (Ed. Pizetti
E, Salvemini, T). Rome: Libreria Eredi Virgilio Veschi (1955).
"""
# Suppress cast to int because this method supports ints and floats.
data = _validate(data, suppress_cast=True)
lorenz_points = _lorenz_curve(data)
B = _lorenz_curve_integrator(lorenz_points, method)
return 1 - 2 * B
def _lorenz_curve(data):
"""Calculate the Lorenz curve for input data.
Notes
-----
Formula available on wikipedia.
"""
sorted_data = np.sort(data)
Sn = sorted_data.sum()
n = sorted_data.shape[0]
return np.arange(1, n + 1) / n, sorted_data.cumsum() / Sn
def _lorenz_curve_integrator(lc_pts, method):
"""Calculates the area under a Lorenz curve.
Notes
-----
Could be utilized for integrating other simple, non-pathological
"functions" where width of the trapezoids is constant.
"""
x, y = lc_pts
# each point differs by 1/n
dx = 1 / x.shape[0]
if method == 'trapezoids':
# 0 percent of the population has zero percent of the goods
h_0 = 0.0
h_n = y[-1]
# the 0th entry is at x=1/n
sum_hs = y[:-1].sum()
return dx * ((h_0 + h_n) / 2 + sum_hs)
elif method == 'rectangles':
return dx * y.sum()
else:
raise ValueError("Method '%s' not implemented. Available methods: "
"'rectangles', 'trapezoids'." % method)
|
demis001/scikit-bio
|
skbio/diversity/alpha/_gini.py
|
Python
|
bsd-3-clause
| 3,694
|
[
"scikit-bio"
] |
97a4cdc8fc481e51d86f0285e90e866c3df03410dd6a7e9813d4859325037657
|
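A worked check of the trapezoid rule above, replicated with plain numpy (no scikit-bio import; the input vector is illustrative):
# Maximal inequality among 4 entries: the classical trapezoid result is 1 - 1/n.
import numpy as np

data = np.array([0.0, 0.0, 0.0, 1.0])
y = np.sort(data).cumsum() / data.sum()      # Lorenz heights: [0, 0, 0, 1]
dx = 1 / y.shape[0]                          # points are spaced 1/n apart
B = dx * ((0.0 + y[-1]) / 2 + y[:-1].sum())  # trapezoids: h_0 = 0, h_n = y[-1]
print(1 - 2 * B)                             # 0.75, i.e. 1 - 1/4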
# Orca
#
# Copyright 2006-2008 Sun Microsystems Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
"""Custom script for planner."""
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2006-2008 Sun Microsystems Inc."
__license__ = "LGPL"
from script import Script
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/share/pyshared/orca/scripts/apps/planner/__init__.py
|
Python
|
gpl-3.0
| 1,007
|
[
"ORCA"
] |
52431212b0f89f9c077b455327993b344a6d5ca0c56998cf4b7719ee3a104dc6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Module for Latent Semantic Indexing.
This module actually contains several algorithms for decomposition of large corpora, a
combination of which effectively and transparently allows building LSI models for:
* corpora much larger than RAM: only constant memory is needed, independent of
the corpus size (though still dependent on the feature set size)
* corpora that are streamed: documents are only accessed sequentially, no
random-access
* corpora that cannot be even temporarily stored: each document can only be
seen once and must be processed immediately (one-pass algorithm)
* distributed computing for very large corpora, making use of a cluster of
machines
Wall-clock performance on the English Wikipedia (2G corpus positions, 3.2M
documents, 100K features, 0.5G non-zero entries in the final TF-IDF matrix),
requesting the top 400 LSI factors:
====================================================== ============ ==================
algorithm serial distributed
====================================================== ============ ==================
one-pass merge algorithm 5h14m 1h41m
multi-pass stochastic algo (with 2 power iterations) 5h39m N/A [1]_
====================================================== ============ ==================
    *serial* = Core 2 Duo MacBook Pro 2.53GHz, 4GB RAM, libVec
*distributed* = cluster of four logical nodes on three physical machines, each
with dual core Xeon 2.0GHz, 4GB RAM, ATLAS
.. [1] The stochastic algo could be distributed too, but most time is already spent
reading/decompressing the input from disk in its 4 passes. The extra network
traffic due to data distribution across cluster nodes would likely make it
*slower*.
"""
import logging
import itertools
import sys
import numpy
import scipy.sparse
from scipy.sparse import sparsetools
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.lsimodel')
# accuracy defaults for the multi-pass stochastic algo
P2_EXTRA_DIMS = 100 # set to `None` for dynamic P2_EXTRA_DIMS=k
P2_EXTRA_ITERS = 2
def clip_spectrum(s, k, discard=0.001):
"""
Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically unstable) values.
This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).
The returned value is clipped against `k` (= never return more than `k`).
"""
# compute relative contribution of eigenvalues towards the energy spectrum
rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
# ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
k = min(k, small) # clip against k
logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
(k, 100 * rel_spectrum[k - 1]))
return k
def asfarray(a, name=''):
if not a.flags.f_contiguous:
logger.debug("converting %s array %s to FORTRAN order" % (a.shape, name))
a = numpy.asfortranarray(a)
return a
def ascarray(a, name=''):
if not a.flags.contiguous:
logger.debug("converting %s array %s to C order" % (a.shape, name))
a = numpy.ascontiguousarray(a)
return a
class Projection(utils.SaveLoad):
def __init__(self, m, k, docs=None, use_svdlibc=False, power_iters=P2_EXTRA_ITERS, extra_dims=P2_EXTRA_DIMS):
"""
Construct the (U, S) projection from a corpus `docs`. The projection can
be later updated by merging it with another Projection via `self.merge()`.
This is the class taking care of the 'core math'; interfacing with corpora,
splitting large corpora into chunks and merging them etc. is done through
the higher-level `LsiModel` class.
"""
self.m, self.k = m, k
self.power_iters = power_iters
self.extra_dims = extra_dims
if docs is not None:
# base case decomposition: given a job `docs`, compute its decomposition,
# *in-core*.
if not use_svdlibc:
u, s = stochastic_svd(docs, k, chunksize=sys.maxsize, num_terms=m,
power_iters=self.power_iters, extra_dims=self.extra_dims)
else:
try:
import sparsesvd
except ImportError:
raise ImportError("`sparsesvd` module requested but not found; run `easy_install sparsesvd`")
logger.info("computing sparse SVD of %s matrix" % str(docs.shape))
if not scipy.sparse.issparse(docs):
docs = matutils.corpus2csc(docs)
ut, s, vt = sparsesvd.sparsesvd(docs, k + 30) # ask for extra factors, because for some reason SVDLIBC sometimes returns fewer factors than requested
u = ut.T
del ut, vt
k = clip_spectrum(s**2, self.k)
self.u = u[:, :k].copy()
self.s = s[:k].copy()
else:
self.u, self.s = None, None
def empty_like(self):
return Projection(self.m, self.k, power_iters=self.power_iters, extra_dims=self.extra_dims)
def merge(self, other, decay=1.0):
"""
Merge this Projection with another.
The content of `other` is destroyed in the process, so pass this function a
copy of `other` if you need it further.
"""
if other.u is None:
# the other projection is empty => do nothing
return
if self.u is None:
# we are empty => result of merge is the other projection, whatever it is
self.u = other.u.copy()
self.s = other.s.copy()
return
if self.m != other.m:
raise ValueError("vector space mismatch: update is using %s features, expected %s" %
(other.m, self.m))
logger.info("merging projections: %s + %s" % (str(self.u.shape), str(other.u.shape)))
m, n1, n2 = self.u.shape[0], self.u.shape[1], other.u.shape[1]
# TODO Maybe keep the bases as elementary reflectors, without
# forming explicit matrices with ORGQR.
        # The only operation we ever need is basis^T*basis and basis*component.
# But how to do that in scipy? And is it fast(er)?
# find component of u2 orthogonal to u1
logger.debug("constructing orthogonal component")
self.u = asfarray(self.u, 'self.u')
c = numpy.dot(self.u.T, other.u)
self.u = ascarray(self.u, 'self.u')
other.u -= numpy.dot(self.u, c)
other.u = [other.u] # do some reference magic and call qr_destroy, to save RAM
q, r = matutils.qr_destroy(other.u) # q, r = QR(component)
assert not other.u
# find the rotation that diagonalizes r
k = numpy.bmat([[numpy.diag(decay * self.s), numpy.multiply(c, other.s)],
[matutils.pad(numpy.array([]).reshape(0, 0), min(m, n2), n1), numpy.multiply(r, other.s)]])
logger.debug("computing SVD of %s dense matrix" % str(k.shape))
try:
# in numpy < 1.1.0, running SVD sometimes results in "LinAlgError: SVD did not converge'.
# for these early versions of numpy, catch the error and try to compute
# SVD again, but over k*k^T.
# see http://www.mail-archive.com/numpy-discussion@scipy.org/msg07224.html and
# bug ticket http://projects.scipy.org/numpy/ticket/706
u_k, s_k, _ = numpy.linalg.svd(k, full_matrices=False) # TODO *ugly overkill*!! only need first self.k SVD factors... but there is no LAPACK wrapper for partial svd/eigendecomp in numpy :(
except numpy.linalg.LinAlgError:
logger.error("SVD(A) failed; trying SVD(A * A^T)")
u_k, s_k, _ = numpy.linalg.svd(numpy.dot(k, k.T), full_matrices=False) # if this fails too, give up with an exception
s_k = numpy.sqrt(s_k) # go back from eigen values to singular values
k = clip_spectrum(s_k**2, self.k)
u1_k, u2_k, s_k = numpy.array(u_k[:n1, :k]), numpy.array(u_k[n1:, :k]), s_k[:k]
# update & rotate current basis U = [U, U']*[U1_k, U2_k]
logger.debug("updating orthonormal basis U")
self.s = s_k
self.u = ascarray(self.u, 'self.u')
self.u = numpy.dot(self.u, u1_k)
q = ascarray(q, 'q')
q = numpy.dot(q, u2_k)
self.u += q
# make each column of U start with a non-negative number (to force canonical decomposition)
if self.u.shape[0] > 0:
for i in range(self.u.shape[1]):
if self.u[0, i] < 0.0:
self.u[:, i] *= -1.0
# diff = numpy.dot(self.u.T, self.u) - numpy.eye(self.u.shape[1])
# logger.info('orth error after=%f' % numpy.sum(diff * diff))
#endclass Projection
class LsiModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Latent
Semantic Indexing (also known as Latent Semantic Analysis).
The main methods are:
1. constructor, which initializes the projection into latent topics space,
2. the ``[]`` method, which returns representation of any input document in the
latent space,
3. `add_documents()` for incrementally updating the model with new documents.
The left singular vectors are stored in `lsi.projection.u`, singular values
in `lsi.projection.s`. Right singular vectors can be reconstructed from the output
of `lsi[training_corpus]`, if needed.
Model persistency is achieved via its load/save methods.
"""
def __init__(self, corpus=None, num_topics=200, id2word=None, chunksize=20000,
decay=1.0, distributed=False, onepass=True,
power_iters=P2_EXTRA_ITERS, extra_samples=P2_EXTRA_DIMS):
"""
`num_topics` is the number of requested factors (latent dimensions).
After the model has been trained, you can estimate topics for an
arbitrary, unseen document, using the ``topics = self[document]`` dictionary
notation. You can also add new training documents, with ``self.add_documents``,
so that training can be stopped and resumed at any time, and the
LSI transformation is available at any point.
If you specify a `corpus`, it will be used to train the model. See the
method `add_documents` for a description of the `chunksize` and `decay` parameters.
Turn `onepass` off to force a multi-pass stochastic algorithm.
`power_iters` and `extra_samples` affect the accuracy of the stochastic
multi-pass algorithm, which is used either internally (`onepass=True`) or
as the front-end algorithm (`onepass=False`). Increasing the number of
power iterations improves accuracy, but lowers performance. See [2]_ for
some hard numbers.
Turn on `distributed` to enable distributed computing.
Example:
>>> lsi = LsiModel(corpus, num_topics=10)
>>> print lsi[doc_tfidf] # project some document into LSI space
>>> lsi.add_documents(corpus2) # update LSI on additional documents
>>> print lsi[doc_tfidf]
.. [2] http://nlp.fi.muni.cz/~xrehurek/nips/rehurek_nips.pdf
"""
self.id2word = id2word
self.num_topics = int(num_topics)
self.chunksize = int(chunksize)
self.decay = float(decay)
if distributed:
if not onepass:
logger.warning("forcing the one-pass algorithm for distributed LSA")
onepass = True
self.onepass = onepass
self.extra_samples, self.power_iters = extra_samples, power_iters
if corpus is None and self.id2word is None:
raise ValueError('at least one of corpus/id2word must be specified, to establish input space dimensionality')
if self.id2word is None:
logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + list(self.id2word.keys()))
self.docs_processed = 0
self.projection = Projection(self.num_terms, self.num_topics, power_iters=self.power_iters, extra_dims=self.extra_samples)
self.numworkers = 1
if not distributed:
logger.info("using serial LSI version on this node")
self.dispatcher = None
else:
if not onepass:
raise NotImplementedError("distributed stochastic LSA not implemented yet; "
"run either distributed one-pass, or serial randomized.")
try:
import Pyro4
dispatcher = Pyro4.Proxy('PYRONAME:gensim.lsi_dispatcher')
dispatcher._pyroOneway.add("exit")
logger.debug("looking for dispatcher at %s" % str(dispatcher._pyroUri))
dispatcher.initialize(id2word=self.id2word, num_topics=num_topics,
chunksize=chunksize, decay=decay,
power_iters=self.power_iters, extra_samples=self.extra_samples,
distributed=False, onepass=onepass)
self.dispatcher = dispatcher
self.numworkers = len(dispatcher.getworkers())
logger.info("using distributed version with %i workers" % self.numworkers)
except Exception as err:
# distributed version was specifically requested, so this is an error state
logger.error("failed to initialize distributed LSI (%s)" % err)
raise RuntimeError("failed to initialize distributed LSI (%s)" % err)
if corpus is not None:
self.add_documents(corpus)
def add_documents(self, corpus, chunksize=None, decay=None):
"""
Update singular value decomposition to take into account a new
corpus of documents.
Training proceeds in chunks of `chunksize` documents at a time. The size of
`chunksize` is a tradeoff between increased speed (bigger `chunksize`)
vs. lower memory footprint (smaller `chunksize`). If the distributed mode
is on, each chunk is sent to a different worker/computer.
Setting `decay` < 1.0 causes re-orientation towards new data trends in the
input document stream, by giving less emphasis to old observations. This allows
LSA to gradually "forget" old observations (documents) and give more
preference to new ones.
"""
logger.info("updating model with new documents")
# get computation parameters; if not specified, use the ones from constructor
if chunksize is None:
chunksize = self.chunksize
if decay is None:
decay = self.decay
if not scipy.sparse.issparse(corpus):
if not self.onepass:
# we are allowed multiple passes over the input => use a faster, randomized two-pass algo
update = Projection(self.num_terms, self.num_topics, None)
update.u, update.s = stochastic_svd(corpus, self.num_topics,
num_terms=self.num_terms, chunksize=chunksize,
extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
else:
# the one-pass algo
doc_no = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info("preparing a new chunk of documents")
nnz = sum(len(doc) for doc in chunk)
# construct the job as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense matrix!
logger.debug("converting corpus to csc format")
job = matutils.corpus2csc(chunk, num_docs=len(chunk), num_terms=self.num_terms, num_nnz=nnz)
del chunk
doc_no += job.shape[1]
if self.dispatcher:
# distributed version: add this job to the job queue, so workers can work on it
logger.debug("creating job #%i" % chunk_no)
self.dispatcher.putjob(job) # put job into queue; this will eventually block, because the queue has a small finite size
del job
logger.info("dispatched documents up to #%s" % doc_no)
else:
# serial version, there is only one "worker" (myself) => process the job directly
update = Projection(self.num_terms, self.num_topics, job, extra_dims=self.extra_samples, power_iters=self.power_iters)
del job
self.projection.merge(update, decay=decay)
del update
logger.info("processed documents up to #%s" % doc_no)
self.print_topics(5)
# wait for all workers to finish (distributed version only)
if self.dispatcher:
logger.info("reached the end of input; now waiting for all remaining jobs to finish")
self.projection = self.dispatcher.getstate()
# logger.info("top topics after adding %i documents" % doc_no)
# self.print_debug(10)
else:
assert not self.dispatcher, "must be in serial mode to receive jobs"
assert self.onepass, "distributed two-pass algo not supported yet"
update = Projection(self.num_terms, self.num_topics, corpus.tocsc(), extra_dims=self.extra_samples, power_iters=self.power_iters)
self.projection.merge(update, decay=decay)
logger.info("processed sparse job of %i documents" % (corpus.shape[1]))
def __str__(self):
return "LsiModel(num_terms=%s, num_topics=%s, decay=%s, chunksize=%s)" % \
(self.num_terms, self.num_topics, self.decay, self.chunksize)
def __getitem__(self, bow, scaled=False, chunksize=512):
"""
Return latent representation, as a list of (topic_id, topic_value) 2-tuples.
This is done by folding input document into the latent topic space.
"""
assert self.projection.u is not None, "decomposition not initialized yet"
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus and chunksize:
            # by default, transform 512 documents at once, when called as `lsi[corpus]`.
# this chunking is completely transparent to the user, but it speeds
# up internal computations (one mat * mat multiplication, instead of
            # 512 smaller mat * vec multiplications).
return self._apply(bow, chunksize=chunksize)
if not is_corpus:
bow = [bow]
vec = matutils.corpus2csc(bow, num_terms=self.num_terms)
topic_dist = (vec.T * self.projection.u[:, :self.num_topics]).T # (x^T * u).T = u^-1 * x
if scaled:
topic_dist = (1.0 / self.projection.s[:self.num_topics]) * topic_dist # s^-1 * u^-1 * x
# convert a numpy array to gensim sparse vector = tuples of (feature_id, feature_weight),
# with no zero weights.
if not is_corpus:
# lsi[single_document]
result = matutils.full2sparse(topic_dist.flat)
else:
# lsi[chunk of documents]
result = matutils.Dense2Corpus(topic_dist)
return result
def show_topic(self, topicno, topn=10):
"""
Return a specified topic (=left singular vector), 0 <= `topicno` < `self.num_topics`,
as string.
Return only the `topn` words which contribute the most to the direction
of the topic (both negative and positive).
>>> lsimodel.print_topic(10, topn=5)
'-0.340 * "category" + 0.298 * "$M$" + 0.183 * "algebra" + -0.174 * "functor" + -0.168 * "operator"'
"""
# size of the projection matrix can actually be smaller than `self.num_topics`,
# if there were not enough factors (real rank of input matrix smaller than
# `self.num_topics`). in that case, return an empty string
if topicno >= len(self.projection.u.T):
return ''
c = numpy.asarray(self.projection.u.T[topicno, :]).flatten()
norm = numpy.sqrt(numpy.sum(numpy.dot(c, c)))
most = numpy.abs(c).argsort()[::-1][:topn]
return [(1.0 * c[val] / norm, self.id2word[val]) for val in most]
def print_topic(self, topicno, topn=10):
return ' + '.join(['%.3f*"%s"' % v for v in self.show_topic(topicno, topn)])
def show_topics(self, num_topics=-1, num_words=10, log=False, formatted=True):
"""
Show `num_topics` most significant topics (show all by default).
For each topic, show `num_words` most significant words (10 words by defaults).
Return the shown topics as a list -- a list of strings if `formatted` is
True, or a list of (value, word) 2-tuples if it's False.
If `log` is True, also output this result to log.
"""
shown = []
if num_topics < 0:
num_topics = self.num_topics
for i in range(min(num_topics, self.num_topics)):
if i < len(self.projection.s):
if formatted:
topic = self.print_topic(i, topn=num_words)
else:
topic = self.show_topic(i, topn=num_words)
shown.append(topic)
if log:
logger.info("topic #%i(%.3f): %s" %
(i, self.projection.s[i],
topic))
return shown
def print_topics(self, num_topics=5, num_words=10):
"""Alias for `show_topics()` which prints the top 5 topics to log."""
return self.show_topics(num_topics=num_topics, num_words=num_words, log=True)
def print_debug(self, num_topics=5, num_words=10):
"""
Print (to log) the most salient words of the first `num_topics` topics.
Unlike `print_topics()`, this looks for words that are significant for a
particular topic *and* not for others. This *should* result in a more
human-interpretable description of topics.
"""
# only wrap the module-level fnc
print_debug(self.id2word, self.projection.u, self.projection.s,
list(range(min(num_topics, len(self.projection.u.T)))),
num_words=num_words)
def save(self, fname):
"""
Override the default `save` (which uses cPickle), because that's
too inefficient and cPickle has bugs. Instead, single out the large transformation
matrix and store that separately in binary format (that can be directly
mmap'ed back in `load()`), under `fname.npy`.
"""
logger.info("storing %s object to %s and %s" % (self.__class__.__name__, fname, fname + '.npy'))
if self.projection.u is None:
# model not initialized: there is no projection
utils.pickle(self, fname)
# first, remove the projection from self.__dict__, so it doesn't get pickled
u, dispatcher = self.projection.u, self.dispatcher
del self.projection.u
self.dispatcher = None
try:
utils.pickle(self, fname) # store projection-less object
numpy.save(fname + '.npy', ascarray(u)) # store projection
finally:
self.projection.u, self.dispatcher = u, dispatcher
@classmethod
def load(cls, fname):
"""
Load a previously saved object from file (also see `save`).
"""
logger.debug("loading %s object from %s" % (cls.__name__, fname))
result = utils.unpickle(fname)
ufname = fname + '.npy'
try:
result.projection.u = numpy.load(ufname, mmap_mode='r') # load back as read-only
except:
logger.debug("failed to load mmap'ed projection from %s" % ufname)
result.dispatcher = None # TODO load back incl. distributed state? will require re-initialization of worker state
return result
#endclass LsiModel
def print_debug(id2token, u, s, topics, num_words=10, num_neg=None):
if num_neg is None:
# by default, print half as many salient negative words as positive
        num_neg = num_words // 2
logger.info('computing word-topic salience for %i topics' % len(topics))
topics, result = set(topics), {}
# TODO speed up by block computation
for uvecno, uvec in enumerate(u):
uvec = numpy.abs(numpy.asarray(uvec).flatten())
udiff = uvec / numpy.sqrt(numpy.sum(numpy.dot(uvec, uvec)))
for topic in topics:
result.setdefault(topic, []).append((udiff[topic], uvecno))
logger.debug("printing %i+%i salient words" % (num_words, num_neg))
for topic in sorted(result.keys()):
weights = sorted(result[topic], key=lambda x: -abs(x[0]))
_, most = weights[0]
if u[most, topic] < 0.0: # the most significant word has a negative sign => flip sign of u[most]
normalize = -1.0
else:
normalize = 1.0
# order features according to salience; ignore near-zero entries in u
pos, neg = [], []
for weight, uvecno in weights:
if normalize * u[uvecno, topic] > 0.0001:
pos.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(pos) >= num_words:
break
for weight, uvecno in weights:
if normalize * u[uvecno, topic] < -0.0001:
neg.append('%s(%.3f)' % (id2token[uvecno], u[uvecno, topic]))
if len(neg) >= num_neg:
break
logger.info('topic #%s(%.3f): %s, ..., %s' % (topic, s[topic], ', '.join(pos), ', '.join(neg)))
def stochastic_svd(corpus, rank, num_terms, chunksize=20000, extra_dims=None,
power_iters=0, dtype=numpy.float64, eps=1e-6):
"""
Return (U, S): the left singular vectors and the singular values of the streamed
input corpus `corpus` [3]_.
This may actually return less than the requested number of top `rank` factors,
in case the input is of lower rank. The `extra_dims` (oversampling) and especially
`power_iters` (power iterations) parameters affect accuracy of the decomposition.
This algorithm uses `2+power_iters` passes over the data. In case you can only
afford a single pass over the input corpus, set `onepass=True` in :class:`LsiModel`
and avoid using this algorithm directly.
The decomposition algorithm is based on
**Halko, Martinsson, Tropp. Finding structure with randomness, 2009.**
.. [3] If `corpus` is a scipy.sparse matrix instead, it is assumed the whole
corpus fits into core memory and a different (more efficient) code path is chosen.
"""
rank = int(rank)
if extra_dims is None:
samples = max(10, 2 * rank) # use more samples than requested factors, to improve accuracy
else:
samples = rank + int(extra_dims)
logger.info("using %i extra samples and %i power iterations" % (samples - rank, power_iters))
num_terms = int(num_terms)
# first phase: construct the orthonormal action matrix Q = orth(Y) = orth((A * A.T)^q * A * O)
# build Y in blocks of `chunksize` documents (much faster than going one-by-one
# and more memory friendly than processing all documents at once)
y = numpy.zeros(dtype=dtype, shape=(num_terms, samples))
logger.info("1st phase: constructing %s action matrix" % str(y.shape))
if scipy.sparse.issparse(corpus):
m, n = corpus.shape
assert num_terms == m, "mismatch in number of features: %i in sparse matrix vs. %i parameter" % (m, num_terms)
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(y.dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, corpus.indptr, corpus.indices,
corpus.data, o.ravel(), y.ravel()) # y = corpus * o
del o
# unlike numpy, scipy.sparse `astype()` copies everything, even if there is no change to dtype!
# so check for equal dtype explicitly, to avoid the extra memory footprint if possible
if y.dtype != dtype:
y = y.astype(dtype)
logger.info("orthonormalizing %s action matrix" % str(y.shape))
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
logger.debug("running %i power iterations" % power_iters)
for power_iter in range(power_iters):
q = corpus.T * q
q = [corpus * q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range after each power iteration step
else:
num_docs = 0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i' % (chunk_no * chunksize))
# construct the chunk as a sparse matrix, to minimize memory overhead
# definitely avoid materializing it as a dense (num_terms x chunksize) matrix!
s = sum(len(doc) for doc in chunk)
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
m, n = chunk.shape
assert m == num_terms
assert n <= chunksize # the very last chunk of A is allowed to be smaller in size
num_docs += n
logger.debug("multiplying chunk * gauss")
o = numpy.random.normal(0.0, 1.0, (n, samples)).astype(dtype) # draw a random gaussian matrix
sparsetools.csc_matvecs(m, n, samples, chunk.indptr, chunk.indices, # y = y + chunk * o
chunk.data, o.ravel(), y.ravel())
del chunk, o
y = [y]
q, _ = matutils.qr_destroy(y) # orthonormalize the range
for power_iter in range(power_iters):
logger.info("running power iteration #%i" % (power_iter + 1))
yold = q.copy()
q[:] = 0.0
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=dtype) # documents = columns of sparse CSC
tmp = chunk.T * yold
tmp = chunk * tmp
del chunk
q += tmp
del yold
q = [q]
q, _ = matutils.qr_destroy(q) # orthonormalize the range
qt = q[:, :samples].T.copy()
del q
if scipy.sparse.issparse(corpus):
b = qt * corpus
logger.info("2nd phase: running dense svd on %s matrix" % str(b.shape))
u, s, vt = numpy.linalg.svd(b, full_matrices=False)
del b, vt
else:
# second phase: construct the covariance matrix X = B * B.T, where B = Q.T * A
# again, construct X incrementally, in chunks of `chunksize` documents from the streaming
# input corpus A, to avoid using O(number of documents) memory
x = numpy.zeros(shape=(qt.shape[0], qt.shape[0]), dtype=numpy.float64)
logger.info("2nd phase: constructing %s covariance matrix" % str(x.shape))
for chunk_no, chunk in enumerate(utils.grouper(corpus, chunksize)):
logger.info('PROGRESS: at document #%i/%i' % (chunk_no * chunksize, num_docs))
chunk = matutils.corpus2csc(chunk, num_terms=num_terms, dtype=qt.dtype)
b = qt * chunk # dense * sparse matrix multiply
del chunk
x += numpy.dot(b, b.T) # TODO should call the BLAS routine SYRK, but there is no SYRK wrapper in scipy :(
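            # Editor's note (hedged): newer SciPy versions do expose low-level BLAS
            # wrappers such as scipy.linalg.blas.dsyrk, which exploits the symmetry
            # of b * b.T and could roughly halve the work of this update.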
del b
# now we're ready to compute decomposition of the small matrix X
logger.info("running dense decomposition on %s covariance matrix" % str(x.shape))
u, s, vt = numpy.linalg.svd(x) # could use linalg.eigh, but who cares... and svd returns the factors already sorted :)
s = numpy.sqrt(s) # sqrt to go back from singular values of X to singular values of B = singular values of the corpus
q = qt.T.copy()
del qt
logger.info("computing the final decomposition")
keep = clip_spectrum(s**2, rank, discard=eps)
u = u[:, :keep].copy()
s = s[:keep]
u = numpy.dot(q, u)
return u.astype(dtype), s.astype(dtype)
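# --- Editor's addition: a hedged usage sketch, not part of the original module.
# Minimal end-to-end check of the randomized SVD above. `stochastic_svd` is the
# module-level function defined in this file; numpy and scipy.sparse are assumed
# to be imported at the top of the module (both are used throughout the code).
if __name__ == '__main__':
    demo_a = scipy.sparse.csc_matrix(numpy.random.rand(100, 500))
    demo_u, demo_s = stochastic_svd(demo_a, rank=10, num_terms=100,
                                    extra_dims=10, power_iters=2)
    # compare the leading singular values against an exact dense SVD
    exact_s = numpy.linalg.svd(demo_a.toarray(), compute_uv=False)[:len(demo_s)]
    print('max relative error: %g' % (numpy.abs(demo_s - exact_s) / exact_s).max())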
|
samantp/gensimPy3
|
gensim/models/lsimodel.py
|
Python
|
gpl-3.0
| 33,211
|
[
"Gaussian"
] |
8396917e8542aba744fb4c3d60305a32ca71e9660361be516081e7cb8b01c003
|
#!/usr/bin/env python
########################################
# Global map for tests
# from Rabea Amther
########################################
# http://gfesuite.noaa.gov/developer/netCDFPythonInterface.html
import math
import numpy as np
import pylab as pl
import Scientific.IO.NetCDF as IO
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.lines as lines
from mpl_toolkits.basemap import Basemap , addcyclic
from matplotlib.colors import LinearSegmentedColormap
import textwrap
pl.close('all')
########################## for CMIP5 characters
DIR='/Users/tang/climate/CMIP5/monthly/rsds/hist/'
VARIABLE='rsds'
PRODUCT='Amon'
ENSEMBLE='r1i1p1'
EXPERIMENT='hist'
TIME='195001-200512'
#OBS='CRU'
OBS='CERES'
season='summer'
#season='winter'
K=0
NonData=['EC-EARTH-XXXX','CSIRO-Mk3-6-0-XXXXXX']
GCMs=[\
'ACCESS1-0',\
'BNU-ESM',\
'CCSM4',\
'CESM1-BGC',\
'CESM1-CAM5',\
'CESM1-FASTCHEM',\
'CESM1-WACCM',\
'CMCC-CESM',\
'CNRM-CM5',\
'CSIRO-Mk3-6-0',\
'CanESM2',\
'EC-EARTH',\
'FIO-ESM',\
'GFDL-ESM2M',\
'GISS-E2-H',\
'HadGEM2-AO',\
'HadGEM2-ES',\
'IPSL-CM5A-LR',\
'IPSL-CM5A-MR',\
'MIROC-ESM-CHEM',\
'MIROC-ESM',\
'MIROC5',\
'MPI-ESM-LR',\
'MPI-ESM-MR',\
'MPI-ESM-P',\
'MRI-CGCM3',\
'MRI-ESM1',\
'NorESM1-ME',\
'NorESM1-M',\
'bcc-csm1-1-m',\
'bcc-csm1-1',\
'inmcm4',\
]
ENSEMBLE=[ \
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r12i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r2i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
'r1i1p1',\
]
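# Note (editor): this list is positionally aligned with the GCMs list above and
# overrides the scalar ENSEMBLE string defined earlier in the header block.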
COLOR=['darkred','darkblue','darkgreen','deeppink',\
'black','orangered','cyan','magenta']
# read CERES data:
if OBS == 'CERES':
oVar='rsds'
obs1='/Users/tang/climate/GLOBALDATA/OBSDATA/CERES/rsds_CERES-EBAF_L3B_Ed2-8_2001-2005.NDJFMA.SWIO.nc'
else:
# read MODIS data:
oVar='clt'
obs1='/Users/tang/climate/GLOBALDATA/OBSDATA/MODIS/clt_MODIS_L3_C5_200101-200512.ymonmean.NDJFMA.SWIO.nc'
print obs1
obsfile1=IO.NetCDFFile(obs1,'r')
ObsVar=obsfile1.variables[oVar][0][:][:].copy()
for idx,Model in enumerate(GCMs):
if OBS == 'CERES':
infile1=DIR+\
'/rsds_Amon_'+Model+'_historical_'+ENSEMBLE[idx]+\
'_200101-200512.summer.remap.CERES.SWIO.nc'
#GFDL-ESM2M/clt_Amon_GFDL-ESM2M_historical_r1i1p1_200101-200512.nc.summer.mean.nc.remap.nc
#rsds_Amon_bcc-csm1-1_historical_r1i1p1_200101-200512.summer.remap.CERES.SWIO.nc
else:
infile1=DIR+'/'+\
'clt_Amon_'+Model+'_historical_'+ENSEMBLE[idx]+\
'_200101-200512.'+season+'.remap.modis.SWIO.nc'
print infile1
if Model in NonData:
infile1=obsfile1
VAR=infile1.variables[oVar][0,:,:].copy()
else:
print 'k=',idx
infile1=IO.NetCDFFile(infile1,'r')
VAR=infile1.variables[VARIABLE][0,:,:].copy()
    print 'the variable '+VARIABLE+' ===============: '
print VAR
    # read the coordinate variables from the (already opened) input file:
lat = infile1.variables['lat'][:].copy()
lon = infile1.variables['lon'][:].copy()
print np.shape(VAR)
print np.shape(ObsVar)
Bias=VAR-ObsVar
print np.shape(Bias)
#quit()
CoLev=10 #number of levels of colorbar
#=================================================== to plot
    ax=plt.subplot(8,4,idx+1,aspect='equal')  # one panel per model; subplot returns an Axes
print "============="
print idx; print Model
map=Basemap(projection='cyl',llcrnrlat=np.min(lat),urcrnrlat=np.max(lat),\
llcrnrlon=np.min(lon),urcrnrlon=np.max(lon),resolution='l')
map.drawcoastlines(linewidth=0.35)
map.drawparallels(np.arange(-90.,91.,15.),labels=[1,0,0,0],linewidth=0.35)
map.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],linewidth=0.35)
map.drawmapboundary()
x,y=map(lon,lat)
cmap=plt.get_cmap('bwr')
#cmap=plt.get_cmap('RdBu_r')
pic=map.pcolormesh(x,y,Bias,cmap=cmap)
plt.title(GCMs[idx])
#plt.figtext(0.68,0.73,timestamp, size="small")
#set the same colorbar range
pic.set_clim(vmin=-100,vmax=100)
plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
cax = plt.axes([0.85, 0.1, 0.01, 0.8])
plt.colorbar(cax=cax)
#if idx > 11:
#plt.colorbar(orientation='horizontal') # draw colorbar
#plt.legend(loc=2)
plt.suptitle('seasonal mean bias of Surface Downwelling SW radiation (W m-2) vs CERES',fontsize=18)
plt.show()
quit()
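# --- Editor's addition: hedged sketch, unreachable (quit() above exits first).
# Scientific.IO.NetCDF is long deprecated; the same read step could be written
# with the netCDF4 package. The path below is a placeholder, not one of the
# original inputs.
#
#   from netCDF4 import Dataset
#   nc = Dataset('/path/to/rsds_model.nc', 'r')
#   var = nc.variables['rsds'][0, :, :]   # same slicing as the reads above
#   bias = var - ObsVar                   # same bias definition: model - obs
#   nc.close()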
|
CopyChat/Plotting
|
Downscaling/global_change_rsds.cmip5.py
|
Python
|
gpl-3.0
| 5,099
|
[
"NetCDF"
] |
7eadfea7af681201942a35d72c449768d3131a3b7a1afa012d191f51a109e022
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import itertools
import multiprocessing.pool
import sys
import time
import weakref
from absl.testing import parameterized
import numpy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import function as tf_function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.layers import convolutional
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_sendrecv_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.structured import structured_tensor
from tensorflow.python.platform import test
from tensorflow.python.training import training_ops
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
def total_function_cache(defined):
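  """Returns the union of `defined`'s exact-signature ('primary') and
  shape-relaxed cache keys, so tests can count every traced specialization."""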
# pylint: disable=protected-access
return (set(defined._function_cache.primary)
| set(defined._function_cache.arg_relaxed))
# pylint: enable=protected-access
def _example_indexed_slices_with_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]),
constant_op.constant([2]))
def _example_indexed_slices_without_dense_shape():
return indexed_slices.IndexedSlices(
constant_op.constant([1, 2]), constant_op.constant([0, 1]))
def _spec_for_value(value):
"""Returns the (nested) TypeSpec for a value."""
if nest.is_sequence(value):
return nest.map_structure(_spec_for_value, value)
elif isinstance(value, (ops.Tensor, composite_tensor.CompositeTensor)):
return type_spec.type_spec_from_value(value)
else:
return value
class FunctionTest(test.TestCase, parameterized.TestCase):
def setUp(self):
super(FunctionTest, self).setUp()
cpus = config.list_physical_devices('CPU')
# Set 4 virtual CPUs
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
def testBasic(self):
matmul = def_function.function(math_ops.matmul)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t, transpose_a=True)
sq2 = matmul(sq, t, transpose_a=True)
self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])
self.assertAllEqual(sq2.numpy().reshape(-1), [52, 76, 74, 108])
def testOnExitCallback(self):
values = []
def append_1():
values.append(1)
def append_2():
values.append(2)
def g(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_1)
self.assertEqual(old_values, values)
return x + 1
tf_g = def_function.function(g)
def f(x):
old_values = list(values)
ops.add_exit_callback_to_default_func_graph(append_2)
self.assertEqual(old_values, values)
return tf_g(x)
tf_f = def_function.function(f)
self.assertEmpty(values)
tf_f(constant_op.constant(1.0))
self.assertEqual(values, [1, 2]) # Once for g, once for f.
tf_f(constant_op.constant([1.0])) # force a retrace
self.assertEqual(values, [1, 2, 1, 2]) # And again.
def testCannotAddExitCallbackWhenNotInFunctionScope(self):
with self.assertRaisesRegex(RuntimeError, 'when not building a function.'):
ops.add_exit_callback_to_default_func_graph(lambda: None)
def testVariable(self):
v1 = variables.Variable(1.0)
add = def_function.function(lambda x, v: x + v1 + v)
v2 = variables.Variable(1.0)
x = constant_op.constant(1.0)
r = add(x, v2)
self.assertEqual(3.0, self.evaluate(r))
def testVariableOnly(self):
v = variables.Variable(1.0)
add = def_function.function(lambda x: x.assign_add(1.0))
r1 = add(v)
self.assertEqual(2.0, self.evaluate(r1))
c = constant_op.constant(1.0)
with self.assertRaisesRegex(AttributeError, 'no attribute'):
add(c)
@test_util.disable_tfrt('Packed tensor is not supported in tfrt yet.')
def testPackedVariable(self):
with ops.device('/cpu:0'):
v0_0 = resource_variable_ops.ResourceVariable(1.0)
with ops.device('/cpu:1'):
v0_1 = resource_variable_ops.ResourceVariable(2.0)
v1_0 = resource_variable_ops.ResourceVariable(3.0)
with ops.device('/cpu:2'):
v1_1 = resource_variable_ops.ResourceVariable(4.0)
packed_var_0 = ops.pack_eager_tensors([v0_0.handle, v0_1.handle])
packed_var_1 = ops.pack_eager_tensors([v1_0.handle, v1_1.handle])
# TODO(b/145922293): use ResourceVariable.assign_add and
# ResourceVariable.read_value directly once we support packing multiple
# ResourceVariable into one ResourceVariable.
@def_function.function
def read_var():
resource_variable_ops.assign_add_variable_op(
packed_var_0, constant_op.constant(5.0))
resource_variable_ops.assign_add_variable_op(
packed_var_1, constant_op.constant(6.0))
with ops.device('/cpu:0'):
read0 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
with ops.device('/cpu:1'):
read1 = resource_variable_ops.read_variable_op(
packed_var_0, dtype=dtypes.float32)
read2 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
with ops.device('/cpu:2'):
read3 = resource_variable_ops.read_variable_op(
packed_var_1, dtype=dtypes.float32)
return read0, read1, read2, read3
arg_attrs = read_var.get_concrete_function().function_def.arg_attr
self.assertLen(arg_attrs, 2)
self.assertEqual(arg_attrs[0].attr['_composite_device'].s,
compat.as_bytes(packed_var_0.device))
self.assertEqual(arg_attrs[1].attr['_composite_device'].s,
compat.as_bytes(packed_var_1.device))
self.assertAllEqual(read_var(), (1 + 5, 2 + 5, 3 + 6, 4 + 6))
def testImplementsAttributeBasic(self):
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
self.assertEqual(f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME].s,
'func'.encode('ascii'), f)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testImplementsAttributeAssertsOnSideInput(self):
with context.graph_mode(), self.cached_session():
z = array_ops.zeros(0)
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y + z)
a = array_ops.ones((1.0,))
b = array_ops.ones((1.0,))
with self.assertRaisesRegex(AssertionError,
'variables are always captured'):
v(a, b)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertEmpty(functions)
def testImplementsAttributeWorksOnVariables(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable((1.0,))
b = variables.Variable((1.0,))
r1 = v(a, b)
_ = v(a, a)
functions = ops.get_default_graph().as_graph_def().library.function
# Verify that we created only one function
self.assertLen(functions, 1)
# Verify that eval() reads the current values.
a.initializer.run()
b.initializer.run()
self.assertEqual(r1.eval(), 2)
a.assign_add([1]).eval()
self.assertEqual(r1.eval(), 3)
def testImplementsAttributeWorksOnConstants(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, 2.)
r2 = v(2., a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 1)
self.assertLen(functions[0].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
self.assertEqual(r1.eval(), 3)
self.assertEqual(r2.eval(), 3)
def testImplementsAttributeSpecializes(self):
with context.graph_mode(), self.cached_session():
v = def_function.function(
experimental_implements='func')(lambda x, y: x + y)
a = variables.Variable(1.0)
r1 = v(a, [2.])
r2 = v([2., 2], a)
functions = ops.get_default_graph().as_graph_def().library.function
self.assertLen(functions, 2)
# Ensure that all parameters are still there and haven't been inlined!
self.assertLen(functions[0].signature.input_arg, 2)
self.assertLen(functions[1].signature.input_arg, 2)
# Verify that eval() reads the current values.
a.initializer.run()
numpy.testing.assert_equal(r1.eval(), [3.])
numpy.testing.assert_equal(r2.eval(), [3., 3.])
def testImplementsAttributeAsNameAttrList(self):
implements_attr = (
'name: "embedding_matmul" attr { key: "key1" value { i: 2 } '
'} attr { key: "key2" value { b: false } }')
v = def_function.function(
experimental_implements=implements_attr)(lambda x, y: x + y)
with context.graph_mode(), self.cached_session():
a = array_ops.placeholder(dtypes.float32, ())
b = array_ops.placeholder(dtypes.float32, ())
v(a, b)
gradients_impl.gradients(v(a, b), [a, b])
fdefs = ops.get_default_graph().as_graph_def().library.function
self.assertLen(fdefs, 3)
not_present = 0
present = 0
for f in fdefs:
name = f.signature.name
if 'forward' in name or 'backward' in name:
not_present += 1
self.assertNotIn(function.IMPLEMENTS_ATTRIBUTE_NAME, f.attr, f)
else:
present += 1
attr_value = f.attr[function.IMPLEMENTS_ATTRIBUTE_NAME]
self.assertIsNotNone(attr_value.func, f)
self.assertEqual(attr_value.func.name, 'embedding_matmul')
name_attrs = attr_value.func.attr
self.assertLen(name_attrs, 2)
self.assertEqual(not_present, 2, fdefs)
self.assertEqual(present, 1, fdefs)
def testExternalControlDependency(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
@function.defun
def f():
with ops.control_dependencies([op]):
return 1.0
self.evaluate(f())
self.assertAllEqual(self.evaluate(v), 2.0)
def testInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
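    # Editor's note: the third distinct shape is handled by a single
    # shape-relaxed retrace (leading dimension unknown), which is why
    # `unknown_dim` flips to True while the cache size stays at two.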
def testInputShapeRelaxationOnInstanceMethod(self):
# Test that experimental_relax_shapes is passed during
# instance method bounding.
unknown_dim = [False]
class Foo(object):
@def_function.function(experimental_relax_shapes=True)
def func(self, a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
foo = Foo()
foo.func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0]))
self.assertFalse(unknown_dim[0])
foo.func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
def testInputShapeFunctionRelaxationWithRaggedTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
check_trace( # Initial call gets traced.
ragged_factory_ops.constant([[1], [2, 3, 4]]),
ragged_tensor.RaggedTensorSpec([2, None], dtypes.int32))
check_trace( # Input TypeSpec is the same -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4]]), None)
check_trace( # Even if component tensor shapes change -> no retrace.
ragged_factory_ops.constant([[1, 2], [3, 4, 5, 6]]), None)
check_trace( # Different TypeSpec shape (nrows): retrace
ragged_factory_ops.constant([[1], [2], [3]]),
ragged_tensor.RaggedTensorSpec([3, None], dtypes.int32))
check_trace( # Different nrows again: relax & retrace
ragged_factory_ops.constant([[1], [2], [3], [4]]),
ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32))
    check_trace(  # Different nrows yet again: no retrace (relaxed graph covers it)
ragged_factory_ops.constant([[1]]), None)
check_trace( # Different ragged_rank: retrace
ragged_factory_ops.constant([[[1]]]),
ragged_tensor.RaggedTensorSpec([1, None, None], dtypes.int32))
check_trace( # Different ragged_rank again: retrace & relax
ragged_factory_ops.constant([[[1]], [[2]]]),
ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32))
def testInputShapeFunctionRelaxationWithStructuredTensors(self):
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
# If we have TypeSpecs that differ in ways other than just their shape,
# then retrace each time.
check_trace(
structured_tensor.StructuredTensor.from_pyval({'a': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'b': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'b': tensor_spec.TensorSpec((1,), dtypes.int32)}))
check_trace(
structured_tensor.StructuredTensor.from_pyval({'c': [1]}),
structured_tensor.StructuredTensorSpec(
[], {'c': tensor_spec.TensorSpec((1,), dtypes.int32)}))
    # But if we call again with only the shape different, then relax:
check_trace( # retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((2,), dtypes.int32)}))
check_trace( # relax & retrace
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3]}),
structured_tensor.StructuredTensorSpec(
[], {'a': tensor_spec.TensorSpec((None,), dtypes.int32)}))
check_trace( # use relaxed graph
structured_tensor.StructuredTensor.from_pyval({'a': [1, 2, 3, 4]}),
None)
def testInputShapeFunctionRelaxationWithDatasetIterators(self):
# For dataset iterators, the TypeSpec includes type information that's
# not derivable from the component tensors. Make sure that the TypeSpec
# shapes get relaxed as appropriate.
traced_type_spec = [None]
@def_function.function(experimental_relax_shapes=True)
def func(x):
traced_type_spec[0] = x._type_spec
return x
def check_trace(x, expected_trace):
traced_type_spec[0] = None
func(x)
self.assertEqual(traced_type_spec[0], expected_trace)
ds_1_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([1, 2]))
ds_2_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 2]))
ds_3_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([3, 2]))
ds_4_2 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([4, 2]))
ds_2_1 = dataset_ops.DatasetV2.from_tensors(array_ops.zeros([2, 1]))
check_trace( # shape=[1, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_1_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([1, 2], dtypes.float32)))
check_trace( # shape=[1, 2]: no retrace (use the [1, 2] graph)
dataset_ops.make_one_shot_iterator(ds_1_2), None)
check_trace( # shape=[2, 2]: retrace
dataset_ops.make_one_shot_iterator(ds_2_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([2, 2], dtypes.float32)))
check_trace( # shape=[3, 2]: relax to [None, 2] and retrace
dataset_ops.make_one_shot_iterator(ds_3_2),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, 2], dtypes.float32)))
check_trace( # shape=[4, 2]: no retrace (use the [None, 2] graph)
dataset_ops.make_one_shot_iterator(ds_4_2), None)
check_trace( # shape=[2, 1]: relax to [None, None] and retrace
dataset_ops.make_one_shot_iterator(ds_2_1),
iterator_ops.IteratorSpec(
tensor_spec.TensorSpec([None, None], dtypes.float32)))
def testCapturesVariables(self):
a = variables.Variable(1.0, trainable=False)
b = variables.Variable(1.0)
cc = [None]
@def_function.function
def f():
c = cc[0]
if c is None:
c = cc[0] = variables.Variable(1.)
return a + b + c + 1
cf = f.get_concrete_function()
c = cc[0]
captured_variables = {v.ref() for v in (a, b, c)}
trainable_variables = {v.ref() for v in (b, c)}
self.assertEqual({v.ref() for v in cf.variables}, captured_variables)
self.assertEqual({v.ref() for v in cf.trainable_variables},
trainable_variables)
self.assertEqual(cf.variables, cf.graph.variables)
self.assertEqual(cf.trainable_variables, cf.graph.trainable_variables)
def testNestedInputShapeFunctionRelaxation(self):
unknown_dim = [False]
@function.defun(experimental_relax_shapes=True)
def func(a_, b_=None):
del a_ # Only used to check which cache is used.
self.assertEqual(b_[0]._shape_tuple(), ())
if b_[1]._shape_tuple()[0] is None:
unknown_dim[0] = True
return b_[0] + 1
a = 'hi'
b0 = constant_op.constant(1.0)
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 1)
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
func(a, b_=[b0, constant_op.constant([1.0, 1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 2)
unknown_dim[0] = False
# Now do the same except with a new a which is not a tensor; this should
# change the cache key.
a = 'bye'
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
# Since we already marked a cache miss for a function with the same
# non-input signatures, here we will immediately start relaxing shapes.
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(total_function_cache(func), 3)
def testNestedShapeFunctionRelaxation(self):
got_shape = [None]
# The inner function will go through shape relaxation because the shapes it
# receives will be [1], [2], [3], ...
@def_function.function(experimental_relax_shapes=True)
def bar(x_shape):
got_shape[0] = x_shape._shape_tuple()
return x_shape
# The outer function will not go through shape relaxation because the shapes
# it receives will be [1], [[1]], [[[1]]], ...
@def_function.function(experimental_relax_shapes=True)
def foo(ones):
return bar(array_ops.shape(ones))
for rank in range(1, 6):
x_shape = self.evaluate(foo(array_ops.ones([1] * rank)))
self.assertAllEqual(x_shape, [1] * rank)
if rank < 3:
self.assertEqual(got_shape[0], (rank,))
else:
self.assertEqual(got_shape[0], (None,))
def testNoHash(self):
@def_function.function()
def f(_):
return 1.0
with self.assertRaisesRegex(ValueError, r'Got type: set'):
f(set([]))
def testFuncName(self):
@function.defun_with_attributes(attributes={'func_name': 'multiply'})
def add(x, y):
_ = x * y
return x + y
@function.defun
def add_2(x, y):
_ = x * y
return x + y
self.assertEqual(add._name, 'multiply')
self.assertEqual(add_2._name, 'add_2')
def testBasicGraphMode(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = sq(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedInputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out = a_times_b(pair({'a': t}, {'b': t}))
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputsGraphMode(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function()
def pairs_mul(pair_a, pair_b):
return pair(matmul(pair_a.a, pair_b.a), matmul(pair_a.b, pair_b.b))
a = constant_op.constant([[1.0, 2.0], [1.0, 2.0]])
b = constant_op.constant([[3.0, 4.0], [3.0, 4.0]])
out = pairs_mul(pair(a, b), pair(b, a))
expected = pair(math_ops.matmul(a, b).numpy(),
math_ops.matmul(b, a).numpy())
self.assertAllClose(out, expected)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testNestedFunctionGraphNotOutOfDate(self, function_decorator):
@function_decorator
def f():
return constant_op.constant(1.)
class _Model(object):
@function_decorator
def g(self):
self.f = f.get_concrete_function()
model = _Model()
model.g()
concrete = model.f
weak_g_graph = weakref.ref(model.g.get_concrete_function().graph)
self.assertIs(weak_g_graph(), concrete.graph.outer_graph)
weak_g = weakref.ref(model.g)
del model
self.assertIsNone(weak_g())
self.assertIsNone(weak_g_graph())
self.assertIsNotNone(concrete.graph.outer_graph)
self.assertIs(ops.get_default_graph(), concrete.graph.outer_graph)
def testGraphEagerIsolation(self):
@function.defun
def f():
self.v = variables.Variable(1.0)
return self.v.read_value()
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
def testBasicGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testGetConcreteFunctionThreadSafety(self):
@def_function.function
def sq():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
return math_ops.matmul(t, t)
concrete_functions = []
def thread_func(_):
cf = sq.get_concrete_function()
concrete_functions.append(cf)
num_threads = 100
pool = multiprocessing.pool.ThreadPool(num_threads)
_ = pool.map(thread_func, list(range(num_threads)))
self.assertLen(set(concrete_functions), 1)
def testGetConcreteFunctionThreadSafetyWithArgs(self):
@def_function.function
def add_100(*args):
return math_ops.add_n(args)
p = multiprocessing.pool.ThreadPool(2)
args = (constant_op.constant(1.),) * 100
f1, f2 = p.map(add_100.get_concrete_function, [args] * 2)
# I see about len(args) + max(0, len(args) - 3) arguments expected.
f1(*args)
del f2
def testInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return matmul(a, a)
sq_op = sq.get_concrete_function(
tensor_spec.TensorSpec((None, None), dtypes.float32))
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out1 = sq_op(t1)
self.assertAllEqual(out1, math_ops.matmul(t1, t1).numpy())
t2 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
out2 = sq_op(t2)
self.assertAllEqual(out2, math_ops.matmul(t2, t2).numpy())
def testNestedInputSpecGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(mats):
((a, b),) = mats
return matmul(a, b)
sq_op_autonamed = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32),
tensor_spec.TensorSpec((None, None), dtypes.float32))])
self.assertEqual([None, None], sq_op_autonamed.output_shapes.as_list())
sq_op = sq.get_concrete_function(
[(tensor_spec.TensorSpec((None, None), dtypes.float32,
name='first_mat'),
tensor_spec.TensorSpec((None, None), dtypes.float32,
name='second_mat'))])
self.assertEqual([None, None], sq_op.output_shapes.as_list())
t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[1.4, 2.4], [3.4, 4.4]])
out = sq_op(first_mat=t1, second_mat=t2)
self.assertAllEqual(out, math_ops.matmul(t1, t2).numpy())
self.assertAllEqual(sq_op_autonamed(t1, t2),
math_ops.matmul(t1, t2).numpy())
def testExecutingStatelessDefunConcurrently(self):
@def_function.function
def stateless(x):
return math_ops.multiply(2.0, x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(1.0 * x) for x in range(100)]
outputs = [float(out) for out in pool.map(stateless, inputs)]
expected = [float(2.0 * x) for x in inputs]
self.assertSequenceEqual(outputs, expected)
def testExecutingManyStatelessDefunsConcurrently(self):
@def_function.function
def stateless(x):
del x
return math_ops.multiply(2.0, 2.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
objects = [object() for _ in range(100)]
outputs = [float(out) for out in pool.map(stateless, objects)]
expected = [4.0] * 100
self.assertSequenceEqual(outputs, expected)
@test_util.disable_tfrt('b/169431085: This test is flaky on tfrt')
def testExecutingStatefulDefunConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
v.assign(x)
pool = multiprocessing.pool.ThreadPool()
inputs = [constant_op.constant(0.0)] * 100
pool.map(stateful, inputs)
self.assertEqual(float(v.read_value()), 0.0)
def testExecutingManyStatefulDefunsConcurrently(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def stateful(x):
del x
return v.assign(0.0)
pool = multiprocessing.pool.ThreadPool()
# `pool.map` below instantiates 100 functions, one for each object.
pool.map(stateful, [object() for _ in range(100)])
self.assertEqual(float(v.read_value()), 0.0)
def testShareRendezvous(self):
# Disable grappler from inlining the functions. Note we run the send & recv
# in graph mode since with eager mode the function should automatically be
# inlined.
context.context().set_optimizer_experimental_options(
{'disable_meta_optimizer': True})
cpu = '/device:CPU:0'
signature = [tensor_spec.TensorSpec([], dtypes.int32)]
@def_function.function
def send():
x = constant_op.constant(1)
gen_sendrecv_ops.send(x, 'x', cpu, 0, cpu)
return x
send._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def send_body(n):
send()
return n - 1
@def_function.function
def recv():
return gen_sendrecv_ops.recv(dtypes.int32, 'x', cpu, 0, cpu)
recv._shared_rendezvous = True # pylint: disable=protected-access
@def_function.function(input_signature=signature)
def recv_body(n):
recv()
return n - 1
@def_function.function(input_signature=signature)
def cond(n):
return n > 0
# Instead of calling the send & recv functions directly we want to call them
# through a functional while to ensure the rendezvous is shared across the
# while boundary.
@def_function.function
def fn(n):
functional_ops.While([n], cond.get_concrete_function(),
send_body.get_concrete_function())
return functional_ops.While([n], cond.get_concrete_function(),
recv_body.get_concrete_function())
# Use a graph context since functions will not be automatically inlined
with context.graph_mode(), self.cached_session():
self.evaluate(fn(2))
def disabled_testRandomSeed(self):
@def_function.function
def f():
return random_ops.random_normal(())
random_seed.set_random_seed(1)
x = f()
self.assertNotEqual(x, f())
random_seed.set_random_seed(1)
self.assertAllEqual(f(), x)
def testNestedInputsGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
pair = collections.namedtuple('pair', ['a', 'b'])
@def_function.function
def a_times_b(inputs):
return matmul(inputs.a['a'], inputs.b['b'])
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = a_times_b.get_concrete_function(
pair(dict(a=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'a')),
dict(b=tensor_spec.TensorSpec([2, 2], dtypes.float32, 'b'))))
self.assertEqual(sq_op.output_shapes, tensor_shape.TensorShape([2, 2]))
out = sq_op(a=t, b=t)
self.assertAllEqual(out, math_ops.matmul(t, t).numpy())
def testNestedOutputGraphFunction(self):
matmul = def_function.function(math_ops.matmul)
@def_function.function
def sq(a):
return (matmul(a, a), {'b': constant_op.constant(1.0)})
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq_op = sq.get_concrete_function(t)
self.assertEqual(sq_op.output_shapes,
(tensor_shape.TensorShape([2, 2]),
{'b': tensor_shape.TensorShape([])}))
self.assertEqual(sq_op.output_dtypes,
(dtypes.float32, {'b': dtypes.float32}))
(a, b) = sq_op(t)
self.assertAllEqual(a, math_ops.matmul(t, t).numpy())
self.assertAllEqual(b['b'].numpy(), 1.0)
def testGraphFunctionNoneOutput(self):
@def_function.function
def fn(unused_a, unused_b):
return None
x = constant_op.constant(1)
fn_op = fn.get_concrete_function(x, x)
self.assertEqual(fn_op.output_dtypes, None)
self.assertEqual(fn_op.output_shapes, None)
self.assertAllEqual(fn_op(x, x), None)
def testDefunNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
np_ones = numpy.ones([], numpy.float32)
np_zeros = numpy.zeros([], numpy.float32)
tf_ones = array_ops.ones([])
tf_zeros = array_ops.zeros([])
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(np_ones).numpy())
self.assertLen(total_function_cache(defined), 2)
self.assertEqual(0., defined(np_zeros).numpy())
self.assertEqual(1., defined(tf_ones).numpy())
self.assertEqual(0., defined(tf_zeros).numpy())
self.assertLen(total_function_cache(defined), 2)
# Test that mutable inputs are supported.
mutable = numpy.ones([], numpy.float32)
self.assertEqual(1., defined(mutable).numpy())
mutable.fill(0)
self.assertEqual(0., defined(mutable).numpy())
class MyNdarray(numpy.ndarray):
pass
# Test that the subclasses of ndarray are converted too.
self.assertEqual(1., defined(np_ones.view(MyNdarray)).numpy())
self.assertEqual(0., defined(np_zeros.view(MyNdarray)).numpy())
# We should not have triggered any re-tracing of the python function.
self.assertLen(total_function_cache(defined), 2)
def testNumpyDtypeInputSupported(self):
@function.defun
def f(x, dtype):
return constant_op.constant(dtype(x))
self.assertEqual(f(1, numpy.float32).numpy(), numpy.float32(1))
self.assertEqual(f(2, numpy.float32).numpy(), numpy.float32(2))
self.assertEqual(f(1, numpy.int32).numpy(), numpy.int32(1))
self.assertEqual(f(2, numpy.int32).numpy(), numpy.int32(2))
def testDefunNumpyArraysConvertedToTensorsInKwargs(self):
def f(**kwargs):
x = kwargs.pop('x')
self.assertIsInstance(x, ops.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
defined = function.defun(f)
defined(x=x)
self.assertLen(total_function_cache(defined), 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x=x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(total_function_cache(defined), 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1., defined(x=numpy.ones([])).numpy())
self.assertEqual(0., defined(x=numpy.zeros([])).numpy())
self.assertEqual(1., defined(x=array_ops.ones([])).numpy())
self.assertEqual(0., defined(x=array_ops.zeros([])).numpy())
def testDefunCapturedInt32(self):
x = constant_op.constant(1, dtype=dtypes.int32)
@def_function.function
def add_int32s():
return x + x
self.assertEqual(2, int(add_int32s()))
def testDefunReadVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@def_function.function
def f():
return v.read_value()
self.assertEqual(1.0, float(f()))
def testDefunAssignAddVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
x = constant_op.constant(2.0)
@def_function.function
def test_assign_add():
v.assign_add(x)
return v.read_value()
self.assertEqual(3.0, float(test_assign_add()))
@test_util.run_in_graph_and_eager_modes
def testTensorInitializationInFunctionRaisesError(self):
error_msg = ('Tensor-typed variable initializers must either be '
'wrapped in an init_scope or callable.*')
@def_function.function
def tensor_init():
with self.assertRaisesRegex(ValueError, error_msg):
resource_variable_ops.ResourceVariable(constant_op.constant(2.0))
tensor_init()
@test_util.run_in_graph_and_eager_modes
def testCallableTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
self.v = resource_variable_ops.ResourceVariable(
lambda: constant_op.constant(2.0))
return self.v.read_value()
value = tensor_init()
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertEqual(self.evaluate(value), 2.0)
@test_util.also_run_as_tf_function
def testInitScopeTensorInitializationInFunction(self):
@def_function.function
def tensor_init():
with ops.init_scope():
const = constant_op.constant(2.0)
# Note: this variable bypasses tf.function's variable creation
# requirements by bypassing variable_creator_scope by using
# ResourceVariable instead of Variable.
self.v = resource_variable_ops.ResourceVariable(const)
return self.v.read_value()
value = tensor_init()
self.assertAllEqual(value, 2.0)
@test_util.run_in_graph_and_eager_modes
def testGetConcreteFunctionCreatesVariables(self):
v_holder = []
@def_function.function
def tensor_init():
if not v_holder:
v_holder.append(variables.Variable(5.))
return v_holder[0].read_value()
concrete = tensor_init.get_concrete_function()
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(5., self.evaluate(concrete()))
self.assertAllEqual(5., self.evaluate(tensor_init()))
def testFuncGraphCaptureByValue(self):
v = variables.Variable(1.0)
def trivial_function():
return v.read_value()
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testFuncGraphCaptureByValueNested(self):
v = variables.Variable(1.0)
def trivial_function():
return control_flow_ops.cond(
array_ops.placeholder_with_default(True, ()),
v.read_value, v.read_value)
graph_function = function.Function(
trivial_function, 'test', capture_by_value=True)
self.assertAllEqual(graph_function(), 1.0)
v.assign(2.0)
self.assertAllEqual(graph_function(), 1.0)
def testDefunShapeInferenceWithCapturedResourceVariable(self):
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testShapeInferenceForMoreSpecificInput(self):
def f(a):
return array_ops.reshape(a, [-1, 3])
signature = [tensor_spec.TensorSpec(None, dtypes.float32)]
compiled = def_function.function(f, input_signature=signature)
@def_function.function
def use_f():
inputs = array_ops.zeros([10, 10, 3])
self.assertAllEqual(f(inputs).shape, compiled(inputs).shape)
use_f()
def testFuncListAttr(self):
@function.defun
def test_function(val):
def fn1():
return array_ops.ones([10])
fn2 = lambda: array_ops.ones([10]) * 2
def fn3(x=3):
return array_ops.ones([10]) * x
fn4 = functools.partial(fn3, x=4)
fn5 = functools.partial(fn3, 5)
return gen_functional_ops.case(val, [], [dtypes.float32],
[function.defun(f).get_concrete_function()
for f in (fn1, fn2, fn3, fn4, fn5)])
ones = array_ops.ones([10])
self.assertAllEqual([ones], test_function(0))
self.assertAllEqual([ones * 2], test_function(1))
self.assertAllEqual([ones * 3], test_function(2))
self.assertAllEqual([ones * 4], test_function(3))
self.assertAllEqual([ones * 5], test_function(4))
self.assertAllEqual([ones * 5], test_function(22)) # default branch
@test_util.enable_control_flow_v2
def testVariableInLoopInFunction(self):
@function.defun
def test_function():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return control_flow_ops.while_loop(loop_test, loop_body, [0.0])
self.assertEqual(test_function().shape, [])
def testDefunShapeInferenceWithCapturedResourceVariableInGraphMode(self):
with context.graph_mode():
v = resource_variable_ops.ResourceVariable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# We do not return v directly since the tensor conversion function of
# ResourceVariable returns the read value and not the resource itself.
return v._handle
compiled = def_function.function(f)
var_handle = compiled()
self.assertEqual(var_handle.dtype, dtypes.resource)
self.assertEqual(var_handle.shape, tensor_shape.TensorShape([]))
var_t = resource_variable_ops.read_variable_op(var_handle, dtype=v.dtype)
self.assertEqual(var_t.shape, tensor_shape.TensorShape([2, 2]))
def testDefunShapeInferenceWithCapturedVariableInGraphMode(self):
with context.graph_mode():
v = variables.Variable([[1, 2], [3, 4]])
def f():
x = constant_op.constant([[1, 2], [3, 4]])
out = math_ops.matmul(v, x)
self.assertEqual(out.shape, tensor_shape.TensorShape([2, 2]))
# Check that shape inference works while creating the defun
compiled = def_function.function(f)
compiled()
def testDefunShapeInferenceWithCapturedTensorListInGraphMode(self):
with context.graph_mode():
tensor_list = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=ops.convert_to_tensor([], dtype=dtypes.int32))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(1.0))
tensor_list = list_ops.tensor_list_push_back(tensor_list,
constant_op.constant(2.0))
def f():
tl, value = list_ops.tensor_list_pop_back(
tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
return tl
compiled = def_function.function(f)
output_tensor_list = compiled()
_, value = list_ops.tensor_list_pop_back(
output_tensor_list, element_dtype=dtypes.float32)
self.assertEqual(value.shape, tensor_shape.TensorShape([]))
@test_util.run_in_graph_and_eager_modes
def testDefunForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
self.v = None
defined = function.defun(variable_creator)
defined() # Create the variable.
self.assertIsInstance(
self.v, resource_variable_ops.ResourceVariable)
def testRunMetadata(self):
@def_function.function
def f(x):
return x * x
with ops.device('cpu:0'):
context.enable_run_metadata()
f(constant_op.constant(1.0))
run_metadata = context.export_run_metadata()
context.disable_run_metadata()
self.assertLen(run_metadata.partition_graphs, 1)
def testGraphModeCaptureVariable(self):
with context.graph_mode(), self.cached_session():
class HasAVar(object):
def __init__(self):
self.v = resource_variable_ops.ResourceVariable(1.0)
def call(self):
return self.v * 2
o = HasAVar()
self.evaluate(variables.global_variables_initializer())
call = def_function.function(o.call)
op = call()
self.assertAllEqual(self.evaluate(op), 2.0)
def testGraphModeManyFunctions(self):
with ops.Graph().as_default(), self.cached_session():
@def_function.function
def f(x):
return x * x
@def_function.function
def g(x):
return f(x) + 1
self.assertAllEqual(g(constant_op.constant(2.0)), 5.0)
def testDict(self):
@def_function.function
def f(x):
return {'name': x + 1}
self.assertAllEqual(f(constant_op.constant(1.0))['name'], 2.0)
def testTensorConversionWithDefun(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
self.assertAllEqual(5, f(constant_op.constant(2)))
def testTensorConversionCall(self):
@def_function.function
def f(x):
return math_ops.add(x, constant_op.constant(3))
@def_function.function
def g(x):
return f(f(x))
self.assertAllEqual(8, g(constant_op.constant(2)))
def testCallShape(self):
@def_function.function
def f(x):
return x + 1
@def_function.function
def g(x):
x = f(x)
self.assertEqual(x.shape.as_list(), [])
return None
g(constant_op.constant(1.0))
def testNestedDefunWithNoOutputAndTapedInput(self):
three = resource_variable_ops.ResourceVariable(3.0, name='v')
@def_function.function
def f(x):
# This function intentionally takes a taped variable as input,
# but does not return any values
math_ops.add(x, three)
@def_function.function
def g(x):
y = math_ops.add(x, three)
f(y)
g(three)
def testGatherResourceWithDefun(self):
with ops.device('cpu:0'):
v = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
return math_ops.reduce_sum(array_ops.gather(v, [1, 2]))
defined = def_function.function(sum_gather)
self.assertAllEqual(sum_gather(), defined())
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1', ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor', sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
]) # pyformat: disable
def testReturnCompositeTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f():
return input_ct
output_ct = f()
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
@parameterized.named_parameters([
('IndexedSlicesWithDenseShape',
_example_indexed_slices_with_dense_shape,),
('IndexedSlicesWithoutDenseShape',
_example_indexed_slices_without_dense_shape,),
('RaggedTensorRaggedRank1',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]}),
('RaggedTensorRaggedRank2',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]}),
('SparseTensor',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]}),
('RaggedTensorRaggedRank1WithSignature',
ragged_tensor.RaggedTensor.from_row_lengths,
{'values': [1, 2, 3], 'row_lengths': [2, 0, 1]},
[ragged_tensor.RaggedTensorSpec([None, None], dtypes.int32)]),
('RaggedTensorRaggedRank2WithSignature',
ragged_tensor.RaggedTensor.from_nested_row_lengths,
{'flat_values': [1, 2, 3], 'nested_row_lengths': [[1, 2], [2, 0, 1]]},
[ragged_tensor.RaggedTensorSpec([None, None, None], dtypes.int32)]),
('SparseTensorWithSignature',
sparse_tensor.SparseTensor,
{'values': [1, 2, 3], 'indices': [[0], [8], [10]], 'dense_shape': [20]},
[sparse_tensor.SparseTensorSpec([None], dtypes.int32)]),
]) # pyformat: disable
def testCompositeAsArgumentTensorWithDefun(self,
factory_fn,
factory_kwargs={},
input_signature=None):
input_ct = factory_fn(**factory_kwargs)
@def_function.function(input_signature=input_signature)
def f(x):
return x
output_ct = f(input_ct)
self.assertIsInstance(output_ct, type(input_ct))
nest.assert_same_structure(input_ct, output_ct, expand_composites=True)
input_flat = nest.flatten(input_ct, expand_composites=True)
output_flat = nest.flatten(output_ct, expand_composites=True)
for (input_component, output_component) in zip(input_flat, output_flat):
self.assertAllEqual(input_component, output_component)
def testTracedCompositeDiscardsShapeInfo(self):
# SparseTensorSpec intentionally excludes info about the number of elements
# that are in a sparse tensor (which is recorded as st.indices.shape[0] and
# st.values.shape[0]). Similarly, RaggedTensorSpec intentionally excludes
# info about the total number of values in a RaggedTensor (stored as
# rt.values.shape[0]). This test checks that the placeholders created by
# tf.function() properly mask this shape info.
@def_function.function
def f(rt, st):
self.assertEqual(st.indices.shape.as_list()[:1], [None])
self.assertEqual(st.values.shape.as_list(), [None])
return (rt, st)
rt = ragged_factory_ops.constant([[1, 2], [3]])
st = sparse_tensor.SparseTensor([[0]], [0], [10])
f(rt, st)
@test_util.run_gpu_only
def testFunctionOnDevice(self):
x = constant_op.constant([1.]).gpu()
f = def_function.function(math_ops.add)
y = f(x, x).cpu()
self.assertAllEqual(y, [2.])
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = function.defun(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testOpInFunctionWithConflictingResourceInputs(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='cpu')
v_also_cpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='also_cpu')
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name='gpu')
@def_function.function
def resource_apply_adam():
training_ops.resource_apply_adam(
v_cpu.handle,
v_gpu.handle,
v_also_cpu.handle,
1.0, # beta1_power
1.0, # beta2_power
1.0, # learning_rate
1.0, # beta1
1.0, # beta2
1.0, # epsilon,
[1.0, 1.0, 1.0], # grad
False) # use_locking
return None
with self.assertRaisesRegex(
errors.InvalidArgumentError,
'Cannot place the graph because a reference or resource edge connects '
'colocation groups with incompatible assigned devices'):
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(resource_apply_adam())
@test_util.run_gpu_only
def testFunctionHandlesInputsOnDifferentDevices(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.]).gpu()
shape = constant_op.constant([2, 1])
reshaped = reshape(value, shape).cpu()
self.assertAllEqual(reshaped, [[1], [2]])
@test_util.run_gpu_only
def testFunctionHandlesInputsPlacedOnTheWrongDeviceGracefully(self):
# The Reshape op requires the shape tensor to be placed in host memory.
reshape = def_function.function(array_ops.reshape)
value = constant_op.constant([1., 2.])
shape = constant_op.constant([2, 1]).gpu()
reshape(value, shape) # No error is raised
def testNoneOutput(self):
@def_function.function
def my_function(_):
return None
self.assertAllEqual(my_function(1), None)
def testNestedFunctions(self):
# TensorFlow function (which is what would be used in TensorFlow graph
# construction).
@tf_function.Defun(dtypes.int32, dtypes.int32)
def add(a, b):
return math_ops.add(a, b)
@def_function.function
def add_one(x):
return add(x, 1)
self.assertAllEqual(3, add_one(constant_op.constant(2)))
def testVariableCaptureInNestedFunctions(self):
v = resource_variable_ops.ResourceVariable(1, dtype=dtypes.int32)
@def_function.function
def inner_read():
return v.read_value()
@def_function.function
def outer():
return inner_read()
self.assertEqual(1, int(outer()))
def testReturnCapturedEagerTensor(self):
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(read()))
def testReturnCapturedGraphTensor(self):
with context.graph_mode(), self.cached_session():
t = constant_op.constant(1)
@def_function.function
def read():
return t
self.assertEqual(1, int(self.evaluate(read())))
def testSequenceInputs(self):
clip_by_global_norm = def_function.function(clip_ops.clip_by_global_norm)
t_list = [constant_op.constant(1.0), constant_op.constant(2.0)]
clipped_list, global_norm = clip_by_global_norm(t_list,
constant_op.constant(.2))
for t in clipped_list:
self.assertIsInstance(t, ops.Tensor)
self.assertIsInstance(global_norm, ops.Tensor)
def testNestedSequenceInputs(self):
def my_op(inputs):
a, b, c = inputs
e, f = b
g, h = e
return [a + a, [tuple([f + f, g + g]), h + h], c + c], a + f + g + h + c
my_eager_op = def_function.function(my_op)
ret = my_eager_op([
constant_op.constant(1), [(constant_op.constant(2),
constant_op.constant(3)),
constant_op.constant(4)],
constant_op.constant(5)
])
self.assertLen(ret, 2)
self.assertAllEqual(ret[0][0], 2)
self.assertAllEqual(ret[0][1][0][0], 8)
self.assertAllEqual(ret[0][1][0][1], 4)
self.assertIsInstance(ret[0][1][0], tuple)
self.assertAllEqual(ret[0][1][1], 6)
self.assertAllEqual(ret[0][2], 10)
self.assertAllEqual(ret[1], 15)
def testVariableNamesRespectNameScopesWithDefun(self):
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable(0.0, name='bar')
self.assertEqual(v.name, 'foo/bar:0')
create_variable()
def testVariableNamesRespectNameScopesWithDefunInGraph(self):
with context.graph_mode():
@def_function.function
def create_variable():
with ops.name_scope('foo', skip_on_eager=False):
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name='bar')
self.assertEqual(v.name, 'foo/bar:0')
with ops.get_default_graph().as_default():
create_variable()
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def testLayerInDefun(self):
conv = convolutional.Conv2D(
filters=1,
kernel_size=2,
kernel_initializer=init_ops.ones_initializer(),
bias_initializer=init_ops.zeros_initializer())
@function.defun
def model(x):
return conv(x)
x = array_ops.ones([1, 2, 2, 1])
y = model(x)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([[[[4.0]]]], self.evaluate(y))
# Variable lifting is somewhat different between defun/tf.function, so testing
# device placement on both makes sense.
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
@test_util.run_in_graph_and_eager_modes
def testVariablesPlacedOnOutsideDevice(self, function_decorator):
class _Obj(object):
def __init__(self):
self.v = None
@function_decorator
def f(self):
if self.v is None:
self.v = variables.Variable(1.)
return self.v + 1.
has_device = _Obj()
with ops.device('cpu:0'):
has_device.f()
self.assertIn('CPU', has_device.v.device)
@test_util.run_in_graph_and_eager_modes
def testDeviceAnnotationsRespected(self):
def multi_device_fn():
with ops.device('/cpu:0'):
s0 = test_ops.device_placement_op()
with ops.device('/cpu:1'):
s1 = test_ops.device_placement_op()
with ops.device('/cpu:2'):
s2 = test_ops.device_placement_op()
s3 = test_ops.device_placement_op()
return s0, s1, s2, s3
defined = function.defun(multi_device_fn)
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
with ops.device('/cpu:3'):
outputs = self.evaluate(defined())
# All function definitions are agnostic to call site devices.
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:3'), outputs[3])
with ops.device('/cpu:0'):
outputs = self.evaluate(defined())
self.assertLen(total_function_cache(defined), 1)
self.assertIn(compat.as_bytes('CPU:0'), outputs[0])
self.assertIn(compat.as_bytes('CPU:1'), outputs[1])
self.assertIn(compat.as_bytes('CPU:2'), outputs[2])
self.assertIn(compat.as_bytes('CPU:0'), outputs[3])
@test_util.run_in_graph_and_eager_modes
def testCallingGraphFunctionOnDifferentDevice(self):
def func():
return constant_op.constant(0)
defined = def_function.function(func)
with ops.device('cpu:0'):
cpu_graph_function = defined.get_concrete_function()
with ops.device('cpu:0'):
self.assertEqual(
self.evaluate(cpu_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
with ops.device(None):
self.assertEqual(0., self.evaluate(cpu_graph_function()))
default_graph_function = defined.get_concrete_function()
self.assertEqual(
self.evaluate(default_graph_function()), self.evaluate(func()))
with ops.device('cpu:1'):
self.assertEqual(0., self.evaluate(default_graph_function()))
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testColocateWithRespected(self):
# TODO(b/113291792): Use multiple CPUs instead of a GPU.
with ops.device('cpu:0'):
x = array_ops.identity(1.0)
with ops.device('gpu:0'):
y = array_ops.identity(1.0)
@def_function.function
def foo():
return test_ops.device_placement_op()
with ops.colocate_with(x):
self.assertIn(compat.as_bytes('CPU:0'), self.evaluate(foo()))
with ops.colocate_with(y):
self.assertIn(compat.as_bytes('GPU:0'), self.evaluate(foo()))
def testVariablesAreTracked(self):
v = resource_variable_ops.ResourceVariable(1.0)
def foo(x):
return v * x
defined = def_function.function(foo)
x = constant_op.constant([1.0])
self.assertEqual(1., self.evaluate(defined(x)))
v.assign(2.)
x = constant_op.constant([1.0, 2.0])
self.assertAllEqual([2., 4.], self.evaluate(defined(x)))
def testCacheObjectHashCollisions(self):
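# Distinct Python objects that share a __hash__ value must still produce
# separate cache entries.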
class Foo(object):
def __hash__(self):
return 42
def func(foo):
del foo
return
defined = function.defun(func)
defined(Foo())
self.assertLen(total_function_cache(defined), 1)
defined(Foo())
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([[1.0]], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorShapeDtypeCollision(self):
def func(t):
return t + t
defined = function.defun(func)
t = constant_op.constant([[1.0]], dtype=dtypes.complex64)
defined(t)
self.assertLen(total_function_cache(defined), 1)
t = constant_op.constant([1.0], dtype=dtypes.complex128)
defined(t)
self.assertLen(total_function_cache(defined), 2)
def testCacheTensorUnknownShapesCollisionRelaxedShapes(self):
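# With experimental_relax_shapes=True, retraces for inputs that differ only
# in dimension sizes should collapse into a single relaxed signature.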
def func(t):
return t + t
with context.graph_mode(), self.cached_session():
defined = function.defun(func, experimental_relax_shapes=True)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[])
defined(p)
self.assertLen(total_function_cache(defined), 1)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
defined(p)
self.assertLen(total_function_cache(defined), 2)
p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])
defined(p)
# Gradual shape relaxation is performed, and the common shape between
# [1] and [2] is one containing unknown dimensions.
self.assertLen(total_function_cache(defined), 2)
# pylint: disable=protected-access
self.assertLen(defined._function_cache.arg_relaxed_specs, 1)
relaxed_specs = (
list(defined._function_cache.arg_relaxed_specs.values())[0])
self.assertLen(relaxed_specs, 1)
relaxed_shape = relaxed_specs[0].shape
# pylint: enable=protected-access
self.assertEqual(relaxed_shape.rank, 1)
self.assertEqual(tensor_shape.dimension_value(relaxed_shape[0]), None)
t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)
defined(t)
# Shape (3,) matches the relaxed shape TensorShape([None])
self.assertLen(total_function_cache(defined), 2)
def testPythonFunctionWithDefaultArgs(self):
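# Default argument values participate in the cache key, so calls that
# resolve to the same bound values should share one concrete function.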
def func(foo, bar=1, baz=2):
del foo
del bar
del baz
return
defined = function.defun(func)
defined(0, baz=20)
def cache_keys():
"""Sanitizes cache keys of non-input metadata."""
return tuple(key[0] for key in total_function_cache(defined))
# `True` corresponds to the fact that we're executing eagerly
self.assertIn(('URRRu', (0, 1, 20)), cache_keys())
defined(1) # bar=1, baz=2
self.assertIn(('URRRu', (1, 1, 2)), cache_keys())
# This matches the previous call.
defined(foo=1)
self.assertLen(total_function_cache(defined), 2)
defined(1, 2, 3)
self.assertLen(total_function_cache(defined), 3)
self.assertIn(('URRRu', (1, 2, 3)), cache_keys())
# This matches the previous call.
defined(1, bar=2, baz=3)
self.assertLen(total_function_cache(defined), 3)
# This matches the previous call.
defined(1, baz=3, bar=2)
self.assertLen(total_function_cache(defined), 3)
def testFunctoolsPartialUnwrappedCorrectly(self):
def full_function(a, b, c=3):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2)
defined = function.defun(partial)
func_a, func_b, func_c = defined(2)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureWithMatchingInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([2])
self.assertAllEqual(a, defined(a))
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(a, defined.get_concrete_function()(a))
self.assertAllEqual(a, defined.get_concrete_function(a)(a))
self.assertAllEqual(a, defined.get_concrete_function(
tensor_spec.TensorSpec((2,), dtype=dtypes.float32))(a))
self.assertLen(total_function_cache(defined), 1)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_spec.TensorSpec((2, None), dtypes.float32)]
defined = function.defun(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out, b)
def testInputSignatureWithCompatibleInputs(self):
rank2_spec = tensor_spec.TensorSpec(shape=(None, None),
dtype=dtypes.float32)
@function.defun(input_signature=[rank2_spec])
def func(a):
self.assertEqual([None, None], a.shape.as_list())
return array_ops.shape(a)
self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([0.0, 1.0, 2.0]) # Wrong shape.
with self.assertRaisesRegex(ValueError, 'incompatible'):
func([['wrong dtype']])
def testNoKeywordOnlyArgumentsWithInputSignature(self):
if sys.version_info[0] < 3:
self.skipTest('keyword_only arguments only exist in Python 3.')
func = eval('lambda x, *, y: x') # pylint: disable=eval-used
signature = [tensor_spec.TensorSpec(None, dtypes.int32)]
with self.assertRaisesRegex(
ValueError, 'Cannot define a TensorFlow function from a Python '
'function with keyword-only arguments when input_signature is '
'provided.'):
def_function.function(func, signature)
def testNestedInputSignatures(self):
def expected_foo(a, b):
return [a, b]
@function.defun(input_signature=[
[tensor_spec.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_spec.TensorSpec((1,), dtypes.float32),
])
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
expected = expected_foo([a, a], b)
out = foo([a, a], b)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
expected = expected_foo([a, b], c)
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
c = c.numpy().tolist()
out = foo([a, b], c)
self.assertLen(total_function_cache(foo), 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
def testNestedInputSignaturesWithDict(self):
def expected_bar(a):
return a
@function.defun(input_signature=[{
'a': tensor_spec.TensorSpec((2, None), dtypes.float32),
'b': tensor_spec.TensorSpec((2, None), dtypes.float32),
'c': tensor_spec.TensorSpec((1,), dtypes.float32)}])
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
expected = expected_bar(inputs)
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
inputs = {'a': a, 'b': a, 'c': b}
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
def foo(a, b):
del a
del b
# Signatures must consist exclusively of `TensorSpec` objects.
signature = [(2, 3), tensor_spec.TensorSpec([2, 3], dtypes.float32)]
with self.assertRaisesRegex(TypeError, 'Invalid input_signature.*'):
def_function.function(foo, input_signature=signature)
# Signatures must be either lists or tuples on their outermost levels.
signature = {'t1': tensor_spec.TensorSpec([], dtypes.float32)}
with self.assertRaisesRegex(
TypeError, 'input_signature must be either a '
'tuple or a list.*'):
function.defun(foo, input_signature=signature)
@test_util.run_in_graph_and_eager_modes
def testInputsIncompatibleWithSignatureRaisesError(self):
def foo(a):
return a
signature = [tensor_spec.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = def_function.function(foo, input_signature=signature)
# Invalid shapes.
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([3]))
with self.assertRaisesRegex(ValueError, 'Python inputs incompatible.*'):
defined(array_ops.ones([2, 1]))
# Wrong number of arguments.
with self.assertRaisesRegex(
TypeError, r'takes 1 positional arguments \(as specified by the '
r'input_signature\) but 2 were given'):
defined(array_ops.ones([2]), array_ops.ones([2]))
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined()
with self.assertRaisesRegex(ValueError,
'inputs incompatible with input_signature'):
defined.get_concrete_function(
tensor_spec.TensorSpec(shape=(3,), dtype=dtypes.float32))
def testInputsIncompatibleWithNestedSignatureRaisesError(self):
def foo(a, b):
return [a, b]
signature = [[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2,
[tensor_spec.TensorSpec((1,), dtypes.float32)] * 2]
defined = function.defun(foo, input_signature=signature)
a = array_ops.ones([1])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a, a, a], [a])
with self.assertRaisesRegex(ValueError,
'Structure of Python function inputs.*'):
defined([a], [a, a, a])
defined([a, a], [a, a])
def testUnderspecifiedInputSignature(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
])
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
x = constant_op.constant(1.0)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=True)
with self.assertRaisesRegex(
TypeError, 'got keyword argument `training` '
'that was not included in input_signature'):
foo(x, training=False)
self.assertAllEqual(x.numpy(), foo(x).numpy())
def testInputSignatureWithPartialFunction(self):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2.0)
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
defined = function.defun(partial, input_signature=signature)
x = constant_op.constant(2.0)
func_a, func_b, func_c = defined(x)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
def testInputSignatureConversionWithDefaultArg(self):
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
signature = [
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.bool),
]
defined = def_function.function(foo, input_signature=signature)
a = constant_op.constant(1.0)
self.assertAllEqual(a.numpy(), defined(a))
self.assertAllEqual(a.numpy(), defined(a, training=True))
self.assertAllEqual(-a.numpy(), defined(a, training=False))
def testInputSignatureWithKeywordPositionalArgs(self):
@function.defun(input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int64)
])
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertLen(total_function_cache(foo), 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
def testInputSignatureWithKeywordArgs(self):
def foo(a, b, **kwargs):
del kwargs
return a, b
x = function.defun(
foo,
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int32)
]).get_concrete_function()
result = x(constant_op.constant(5.0), constant_op.constant(5))
self.assertAllEqual(result, [5.0, 5])
def testInputSignatureWithCompositeTensors(self):
def f(rt):
self.assertEqual(rt.values.shape.as_list(), [None])
self.assertEqual(rt.row_splits.shape.as_list(), [4])
return rt
signature = [ragged_tensor.RaggedTensorSpec(
shape=[3, None], dtype=dtypes.int32)]
defined = function.defun(f, input_signature=signature)
rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]])
out1 = defined(rt1)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out1.values, rt1.values)
self.assertAllEqual(out1.row_splits, rt1.row_splits)
# Changing the row lengths shouldn't create a new function.
rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]])
out2 = defined(rt2)
self.assertLen(total_function_cache(defined), 1)
self.assertAllEqual(out2.values, rt2.values)
self.assertAllEqual(out2.row_splits, rt2.row_splits)
# Different number of rows
rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
with self.assertRaisesRegex(ValueError, 'incompatible'):
defined(rt3)
# Different dtype
rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
with self.assertRaisesRegex(ValueError, 'Structure .* does not match'):
defined(rt4)
# Different rank
rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
with self.assertRaisesRegex(ValueError, 'does not match'):
defined(rt5)
def testInputSignatureWithVariableArgs(self):
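# A VariableSpec in the input signature lets one concrete function mutate
# whichever variable is passed in.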
def f(v):
v.assign_add(1)
signature = [
resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
]
defined = function.defun(f, input_signature=signature)
v1 = variables.Variable(0)
v2 = variables.Variable(0)
defined(v1)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 0)
defined(v=v2)
self.assertEqual(v1.numpy(), 1)
self.assertEqual(v2.numpy(), 1)
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
defined = function.defun(foo)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertLen(total_function_cache(defined), 1)
two = defined(a=a, b=b)
self.assertLen(total_function_cache(defined), 1)
three = defined(b=b, a=a)
self.assertLen(total_function_cache(defined), 1)
four = defined(a, b=b)
self.assertLen(total_function_cache(defined), 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertLen(total_function_cache(defined), 2)
six = defined(a=b, b=a)
self.assertLen(total_function_cache(defined), 2)
seven = defined(b=a, a=b)
self.assertLen(total_function_cache(defined), 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testDefuningInstanceMethod(self):
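# Decorating an instance method should bind `self` correctly while still
# tracing the tensor arguments.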
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
def one(self, tensor):
return tensor
@def_function.function
def two(self, tensor, other=integer):
return self.one(tensor), other
foo = Foo()
t = constant_op.constant(1.0)
one, two = foo.two(t)
self.assertEqual(one.numpy(), 1.0)
self.assertEqual(two.numpy(), 2)
def testDefuningInstanceMethodWithDefaultArgument(self):
integer = constant_op.constant(2, dtypes.int64)
class Foo(object):
@def_function.function
def func(self, other=integer):
return other
foo = Foo()
self.assertEqual(foo.func().numpy(), int(integer))
def testPythonCallWithSideEffects(self):
state = []
@def_function.function
def side_effecting_function():
state.append(0)
side_effecting_function()
self.assertAllEqual(state, [0])
# The second invocation should call the graph function, which shouldn't
# trigger the list append.
side_effecting_function()
self.assertAllEqual(state, [0])
# Whereas calling the python function directly should create a side-effect.
side_effecting_function.python_function()
self.assertAllEqual(state, [0, 0])
def testFunctionWithNestedFunctionCallAndSideEffects(self):
v1 = variables.Variable(1.0)
v2 = variables.Variable(1.0)
@def_function.function
def add_one(a):
a.assign_add(1.0)
# Grappler will inline calls to `add_one` into the function body; we check
# that all side effects were executed.
@def_function.function
def side_effecting_function(a, b):
add_one(a)
add_one(b)
return a + b
result = side_effecting_function(v1, v2)
self.assertEqual(result.numpy(), 4.0)
def testFunctionWithExtraAttributes(self):
@function.defun_with_attributes(attributes={'experimental_1': 'value1',
'experimental_2': 2})
def matmul(x, y):
return math_ops.matmul(x, y)
def add(x, y):
return math_ops.add(x, y)
defun_add = function.defun_with_attributes(
add, attributes={'experimental_3': True, 'experimental_4': 1.0})
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
sq = matmul(t, t)
double = defun_add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 2)
functions = list(graph._functions.values())
self.assertRegex(functions[0].definition.signature.name, '.*matmul.*')
attrs = functions[0].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_1'].s, b'value1')
self.assertEqual(attrs['experimental_2'].i, 2)
self.assertRegex(functions[1].definition.signature.name, '.*add.*')
attrs = functions[1].definition.attr
self.assertLen(attrs, 2)
self.assertEqual(attrs['experimental_3'].b, True)
self.assertEqual(attrs['experimental_4'].f, 1.0)
# pylint: enable=protected-access
def testFunctionWithInvalidAttribute(self):
@function.defun_with_attributes(attributes={'experimental_1': ['value1']})
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegex(ValueError, '.*Unsupported attribute type.*'):
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
add(t, t)
def testRegisterFunction(self):
@function.defun
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
function.register(add, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# Two sets of functions, each consisting of (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*matmul.*',
'.*forward.*matmul.*',
'.*inference.*backward.*matmul.*',
'.*inference.*add.*',
'.*forward.*add.*',
'.*inference.*backward.*add.*',
]
for i in range(len(functions)):
self.assertRegex(captured_function_names[i],
expected_func_name_regex[i])
# Check that the forward and backward functions have the correct attributes.
self.assertEqual(
functions[1].definition.attr['backward_function_name'].s,
functions[2].name)
self.assertEqual(
functions[2].definition.attr['forward_function_name'].s,
functions[1].name)
self.assertEqual(
functions[4].definition.attr['backward_function_name'].s,
functions[5].name)
self.assertEqual(
functions[5].definition.attr['forward_function_name'].s,
functions[4].name)
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
functions = list(graph._functions.values())
for i in range(len(functions)):
self.assertEqual(captured_function_names[i],
functions[i].definition.signature.name)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testRegisterConcreteFunction(self, function_decorator):
@function_decorator
def py_add(x, y):
return math_ops.add(x, y)
py_add(array_ops.ones([]), array_ops.ones([]))
add = py_add.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@function_decorator
def py_composite(x, y):
return x, add(x, y)
py_composite(array_ops.ones([]), array_ops.ones([]))
composite = py_composite.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
composite.add_to_graph()
composite.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# Two sets of functions, each consisting of (inference, forward, backward).
functions = list(graph._functions.values())
captured_function_names = [
f.definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*py_composite.*',
'.*inference.*py_add.*',
'.*forward.*py_composite.*',
'.*forward.*py_add.*',
'.*inference.*backward.*py_composite.*',
'.*inference.*backward.*py_add.*',
]
for expected, found in zip(
expected_func_name_regex,
captured_function_names):
self.assertRegex(found, expected)
composite_t, composite_double = composite(t, t)
double = add(t, t)
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
# Make sure the pre-registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
@parameterized.named_parameters(
dict(testcase_name='Defun',
function_decorator=function.defun),
dict(testcase_name='DefFunction',
function_decorator=def_function.function))
def testEagerCaptures(self, function_decorator):
with context.eager_mode():
large_tensor = array_ops.ones(shape=(256,))
self.assertGreater(256, func_graph._EAGER_CONST_THRESHOLD)
small_tensor = array_ops.ones(shape=(4,))
self.assertLessEqual(4, func_graph._EAGER_CONST_THRESHOLD)
v = resource_variable_ops.ResourceVariable(0.0)
for captured, op_type in [(large_tensor, 'Placeholder'),
(small_tensor, 'Const'), (v, 'Placeholder')]:
@function_decorator
def test_fn():
return captured + 1 # pylint: disable=cell-var-from-loop
g = test_fn.get_concrete_function().graph
internal_captures = g.internal_captures
self.assertLen(internal_captures, 1)
self.assertEqual(internal_captures[0].op.type, op_type)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(
matmul,
input_signature=[
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(2, 2), dtype=dtypes.float32)
])
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
function.register(defun_matmul, t, t)
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
# Test registering the function with the cache; note that the inputs are
# ignored.
function.register(defun_matmul)
graph = ops.get_default_graph()
self.assertLen(graph._functions, 3)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = function.defun(matmul)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
function.register(defun_matmul, t, t)
function.register(defun_matmul, t2, t2)
graph = ops.get_default_graph()
# Only one function is registered, since the input params are of the same type
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
def testCallingFunctionWithDifferentVariables(self):
@function.defun
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertLen(graph_function.inputs, 1)
self.assertEmpty(graph_function.captured_inputs)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@function.defun
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
@function.defun
def foo(x):
return x
graph_function = foo.get_concrete_function(constant_op.constant(1.0))
with self.assertRaises((TypeError, ValueError)):
graph_function('Not a Tensor.')
def testSwapImplementationWithGrapplerPlugin(self):
# Set min_graph_nodes to -1, since the graph in this test is too small and
# would otherwise be ignored by Grappler.
rewrites = rewriter_config_pb2.RewriterConfig()
rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config_proto, graph=ops.Graph(), use_gpu=True):
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU'
})
def cpu_boost(x):
return math_ops.add(x, 2.0)
@function.defun_with_attributes(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU'
})
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
function.register(cpu_boost, x)
y = gpu_boost(x)
y_value = self.evaluate(y)
if test.is_gpu_available():
self.assertEqual(y_value, 5.0)
else:
# Grappler falls back to the CPU implementation even though the GPU
# function was called.
self.assertEqual(y_value, 3.0)
def testSwapImplementationInEager(self):
if not context.executing_eagerly():
self.skipTest('eager only')
# testSharedRendezvous sets the disable_meta_optimizer flag to True. If that
# subtest runs before this one, leaving the flag set to True would cause this
# subtest to fail. To avoid that scenario, explicitly set the
# disable_meta_optimizer flag back to False here.
context.context().set_optimizer_experimental_options({
'min_graph_nodes': -1,
'implementation_selector': True,
'disable_meta_optimizer': False
})
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'CPU'})
def on_cpu(x):
return x + 2
@function.defun_with_attributes(
attributes={'api_implements': 'foo',
'api_preferred_device': 'GPU'})
def on_gpu(x):
return x + 4
@function.defun
def run_on_cpu(t):
function.register(on_cpu, t)
with ops.device('CPU:0'):
return on_gpu(t)
# Expect the on_cpu branch to run, regardless of whether a GPU is available.
self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)
def testDefunFunctionSeparateGraphs(self):
with context.graph_mode():
@function.defun
def add(x):
return x + 5
@function.defun
def maybe_add(x, should_add):
if should_add:
return add(x)
else:
return x
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 1)
self.assertLen(total_function_cache(add), 1)
maybe_add(x, False)
self.assertLen(total_function_cache(maybe_add), 2)
self.assertLen(total_function_cache(add), 1)
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(total_function_cache(maybe_add), 3)
self.assertLen(total_function_cache(add), 2)
def testCacheKeyOverlappingShapes(self):
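# Shapes [12, 1] and [1, 21] overlap under a naive flattening of shape
# digits ('121'); they must still map to distinct cache entries.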
@function.defun
def defined(t):
return t
defined(array_ops.zeros([12, 1]))
self.assertLen(total_function_cache(defined), 1)
defined(array_ops.zeros([1, 21]))
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyNestedLists(self):
@function.defun
def defined(l):
return l
a = constant_op.constant(1.)
b = constant_op.constant(2.)
c = constant_op.constant(3.)
defined([[a], b, c])
self.assertLen(total_function_cache(defined), 1)
defined([[a, b], c])
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyAttrsClass(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class TestClass(object):
a = attr.ib()
b = attr.ib()
@function.defun
def defined(l):
return l
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass(
constant_op.constant(1.),
[constant_op.constant(2.),
constant_op.constant(3.)]))
self.assertLen(total_function_cache(defined), 1)
defined(
TestClass([constant_op.constant(1.),
constant_op.constant(2.)], constant_op.constant(3.)))
self.assertLen(total_function_cache(defined), 2)
def testCacheKeyVariables(self):
@function.defun
def defined(a, b, c):
return a + b + c
x = resource_variable_ops.ResourceVariable(0.0)
y = resource_variable_ops.ResourceVariable(0.0)
z = resource_variable_ops.ResourceVariable(0.0)
# If tensor equality is not enabled, we always get a cache miss if the
# function is called with different variables. With equality enabled we
# should only get a miss if the aliasing changed.
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
defined(x, y, z)
self.assertLen(total_function_cache(defined), 1)
# Re-arranging arguments causes cache miss
defined(z, y, x)
self.assertLen(total_function_cache(defined), 2)
defined(z, y, x)
self.assertLen(total_function_cache(defined), 2)
# Aliasing causes cache miss
defined(x, x, z)
self.assertLen(total_function_cache(defined), 3)
defined(x, x, z)
self.assertLen(total_function_cache(defined), 3)
# Re-arranging arguments causes cache miss
defined(y, y, z)
self.assertLen(total_function_cache(defined), 4)
defined(y, y, z)
self.assertLen(total_function_cache(defined), 4)
# Different alias positions causes cache miss
defined(z, y, y)
self.assertLen(total_function_cache(defined), 5)
defined(z, y, y)
self.assertLen(total_function_cache(defined), 5)
x_copy = copy.deepcopy(x)
# Deep copy causes cache miss
defined(x_copy, y, z)
self.assertLen(total_function_cache(defined), 6)
defined(x_copy, y, z)
self.assertLen(total_function_cache(defined), 6)
def testVariableRetracing(self):
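# Each distinct variable object triggers its own trace, so the function can
# look up per-variable data by id().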
v1 = variables.Variable(1.)
v2 = variables.Variable(1.)
v3 = copy.deepcopy(variables.Variable(1.))
var_dict = {id(v1): constant_op.constant(1),
id(v2): constant_op.constant(2),
id(v3): constant_op.constant(3)}
@function.defun
def lookup_tensor(v):
return var_dict[id(v)]
self.assertEqual(1, lookup_tensor(v1).numpy())
self.assertEqual(2, lookup_tensor(v2).numpy())
self.assertEqual(3, lookup_tensor(v3).numpy())
def testDecoratedMethodInspect(self):
class DefunnedMiniModel(object):
@function.defun
def call(self, inputs, training=True):
pass
m = DefunnedMiniModel()
fullargspec = tf_inspect.getfullargspec(m.call)
self.assertIn('training', fullargspec.args)
def testFunctionModifiesInputList(self):
# Tests `list` methods that do in-place modification, except `list.sort`,
# since it cannot even be "defunned" in the first place.
def get_list():
return [constant_op.constant(0.), constant_op.constant(1.)]
expected_msg = '.*() should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def append(l):
l.append(constant_op.constant(0.))
append(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def extend(l):
l.extend([constant_op.constant(0.)])
extend(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def insert(l):
l.insert(0, constant_op.constant(0.))
insert(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(l):
l.pop()
pop(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def reverse(l):
l.reverse()
reverse(get_list())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def remove(l):
l.remove(l[0])
remove(get_list())
# `list.clear` is a method that exists in Python 3 but not in Python 2.
if sys.version.startswith('3'):
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(l):
l.clear()
clear(get_list())
# One last test for keyword arguments
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def kwdappend(**kwargs):
l = kwargs['l']
l.append(constant_op.constant(0.))
kwdappend(l=get_list())
def testFunctionModifiesInputDict(self):
def get_dict():
return {'t1': constant_op.constant(0.), 't2': constant_op.constant(1.)}
expected_msg = '.* should not modify'
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def clear(m):
m.clear()
clear(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def pop(m):
m.pop('t1')
pop(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def popitem(m):
m.popitem()
popitem(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def update(m):
m.update({'t1': constant_op.constant(3.)})
update(get_dict())
with self.assertRaisesRegex(ValueError, expected_msg):
@def_function.function
def setdefault(m):
m.setdefault('t3', constant_op.constant(3.))
setdefault(get_dict())
def testFunctionModifiesInputNest(self):
with self.assertRaisesRegex(ValueError, 'modify.* should not modify'):
@def_function.function
def modify(n):
n[0]['t1'].append(constant_op.constant(1.))
nested_input = [{
't1': [constant_op.constant(0.),
constant_op.constant(1.)],
},
constant_op.constant(2.)]
modify(nested_input)
with self.assertRaisesRegex(ValueError,
'modify_same_flat.* should not modify'):
# The flat list doesn't change, whereas the true structure changes.
@def_function.function
def modify_same_flat(n):
n[0].append(n[1].pop(0))
nested_input = [[constant_op.constant(0.)],
[constant_op.constant(1.),
constant_op.constant(2.)]]
modify_same_flat(nested_input)
def testExecutorType(self):
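# A nonexistent executor type should raise NotFoundError, while '',
# 'DEFAULT', and None all select the default executor.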
@function.defun
def add_five(x):
return x + 5
self.assertEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
with self.assertRaisesRegex(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
add_five(constant_op.constant(0, dtype=dtypes.int32))
for executor_type in ('', 'DEFAULT', None):
with context.function_executor_type(executor_type):
self.assertAllEqual(
5,
add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy())
@test_util.assert_no_garbage_created
def testReferenceCycles(self):
fn = function.defun(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testFunctionStackInErrorMessage(self):
if context.executing_eagerly():
# TODO(b/122736651): Remove this skipTest once fixed.
self.skipTest('Error interpolation is not working when function is '
'invoked without PartitionedCallOp.')
@def_function.function()
def fn3(x):
return x + 2
@def_function.function()
def fn2(x):
check_ops.assert_equal(fn3(x), 3)
return 2
@def_function.function()
def fn(x):
return fn2(x)
with self.assertRaises(errors.InvalidArgumentError) as cm:
fn(2)
e = cm.exception
self.assertIn('fn -> fn2', e.message)
self.assertIn('node assert_equal/Assert/Assert (defined at', e.message)
self.assertNotIn('fn3', e.message)
@test_util.run_gpu_only
def testFunctionIsNotPinned(self):
"""Tests that functions aren't pinned to the CPU by the eager runtime."""
seed1, seed2 = 79, 25
shape = constant_op.constant([4, 7])
dtype = dtypes.float32
@def_function.function
def func():
with ops.device('GPU:0'):
return gen_random_ops.random_standard_normal(
shape, dtype=dtype, seed=seed1, seed2=seed2)
with ops.device('GPU:0'):
x = func()
self.assertRegex(x.device, 'GPU')
@test_util.run_in_graph_and_eager_modes
def testShapeCaching(self):
@function.defun
def func(x):
return array_ops.shape(x)
@function.defun(
input_signature=[tensor_spec.TensorSpec([None, None], dtypes.float32)])
def calls_func(x):
return func(x)
self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))
self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))
self.assertAllEqual(
[3, 3],
self.evaluate(calls_func(array_ops.zeros([3, 3]))))
def testLimitedRetracing(self):
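# Repeated calls over a handful of distinct shapes should settle into a
# bounded number of traces rather than retracing on every call.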
trace_count = [0]
@function.defun
def func(x):
trace_count[0] += 1
return x
for _ in range(50):
func(constant_op.constant(3.))
func(constant_op.constant(4.))
func(constant_op.constant([[1., 2.]]))
func(constant_op.constant([[]]))
func(constant_op.constant([[3., 4.], [5., 6.]]))
func(constant_op.constant([[3., 4.], [5., 6.], [7., 8.]]))
# Tracing more than twice per input doesn't make sense.
self.assertLess(trace_count[0], 13)
def testLimitedRetracingWithCompositeTensors(self):
trace_count = [0]
@def_function.function
def f(x):
trace_count[0] += 1
return x
for i in range(10):
f(ragged_factory_ops.constant([[1, 2], [i]]))
f(ragged_factory_ops.constant([[1, 2], [], [3, 4, 5]]))
f(ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]]))
self.assertEqual(trace_count[0], 3)
def test_concrete_function_shape_mismatch(self):
@def_function.function
def f(argument_name):
return argument_name + 1.
f_concrete = f.get_concrete_function(constant_op.constant([1.]))
# Calling a function from eager doesn't do any shape checking above what
# kernels do while executing.
self.assertAllEqual(
[2., 3.],
f_concrete(constant_op.constant([1., 2.])).numpy())
@def_function.function
def g():
f_concrete(constant_op.constant([1., 2.]))
with self.assertRaisesRegex(ValueError, 'argument_name'):
g()
@test_util.run_in_graph_and_eager_modes
def test_shape_inference_with_symbolic_shapes(self):
@def_function.function
def _uses_symbolic_shapes(w, x, y):
x = array_ops.identity(x, name='name_collision')
x = array_ops.transpose(x, [1, 0, 2])
x_batch = array_ops.shape(x)[0]
y_batch = array_ops.shape(y)[0]
y *= w
n = y_batch // x_batch
return array_ops.reshape(y, [n, x_batch, -1])
conc = _uses_symbolic_shapes.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32))
@def_function.function
def _call_concrete():
c = constant_op.constant(1.)
array_ops.identity(c, name='name_collision')
output1 = conc(array_ops.ones([2]),
array_ops.ones([5, 4, 2]),
array_ops.ones([20, 2]))
self.assertEqual([5, 4, 2], output1.shape)
output2 = conc(array_ops.ones([3]),
array_ops.ones([5, 4, 3]),
array_ops.ones([40, 3]))
self.assertEqual([10, 4, 3], output2.shape)
return output1, output2
output1, output2 = _call_concrete()
self.assertEqual((5, 4, 2), self.evaluate(output1).shape)
self.assertEqual((10, 4, 3), self.evaluate(output2).shape)
def testAutoGraphContext(self):
@def_function.function
def test_fn():
self.assertEqual(
ag_ctx.control_status_ctx().status, ag_ctx.Status.ENABLED)
prev_status = ag_ctx.control_status_ctx().status
test_fn()
self.assertEqual(ag_ctx.control_status_ctx().status, prev_status)
@test_util.disable_tfrt('b/170435618')
def testCancelBeforeFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
c_mgr.start_cancel()
with self.assertRaises(errors.CancelledError):
cancelable_func()
@test_util.disable_tfrt('b/170435618')
def testCancelBlockedFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
def cancel_thread():
time.sleep(0.5)
c_mgr.start_cancel()
t = self.checkedThread(cancel_thread)
t.start()
with self.assertRaises(errors.CancelledError):
cancelable_func()
t.join()
@test_util.disable_tfrt('b/170435618')
def testCancelAfterFunctionExecution(self):
if not context.executing_eagerly():
self.skipTest('eager only')
q = data_flow_ops.FIFOQueue(1, dtypes.int32)
q.enqueue(37)
@def_function.function
def f():
return q.dequeue()
c_mgr = cancellation.CancellationManager()
cancelable_func = c_mgr.get_cancelable_function(f.get_concrete_function())
self.assertAllEqual(37, cancelable_func().numpy())
# Cancellation after the function executes is a no-op.
c_mgr.start_cancel()
def testAddFunctionCallback(self):
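# A registered callback should fire once per newly built concrete function,
# not once per call.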
functions = []
def function_callback(f):
functions.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback)
x_float32 = numpy.array(3.0, dtype=numpy.float32)
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Function is already created. Executing it again should not invoke the
# function callback.
self.assertAllClose(plus_one(x_float32), 4.0)
self.assertLen(functions, 1)
# Signature change leads to a new Function being built.
x_float64 = numpy.array(3.0, dtype=numpy.float64)
self.assertAllClose(plus_one(x_float64), 4.0)
self.assertLen(functions, 2)
finally:
function.clear_function_callbacks()
def testRemoveFunctionCallback(self):
functions_1 = []
def function_callback_1(f):
functions_1.append(f)
functions_2 = []
def function_callback_2(f):
functions_2.append(f)
@def_function.function
def plus_one(x):
return x + 1
try:
function.add_function_callback(function_callback_1)
function.add_function_callback(function_callback_2)
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float32)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 1)
function.remove_function_callback(function_callback_1)
# The first callback should not be invoked after remove_function_callback()
# is called.
self.assertAllClose(plus_one(numpy.array(3.0, dtype=numpy.float64)), 4.0)
self.assertLen(functions_1, 1)
self.assertLen(functions_2, 2)
finally:
function.clear_function_callbacks()
def testClearFunctionCallbacks(self):
function.add_function_callback(lambda f: None)
function.add_function_callback(lambda f: None)
self.assertLen(function._function_callbacks, 2)
function.clear_function_callbacks()
self.assertEmpty(function._function_callbacks) # pylint:disable=protected-access
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = constant_op.constant(1000)
b = constant_op.constant(200)
c = constant_op.constant(30)
d = {'a': a, 'b': b}
e = (c, 4)
# Test different argument signatures when constructing the concrete func.
for cf in [
f.get_concrete_function(d, e),
f.get_concrete_function(d, y=e),
f.get_concrete_function(y=e, x=d),
f.get_concrete_function(_spec_for_value(d), _spec_for_value(e)),
f.get_concrete_function(_spec_for_value(d), y=_spec_for_value(e)),
f.get_concrete_function(y=_spec_for_value(e), x=_spec_for_value(d))
]:
# Test different calling conventions when calling the concrete func.
for output in [
cf(d, e), # structured signature
cf(d, y=e), # structured signature w/ kwarg
cf(y=e, x=d), # structured signature w/ 2 kwargs
cf(a, b, c), # flat signature
cf(x=a, x_1=b, y=c) # flat signature w/ kwargs
]:
self.assertIsInstance(output, tuple)
self.assertLen(output, 2)
self.assertAllEqual(output[0], 1200)
self.assertAllEqual(output[1], 34)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': constant_op.constant(1000), 'b': constant_op.constant(200)}
b = (50, 3)
for cf in [ # argument y is bound to non-Tensor value (50, 3).
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 1253)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithNonTensorStringInputs(self):
@def_function.function
def f(x, y):
return string_ops.string_join([x, y])
a = constant_op.constant('a')
b = 'b'
cf = f.get_concrete_function(a, b)
for output in [cf(a), cf(x=a), cf(a, b), cf(x=a, y=b)]:
self.assertAllEqual(output, b'ab')
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 3000, 'b': 200, 'c': 9000}
b = (constant_op.constant(30), 4)
for cf in [ # argument x is bound to non-tensor value `a`
f.get_concrete_function(a, b),
f.get_concrete_function(a, y=b),
f.get_concrete_function(x=a, y=b)
]:
for output in [cf(a, b), cf(a, y=b), cf(y=b), cf(x=a, y=b)]:
self.assertAllEqual(output[0] + output[1], 3234)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionWithAllBoundNestedNonTensorInputs(self):
@def_function.function
def f(x, y):
return (x['a'] + x['b'], y[0] + y[1])
a = {'a': 5000, 'b': 500}
b = (50, 5)
cf = f.get_concrete_function(a, b)
for output in [cf(), cf(a), cf(y=b)]:
self.assertAllEqual(output[0] + output[1], 5555)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionMethodWithVarargs(self):
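# An input_signature can describe a method's *args; tracing should map the
# two scalar specs onto the varargs.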
float32_scalar = tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
class MyModel(module.Module):
@def_function.function(input_signature=[float32_scalar, float32_scalar])
def add(self, *arg):
return math_ops.add(*arg)
m = MyModel()
cf = m.add.get_concrete_function()
cf(-12.0, 3.0)
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureKeywordOrder(self):
# Check that keyword-only arguments are sorted appropriately, so that they
# feed the right tensor into each input.
@def_function.function
def g(**kwargs):
return string_ops.reduce_join(
string_ops.reduce_join(
ops.convert_to_tensor(sorted(kwargs.items())),
axis=1,
separator='='),
axis=0,
separator=', ')
s = constant_op.constant('s')
g.get_concrete_function(q=s, a=s, p=s, r=s, v=s, m=s, l=s)
self.assertAllEqual(
g(m='a', r='b', v='c', q='d', l='e', a='f', p='g'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(q='d', a='f', p='g', r='b', v='c', m='a', l='e'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
self.assertAllEqual(
g(a='f', l='e', m='a', p='g', q='d', r='b', v='c'),
b'a=f, l=e, m=a, p=g, q=d, r=b, v=c')
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (1, constant_op.constant(2)),
call_args=lambda: (1,),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (1, 2, constant_op.constant(1.0)),
call_args=lambda: (1, 2),
error=r'func\(x, y, <arg3>\) missing required arguments: <arg3>'),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2, 3),
error=r'func\(x, y\) takes 2 positional arguments but 3 were given'),
dict(
testcase_name='MissingKeywordOnlyArg',
conc_args=lambda: (1, 2),
conc_kwargs=lambda: {'c': constant_op.constant(1.0)},
call_args=lambda: (1, 2),
error=r'func\(x, y, \*, c\) missing required arguments: c'),
dict(
testcase_name='ExtraKeywordArg',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'c': constant_op.constant(1.0)},
error=r'func\(x, y\) got unexpected keyword arguments: c'),
dict(
testcase_name='ExpectedRaggedGotNest',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: ({
'a': constant_op.constant([1, 2, 3])
},),
error=r'func\(x, y\): argument x had incorrect type\n'
r' expected: RaggedTensor\n'
r" got: {'a': (Eager)?Tensor}"),
dict(
testcase_name='WrongRaggedRank',
conc_args=lambda: (ragged_factory_ops.constant([[1, 2], [3]]),),
call_args=lambda: (ragged_factory_ops.constant([[[1]]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongRaggedDType',
conc_args=lambda: (ragged_factory_ops.constant([[1]]),),
call_args=lambda: (ragged_factory_ops.constant([[1.0]]),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedDictGotTensor',
conc_args=lambda: ({
'a': constant_op.constant(1),
'b': constant_op.constant(1)
},),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='ExpectedTupleGotTensor',
conc_args=lambda:
((constant_op.constant(1), constant_op.constant(2)),),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\): argument x had incorrect type\n'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (5,),
error=r'func\(x, y\) expected a Tensor in x, but got int value 5'),
dict(
testcase_name='ExpectedIntGotDifferentInt',
conc_args=lambda: (5,),
call_args=lambda: (8,),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
r'value 5 in x, but was called with int value 8'),
dict(
testcase_name='ExpectedIntGotTensor',
conc_args=lambda: (5,),
call_args=lambda: (constant_op.constant(6),),
error=r'ConcreteFunction func\(x, y\) was constructed with int '
'value 5 in x, but was called with (Eager)?Tensor value .*'),
dict(
testcase_name='TwoValuesForArgument',
conc_args=lambda: (1, 2),
call_args=lambda: (1, 2),
call_kwargs=lambda: {'x': 3},
error=r"func\(x, y\) got two values for argument 'x'"),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionStructuredSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the structrued signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
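  # Illustrative addition for this document (a minimal sketch, not part of
  # the original test file): the structured-signature error path exercised
  # above can be reproduced directly, without the parameterized machinery.
  def testConcreteFunctionStructuredSignatureErrorSimpleExample(self):
    @def_function.function
    def demo(x, y):
      return x + y
    conc = demo.get_concrete_function(
        constant_op.constant(1), constant_op.constant(2))
    with self.assertRaisesRegex(TypeError, 'missing required arguments: y'):
      conc(constant_op.constant(1))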
# pylint: disable=g-long-lambda
@parameterized.named_parameters([
dict(
testcase_name='MissingArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
error=r'func\(x, y\) missing required arguments: y'),
dict(
testcase_name='TwoValuesForArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {
'x': constant_op.constant(1),
'y': constant_op.constant(1)
},
error=r"func\(x, y\) got two values for argument 'x'"),
dict(
testcase_name='ExtraPositionalArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
error=r'func\(x, y\) takes 2 positional arguments but 3 were given'),
dict(
testcase_name='UnexpectedKeywordArg',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1),),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x\) got unexpected keyword arguments: c'),
dict(
testcase_name='MissingVararg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2),
constant_op.constant(3)),
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, varargs_0\) missing required '
r'arguments: varargs_0'),
dict(
testcase_name='MissingKeywordArg',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': constant_op.constant(1)},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
error=r'func\(x, y, c\) missing required arguments: c'),
dict(
testcase_name='ExpectedTensorGotInt',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_args=lambda: (5, constant_op.constant(2)),
error=r'func\(x, y\): expected argument #0\(zero-based\) to be '
r'a Tensor; got int \(5\)'),
dict(
testcase_name='WrongDType',
conc_args=lambda: (constant_op.constant(1),),
call_args=lambda: (constant_op.constant(1.0),),
exception=(ValueError, errors.InvalidArgumentError,
# on xla_gpu, we get InternalError instead.
errors.InternalError)),
dict(
testcase_name='MissingKeywordArgNestPiece',
conc_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
conc_kwargs=lambda: {'c': ragged_factory_ops.constant([[1]])},
call_args=lambda: (constant_op.constant(1), constant_op.constant(2)),
call_kwargs=lambda: {'c': constant_op.constant(1)},
error=r'func\(x, y, c, c_1\) missing required arguments: c_1'),
])
# pylint: enable=g-long-lambda
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionFlatSignatureError(self,
conc_args=(),
conc_kwargs=None,
call_args=(),
call_kwargs=None,
error='.*',
exception=TypeError):
"""Tests for errors in the flat signature.
Args:
conc_args: Positional arguments used for get_concrete_function.
conc_kwargs: Keyword arguments used for get_concrete_function.
call_args: Positional arguments used to call the function.
call_kwargs: Keyword arguments used to call the function.
error: Expected exception message.
exception: Expected exception type.
"""
conc_args = conc_args() if callable(conc_args) else conc_args
conc_kwargs = conc_kwargs() if callable(conc_kwargs) else conc_kwargs or {}
call_args = call_args() if callable(call_args) else call_args
call_kwargs = call_kwargs() if callable(call_kwargs) else call_kwargs or {}
self.assertIsInstance(conc_args, tuple)
self.assertIsInstance(call_args, tuple)
self.assertIsInstance(conc_kwargs, dict)
self.assertIsInstance(call_kwargs, dict)
@def_function.function
def func(x, y=5, *varargs, **kwargs): # pylint: disable=keyword-arg-before-vararg
del y, varargs, kwargs
return x
conc = func.get_concrete_function(*conc_args, **conc_kwargs)
# Remove _function_spec, to disable the structured signature.
conc._set_function_spec(None) # pylint: disable=protected-access
with self.assertRaisesRegex(exception, error):
self.evaluate(conc(*call_args, **call_kwargs))
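  # Illustrative addition for this document (a sketch, not in the original
  # file): with the structured signature removed, the same missing-argument
  # call fails against the flat signature instead.
  def testConcreteFunctionFlatSignatureErrorSimpleExample(self):
    @def_function.function
    def demo(x, y):
      return x + y
    conc = demo.get_concrete_function(
        constant_op.constant(1), constant_op.constant(2))
    conc._set_function_spec(None)  # pylint: disable=protected-access
    with self.assertRaisesRegex(TypeError, 'missing required arguments: y'):
      conc(constant_op.constant(1))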
@test_util.run_in_graph_and_eager_modes
def testConcreteFunctionAmbiguousSignature(self):
# When both the flat & structured signatures are applicable, but they
# give different results, we use the structured signature. Note: we expect
# this to be extremely rare.
@def_function.function
def f(x, y):
return x * 10 + y
conc = f.get_concrete_function(
x=tensor_spec.TensorSpec(None, dtypes.int32, name='y'),
y=tensor_spec.TensorSpec(None, dtypes.int32, name='x'))
result = conc(x=constant_op.constant(5), y=constant_op.constant(6))
self.assertAllEqual(result, 56)
def testPrettyPrintedSignature(self):
@def_function.function
def func(x, kangaroo=None, octopus=7):
del octopus, kangaroo
return x
scalar = constant_op.constant(5)
vector = constant_op.constant([10, 10, 20])
ragged = ragged_factory_ops.constant([[10, 20], [40]])
c1 = func.get_concrete_function(scalar, vector)
c1_summary = r'func\(x, kangaroo, octopus=7\)'
c1_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: int32 Tensor, shape=\(3,\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c1.pretty_printed_signature(verbose=False), c1_summary)
self.assertRegex(
c1.pretty_printed_signature(verbose=True),
c1_summary + '\n' + c1_details)
self.assertRegex(
repr(c1), r'<ConcreteFunction func\(x, kangaroo, octopus=7\) at .*>')
self.assertRegex(
str(c1), 'ConcreteFunction {}\n{}'.format(c1_summary, c1_details))
c2 = func.get_concrete_function(scalar, ragged, 3)
c2_summary = r'func\(x, kangaroo, octopus=3\)'
c2_details = (r' Args:\n'
r' x: int32 Tensor, shape=\(\)\n'
r' kangaroo: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r' int32 Tensor, shape=\(\)')
self.assertRegex(c2.pretty_printed_signature(),
c2_summary + '\n' + c2_details)
c3 = func.get_concrete_function({'a': scalar, 'b': [ragged, ragged]})
c3_summary = r'func\(x, kangaroo=None, octopus=7\)'
c3_details = (r' Args:\n'
r" x: {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)\n'
r' Returns:\n'
r" {'a': <1>, 'b': \[<2>, <3>\]}\n"
r' <1>: int32 Tensor, shape=\(\)\n'
r' <2>: RaggedTensorSpec\(.*\)\n'
r' <3>: RaggedTensorSpec\(.*\)')
    # Python 3.5 does not guarantee deterministic iteration of dict contents,
    # which can lead to a mismatch in the pretty_printed_signature output for
    # "Args".
if sys.version_info >= (3, 6):
self.assertRegex(c3.pretty_printed_signature(),
c3_summary + '\n' + c3_details)
# pylint: disable=keyword-arg-before-vararg
@def_function.function
def func2(x, y=3, *args, **kwargs):
return (x, y, args, kwargs)
c4 = func2.get_concrete_function(scalar, 4, 5, a=scalar)
c4_summary = 'func2(x, y=4, <arg3>=5, *, a)'
self.assertEqual(c4.pretty_printed_signature(verbose=False), c4_summary)
c5 = func2.get_concrete_function(8, vector)
c5_summary = 'func2(x=8, y)'
self.assertEqual(c5.pretty_printed_signature(verbose=False), c5_summary)
def testPrettyPrintedExplicitSignatureWithKeywordArg(self): # b/159639913
@def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
def fn(a, b=1):
return a + b
concrete_fn = fn.get_concrete_function()
self.assertEqual(concrete_fn.pretty_printed_signature(False), 'fn(a)')
self.assertEqual(
concrete_fn.pretty_printed_signature(True), 'fn(a)\n'
' Args:\n'
' a: float32 Tensor, shape=<unknown>\n'
' Returns:\n'
' float32 Tensor, shape=<unknown>')
@test_util.run_in_graph_and_eager_modes
def testIndexedSlicesAsGradientsForConcreteFunctions(self):
@def_function.function
def summing_rnn(inputs):
return math_ops.reduce_sum(inputs, axis=1)
@def_function.function
def gradients(inputs):
with backprop.GradientTape() as tape:
tape.watch(inputs)
hidden = summing_rnn(inputs)
hidden = array_ops.gather(hidden, constant_op.constant([0]))
loss = math_ops.reduce_mean(hidden)
return tape.gradient(loss, inputs)
gradients(constant_op.constant([[[1.0], [2.0]]])) # No error is raised
def testFollowTypeHintsTraceBasic(self):
trace_count = [0]
def func(x: ops.Tensor):
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1) # Initial call gets traced
enabled(2)
enabled(3)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1)
disabled(2) # Retrace
disabled(3) # Retrace
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgs(self):
trace_count = [0]
def func(*args: ops.Tensor):
trace_count[0] += 1
return args
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
args = (
'abc',
'def',
) * 20
args2 = (
'def',
'abc',
) * 20
enabled(args)
enabled(args2)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(args)
disabled(args2) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithKwargs(self):
trace_count = [0]
def func(t: ops.Tensor, **kwargs: ops.Tensor):
del kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, x=1, y=1.0, z='one')
enabled(2, x=2, y=2.0, z='two')
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, x=1, y=1.0, z='one')
disabled(2, x=2, y=2.0, z='two') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithMultipleInputTypes(self):
trace_count = [0]
def func(t: ops.Tensor, *args: ops.Tensor, **kwargs: ops.Tensor):
del args, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
disabled = def_function.function(func, experimental_follow_type_hints=False)
enabled(1, constant_op.constant(1), 'str', x=4.0)
enabled(2, constant_op.constant(2), 'str2', x=5.0)
self.assertEqual(trace_count[0], 1)
trace_count = [0]
disabled(1, constant_op.constant(1), 'str', x=4.0)
disabled(2, constant_op.constant(2), 'str2', x=5.0) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyArgNamed(self):
trace_count = [0]
def func(t: ops.Tensor, i: int = 1, **kwargs): # pylint: disable=bad-whitespace
del i, kwargs
trace_count[0] += 1
return t
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 3, x=4.0, y='str')
enabled(2, 4, x=4.0, y='str') # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithNotAllNamed(self):
trace_count = [0]
def func(x, y: ops.Tensor, z: int):
del y, z
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3)
enabled(1, 20, 3) # No retrace - change in ops.Tensor typed arg
enabled(2, 2, 3) # Retrace - change in untyped arg
enabled(2, 2, 4) # Retrace - change in typed arg
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithOnlyArgsNamed(self):
trace_count = [0]
def func(x, y, *args: ops.Tensor):
del y, args
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 20, 3, 4, 5, 6)
enabled(1, 20, 3, 4, 5, 60) # No retrace - change in *args
enabled(1, 30, 7, 8, 9, 10) # Retrace - change in args
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithOnlyKwargsNamed(self):
trace_count = [0]
def func(x, y, *args, **kwargs: ops.Tensor):
del y, args, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0)
enabled(
1, 2, 3, 4, 5, 6, a=1.5, b=2.5,
c=3.5) # No retrace - change in **kwargs
enabled(100, 2, 3, 4, 5, 6, a=1.0, b=2.0, c=3.0) # Retrace - change in args
enabled(
1, 2, 3, 4, 5, 100, a=1.0, b=2.0, c=3.0) # Retrace - change in *args
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgsEquals(self):
trace_count = [0]
def func(
x: ops.Tensor = 0, # pylint:disable=bad-whitespace
y: int = 1, # pylint:disable=bad-whitespace
**kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace - change in args
enabled(x=2, y=2, z=4) # No retrace - change in args and **kwargs
enabled(x=2, y=2, z=4, u=5) # Retrace - change in **kwargs
self.assertEqual(trace_count[0], 3)
def testFollowTypeHintsTraceWithArgsEqualsTypedKwargs(self):
trace_count = [0]
def func(x, y, **kwargs: ops.Tensor):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # No retrace
enabled(x=2, y=2, z=4) # Retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsEqualsTypedArgs(self):
trace_count = [0]
def func(x: ops.Tensor, y: int, **kwargs):
del y, kwargs
trace_count[0] += 1
return x
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(x=1, y=2, z=3)
enabled(x=1, y=3, z=3) # Retrace
enabled(x=1, y=2, z=4) # Retrace
enabled(x=2, y=2, z=3) # No retrace
enabled(x=2, y=2, z=4, u=5) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithKwOnlyArgsBasic(self):
trace_count = [0]
def func(*, a: ops.Tensor = None, b=1): # pylint: disable=bad-whitespace
del b
trace_count[0] += 1
return a
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(a=1, b=2)
enabled(a=2, b=2) # No retrace
enabled(a=1, b=1) # Retrace
self.assertEqual(trace_count[0], 2)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArg(self):
trace_count = [0]
def func(arg: ops.Tensor, *args, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1000, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedArgs(self):
trace_count = [0]
def func(arg, *args: ops.Tensor, kwonly, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 200, 300, 400, kwonly=5, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwOnlyArg(self):
trace_count = [0]
def func(arg, *args, kwonly: ops.Tensor, **kwargs):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=500, kwarg1=6, kwarg2=7) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # Retrace
self.assertEqual(trace_count[0], 4)
def testFollowTypeHintsTraceWithArgsKwOnlyArgsKwargsAndTypedKwargs(self):
trace_count = [0]
def func(arg, *args, kwonly, **kwargs: ops.Tensor):
del args, kwonly, kwargs
trace_count[0] += 1
return arg
enabled = def_function.function(func, experimental_follow_type_hints=True)
enabled(1, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7)
enabled(100, 2, 3, 4, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 20, 30, 40, kwonly=5, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=50, kwarg1=6, kwarg2=7) # Retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=60, kwarg2=70) # No retrace
enabled(1, 2, 3, 4, kwonly=5, kwarg1=600, kwarg2=700) # No retrace
self.assertEqual(trace_count[0], 4)
def testWithModuleNameScope(self):
    self.skipTest('b/166158748: function does not handle this case correctly.')
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@module.Module.with_name_scope
def add(self, x, y, z=1):
if self.var is None:
return x + y + z
foo = Foo()
self.assertEqual(foo.add(2, 3), 6)
def testWithModuleNameScopeRedundantArgs(self):
    self.skipTest('b/166158748: function does not handle this case correctly.')
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@module.Module.with_name_scope
def add(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(TypeError, 'got two values for argument'):
foo.add(2, x=3) # pylint: disable=redundant-keyword-arg,no-value-for-parameter
def testWithModuleNameScopeMissingArgs(self):
    self.skipTest('b/166158748: function does not handle this case correctly.')
class Foo(module.Module):
def __init__(self):
super().__init__()
self.var = None
@def_function.function
@module.Module.with_name_scope
def add(self, x, y):
if self.var is None:
return x + y
foo = Foo()
with self.assertRaisesRegex(TypeError, 'missing required arguments: y'):
foo.add(2) # pylint: disable=no-value-for-parameter
def testShapeInferencePropagateConstNestedStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
old_shape = array_ops.shape(x)
new_shape = array_ops.stack([old_shape[0], s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedUnstackStack(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((None, None), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(x, s):
s0, _ = array_ops.unstack(array_ops.shape(x), axis=0)
new_shape = array_ops.stack([s0, s], axis=0)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(3, 6), dtype=dtypes.int32)
])
def g(x):
y = f(x, s=5)
assert y.shape.as_list() == [3, 5], y.shape.as_list()
return y
self.assertAllEqual(
g(array_ops.zeros([3, 6], dtype=dtypes.int32)), array_ops.ones([3, 5]))
def testShapeInferencePropagateConstNestedConcat(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = f(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
def testShapeInferencePropagateConstDoubleNested(self):
@def_function.function(input_signature=[
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
tensor_spec.TensorSpec((), dtype=dtypes.int32),
])
def f(d1, d2, d3):
new_shape = array_ops.concat([[d1], [d2], [d3]], axis=-1)
y = array_ops.ones(shape=new_shape, dtype=dtypes.int32)
return y
@def_function.function()
def g():
y = def_function.function(f)(1, 2, 3)
assert y.shape.as_list() == [1, 2, 3], y.shape.as_list()
return y
self.assertAllEqual(g(), array_ops.ones([1, 2, 3]))
@test_util.run_v2_only
def testControlDependencyAfterInline(self):
v = variables.Variable(0.)
@def_function.function
def assign():
return v.assign(1.)
@def_function.function
def assign_add():
return v.assign_add(1.)
@def_function.function
def f():
check_ops.assert_equal_v2(assign(), 1.)
check_ops.assert_equal_v2(assign_add(), 2.)
# We don't have a way to inspect the inlined graph in Python, so we run it
# multiple times to have more confidence the dependency is correct.
for _ in range(30):
f()
@test_util.run_v2_only
def testReadInFuncWriteOutside(self):
# Run many times since we are testing for a potential race condition.
for _ in range(30):
# pylint: disable=cell-var-from-loop
v = variables.Variable(1.)
@def_function.function
def add_one():
return v + 1.
@def_function.function
def get_v_plus_one():
v_plus_one = add_one()
v.assign_add(2.0)
return v_plus_one
self.assertAllEqual(get_v_plus_one(), 2.0)
class MultiDeviceTest(test.TestCase, parameterized.TestCase):
@test_util.run_gpu_only
def testMultiDeviceOutput(self):
"""Tests that functions can produce outputs on multiple devices."""
@function.defun
def func(a, b, transpose_a):
with ops.device('/device:CPU:0'):
m1 = math_ops.matmul(a, b, transpose_a=transpose_a)
with ops.device('/device:GPU:0'):
m2 = math_ops.matmul(a, b, transpose_a=transpose_a)
return m1, m2
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
m1, m2 = func(t, t, transpose_a=True)
self.assertAllEqual(m1.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), [[10, 14], [14, 20]])
self.assertRegex(m2.backing_device, 'GPU')
@test_util.run_gpu_only
def testEmptyBody(self):
@function.defun
def func(a, b):
return b, a
with ops.device('/device:CPU:0'):
a = array_ops.identity(3.0)
with ops.device('/device:GPU:0'):
b = array_ops.identity(5.0)
m1, m2 = func(a, b)
self.assertAllEqual(m1.numpy(), 5.0)
self.assertRegex(m1.backing_device, 'GPU')
self.assertAllEqual(m2.numpy(), 3.0)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceInt32(self):
"""Tests that multi-device functions can take and output INT32s.
When an INT32 device tensor is fed into a function, it is copied to CPU
by the eager runtime. The function sees all INT32 inputs on CPU.
We set allocator attribute 'on_host' for INT32 outputs. They can be
partitioned into the GPU component function, but will be allocated on
CPU nevertheless.
    There is now experimental support for `ints_on_device` in
    FunctionLibraryRuntime, which we could try here.
"""
with ops.device('/device:CPU:0'):
int_cpu = constant_op.constant(3, dtype=dtypes.int32)
resource = resource_variable_ops.ResourceVariable(5, dtype=dtypes.int32)
with ops.device('/device:GPU:0'):
int_gpu = constant_op.constant(7, dtype=dtypes.int32)
@function.defun
def func(int_cpu, resource, int_gpu):
with ops.device('/device:CPU:0'):
m1 = int_cpu * resource + int_gpu
with ops.device('/device:GPU:0'):
# This computation will happen on GPU but m2 will be copied to CPU.
m2 = int_gpu * resource + int_cpu + 1
return m1, m2
m1, m2 = func(int_cpu, resource, int_gpu)
self.assertAllEqual(m1.numpy(), 22)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 39)
self.assertRegex(m2.backing_device, 'CPU')
# flip arguments
m1, m2 = func(int_gpu, resource, int_cpu)
self.assertAllEqual(m1.numpy(), 38)
self.assertRegex(m1.backing_device, 'CPU')
self.assertAllEqual(m2.numpy(), 23)
self.assertRegex(m2.backing_device, 'CPU')
@test_util.run_gpu_only
def testMultiDeviceColocateWith(self):
"""Tests that function's outputs respect colocation constraints."""
@function.defun
def func(a, b):
with ops.colocate_with(a):
ra = 2 * a
with ops.colocate_with(b):
rb = 3 * b
return ra, rb
devices = ['/device:CPU:0', '/device:GPU:0']
for dev1, dev2 in itertools.product(devices, devices):
with ops.device(dev1):
a = array_ops.identity(1.0)
with ops.device(dev2):
b = array_ops.identity(10.0)
ra, rb = func(a, b)
self.assertEqual(ra.numpy(), 2.0)
self.assertRegex(ra.backing_device, dev1)
self.assertEqual(rb.numpy(), 30.0)
self.assertRegex(rb.backing_device, dev2)
@test_util.run_gpu_only
def testMultiDeviceResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
c2 = resource_variable_ops.ResourceVariable(7.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
g2 = resource_variable_ops.ResourceVariable(5.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * g2
with ops.device('/device:GPU:0'):
result2 = resource2 * c2
return result1, result2
r1, r2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
    # Call with flipped inputs. Check that we look at the resource's
    # device and reinstantiate the function when the inputs' devices change.
r1, r2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
@test_util.run_gpu_only
def testOutputResources(self):
with ops.device('/device:CPU:0'):
c1 = resource_variable_ops.ResourceVariable(2.0)
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun
def func(resource1, resource2):
with ops.device('/device:CPU:0'):
result1 = resource1 * 5
with ops.device('/device:GPU:0'):
result2 = resource2 * 7
return result1, resource1.handle, result2, resource2.handle
r1, res1, r2, res2 = func(c1, g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 21.0)
self.assertRegex(r2.backing_device, 'GPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 2.0)
check_handle(res2, 3.0)
    # Call with flipped inputs to make sure the function is
# reinstantiated and eager runtime does not mess up the device assignment
# for ops consuming handles returned from defuns.
r1, res1, r2, res2 = func(g1, c1)
self.assertEqual(r1.numpy(), 15.0)
self.assertRegex(r1.backing_device, 'CPU')
self.assertEqual(r2.numpy(), 14.0)
self.assertRegex(r2.backing_device, 'GPU')
check_handle(res1, 3.0)
check_handle(res2, 2.0)
@test_util.run_gpu_only
def testPassResourceThroughNestedFunctionCall(self):
"""Test passing GPU resource to noinline function call placed on CPU.
PartitionedCallOp must not enforce any particular device assignment for the
    resource output. The inner function is marked `_nospecialize`, so Grappler
    will not prune the unused function output.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True,
'_nospecialize': True
})
def inner(resource1):
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, _ = inner(resource1)
return r1
r1 = outer(g1)
self.assertEqual(r1.numpy(), 6.0)
self.assertRegex(r1.backing_device, 'CPU')
@test_util.run_gpu_only
def testReturnResourceFromNestedFunctionCall(self):
"""Test returning GPU resource from noinline function call placed on CPU.
When inferring output devices for the return value, do not set a device for
returns of DT_RESOURCE data type based on the device assignment of the node
    that produced that resource. As an example, a function call placed on CPU
    can return resources that live on GPU.
"""
with ops.device('/device:GPU:0'):
g1 = resource_variable_ops.ResourceVariable(3.0)
@function.defun_with_attributes(attributes={
'_noinline': True
})
def inner(resource1):
resource1.assign_add(2.0)
return resource1 * 2, resource1.handle
@function.defun
def outer(resource1):
with ops.device('/device:CPU:0'):
r1, res1 = inner(resource1)
return r1, res1
r1, res1 = outer(g1)
self.assertEqual(r1.numpy(), 10.0)
self.assertRegex(r1.backing_device, 'CPU')
def check_handle(handle, expected_value):
self.assertRegex(handle.backing_device, 'CPU')
tensor = gen_resource_variable_ops.read_variable_op(
handle, dtypes.float32)
self.assertEqual(tensor.numpy(), expected_value)
# Check that handles returned from functions are on CPU and an op using
# the resource handle is correctly placed on the device backing the
# resource.
check_handle(res1, 5.0)
@test_util.run_gpu_only
def testComplexInputOutputDevicePattern(self):
"""Tests input/output mapping logic in partitioning."""
with ops.device('/device:CPU:0'):
rc0 = resource_variable_ops.ResourceVariable(2.0)
rc1 = resource_variable_ops.ResourceVariable(3.0)
cc0 = array_ops.identity(5.0)
cc1 = array_ops.identity(7.0)
with ops.device('/device:GPU:0'):
rg0 = resource_variable_ops.ResourceVariable(11.0)
rg1 = resource_variable_ops.ResourceVariable(13.0)
cg0 = array_ops.identity(17.0)
cg1 = array_ops.identity(19.0)
# Make sure tensors are on expected devices.
for tensor in [cc0, cc1]:
self.assertRegex(tensor.backing_device, 'CPU:0')
for tensor in [cg0, cg1]:
self.assertRegex(tensor.backing_device, 'GPU:0')
@function.defun
def func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1):
with ops.device('/device:CPU:0'):
m1 = rc0 * cg0
with ops.device('/device:GPU:0'):
m2 = rg0 * cc0
with ops.device('/device:CPU:0'):
r1 = 1000.0 * m2 + rc1 * cg1
with ops.device('/device:GPU:0'):
r2 = 1000.0 * m1 + rg1 * cc1
return r1, r2, m2, m1
r1, r2, m2, m1 = func(rc0, cc0, cg0, rc1, cg1, rg0, rg1, cc1)
self.assertRegex(m1.backing_device, 'CPU')
self.assertRegex(r1.backing_device, 'CPU')
self.assertRegex(m2.backing_device, 'GPU')
self.assertRegex(r2.backing_device, 'GPU')
self.assertEqual(m1.numpy(), 34.0)
self.assertEqual(r1.numpy(), 55000.0 + 3.0 * 19.0)
self.assertEqual(m2.numpy(), 55.0)
self.assertEqual(r2.numpy(), 34000.0 + 13.0 * 7.0)
@test_util.run_gpu_only
def testArgumentPruning(self):
"""Tests functions taking unnecessary arguments."""
with ops.device('/device:CPU:0'):
c1 = constant_op.constant(5.0)
c2 = constant_op.constant(7.0)
with ops.device('/device:GPU:0'):
g1 = constant_op.constant(11.0)
g2 = constant_op.constant(13.0)
g3 = constant_op.constant(17.0)
@function.defun
def func(g1, g2, c1, g3, c2): # pylint: disable=unused-argument
# arguments g1 and g2 are unused and can be pruned by grappler.
return c1 * g3 * c2
result = func(g1, g2, c1, g3, c2)
self.assertEqual(result.numpy(), 5.0 * 7.0 * 17.0)
def testNestedCallWatchedVariables(self):
v = variables.Variable(4.)
@def_function.function
def f():
return v ** 2.
with backprop.GradientTape() as tape:
f()
self.assertEqual((v,), tape.watched_variables())
@def_function.function
def g():
return f()
with backprop.GradientTape() as tape:
g()
self.assertEqual((v,), tape.watched_variables())
# f() can rely on the variable being read during its trace. g() checks that
# variables from a function which knows about them are recorded on the
# tape. h() tests that functions forward knowledge of variables to callers.
@def_function.function
def h():
return g()
with backprop.GradientTape() as tape:
h()
self.assertEqual((v,), tape.watched_variables())
def testDeferredCapture(self):
value = 1.0
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(None))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# After changing the value of `value` the function call should return a
# different result.
value = 2.0
self.assertAllEqual(lazy_capture(2.0), 4.0)
def testDeferredCaptureWithKey(self):
value0 = 1.0
value1 = 2.0
@def_function.function
def lazy_capture(x):
w = ops.get_default_graph().capture_call_time_value(
lambda: value0, tensor_spec.TensorSpec(None), key=0)
y = ops.get_default_graph().capture_call_time_value(
lambda: value1, tensor_spec.TensorSpec(None), key=1)
def bad_closure():
raise ValueError('Should not run')
z = ops.get_default_graph().capture_call_time_value(
bad_closure, tensor_spec.TensorSpec(None), key=1)
return x + y + w + z
self.assertAllEqual(lazy_capture(2.0), 7.0)
value0 = 2.0
value1 = 3.0
self.assertAllEqual(lazy_capture(2.0), 10.0)
def testDeferredCaptureTypeError(self):
value = constant_op.constant(1.0)
@def_function.function
def lazy_capture(x):
y = ops.get_default_graph().capture_call_time_value(
lambda: value, tensor_spec.TensorSpec(()))
return x + y
self.assertAllEqual(lazy_capture(2.0), 3.0)
# dtype mismatch
value = constant_op.constant(1)
with self.assertRaisesRegex(ValueError, 'Value .* to a tensor with dtype'):
lazy_capture(2.0)
# shape mismatch
value = constant_op.constant([1.0])
with self.assertRaisesRegex(ValueError, 'Value .* shape'):
lazy_capture(2.0)
def testDeferredCaptureReturnNestWithCompositeTensor(self):
i_s = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
r_t = ragged_factory_ops.constant([[[1, 2], [3]], [[4, 5, 6]]])
s_t = sparse_tensor.SparseTensor(
values=[1, 2, 3], indices=[[0], [8], [10]], dense_shape=[20])
@def_function.function
def lazy_capture():
y = ops.get_default_graph().capture_call_time_value(
lambda: {'i': i_s, 't': (r_t, s_t)},
{'i': indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int32),
't': (ragged_tensor.RaggedTensorSpec([2, None, None], dtypes.int32),
sparse_tensor.SparseTensorSpec([None], dtypes.int32))})
return y['i'], y['t']
i, (r, s) = lazy_capture()
self.assertAllEqual(i_s.values, i.values)
self.assertAllEqual(i_s.indices, i.indices)
self.assertAllEqual(i_s.dense_shape, i.dense_shape)
self.assertAllEqual(r_t, r)
self.assertAllEqual(s_t.indices, s.indices)
self.assertAllEqual(s_t.values, s.values)
self.assertAllEqual(s_t.dense_shape, s.dense_shape)
def testDeferredCaptureCompositeTensorSpecTypeMismatch(self):
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64))
@def_function.function
def lazy_capture():
return ops.get_default_graph().capture_call_time_value(
lambda: value,
indexed_slices.IndexedSlicesSpec(dtype=dtypes.int32))
# Type matches spec.
lazy_capture()
# Extra dense shape component.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1], dtype=dtypes.int64),
constant_op.constant([2]))
with self.assertRaises(ValueError):
lazy_capture()
# Index dtype mismatch int32 vs. int64.
value = indexed_slices.IndexedSlices(
constant_op.constant([1, 2]),
constant_op.constant([0, 1]))
with self.assertRaises(ValueError):
lazy_capture()
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
|
aam-at/tensorflow
|
tensorflow/python/eager/function_test.py
|
Python
|
apache-2.0
| 167,367
|
[
"Octopus"
] |
ca9be24d16b551f98262919dcec3e689c7285c95938908e39287a9f5020463a5
|
from datetime import datetime
import unittest
from pheme.util.config import Config
from pheme.util.pg_access import AlchemyAccess
from pheme.warehouse.tables import create_tables
from pheme.warehouse.tables import HL7_Dx
from pheme.warehouse.tables import HL7_Msh
from pheme.warehouse.tables import HL7_Nte
from pheme.warehouse.tables import HL7_Obr
from pheme.warehouse.tables import HL7_Obx
from pheme.warehouse.tables import HL7_RawMessage
from pheme.warehouse.tables import HL7_Spm
from pheme.warehouse.tables import HL7_Visit
def setup_module():
"""Create a fresh db (once) for all tests in this module"""
c = Config()
if c.get('general', 'in_production'): # pragma: no cover
raise RuntimeError("DO NOT run destructive test on production system")
cfg_value = lambda v: c.get('warehouse', v)
create_tables(cfg_value('create_table_user'),
cfg_value('create_table_password'),
cfg_value('database'),
enable_delete=True)
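# For reference: a minimal config layout that would satisfy the lookups
# above and in setUp() below. The section and option names come from the
# code; the concrete values are hypothetical (this block is an illustration
# added for this document, not part of the original repository):
#
#   [general]
#   in_production = False
#
#   [warehouse]
#   database = pheme_unittest
#   database_user = unittest
#   database_password = unittest
#   create_table_user = postgres
#   create_table_password = postgres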
class testSqlAObjects(unittest.TestCase):
"""We should be able to create and work with objects
that are based on tables in the database
"""
def setUp(self):
c = Config()
cfg_value = lambda v: c.get('warehouse', v)
self.alchemy = AlchemyAccess(database=cfg_value('database'),
host='localhost',
user=cfg_value('database_user'),
password=cfg_value('database_password'))
self.session = self.alchemy.session
def tearDown(self):
# Purge the unittest hl7_msh and all related data
self.session.delete(self.msh)
self.session.commit()
self.alchemy.disconnect()
def testABuildTables(self):
"""We need to build dependent tables in the correct order.
"""
self.tHL7_Msh()
self.tHL7_RawMessage()
self.tHL7_Visit()
self.tHL7_Dx()
self.tHL7_Obr()
self.tHL7_Obx()
def tHL7_RawMessage(self):
"""Create an HL7_RawMessage object that is saved to the database"""
mess = HL7_RawMessage(hl7_raw_message_id=1,
message_control_id=u'control_id',
raw_data=u'some raw data')
#Add the new message to the session
self.session.add(mess)
self.session.commit()
query = self.session.query(HL7_RawMessage).\
filter(HL7_RawMessage.hl7_raw_message_id == 1)
self.assert_(query.count() == 1,
'The message we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_RawMessage 1>',
'Message string invalid.\nExpected: '\
'<HL7_RawMessage 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_raw_message_id == 1,
'hl7_raw_message_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_raw_message_id)
self.assert_(result.message_control_id == 'control_id',
'message_control_id invalid.\nExpected: '\
'control_id\nGot: %s' % result.message_control_id)
self.assert_(result.raw_data == 'some raw data',
'raw_data invalid.\nExpected: some raw '\
'data\nGot: %s' % result.raw_data)
def tHL7_Msh(self):
"""Create an HL7_Msh object that is saved to the database"""
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
#Add the new msh to the session
self.session.add(self.msh)
self.session.commit()
query = self.session.query(HL7_Msh)
self.assert_(query.count() == 1,
'The msh we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Msh 1>',
'Message string invalid.\nExpected: '\
                     '<HL7_Msh 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_msh_id == 1,
'hl7_msh_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_msh_id)
self.assert_(result.message_control_id == 'control_id',
'message_control_id invalid.\nExpected: '\
'control_id\nGot: %s' % result.message_control_id)
self.assert_(result.message_type == 'message type',
'message_type invalid.\nExpected: message '\
'type\nGot: %s' % result.message_type)
self.assert_(result.facility == 'facility',
'facility invalid.\nExpected: '\
'facility\nGot: %s' % result.facility)
self.assert_(result.message_datetime ==
                     datetime(2007, 1, 1, 0, 0),
'message_datetime invalid.\nExpected: '\
'2007-01-01 00:00:00\nGot: %s' % result.message_datetime)
self.assert_(result.batch_filename == '183749382629734',
'batch_filename invalid.\nExpected: '\
'183749382629734\nGot: %s' % result.batch_filename)
def tHL7_Visit(self):
"""Create an HL7_Visit object that is saved to the database"""
visit = HL7_Visit(hl7_visit_id=1,
visit_id=u'45',
patient_id=u'patient id',
zip=u'zip',
                          admit_datetime=datetime(2007, 1, 1),
gender=u'F',
dob=u'2001,01',
chief_complaint=u'Pain',
patient_class=u'1',
hl7_msh_id=1,
disposition='01',
state='WA',
admission_source='Emergency room',
assigned_patient_location='MVMGREF')
#Add the new msh to the session
self.session.add(visit)
self.session.commit()
query = self.session.query(HL7_Visit)
self.assert_(query.count() == 1,
'The visit we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Visit 1>',
'Message string invalid.\nExpected: '\
'<HL7_Visit 1>\nGot: %s' % result)
#Make sure all the fields came out as expected
self.assert_(result.hl7_visit_id == 1,
'hl7_visit_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_visit_id)
self.assert_(result.visit_id == '45',
'visit_id invalid.\nExpected: 45\nGot: '\
'%s' % result.visit_id)
self.assert_(result.patient_id == 'patient id',
'patient_id invalid.\nExpected: patient '\
'id\nGot: %s' % result.patient_id)
self.assert_(result.zip == 'zip',
'zip invalid.\nExpected: zip\nGot: %s' % result.zip)
        self.assert_(result.admit_datetime == datetime(2007, 1, 1),
'admit_datetime invalid.\nExpected: '\
'2007-01-01 00:00:00\nGot: %s' % result.admit_datetime)
self.assert_(result.gender == 'F',
'gender invalid.\nExpected: F\nGot: %s' % result.gender)
        self.assert_(result.dob == '2001,01',
                     'dob invalid.\nExpected: '\
                     '2001,01\nGot: %s' % result.dob)
self.assert_(result.chief_complaint == 'Pain',
'chief_complaint invalid.\nExpected: '\
'Pain\nGot: %s' % result.chief_complaint)
self.assert_(result.patient_class == '1',
'patient_class invalid.\nExpected: '\
'1\nGot: %s' % result.patient_class)
self.assert_(result.disposition == '01',
'disposition invalid.\nExpected: '\
'01\nGot: %s' % result.disposition)
self.assertEquals(result.state, 'WA')
self.assertEquals(result.admission_source, 'Emergency room')
self.assertEquals(result.assigned_patient_location, 'MVMGREF')
def tHL7_Dx(self):
"""Create an HL7_Dx object that is saved to the database"""
dx = HL7_Dx(hl7_dx_id=1,
dx_code=u'dx code',
dx_description=u'description',
dx_type=u'A',
hl7_msh_id=1)
#Add the new msh to the session
self.session.add(dx)
self.session.commit()
query = self.session.query(HL7_Dx)
self.assert_(query.count() == 1,
'The dx we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Dx 1>',
'Message string invalid.\nExpected: '\
'<HL7_Dx 1>\nGot: %s' % result)
self.assert_(result.hl7_dx_id == 1,
'hl7_dx_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_dx_id)
self.assert_(result.dx_code == 'dx code',
'dx_code invalid.\nExpected: dx code\nGot: '\
'%s' % result.dx_code)
self.assert_(result.dx_description == 'description',
'dx_description invalid.\nExpected: '\
'description\nGot: %s' % result.dx_description)
self.assert_(result.dx_type == 'A',
'dx_type invalid.\nExpected: A\nGot: %s' % result.dx_type)
def tHL7_Obr(self):
"""Create an HL7_Obr object that is saved to the database"""
dt = datetime.now()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=dt,
specimen_source='NASAL')
#Add the new msh to the session
self.session.add(obr)
self.session.commit()
query = self.session.query(HL7_Obr)
self.assert_(query.count() == 1,
'The obr we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Obr 1>',
'Message string invalid.\nExpected: '\
'<HL7_Obr 1>\nGot: %s' % result)
self.assert_(result.hl7_obr_id == 1,
'hl7_obr_id invalid.\nExpected: 1\nGot: '\
'%s' % result.hl7_obr_id)
self.assert_(result.loinc_code == 'loinc code',
'loinc_code invalid.\nExpected: '\
'loinc code\nGot: %s' % result.loinc_code)
self.assert_(result.loinc_text == 'loinc text',
'loinc_text invalid.\nExpected: '\
'loinc text\nGot: %s' % result.loinc_text)
self.assert_(result.alt_text == 'alt text',
'alt text invalid.\nExpected: alt '\
'text\nGot: %s' % result.alt_text)
self.assertEquals(result.status, 'W')
self.assertEquals(result.report_datetime, dt)
self.assertEquals(result.specimen_source, 'NASAL')
def tHL7_Obx(self):
"""Create an HL7_Obx object that is saved to the database"""
obx = HL7_Obx(hl7_obx_id=1,
hl7_obr_id=1,
value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=1,
performing_lab_code='SHMC')
#Add the new msh to the session
self.session.add(obx)
self.session.commit()
query = self.session.query(HL7_Obx)
self.assert_(query.count() == 1,
'The obx we created was not found')
result = query.first()
#Check that the __repr__ is working as expected
self.assert_(result.__repr__() == '<HL7_Obx 1>',
'Message string invalid.\nExpected: '\
'<HL7_Obx 1>\nGot: %s' % result)
self.assert_(result.hl7_obx_id == 1,
'hl7_obx_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_obx_id)
self.assert_(result.hl7_obr_id == 1,
'hl7_obr_id invalid.\nExpected: '\
'1\nGot: %s' % result.hl7_obr_id)
self.assert_(result.value_type.strip() == 'vt',
'value_type invalid.\nExpected: '\
'vt\nGot: %s' % result.value_type)
self.assert_(result.observation_text == 'observation text',
'observation_text invalid.\nExpected: '\
'observation text\nGot: %s' % result.observation_text)
self.assert_(result.observation_result == 'observation result',
'observation_result invalid.\nExpected: '\
'observation result\nGot: %s' % result.observation_result)
self.assert_(result.units == 'units',
'units invalid.\nExpected: units\nGot: %s'
% result.units)
self.assert_(result.result_status == 'result status',
'result_status invalid.\nExpected: result '\
'status\nGot: %s' % result.result_status)
self.assert_(result.observation_datetime == datetime(2001, 1, 1),
'observation_datetime invalid.\nExpected: '\
'2001-01-01 00:00:00\nGot: %s' %
result.observation_datetime)
self.assertEquals(result.performing_lab_code, 'SHMC')
def testObxRelation(self):
"Use sqlalchemy relations for automated obx/obr relations "
# Need an HL7_Msh for foreign key constraint conformance
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
obr = HL7_Obr(loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=self.msh.hl7_msh_id)
obx = HL7_Obx(value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=self.msh.hl7_msh_id)
obr.obxes.append(obx)
self.session.add(self.msh)
self.session.commit()
self.session.add(obr)
self.session.commit()
# See if the commit cascaded. If so, the obx will have a
# valid pk and the obr foreign key set.
self.assertEquals(obr.hl7_obr_id, obx.hl7_obr_id)
# Now query for the obr, see if the obx is in tow.
roundTripObr = self.session.query(HL7_Obr).one()
self.assertTrue(roundTripObr.hl7_obr_id > 0)
self.assertEquals(type(roundTripObr.obxes[0]), type(obx))
self.assertEquals(roundTripObr.obxes[0], obx)
def testNte(self):
"""Test HL7_Nte table access """
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
self.session.add(self.msh)
self.session.commit()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=datetime.now(),
specimen_source='NASAL')
self.session.add(obr)
self.session.commit()
obx = HL7_Obx(hl7_obx_id=1,
hl7_obr_id=1,
value_type='vt',
observation_id=u'observation id',
observation_text=u'observation text',
observation_result=u'observation result',
units=u'units',
result_status=u'result status',
observation_datetime=datetime(2001, 1, 1),
hl7_msh_id=1,
performing_lab_code=u'SHMC',
sequence=u'1.1',)
self.session.add(obx)
self.session.commit()
note = HL7_Nte(sequence_number=1,
note='fascinating unittest note',
hl7_obx_id=1)
self.session.add(note)
self.session.commit()
query = self.session.query(HL7_Nte)
self.assertEquals(query.count(), 1)
self.assertEquals(query.one().note,
'fascinating unittest note')
self.assertEquals(query.one().sequence_number, 1)
def testSpecimenSource(self):
"""Test HL7_Spm table access """
self.msh = HL7_Msh(hl7_msh_id=1,
message_control_id=u'control_id',
message_type=u'message type',
facility=u'facility',
                           message_datetime=datetime(2007, 1, 1),
batch_filename=u'183749382629734')
self.session.add(self.msh)
self.session.commit()
obr = HL7_Obr(hl7_obr_id=1,
loinc_code=u'loinc code',
loinc_text=u'loinc text',
alt_text=u'alt text',
hl7_msh_id=1,
status='W',
report_datetime=datetime.now(),
specimen_source='NASAL')
self.session.add(obr)
self.session.commit()
spm = HL7_Spm(id='123', description="your belly",
code='bly', hl7_obr_id=1)
self.session.add(spm)
self.session.commit()
query = self.session.query(HL7_Spm)
self.assertEquals(query.count(), 1)
self.assertEquals(query.one().description, 'your belly')
self.assertEquals(query.one().code, 'bly')
if '__main__' == __name__: # pragma: no cover
unittest.main()
|
pbugni/pheme.warehouse
|
pheme/warehouse/tests/test_warehousedb.py
|
Python
|
bsd-3-clause
| 19,638
|
[
"VisIt"
] |
eb615f33f249fdab9124e42d62744290ba1252e56ddfb11fa7f6658c93db9c2d
|
import math
import numpy
from chainer import cuda, Function
def _as_mat(x):
    return x.reshape(x.shape[0], x.size // x.shape[0])
class Linear(Function):
"""Implementation of a linear function (a.k.a. fully-connected layer or affine
transformation).
This function holds a weight matrix ``W`` and a bias vector ``b``.
The weight matrix ``W`` has shape ``(out_size, in_size)``.
This matrix is initialized with i.i.d. Gaussian samples, each of which has zero
    mean and standard deviation :math:`\sqrt{1/\\text{in_size}}`.
    The standard deviation is scaled by the factor ``wscale`` if specified.
The bias vector ``b`` is of size ``out_size``.
Each element is initialized with the ``bias`` value.
If ``nobias`` argument is set to True, then this function does not hold a
bias vector.
Let :math:`X` be an input matrix, and :math:`W, b` the weight matrix and
the bias vector, respectively.
Then, the output matrix :math:`Y` is computed by :math:`Y = XW^\\top + b`,
where the addition by :math:`b` is broadcasted across the minibatch.
Args:
in_size (int): Dimension of input vectors.
out_size (int): Dimension of output vectors.
wscale (float): Scaling factor of the weight matrix.
bias (float): Initial bias value.
nobias (bool): If True, then this function does not use the bias.
.. note::
This function accepts an input variable of a non-matrix array.
In this case, the leading dimension is treated as the batch dimension,
and the other dimensions are reduced to one dimension.
"""
def __init__(self, in_size, out_size, wscale=1, bias=0, nobias=False):
self.W = numpy.random.normal(
0, wscale * math.sqrt(1. / in_size),
(out_size, in_size)).astype(numpy.float32)
self.gW = numpy.empty_like(self.W)
if nobias:
self.b = None
self.gb = None
else:
self.b = numpy.repeat(numpy.float32(bias), out_size)
self.gb = numpy.empty_like(self.b)
@property
def parameter_names(self):
if self.b is None:
return 'W',
return 'W', 'b'
@property
def gradient_names(self):
if self.gb is None:
return 'gW',
return 'gW', 'gb'
def forward_cpu(self, x):
x = _as_mat(x[0])
Wx = x.dot(self.W.T)
if self.b is not None:
Wx += self.b
return Wx,
def forward_gpu(self, x):
x = _as_mat(x[0])
y = cuda.empty((x.shape[0], self.W.shape[0]), dtype=x.dtype)
with cuda.using_cumisc():
cuda.culinalg.dot(x, self.W, transb='T', out=y)
if self.b is not None:
cuda.elementwise(
'float* y, float* b, int n_channel',
'y[i] += b[i % n_channel]',
'linear_bias')(y, self.b, self.b.size)
return y,
def backward_cpu(self, x, gy):
_x = _as_mat(x[0])
self.gW += gy[0].T.dot(_x)
if self.gb is not None:
self.gb += gy[0].sum(0)
return gy[0].dot(self.W).reshape(x[0].shape),
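    # For reference, a derivation consistent with the forward definition
    # Y = XW^T + b: dL/dW = gy^T X, dL/db = sum over rows of gy, and
    # dL/dX = gy W -- exactly the quantities accumulated by backward_cpu
    # above and backward_gpu below.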
def backward_gpu(self, x, gy):
_x = _as_mat(x[0])
gx = cuda.empty_like(_x)
with cuda.using_cumisc():
cuda.culinalg.add_dot(gy[0], _x, self.gW, transa='T')
if self.gb is not None:
self.gb += cuda.cumisc.sum(gy[0], 0)
cuda.culinalg.dot(gy[0], self.W, out=gx)
return gx.reshape(x[0].shape),
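if __name__ == '__main__':  # pragma: no cover
    # Illustrative usage sketch added for this document (an assumption, not
    # part of the original module); it exercises only the CPU forward path.
    fc = Linear(in_size=4, out_size=3)
    batch = numpy.random.randn(2, 4).astype(numpy.float32)
    out, = fc.forward_cpu((batch,))
    assert out.shape == (2, 3)  # Y = XW^T + b, broadcast over the minibatch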
|
nushio3/chainer
|
chainer/functions/linear.py
|
Python
|
mit
| 3,511
|
[
"Gaussian"
] |
995b88959edaf602a5a0c1e45b3207d39198bbc50888435685d6d8696a31a383
|
"""
Views for the verification flow
"""
import datetime
import decimal
import json
import logging
import urllib
import analytics
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from django.views.generic.base import View
from edx_rest_api_client.exceptions import SlumberBaseException
from eventtracking import tracker
from ipware.ip import get_ip
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from pytz import UTC
from course_modes.models import CourseMode
from edxmako.shortcuts import render_to_response, render_to_string
from lms.djangoapps.commerce.utils import EcommerceService, is_account_activation_requirement_disabled
from lms.djangoapps.verify_student.image import InvalidImageData, decode_image_data
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification, VerificationDeadline
from lms.djangoapps.verify_student.ssencrypt import has_valid_signature
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.djangoapps.embargo import api as embargo_api
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.user_api.accounts import NAME_MIN_LENGTH
from openedx.core.djangoapps.user_api.accounts.api import update_account_settings
from openedx.core.djangoapps.user_api.errors import AccountValidationError, UserNotFound
from openedx.core.lib.log_utils import audit_log
from shoppingcart.models import CertificateItem, Order
from shoppingcart.processors import get_purchase_endpoint, get_signed_purchase_params
from student.models import CourseEnrollment
from util.db import outer_atomic
from util.json_request import JsonResponse
from xmodule.modulestore.django import modulestore
log = logging.getLogger(__name__)
class PayAndVerifyView(View):
"""
View for the "verify and pay" flow.
This view is somewhat complicated, because the user
can enter it from a number of different places:
* From the "choose your track" page.
* After completing payment.
* From the dashboard in order to complete verification.
* From the dashboard in order to upgrade to a verified track.
The page will display different steps and requirements
depending on:
* Whether the user has submitted a photo verification recently.
* Whether the user has paid for the course.
* How the user reached the page (mostly affects messaging)
We are also super-paranoid about how users reach this page.
If they somehow aren't enrolled, or the course doesn't exist,
or they've unenrolled, or they've already paid/verified,
... then we try to redirect them to the page with the
most appropriate messaging (including the dashboard).
Note that this page does NOT handle re-verification
(photo verification that was denied or had an error);
that is handled by the "reverify" view.
"""
# Step definitions
#
# These represent the numbered steps a user sees in
# the verify / payment flow.
#
# Steps can either be:
# - displayed or hidden
# - complete or incomplete
#
# For example, when a user enters the verification/payment
# flow for the first time, the user will see steps
# for both payment and verification. As the user
# completes these steps (for example, submitting a photo)
# the steps will be marked "complete".
#
# If a user has already verified for another course,
# then the verification steps will be hidden,
# since the user has already completed them.
#
# If a user re-enters the flow from another application
# (for example, after completing payment through
# a third-party payment processor), then the user
# will resume the flow at an intermediate step.
#
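    # For example (an illustrative sketch, not an exhaustive list): a user
    # returning from a third-party payment processor would typically resume
    # at the payment confirmation step, with the payment steps already
    # marked complete.
    #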
INTRO_STEP = 'intro-step'
MAKE_PAYMENT_STEP = 'make-payment-step'
PAYMENT_CONFIRMATION_STEP = 'payment-confirmation-step'
FACE_PHOTO_STEP = 'face-photo-step'
ID_PHOTO_STEP = 'id-photo-step'
REVIEW_PHOTOS_STEP = 'review-photos-step'
ENROLLMENT_CONFIRMATION_STEP = 'enrollment-confirmation-step'
ALL_STEPS = [
INTRO_STEP,
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP,
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
PAYMENT_STEPS = [
MAKE_PAYMENT_STEP,
PAYMENT_CONFIRMATION_STEP
]
VERIFICATION_STEPS = [
FACE_PHOTO_STEP,
ID_PHOTO_STEP,
REVIEW_PHOTOS_STEP,
ENROLLMENT_CONFIRMATION_STEP
]
# These steps can be skipped using the ?skip-first-step GET param
SKIP_STEPS = [
INTRO_STEP,
]
STEP_TITLES = {
INTRO_STEP: ugettext_lazy("Intro"),
MAKE_PAYMENT_STEP: ugettext_lazy("Make payment"),
PAYMENT_CONFIRMATION_STEP: ugettext_lazy("Payment confirmation"),
FACE_PHOTO_STEP: ugettext_lazy("Take photo"),
ID_PHOTO_STEP: ugettext_lazy("Take a photo of your ID"),
REVIEW_PHOTOS_STEP: ugettext_lazy("Review your info"),
ENROLLMENT_CONFIRMATION_STEP: ugettext_lazy("Enrollment confirmation"),
}
# Messages
#
    # Depending on how the user reached the page,
# we will display different text messaging.
# For example, we show users who are upgrading
# slightly different copy than users who are verifying
# for the first time.
#
FIRST_TIME_VERIFY_MSG = 'first-time-verify'
VERIFY_NOW_MSG = 'verify-now'
VERIFY_LATER_MSG = 'verify-later'
UPGRADE_MSG = 'upgrade'
PAYMENT_CONFIRMATION_MSG = 'payment-confirmation'
# Requirements
#
# These explain to the user what he or she
# will need to successfully pay and/or verify.
#
# These are determined by the steps displayed
# to the user; for example, if the user does not
# need to complete the verification steps,
# then the photo ID and webcam requirements are hidden.
#
ACCOUNT_ACTIVATION_REQ = "account-activation-required"
PHOTO_ID_REQ = "photo-id-required"
WEBCAM_REQ = "webcam-required"
STEP_REQUIREMENTS = {
ID_PHOTO_STEP: [PHOTO_ID_REQ, WEBCAM_REQ],
FACE_PHOTO_STEP: [WEBCAM_REQ],
}
# Deadline types
VERIFICATION_DEADLINE = "verification"
UPGRADE_DEADLINE = "upgrade"
def _get_user_active_status(self, user):
"""
Returns the user's active status to the caller
Overrides the actual value if account activation has been disabled via waffle switch
Arguments:
user (User): Current user involved in the onboarding/verification flow
"""
return user.is_active or is_account_activation_requirement_disabled()
@method_decorator(login_required)
def get(
self, request, course_id,
always_show_payment=False,
current_step=None,
message=FIRST_TIME_VERIFY_MSG
):
"""
Render the payment and verification flow.
Arguments:
request (HttpRequest): The request object.
course_id (unicode): The ID of the course the user is trying
to enroll in.
Keyword Arguments:
always_show_payment (bool): If True, show the payment steps
even if the user has already paid. This is useful
for users returning to the flow after paying.
current_step (string): The current step in the flow.
message (string): The messaging to display.
Returns:
HttpResponse
Raises:
Http404: The course does not exist or does not
have a verified mode.
"""
# Parse the course key
# The URL regex should guarantee that the key format is valid.
course_key = CourseKey.from_string(course_id)
course = modulestore().get_course(course_key)
# Verify that the course exists
if course is None:
log.warn(u"Could not find course with ID %s.", course_id)
raise Http404
# Check whether the user has access to this course
# based on country access rules.
redirect_url = embargo_api.redirect_if_blocked(
course_key,
user=request.user,
ip_address=get_ip(request),
url=request.path
)
if redirect_url:
return redirect(redirect_url)
# If the verification deadline has passed
# then show the user a message that he/she can't verify.
#
# We're making the assumptions (enforced in Django admin) that:
#
# 1) Only verified modes have verification deadlines.
#
# 2) If set, verification deadlines are always AFTER upgrade deadlines, because why would you
# let someone upgrade into a verified track if they can't complete verification?
#
verification_deadline = VerificationDeadline.deadline_for_course(course.id)
response = self._response_if_deadline_passed(course, self.VERIFICATION_DEADLINE, verification_deadline)
if response is not None:
log.info(u"Verification deadline for '%s' has passed.", course.id)
return response
# Retrieve the relevant course mode for the payment/verification flow.
#
# WARNING: this is technical debt! A much better way to do this would be to
# separate out the payment flow and use the product SKU to figure out what
# the user is trying to purchase.
#
# Nonetheless, for the time being we continue to make the really ugly assumption
# that at some point there was a paid course mode we can query for the price.
relevant_course_mode = self._get_paid_mode(course_key)
# If we can find a relevant course mode, then log that we're entering the flow
# Otherwise, this course does not support payment/verification, so respond with a 404.
if relevant_course_mode is not None:
if CourseMode.is_verified_mode(relevant_course_mode):
log.info(
u"Entering payment and verification flow for user '%s', course '%s', with current step '%s'.",
request.user.id, course_id, current_step
)
else:
log.info(
u"Entering payment flow for user '%s', course '%s', with current step '%s'",
request.user.id, course_id, current_step
)
else:
# Otherwise, there has never been a verified/paid mode,
# so return a page not found response.
log.warn(
u"No paid/verified course mode found for course '%s' for verification/payment flow request",
course_id
)
raise Http404
# If the user is trying to *pay* and the upgrade deadline has passed,
# then they shouldn't be able to enter the flow.
#
# NOTE: This should match the availability dates used by the E-Commerce service
# to determine whether a user can purchase a product. The idea is that if the service
# won't fulfill the order, we shouldn't even let the user get into the payment flow.
#
user_is_trying_to_pay = message in [self.FIRST_TIME_VERIFY_MSG, self.UPGRADE_MSG]
if user_is_trying_to_pay:
upgrade_deadline = relevant_course_mode.expiration_datetime
response = self._response_if_deadline_passed(course, self.UPGRADE_DEADLINE, upgrade_deadline)
if response is not None:
log.info(u"Upgrade deadline for '%s' has passed.", course.id)
return response
# Check whether the user has verified, paid, and enrolled.
# A user is considered "paid" if he or she has an enrollment
# with a paid course mode (such as "verified").
# For this reason, every paid user is enrolled, but not
# every enrolled user is paid.
        # If the course mode is not verified (i.e. only paid), then already_verified is always True
already_verified = (
self._check_already_verified(request.user)
if CourseMode.is_verified_mode(relevant_course_mode)
else True
)
already_paid, is_enrolled = self._check_enrollment(request.user, course_key)
# Redirect the user to a more appropriate page if the
# messaging won't make sense based on the user's
# enrollment / payment / verification status.
sku_to_use = relevant_course_mode.sku
purchase_workflow = request.GET.get('purchase_workflow', 'single')
if purchase_workflow == 'bulk' and relevant_course_mode.bulk_sku:
sku_to_use = relevant_course_mode.bulk_sku
redirect_response = self._redirect_if_necessary(
message,
already_verified,
already_paid,
is_enrolled,
course_key,
user_is_trying_to_pay,
request.user,
sku_to_use
)
if redirect_response is not None:
return redirect_response
display_steps = self._display_steps(
always_show_payment,
already_verified,
already_paid,
relevant_course_mode
)
# Override the actual value if account activation has been disabled
# Also see the reference to this parameter in context dictionary further down
user_is_active = self._get_user_active_status(request.user)
requirements = self._requirements(display_steps, user_is_active)
if current_step is None:
current_step = display_steps[0]['name']
# Allow the caller to skip the first page
# This is useful if we want the user to be able to
# use the "back" button to return to the previous step.
        # This parameter should only work for known skippable steps
if request.GET.get('skip-first-step') and current_step in self.SKIP_STEPS:
display_step_names = [step['name'] for step in display_steps]
current_step_idx = display_step_names.index(current_step)
if (current_step_idx + 1) < len(display_steps):
current_step = display_steps[current_step_idx + 1]['name']
courseware_url = ""
if not course.start or course.start < datetime.datetime.today().replace(tzinfo=UTC):
courseware_url = reverse(
'course_root',
kwargs={'course_id': unicode(course_key)}
)
full_name = (
request.user.profile.name
if request.user.profile.name
else ""
)
# If the user set a contribution amount on another page,
# use that amount to pre-fill the price selection form.
contribution_amount = request.session.get(
'donation_for_course', {}
).get(unicode(course_key), '')
# Remember whether the user is upgrading
# so we can fire an analytics event upon payment.
request.session['attempting_upgrade'] = (message == self.UPGRADE_MSG)
# Determine the photo verification status
verification_good_until = self._verification_valid_until(request.user)
# get available payment processors
if relevant_course_mode.sku:
# transaction will be conducted via ecommerce service
processors = ecommerce_api_client(request.user).payment.processors.get()
else:
# transaction will be conducted using legacy shopping cart
processors = [settings.CC_PROCESSOR_NAME]
# Render the top-level page
context = {
'contribution_amount': contribution_amount,
'course': course,
'course_key': unicode(course_key),
'checkpoint_location': request.GET.get('checkpoint'),
'course_mode': relevant_course_mode,
'courseware_url': courseware_url,
'current_step': current_step,
'disable_courseware_js': True,
'display_steps': display_steps,
'is_active': json.dumps(user_is_active),
'user_email': request.user.email,
'message_key': message,
'platform_name': configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
'processors': processors,
'requirements': requirements,
'user_full_name': full_name,
'verification_deadline': verification_deadline or "",
'already_verified': already_verified,
'verification_good_until': verification_good_until,
'capture_sound': staticfiles_storage.url("audio/camera_capture.wav"),
'nav_hidden': True,
'is_ab_testing': 'begin-flow' in request.path,
}
return render_to_response("verify_student/pay_and_verify.html", context)
def add_utm_params_to_url(self, url):
# utm_params is [(u'utm_content', u'course-v1:IDBx IDB20.1x 1T2017'),...
utm_params = [item for item in self.request.GET.items() if 'utm_' in item[0]]
# utm_params is utm_content=course-v1%3AIDBx+IDB20.1x+1T2017&...
utm_params = urllib.urlencode(utm_params, True)
# utm_params is utm_content=course-v1:IDBx+IDB20.1x+1T2017&...
# (course-keys do not have url encoding)
utm_params = urllib.unquote(utm_params)
if utm_params:
if '?' in url:
url = url + '&' + utm_params
else:
url = url + '?' + utm_params
return url
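    # A hedged usage sketch for add_utm_params_to_url (hypothetical request):
    #   with request.GET containing utm_content=course-v1:IDBx IDB20.1x 1T2017,
    #   add_utm_params_to_url('/dashboard')
    #   returns '/dashboard?utm_content=course-v1:IDBx+IDB20.1x+1T2017'
    # URLs that already contain a query string get '&' appended instead of '?'.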
def _redirect_if_necessary(
self, message, already_verified, already_paid, is_enrolled, course_key, # pylint: disable=bad-continuation
user_is_trying_to_pay, user, sku # pylint: disable=bad-continuation
):
"""Redirect the user to a more appropriate page if necessary.
In some cases, a user may visit this page with
verification / enrollment / payment state that
we don't anticipate. For example, a user may unenroll
from the course after paying for it, then visit the
"verify now" page to complete verification.
When this happens, we try to redirect the user to
the most appropriate page.
Arguments:
message (string): The messaging of the page. Should be a key
in `MESSAGES`.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
is_enrolled (bool): Whether the user has an active enrollment
in the course.
course_key (CourseKey): The key for the course.
Returns:
HttpResponse or None
"""
url = None
course_kwargs = {'course_id': unicode(course_key)}
if already_verified and already_paid:
# If they've already paid and verified, there's nothing else to do,
# so redirect them to the dashboard.
if message != self.PAYMENT_CONFIRMATION_MSG:
url = reverse('dashboard')
elif message in [self.VERIFY_NOW_MSG, self.VERIFY_LATER_MSG, self.PAYMENT_CONFIRMATION_MSG]:
if is_enrolled:
# If the user is already enrolled but hasn't yet paid,
# then the "upgrade" messaging is more appropriate.
if not already_paid:
url = reverse('verify_student_upgrade_and_verify', kwargs=course_kwargs)
else:
# If the user is NOT enrolled, then send him/her
# to the first time verification page.
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
elif message == self.UPGRADE_MSG:
if is_enrolled:
if already_paid:
# If the student has paid, but not verified, redirect to the verification flow.
url = reverse('verify_student_verify_now', kwargs=course_kwargs)
else:
url = reverse('verify_student_start_flow', kwargs=course_kwargs)
if user_is_trying_to_pay and self._get_user_active_status(user) and not already_paid:
# If the user is trying to pay, has activated their account, and the ecommerce service
        # is enabled, redirect them to the ecommerce checkout page.
ecommerce_service = EcommerceService()
if ecommerce_service.is_enabled(user):
url = ecommerce_service.get_checkout_page_url(sku)
# Redirect if necessary, otherwise implicitly return None
if url is not None:
url = self.add_utm_params_to_url(url)
return redirect(url)
def _get_paid_mode(self, course_key):
"""
Retrieve the paid course mode for a course.
The returned course mode may or may not be expired.
Unexpired modes are preferred to expired modes.
Arguments:
course_key (CourseKey): The location of the course.
Returns:
CourseMode tuple
"""
# Retrieve all the modes at once to reduce the number of database queries
all_modes, unexpired_modes = CourseMode.all_and_unexpired_modes_for_courses([course_key])
# Retrieve the first mode that matches the following criteria:
# * Unexpired
# * Price > 0
# * Not credit
for mode in unexpired_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
# Otherwise, find the first non credit expired paid mode
for mode in all_modes[course_key]:
if mode.min_price > 0 and not CourseMode.is_credit_mode(mode):
return mode
        # Otherwise, return None so the view knows to respond with a 404.
return None
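    # An illustrative sketch of the selection order above (hypothetical modes):
    # given a free 'audit' mode, an expired 'verified' mode (min_price=49), and
    # an unexpired 'credit' mode (min_price=99), the first loop skips the free
    # and credit modes, so the second loop returns the expired 'verified' mode.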
def _display_steps(self, always_show_payment, already_verified, already_paid, course_mode):
"""Determine which steps to display to the user.
Includes all steps by default, but removes steps
if the user has already completed them.
Arguments:
always_show_payment (bool): If True, display the payment steps
even if the user has already paid.
already_verified (bool): Whether the user has submitted
a verification request recently.
already_paid (bool): Whether the user is enrolled in a paid
course mode.
Returns:
list
"""
display_steps = self.ALL_STEPS
remove_steps = set()
if already_verified or not CourseMode.is_verified_mode(course_mode):
remove_steps |= set(self.VERIFICATION_STEPS)
if already_paid and not always_show_payment:
remove_steps |= set(self.PAYMENT_STEPS)
else:
# The "make payment" step doubles as an intro step,
# so if we're showing the payment step, hide the intro step.
remove_steps |= set([self.INTRO_STEP])
return [
{
'name': step,
'title': unicode(self.STEP_TITLES[step]),
}
for step in display_steps
if step not in remove_steps
]
def _requirements(self, display_steps, is_active):
"""Determine which requirements to show the user.
For example, if the user needs to submit a photo
verification, tell the user that she will need
a photo ID and a webcam.
Arguments:
display_steps (list): The steps to display to the user.
is_active (bool): If False, adds a requirement to activate the user account.
Returns:
dict: Keys are requirement names, values are booleans
indicating whether to show the requirement.
"""
all_requirements = {
self.ACCOUNT_ACTIVATION_REQ: not is_active,
self.PHOTO_ID_REQ: False,
self.WEBCAM_REQ: False,
}
# Remove the account activation requirement if disabled via waffle
if is_account_activation_requirement_disabled():
all_requirements.pop(self.ACCOUNT_ACTIVATION_REQ)
display_steps = set(step['name'] for step in display_steps)
for step, step_requirements in self.STEP_REQUIREMENTS.iteritems():
if step in display_steps:
for requirement in step_requirements:
all_requirements[requirement] = True
return all_requirements
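    # An illustrative example (hypothetical input): for an active user whose
    # display steps include the ID photo step, this would return
    #   {'account-activation-required': False,
    #    'photo-id-required': True,
    #    'webcam-required': True}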
def _verification_valid_until(self, user, date_format="%m/%d/%Y"):
"""
        Return the expiration date of the user's valid or pending verification.
        Arguments:
            user (User): The user whose verification to check.
            date_format (str): Optional format string for converting the
                expiration datetime to a string in the response.
        Returns:
            The expiration datetime as a formatted string, or '' if not found.
"""
photo_verifications = SoftwareSecurePhotoVerification.verification_valid_or_pending(user)
        # Return 'expiration_datetime' of the latest photo verification if found,
        # otherwise return ''.
if photo_verifications:
return photo_verifications[0].expiration_datetime.strftime(date_format)
return ''
def _check_already_verified(self, user):
"""Check whether the user has a valid or pending verification.
Note that this includes cases in which the user's verification
has not been accepted (either because it hasn't been processed,
or there was an error).
This should return True if the user has done their part:
submitted photos within the expiration period.
"""
return SoftwareSecurePhotoVerification.user_has_valid_or_pending(user)
def _check_enrollment(self, user, course_key):
"""Check whether the user has an active enrollment and has paid.
If a user is enrolled in a paid course mode, we assume
that the user has paid.
Arguments:
user (User): The user to check.
course_key (CourseKey): The key of the course to check.
Returns:
Tuple `(has_paid, is_active)` indicating whether the user
has paid and whether the user has an active account.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course_key)
has_paid = False
if enrollment_mode is not None and is_active:
all_modes = CourseMode.modes_for_course_dict(course_key, include_expired=True)
course_mode = all_modes.get(enrollment_mode)
has_paid = (course_mode and course_mode.min_price > 0)
return (has_paid, bool(is_active))
def _response_if_deadline_passed(self, course, deadline_name, deadline_datetime):
"""
Respond with some error messaging if the deadline has passed.
Arguments:
course (Course): The course the user is trying to enroll in.
deadline_name (str): One of the deadline constants.
deadline_datetime (datetime): The deadline.
Returns: HttpResponse or None
"""
if deadline_name not in [self.VERIFICATION_DEADLINE, self.UPGRADE_DEADLINE]:
log.error("Invalid deadline name %s. Skipping check for whether the deadline passed.", deadline_name)
return None
deadline_passed = (
deadline_datetime is not None and
deadline_datetime < datetime.datetime.now(UTC)
)
if deadline_passed:
context = {
'course': course,
'deadline_name': deadline_name,
'deadline': deadline_datetime
}
return render_to_response("verify_student/missed_deadline.html", context)
def checkout_with_ecommerce_service(user, course_key, course_mode, processor):
""" Create a new basket and trigger immediate checkout, using the E-Commerce API. """
course_id = unicode(course_key)
try:
api = ecommerce_api_client(user)
# Make an API call to create the order and retrieve the results
result = api.baskets.post({
'products': [{'sku': course_mode.sku}],
'checkout': True,
'payment_processor_name': processor
})
# Pass the payment parameters directly from the API response.
return result.get('payment_data')
except SlumberBaseException:
params = {'username': user.username, 'mode': course_mode.slug, 'course_id': course_id}
log.exception('Failed to create order for %(username)s %(mode)s mode of %(course_id)s', params)
raise
finally:
audit_log(
'checkout_requested',
course_id=course_id,
mode=course_mode.slug,
processor_name=processor,
user_id=user.id
)
def checkout_with_shoppingcart(request, user, course_key, course_mode, amount):
""" Create an order and trigger checkout using shoppingcart."""
cart = Order.get_cart_for_user(user)
cart.clear()
enrollment_mode = course_mode.slug
CertificateItem.add_to_order(cart, course_key, amount, enrollment_mode)
# Change the order's status so that we don't accidentally modify it later.
# We need to do this to ensure that the parameters we send to the payment system
# match what we store in the database.
# (Ordinarily we would do this client-side when the user submits the form, but since
# the JavaScript on this page does that immediately, we make the change here instead.
# This avoids a second AJAX call and some additional complication of the JavaScript.)
# If a user later re-enters the verification / payment flow, she will create a new order.
cart.start_purchase()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
payment_data = {
'payment_processor_name': settings.CC_PROCESSOR_NAME,
'payment_page_url': get_purchase_endpoint(),
'payment_form_data': get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=[unicode(course_key), course_mode.slug]
),
}
return payment_data
@require_POST
@login_required
def create_order(request):
"""
This endpoint is named 'create_order' for backward compatibility, but its
actual use is to add a single product to the user's cart and request
immediate checkout.
"""
course_id = request.POST['course_id']
course_id = CourseKey.from_string(course_id)
donation_for_course = request.session.get('donation_for_course', {})
contribution = request.POST.get("contribution", donation_for_course.get(unicode(course_id), 0))
try:
amount = decimal.Decimal(contribution).quantize(decimal.Decimal('.01'), rounding=decimal.ROUND_DOWN)
except decimal.InvalidOperation:
return HttpResponseBadRequest(_("Selected price is not valid number."))
current_mode = None
sku = request.POST.get('sku', None)
if sku:
try:
current_mode = CourseMode.objects.get(sku=sku)
except CourseMode.DoesNotExist:
log.exception(u'Failed to find CourseMode with SKU [%s].', sku)
if not current_mode:
        # If more than one paid mode (min_price > 0, e.g. verified/professional/
        # no-id-professional) exists for the course, choose the first one.
paid_modes = CourseMode.paid_modes_for_course(course_id)
if paid_modes:
if len(paid_modes) > 1:
log.warn(u"Multiple paid course modes found for course '%s' for create order request", course_id)
current_mode = paid_modes[0]
# Make sure this course has a paid mode
if not current_mode:
log.warn(u"Create order requested for course '%s' without a paid mode.", course_id)
return HttpResponseBadRequest(_("This course doesn't support paid certificates"))
if CourseMode.is_professional_mode(current_mode):
amount = current_mode.min_price
if amount < current_mode.min_price:
return HttpResponseBadRequest(_("No selected price or selected price is below minimum."))
if current_mode.sku:
# if request.POST doesn't contain 'processor' then the service's default payment processor will be used.
payment_data = checkout_with_ecommerce_service(
request.user,
course_id,
current_mode,
request.POST.get('processor')
)
else:
payment_data = checkout_with_shoppingcart(request, request.user, course_id, current_mode, amount)
if 'processor' not in request.POST:
# (XCOM-214) To be removed after release.
# the absence of this key in the POST payload indicates that the request was initiated from
# a stale js client, which expects a response containing only the 'payment_form_data' part of
# the payment data result.
payment_data = payment_data['payment_form_data']
return HttpResponse(json.dumps(payment_data), content_type="application/json")
class SubmitPhotosView(View):
"""
End-point for submitting photos for verification.
"""
@method_decorator(transaction.non_atomic_requests)
def dispatch(self, *args, **kwargs): # pylint: disable=missing-docstring
return super(SubmitPhotosView, self).dispatch(*args, **kwargs)
@method_decorator(login_required)
@method_decorator(outer_atomic(read_committed=True))
def post(self, request):
"""
Submit photos for verification.
This end-point is used for the following cases:
* Initial verification through the pay-and-verify flow.
* Initial verification initiated from a checkpoint within a course.
* Re-verification initiated from a checkpoint within a course.
POST Parameters:
face_image (str): base64-encoded image data of the user's face.
photo_id_image (str): base64-encoded image data of the user's photo ID.
full_name (str): The user's full name, if the user is requesting a name change as well.
course_key (str): Identifier for the course, if initiated from a checkpoint.
checkpoint (str): Location of the checkpoint in the course.
"""
# If the user already has an initial verification attempt, we can re-use the photo ID
# the user submitted with the initial attempt.
initial_verification = SoftwareSecurePhotoVerification.get_initial_verification(request.user)
# Validate the POST parameters
params, response = self._validate_parameters(request, bool(initial_verification))
if response is not None:
return response
# If necessary, update the user's full name
if "full_name" in params:
response = self._update_full_name(request.user, params["full_name"])
if response is not None:
return response
# Retrieve the image data
# Validation ensures that we'll have a face image, but we may not have
# a photo ID image if this is a reverification.
face_image, photo_id_image, response = self._decode_image_data(
params["face_image"], params.get("photo_id_image")
)
        # If we have a photo ID, we do not want to use the initial verification image.
if photo_id_image is not None:
initial_verification = None
if response is not None:
return response
# Submit the attempt
attempt = self._submit_attempt(request.user, face_image, photo_id_image, initial_verification)
self._fire_event(request.user, "edx.bi.verify.submitted", {"category": "verification"})
self._send_confirmation_email(request.user)
return JsonResponse({})
def _validate_parameters(self, request, has_initial_verification):
"""
Check that the POST parameters are valid.
Arguments:
request (HttpRequest): The request object.
has_initial_verification (bool): Whether the user has an initial verification attempt.
Returns:
HttpResponse or None
"""
# Pull out the parameters we care about.
params = {
param_name: request.POST[param_name]
for param_name in [
"face_image",
"photo_id_image",
"course_key",
"full_name"
]
if param_name in request.POST
}
# If the user already has an initial verification attempt, then we don't
# require the user to submit a photo ID image, since we can re-use the photo ID
# image from the initial attempt.
# If we don't have an initial verification OR a photo ID image, something has gone
# terribly wrong in the JavaScript. Log this as an error so we can track it down.
if "photo_id_image" not in params and not has_initial_verification:
log.error(
(
"User %s does not have an initial verification attempt "
"and no photo ID image data was provided. "
"This most likely means that the JavaScript client is not "
"correctly constructing the request to submit photos."
), request.user.id
)
return None, HttpResponseBadRequest(
_("Photo ID image is required if the user does not have an initial verification attempt.")
)
# The face image is always required.
if "face_image" not in params:
msg = _("Missing required parameter face_image")
return None, HttpResponseBadRequest(msg)
# If provided, parse the course key and checkpoint location
if "course_key" in params:
try:
params["course_key"] = CourseKey.from_string(params["course_key"])
except InvalidKeyError:
return None, HttpResponseBadRequest(_("Invalid course key"))
return params, None
def _update_full_name(self, user, full_name):
"""
Update the user's full name.
Arguments:
user (User): The user to update.
full_name (unicode): The user's updated full name.
Returns:
HttpResponse or None
"""
try:
update_account_settings(user, {"name": full_name})
except UserNotFound:
return HttpResponseBadRequest(_("No profile found for user"))
except AccountValidationError:
msg = _(
"Name must be at least {min_length} characters long."
).format(min_length=NAME_MIN_LENGTH)
return HttpResponseBadRequest(msg)
def _decode_image_data(self, face_data, photo_id_data=None):
"""
Decode image data sent with the request.
Arguments:
face_data (str): base64-encoded face image data.
Keyword Arguments:
photo_id_data (str): base64-encoded photo ID image data.
Returns:
tuple of (str, str, HttpResponse)
"""
try:
# Decode face image data (used for both an initial and re-verification)
face_image = decode_image_data(face_data)
# Decode the photo ID image data if it's provided
photo_id_image = (
decode_image_data(photo_id_data)
if photo_id_data is not None else None
)
return face_image, photo_id_image, None
except InvalidImageData:
msg = _("Image data is not valid.")
return None, None, HttpResponseBadRequest(msg)
def _submit_attempt(self, user, face_image, photo_id_image=None, initial_verification=None):
"""
Submit a verification attempt.
Arguments:
user (User): The user making the attempt.
face_image (str): Decoded face image data.
Keyword Arguments:
photo_id_image (str or None): Decoded photo ID image data.
initial_verification (SoftwareSecurePhotoVerification): The initial verification attempt.
"""
attempt = SoftwareSecurePhotoVerification(user=user)
# We will always have face image data, so upload the face image
attempt.upload_face_image(face_image)
# If an ID photo wasn't submitted, re-use the ID photo from the initial attempt.
# Earlier validation rules ensure that at least one of these is available.
if photo_id_image is not None:
attempt.upload_photo_id_image(photo_id_image)
elif initial_verification is None:
# Earlier validation should ensure that we never get here.
log.error(
"Neither a photo ID image or initial verification attempt provided. "
"Parameter validation in the view should prevent this from happening!"
)
# Submit the attempt
attempt.mark_ready()
attempt.submit(copy_id_photo_from=initial_verification)
return attempt
def _send_confirmation_email(self, user):
"""
Send an email confirming that the user submitted photos
for initial verification.
"""
context = {
'full_name': user.profile.name,
'platform_name': configuration_helpers.get_value("PLATFORM_NAME", settings.PLATFORM_NAME)
}
subject = _("Verification photos received")
message = render_to_string('emails/photo_submission_confirmation.txt', context)
from_address = configuration_helpers.get_value('email_from_address', settings.DEFAULT_FROM_EMAIL)
to_address = user.email
try:
send_mail(subject, message, from_address, [to_address], fail_silently=False)
except: # pylint: disable=bare-except
# We catch all exceptions and log them.
# It would be much, much worse to roll back the transaction due to an uncaught
# exception than to skip sending the notification email.
log.exception("Could not send notification email for initial verification for user %s", user.id)
def _fire_event(self, user, event_name, parameters):
"""
Fire an analytics event.
Arguments:
user (User): The user who submitted photos.
event_name (str): Name of the analytics event.
parameters (dict): Event parameters.
Returns: None
"""
if settings.LMS_SEGMENT_KEY:
tracking_context = tracker.get_tracker().resolve_context()
context = {
'ip': tracking_context.get('ip'),
'Google Analytics': {
'clientId': tracking_context.get('client_id')
}
}
analytics.track(user.id, event_name, parameters, context=context)
@require_POST
@csrf_exempt # SS does its own message signing, and their API won't have a cookie value
def results_callback(request):
"""
Software Secure will call this callback to tell us whether a user is
verified to be who they said they are.
"""
body = request.body
try:
body_dict = json.loads(body)
except ValueError:
log.exception("Invalid JSON received from Software Secure:\n\n{}\n".format(body))
return HttpResponseBadRequest("Invalid JSON. Received:\n\n{}".format(body))
if not isinstance(body_dict, dict):
log.error("Reply from Software Secure is not a dict:\n\n{}\n".format(body))
return HttpResponseBadRequest("JSON should be dict. Received:\n\n{}".format(body))
headers = {
"Authorization": request.META.get("HTTP_AUTHORIZATION", ""),
"Date": request.META.get("HTTP_DATE", "")
}
has_valid_signature(
"POST",
headers,
body_dict,
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
)
_response, access_key_and_sig = headers["Authorization"].split(" ")
access_key = access_key_and_sig.split(":")[0]
# This is what we should be doing...
#if not sig_valid:
# return HttpResponseBadRequest("Signature is invalid")
# This is what we're doing until we can figure out why we disagree on sigs
if access_key != settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]:
return HttpResponseBadRequest("Access key invalid")
receipt_id = body_dict.get("EdX-ID")
result = body_dict.get("Result")
reason = body_dict.get("Reason", "")
error_code = body_dict.get("MessageType", "")
try:
attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=receipt_id)
except SoftwareSecurePhotoVerification.DoesNotExist:
log.error("Software Secure posted back for receipt_id %s, but not found", receipt_id)
return HttpResponseBadRequest("edX ID {} not found".format(receipt_id))
if result == "PASS":
log.debug("Approving verification for %s", receipt_id)
attempt.approve()
status = "approved"
elif result == "FAIL":
log.debug("Denying verification for %s", receipt_id)
attempt.deny(json.dumps(reason), error_code=error_code)
status = "denied"
elif result == "SYSTEM FAIL":
log.debug("System failure for %s -- resetting to must_retry", receipt_id)
attempt.system_error(json.dumps(reason), error_code=error_code)
status = "error"
log.error("Software Secure callback attempt for %s failed: %s", receipt_id, reason)
else:
log.error("Software Secure returned unknown result %s", result)
return HttpResponseBadRequest(
"Result {} not understood. Known results: PASS, FAIL, SYSTEM FAIL".format(result)
)
return HttpResponse("OK!")
class ReverifyView(View):
"""
Reverification occurs when a user's initial verification is denied
or expires. When this happens, users can re-submit photos through
the re-verification flow.
Unlike in-course reverification, this flow requires users to submit
*both* face and ID photos. In contrast, during in-course reverification,
students submit only face photos, which are matched against the ID photo
the user submitted during initial verification.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render the reverification flow.
Most of the work is done client-side by composing the same
Backbone views used in the initial verification flow.
"""
status, __ = SoftwareSecurePhotoVerification.user_status(request.user)
expiration_datetime = SoftwareSecurePhotoVerification.get_expiration_datetime(request.user)
can_reverify = False
if expiration_datetime:
if SoftwareSecurePhotoVerification.is_verification_expiring_soon(expiration_datetime):
# The user has an active verification, but the verification
# is set to expire within "EXPIRING_SOON_WINDOW" days (default is 4 weeks).
# In this case user can resubmit photos for reverification.
can_reverify = True
        # If the user has no initial verification, or if the verification
        # process is still ongoing ('pending') or has expired, then allow
        # the user to submit the photo verification.
# A photo verification is marked as 'pending' if its status is either
# 'submitted' or 'must_retry'.
if status in ["none", "must_reverify", "expired", "pending"] or can_reverify:
context = {
"user_full_name": request.user.profile.name,
"platform_name": configuration_helpers.get_value('PLATFORM_NAME', settings.PLATFORM_NAME),
"capture_sound": staticfiles_storage.url("audio/camera_capture.wav"),
}
return render_to_response("verify_student/reverify.html", context)
else:
context = {
"status": status
}
return render_to_response("verify_student/reverify_not_allowed.html", context)
|
lduarte1991/edx-platform
|
lms/djangoapps/verify_student/views.py
|
Python
|
agpl-3.0
| 49,123
|
[
"VisIt"
] |
61289f5d3275bb32549104e51f337e6c6d5d484cb15323f1c0797f87b2b8fc23
|
from polychartQuery.expr import ExprTreeVisitor
QUOTE = "'" # note: double quote does not work in postgres!
def escape(str): return str # TODO: implement
def quote(str): return QUOTE+escape(str)+QUOTE
def unquote(str):
    if len(str) >= 2 and str[0] == QUOTE and str[-1] == QUOTE:
        return str[1:-1]
return str
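# Illustrative round trip (quote/unquote are inverses for plain strings):
#   quote("direct")     -> "'direct'"
#   unquote("'direct'") -> "direct"
# Strings without surrounding quotes pass through unquote unchanged.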
class ExprToGA(ExprTreeVisitor):
def __init__(self):
self.fns = {
}
self.binfns = {
'hour': 'date,ga:hour'
, 'day': 'date'
, 'week': 'date,ga:week'
, 'month': 'month,ga:year'
, 'twomonth': 'month,ga:year'
, 'quarter': 'month,ga:year'
, 'sixmonth': 'month,ga:year'
, 'year': 'year'
, 'twoyear': 'year'
, 'fiveyear': 'year'
, 'decade': 'year'
}
def ident(self, name): return name
def const(self, type, value):
if type == 'num':
return value
else:
return quote(value)
def infixop(self, opname, lhs, rhs):
raise Exception("Unsupported operation %s" % opname)
def conditional(self, cond, conseq, altern):
raise Exception("Unsupported operation: conditionals.")
def call(self, fname, args):
if fname == 'bin':
return self.fn_bin(args)
return args[0]
def fn_bin(self, args):
key, bw = args
bw = unquote(bw)
if bw in self.binfns:
return self.binfns[bw]
else:
return key
exprToGAInstance = ExprToGA()
def exprToGA(expr):
str = exprToGAInstance.visit(expr)
if str == 'COUNT(1)':
return 'COUNT()' # which one is faster?
return str
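# A hedged usage sketch (assumes `expr` is an expression tree for
# bin(date, 'month'), as built elsewhere by the polychartQuery parser):
#
#   exprToGA(expr)  # -> 'month,ga:year', looked up in ExprToGA.binfns
#
# Bin widths missing from binfns fall back to the bare key name.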
|
Polychart/builder
|
server/polychartQuery/googleAnalytics/expr.py
|
Python
|
agpl-3.0
| 1,518
|
[
"VisIt"
] |
7228f3ba3720ada3a6a51b82f5ab6f393887dedf6ee9b6e4eed17bfcf44fddcd
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Licence: BSD 3 clause
from ..externals import six
from . import _tree
def export_graphviz(decision_tree, out_file="tree.dot", feature_names=None,
max_depth=None):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
feature_names : list of strings, optional (default=None)
Names of each of the features.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def node_to_str(tree, node_id, criterion):
if not isinstance(criterion, six.string_types):
criterion = "impurity"
value = tree.value[node_id]
if tree.n_outputs == 1:
value = value[0, :]
if tree.children_left[node_id] == _tree.TREE_LEAF:
return "%s = %.4f\\nsamples = %s\\nvalue = %s" \
% (criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id],
value)
else:
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X[%s]" % tree.feature[node_id]
return "%s <= %.4f\\n%s = %s\\nsamples = %s" \
% (feature,
tree.threshold[node_id],
criterion,
tree.impurity[node_id],
tree.n_node_samples[node_id])
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
out_file.write('%d [label="%s", shape="box"] ;\n' %
(node_id, node_to_str(tree, node_id, criterion)))
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
out_file.write('%d [label="(...)", shape="box"] ;\n' % node_id)
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
out_file.write("digraph Tree {\n")
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
out_file.write("}")
finally:
if own_file:
out_file.close()
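# A minimal sketch of the DOT emitted for a single-split tree (node ids,
# thresholds, and counts below are hypothetical):
#
#   digraph Tree {
#   0 [label="X[0] <= 0.5000\ngini = 0.5\nsamples = 4", shape="box"] ;
#   1 [label="gini = 0.0000\nsamples = 2\nvalue = [ 2.  0.]", shape="box"] ;
#   0 -> 1 ;
#   2 [label="gini = 0.0000\nsamples = 2\nvalue = [ 0.  2.]", shape="box"] ;
#   0 -> 2 ;
#   }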
|
soulmachine/scikit-learn
|
sklearn/tree/export.py
|
Python
|
bsd-3-clause
| 4,427
|
[
"Brian"
] |
85e38714bf5c7d7607601f4912eb0b76047194e092b0fb31208d546a277e2ddf
|
#! PsiAPI pubchem access
import psi4
from psi4.driver.constants.physconst import hartree2ev
psi4.set_output_file("output.dat", False)
benz = psi4.geometry("""
pubchem:benzene
""")
psi4.set_options({"REFERENCE" : "RHF",
"MAX_ENERGY_G_CONVERGENCE" : 8,
"BASIS" : "STO-3G",
"DF_BASIS_SCF" : "CC-PVDZ-RI"})
psi4.optimize('scf')
psi4.set_options({"REFERENCE" : "RHF",
"BASIS" : "CC-PVDZ",
"DF_BASIS_SCF" : "CC-PVDZ-JKFIT"})
e_sing_rhf = psi4.energy('scf')
benz.set_multiplicity(3)
psi4.set_options({"REFERENCE" : "ROHF"})
e_trip_rohf = psi4.energy('scf')
psi4.set_options({"REFERENCE" : "UHF"})
e_trip_uhf = psi4.energy('scf')
vertical_uhf = hartree2ev * (e_trip_uhf - e_sing_rhf)
vertical_rohf = hartree2ev * (e_trip_rohf - e_sing_rhf)
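# Sanity check on the conversion above (illustrative): hartree2ev is roughly
# 27.2114 eV per hartree, so a gap of 0.1 hartree prints as about 2.72 eV.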
psi4.core.print_out("\nSinglet-Triplet gap (vertical, UHF) = %8.2f eV\n" % vertical_uhf)
psi4.core.print_out("\nSinglet-Triplet gap (vertical, ROHF) = %8.2f eV\n" % vertical_rohf)
enuc = 204.531600152395043 #TEST
erhf = -230.72190557842444 #TEST
psi4.compare_values(enuc, benz.nuclear_repulsion_energy(), 3, "Nuclear repulsion energy") #TEST
psi4.compare_values(erhf, e_sing_rhf, 6, "Singlet benzene RHF energy") #TEST
|
amjames/psi4
|
tests/python/pubchem/input.py
|
Python
|
lgpl-3.0
| 1,465
|
[
"Psi4"
] |
af74b3aeffbe86f16a149741cdba5db2235597ef51d45723076449a73c6ba630
|
"""
Test the about xblock
"""
import datetime
import pytz
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from milestones.tests.utils import MilestonesTestCaseMixin
from mock import patch
from nose.plugins.attrib import attr
from six import text_type
from course_modes.models import CourseMode
from lms.djangoapps.ccx.tests.factories import CcxFactory
from shoppingcart.models import Order, PaidCourseRegistration
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory, CourseEnrollmentAllowedFactory, UserFactory
from track.tests import EventTrackingTestCase
from util.milestones_helpers import get_prerequisite_courses_display, set_prerequisite_courses
from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_MODULESTORE,
TEST_DATA_SPLIT_MODULESTORE,
ModuleStoreTestCase,
SharedModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.utils import TEST_DATA_DIR
from xmodule.modulestore.xml_importer import import_course_from_xml
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@attr(shard=1)
class AboutTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase, EventTrackingTestCase, MilestonesTestCaseMixin):
"""
Tests about xblock.
"""
@classmethod
def setUpClass(cls):
super(AboutTestCase, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
cls.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
cls.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_without_about.location,
data="WITHOUT ABOUT", display_name="overview"
)
cls.about = ItemFactory.create(
category="about", parent_location=cls.course_with_about.location,
data="WITH ABOUT", display_name="overview"
)
def setUp(self):
super(AboutTestCase, self).setUp()
self.course_mode = CourseMode(
course_id=self.purchase_course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10
)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
when he/she visits the course about page, but is already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[text_type(self.course_with_about.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("WITH ABOUT", resp.content)
url = reverse('about_course', args=[text_type(self.course_without_about.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 404)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
# should be redirected
self.assertEqual(resp.status_code, 302)
# follow this time, and check we're redirected to the course home page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
course_home_url = reverse('openedx.course_experience.course_home', args=[text_type(self.course.id)])
self.assertTrue(target_url.endswith(course_home_url))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_pre_requisite_course(self):
pre_requisite_course = CourseFactory.create(org='edX', course='900', display_name='pre requisite course')
course = CourseFactory.create(pre_requisite_courses=[text_type(pre_requisite_course.id)])
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[text_type(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
@patch.dict(settings.FEATURES, {'ENABLE_PREREQUISITE_COURSES': True})
def test_about_page_unfulfilled_prereqs(self):
pre_requisite_course = CourseFactory.create(
org='edX',
course='901',
display_name='pre requisite course',
)
pre_requisite_courses = [text_type(pre_requisite_course.id)]
# for this failure to occur, the enrollment window needs to be in the past
course = CourseFactory.create(
org='edX',
course='1000',
# closed enrollment
enrollment_start=datetime.datetime(2013, 1, 1),
enrollment_end=datetime.datetime(2014, 1, 1),
start=datetime.datetime(2013, 1, 1),
end=datetime.datetime(2030, 1, 1),
pre_requisite_courses=pre_requisite_courses,
)
set_prerequisite_courses(course.id, pre_requisite_courses)
self.setup_user()
self.enroll(self.course, True)
self.enroll(pre_requisite_course, True)
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
pre_requisite_courses = get_prerequisite_courses_display(course)
pre_requisite_course_about_url = reverse('about_course', args=[text_type(pre_requisite_courses[0]['key'])])
self.assertIn("<span class=\"important-dates-item-text pre-requisite\"><a href=\"{}\">{}</a></span>"
.format(pre_requisite_course_about_url, pre_requisite_courses[0]['display']),
resp.content.strip('\n'))
        url = reverse('about_course', args=[text_type(pre_requisite_course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
@attr(shard=1)
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
MODULESTORE = TEST_DATA_MIXED_MODULESTORE
def setUp(self):
"""
Set up the tests
"""
super(AboutTestCaseXML, self).setUp()
# The following test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
self.xml_course_id = self.store.make_course_key('edX', 'detached_pages', '2014')
import_course_from_xml(
self.store,
'test_user',
TEST_DATA_DIR,
source_dirs=['2014'],
static_content_store=None,
target_id=self.xml_course_id,
raise_on_failure=True,
create_if_not_present=True,
)
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
self.xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[text_type(self.xml_course_id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[text_type(self.xml_course_id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@attr(shard=1)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
@classmethod
def setUpClass(cls):
super(AboutWithCappedEnrollmentsTestCase, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('<a href="#" class="register">', resp.content)
self.enroll(self.course, verify=True)
# create a new account since the first account is already enrolled in the course
self.email = 'foo_second@test.com'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username, self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
# Try to enroll as well
result = self.enroll(self.course)
self.assertFalse(result)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithInvitationOnly(SharedModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
@classmethod
def setUpClass(cls):
super(AboutWithInvitationOnly, cls).setUpClass()
cls.course = CourseFactory.create(metadata={"invitation_only": True})
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
display_name="overview"
)
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='allowed_student@test.com')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True})
class AboutTestCaseShibCourse(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses")
"""
@classmethod
def setUpClass(cls):
super(AboutTestCaseShibCourse, cls).setUpClass()
cls.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/")
cls.about = ItemFactory.create(
category="about", parent_location=cls.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_logged_in_shib_course(self):
"""
For shib courses, logged in users will see the enroll button, but get rejected once they click there
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
def test_anonymous_user_shib_course(self):
"""
For shib courses, anonymous users will also see the enroll button
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Enroll in {}".format(self.course.id.course), resp.content.decode('utf-8'))
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
@attr(shard=1)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super(AboutWithClosedEnrollment, self).setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
        # Set up the enrollment period to be in the future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
    def test_closed_enrollment(self):
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
    def test_course_price_is_not_visible_in_sidebar(self):
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
        # course price is not visible on the course_about page when the course
        # mode is not set to honor
self.assertNotIn('<span class="important-dates-item-text">$10</span>', resp.content)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, SharedModuleStoreTestCase):
"""
This test class runs through a suite of verifications regarding
    purchasable courses
"""
@classmethod
def setUpClass(cls):
super(AboutPurchaseCourseTestCase, cls).setUpClass()
cls.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
cls.closed_course = CourseFactory.create(
org='MITx',
number='closed',
display_name='Closed Course To Buy',
enrollment_start=tomorrow,
enrollment_end=nextday
)
def setUp(self):
super(AboutPurchaseCourseTestCase, self).setUp()
self._set_ecomm(self.course)
self._set_ecomm(self.closed_course)
def _set_ecomm(self, course):
"""
Helper method to turn on ecommerce on the course
"""
course_mode = CourseMode(
course_id=course.id,
mode_slug=CourseMode.DEFAULT_MODE_SLUG,
mode_display_name=CourseMode.DEFAULT_MODE_SLUG,
min_price=10,
)
course_mode.save()
def test_anonymous_user(self):
"""
Make sure an anonymous user sees the purchase button
"""
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_logged_in(self):
"""
Make sure a logged in user sees the purchase button
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_in_cart(self):
"""
        This makes sure that if a user has this course in their cart, the
        expected message appears
"""
self.setup_user()
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, self.course.id)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("This course is in your", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_already_enrolled(self):
"""
This makes sure that the already enrolled message appears for paywalled courses
"""
self.setup_user()
# note that we can't call self.enroll here since that goes through
        # the Django student views, which don't allow enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, self.course.id)
url = reverse('about_course', args=[text_type(self.course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are enrolled in this course", resp.content)
self.assertIn("View Course", resp.content)
self.assertNotIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
def test_closed_enrollment(self):
"""
This makes sure that paywalled courses also honor the registration
window
"""
self.setup_user()
url = reverse('about_course', args=[text_type(self.closed_course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
self.assertNotIn("Add closed to Cart <span>($10 USD)</span>", resp.content)
        # course price is visible on the course_about page when the course
        # mode is set to honor and its price is set
self.assertIn('<span class="important-dates-item-text">$10</span>', resp.content)
def test_invitation_only(self):
"""
        This makes sure that the invitation-only restriction takes precedence
        over any purchase enablements
"""
course = CourseFactory.create(metadata={"invitation_only": True})
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
def test_enrollment_cap(self):
"""
Make sure that capped enrollments work even with
paywalled courses
"""
course = CourseFactory.create(
metadata={
"max_student_enrollments_allowed": 1,
"display_coursenumber": "buyme",
}
)
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart <span>($10 USD)</span>", resp.content)
# note that we can't call self.enroll here since that goes through
        # the Django student views, which don't allow enrollments
# for paywalled courses
CourseEnrollment.enroll(self.user, course.id)
# create a new account since the first account is already enrolled in the course
email = 'foo_second@test.com'
password = 'bar'
username = 'test_second'
        self.create_account(username, email, password)
self.activate_user(email)
self.login(email, password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_free_course_display(self):
"""
Make sure other courses that don't have shopping cart enabled don't display the add-to-cart button
and don't display the course_price field if Cosmetic Price is disabled.
"""
course = CourseFactory.create(org='MITx', number='free', display_name='Course For Free')
self.setup_user()
url = reverse('about_course', args=[text_type(course.id)])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("Add free to Cart (Free)", resp.content)
self.assertNotIn('<p class="important-dates-item-title">Price</p>', resp.content)
class CourseAboutTestCaseCCX(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test that an unenrolled student cannot access a CCX.
    Note: Only a CCX coach can enroll a student in a CCX; self-registration is not allowed.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(CourseAboutTestCaseCCX, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(CourseAboutTestCaseCCX, self).setUp()
# Create ccx coach account
self.coach = coach = AdminFactory.create(password="test")
self.client.login(username=coach.username, password="test")
def test_redirect_to_dashboard_unenrolled_ccx(self):
"""
        Assert that when an unenrolled user tries to access a CCX, the user is
        not allowed to self-register and is redirected to the student dashboard.
"""
# create ccx
ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
ccx_locator = CCXLocator.from_course_locator(self.course.id, unicode(ccx.id))
self.setup_user()
url = reverse('openedx.course_experience.course_home', args=[ccx_locator])
response = self.client.get(url)
expected = reverse('dashboard')
self.assertRedirects(response, expected, status_code=302, target_status_code=200)
|
procangroup/edx-platform
|
lms/djangoapps/courseware/tests/test_about.py
|
Python
|
agpl-3.0
| 25,700
|
[
"VisIt"
] |
b4f4459bd43e1ce6909195a752b45731e3c0d782548cc969a63a340b1431b53e
|
"""
Recurrent layers.
TODO: write more documentation
"""
__docformat__ = 'restructuredtext en'
__authors__ = ("Razvan Pascanu "
"KyungHyun Cho "
"Caglar Gulcehre ")
__contact__ = "Razvan Pascanu <r.pascanu@gmail>"
import numpy
import copy
import theano
import theano.tensor as TT
# Nicer interface of scan
from theano.sandbox.scan import scan
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from groundhog import utils
from groundhog.utils import sample_weights, \
sample_weights_classic,\
sample_weights_orth, \
init_bias, \
constant_shape, \
sample_zeros
from basic import Layer
class RecurrentMultiLayer(Layer):
"""
Constructs a recurrent layer whose transition from h_tm1 to h_t is given
by an MLP or logistic regression. In our ICLR submission this is a
DT-RNN model.
"""
def __init__(self,
rng,
n_hids=[500,500],
activation = [TT.tanh, TT.tanh],
scale=.01,
sparsity = -1,
activ_noise=0.,
weight_noise=False,
dropout = 1.,
init_fn='sample_weights',
bias_fn='init_bias',
bias_scale = 0.,
grad_scale = 1.,
profile = 0,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: list of ints
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(n_hids) not in (list, tuple):
n_hids = [n_hids]
n_layers = len(n_hids)
if type(scale) not in (list, tuple):
scale = [scale] * n_layers
if type(sparsity) not in (list, tuple):
sparsity = [sparsity] * n_layers
for idx, sp in enumerate(sparsity):
if sp < 0: sparsity[idx] = n_hids[idx]
if type(activation) not in (list, tuple):
activation = [activation] * n_layers
if type(bias_scale) not in (list, tuple):
bias_scale = [bias_scale] * (n_layers-1)
if type(bias_fn) not in (list, tuple):
bias_fn = [bias_fn] * (n_layers-1)
if type(init_fn) not in (list, tuple):
init_fn = [init_fn] * n_layers
for dx in xrange(n_layers):
if dx < n_layers-1:
if type(bias_fn[dx]) is str or type(bias_fn[dx]) is unicode:
bias_fn[dx] = eval(bias_fn[dx])
if type(init_fn[dx]) is str or type(init_fn[dx]) is unicode:
init_fn[dx] = eval(init_fn[dx])
if type(activation[dx]) is str or type(activation[dx]) is unicode:
activation[dx] = eval(activation[dx])
self.scale = scale
self.n_layers = n_layers
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(RecurrentMultiLayer, self).__init__(n_hids[0],
n_hids[-1],
rng,
name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
init_state=None,
use_noise=True,
no_noise_bias=False):
"""
Constructs the computational graph of a single step of the recurrent
layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
                b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
        preactiv = TT.dot(state_before, W_hhs[0]) + state_below
h = self.activation[0](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
        rval += [h]
for dx in xrange(1, self.n_layers):
preactiv = TT.dot(h, W_hhs[dx]) + b_hhs[dx-1]
h = self.activation[dx](preactiv)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
def fprop(self,
state_below,
mask=None,
init_state=None,
n_steps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False):
"""
        Evaluates the forward pass through a recurrent layer
:type state_below: theano variable
:param state_below: the input of the recurrent layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type init_state: theano variable or None
:param init_state: initial state for the hidden layer
:type n_steps: None or int or theano scalar
        :param n_steps: Number of steps the recurrent network runs
:type batch_size: int
:param batch_size: the size of the minibatch over which scan runs
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type truncate_gradient: int
:param truncate_gradient: If negative, no truncation is used,
            otherwise truncated BPTT is used, going backwards only this
            many steps
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if n_steps is None:
n_steps = state_below.shape[0]
if batch_size and batch_size != 1:
n_steps = n_steps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((n_steps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids[0])
else:
init_state = TT.alloc(floatX(0), self.n_hids[0])
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,None, z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
if self.dropout < 1. and use_noise:
# build dropout mask outside scan
allhid = numpy.sum(self.n_hids)
shape = state_below.shape
if state_below.ndim == 3:
alldpmask = self.trng.binomial(
(n_steps, batch_size, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
else:
alldpmask = self.trng.binomial(
(n_steps, allhid),
n = 1, p = self.dropout, dtype=state_below.dtype)
inps.append(alldpmask)
if mask:
fn = lambda x,y,z,u : self.step_fprop(x,y,z,u,use_noise=use_noise)
else:
fn = lambda tx, ty, tu: self.step_fprop(tx,None,ty,tu,
use_noise=use_noise)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [None]*(self.n_layers-1) +
[init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = n_steps)
if not isinstance(rval,(list, tuple)):
rval = [rval]
new_h = rval[-1]
self.out = rval[-1]
self.rval = rval
        self.updates = updates
return self.out
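# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module): a minimal,
# hypothetical example of wiring up a RecurrentMultiLayer, assuming a working
# Theano/GroundHog environment. As elsewhere in GroundHog, `state_below` is
# the pre-computed input projection of dimensionality n_hids[0]; with
# batch_size > 1 it is passed flattened as (n_steps * batch_size, n_hids[0])
# and reshaped inside fprop. Wrapped in a function so importing this module
# does not trigger graph construction.
def _demo_recurrent_multilayer():
    rng = numpy.random.RandomState(123)
    layer = RecurrentMultiLayer(rng,
                                n_hids=[8, 8],
                                activation=[TT.tanh, TT.tanh],
                                name='demo_dtrnn')
    state_below = TT.matrix('state_below')  # (n_steps * batch, n_hids[0])
    h = layer.fprop(state_below, batch_size=4, use_noise=False)
    # h holds the (n_steps, batch, n_hids[-1]) sequence of top-level states
    return theano.function([state_below], h, updates=layer.updates)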
class RecurrentMultiLayerInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayer, with the exception that the input is
fed into the top layer of the MLP (rather than being an input to the
MLP).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
            W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) + state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
class RecurrentMultiLayerShortPath(RecurrentMultiLayer):
"""
A similar layer to RecurrentMultiLayer (the DT-RNN), with the difference
that we have shortcut connections in the MLP representing the transition
from previous hidden state to the next
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.b_hhs.append(theano.shared(
self.bias_fn[dx-1](self.n_hids[dx],
self.bias_scale[dx-1],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs,self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx-1])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
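# ---------------------------------------------------------------------------
# Added sketch (illustration only, not part of the original module): shape
# bookkeeping for the shortcut matrices above. W_shortp[dx-1] maps the
# previous top-level hidden state (size n_hids[-1]) into layer dx, so there
# is one shortcut matrix per transition layer beyond the first.
def _demo_shortpath_shapes(n_hids=(6, 4, 4)):
    shapes = [(n_hids[-1], n_hids[dx]) for dx in range(1, len(n_hids))]
    return shapes  # [(4, 4), (4, 4)] for the default three-layer transition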
class RecurrentMultiLayerShortPathInp(RecurrentMultiLayer):
"""
Similar to the RecurrentMultiLayerShortPath class, just that the input
is fed into the last layer of the MLP (similar to
RecurrentMultiLayerInp).
"""
def _init_params(self):
self.W_hhs = []
self.b_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
if dx < self.n_layers-1:
self.b_hhs.append(theano.shared(
self.bias_fn[dx](self.n_hids[dx],
self.bias_scale[dx],
self.rng),
name='b%d_%s' %(dx, self.name)))
self.params = [x for x in self.W_hhs] + [x for x in self.b_hhs] +\
[x for x in self.W_shortp]
self.restricted_params = [x for x in self.params]
self.params_grad_scale = [self.grad_scale for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nb_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.b_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nb_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs, self.nW_hhs)]
if not no_noise_bias:
b_hhs = [(x+y) for x, y in zip(self.b_hhs, self.nb_hhs)]
else:
b_hhs = self.b_hhs
W_shp = [(x+y) for x, y in zip(self.W_shortp, self.nW_shortp)]
else:
W_hhs = self.W_hhs
b_hhs = self.b_hhs
W_shp = self.W_shortp
h = self.activation[0](TT.dot(state_before,
W_hhs[0])+b_hhs[0])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers-1):
h = self.activation[dx](TT.dot(h,
W_hhs[dx])+
TT.dot(state_before,
W_shp[dx-1])+b_hhs[dx])
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
h = self.activation[-1](TT.dot(h, W_hhs[-1]) +
TT.dot(state_before, W_shp[-1])+state_below)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
                if h.ndim == 2:
                    h = h * dpmask[:, dpidx:dpidx+h.shape[1]]
                    dpidx = dpidx + h.shape[1]
                else:
                    h = h * dpmask[dpidx:dpidx+h.shape[0]]
                    dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
        rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
        rval[-1] = h
return rval
class RecurrentMultiLayerShortPathInpAll(RecurrentMultiLayer):
"""
Similar to RecurrentMultiLayerShortPathInp class, just that the input is
fed to all layers of the MLP depicting the deep transition between h_tm1
to h_t.
"""
def _init_params(self):
self.W_hhs = []
self.W_shortp = []
for dx in xrange(self.n_layers):
W_hh = self.init_fn[dx](self.n_hids[(dx-1)%self.n_layers],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_hhs.append(theano.shared(value=W_hh, name="W%d_%s" %
(dx,self.name)))
if dx > 0:
W_shp = self.init_fn[dx](self.n_hids[self.n_layers-1],
self.n_hids[dx],
self.sparsity[dx],
self.scale[dx],
rng=self.rng)
self.W_shortp.append(theano.shared(value=W_shp,
name='W_s%d_%s'%(dx,self.name)))
self.params = [x for x in self.W_hhs] +\
[x for x in self.W_shortp]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hhs = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_hhs]
self.nW_shortp = [theano.shared(x.get_value()*0, name='noise_'+x.name) for x in self.W_shortp]
self.noise_params = [x for x in self.nW_hhs] + [x for x in self.nW_shortp]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape) for x in self.noise_params]
def step_fprop(self,
state_below,
mask=None,
dpmask=None,
state_before=None,
no_noise_bias=False,
use_noise=True):
"""
See parent class
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hhs = [(x+y) for x, y in zip(self.W_hhs,self.nW_hhs)]
W_shp = [(x+y) for x, y in zip(self.W_shortp,self.nW_shortp)]
else:
W_hhs = self.W_hhs
W_shp = self.W_shortp
def slice_state_below(dx, sb = state_below):
st = 0
for p in xrange(dx):
st += self.n_hids[p]
ed = st + self.n_hids[dx]
if sb.ndim == 1:
return sb[st:ed]
else:
return sb[:,st:ed]
h = self.activation[0](TT.dot(state_before, W_hhs[0]) + slice_state_below(0))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,:h.shape[1]]
dpidx = h.shape[1]
else:
h = h * dpmask[:h.shape[0]]
dpidx = h.shape[0]
else:
h = h * self.dropout
rval += [h]
for dx in xrange(1, self.n_layers):
h = self.activation[dx](TT.dot(h, W_hhs[dx]) +
TT.dot(state_before, W_shp[dx-1]) +
slice_state_below(dx))
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if self.dropout < 1.:
if use_noise:
if h.ndim == 2:
h = h * dpmask[:,dpidx:dpidx+h.shape[1]]
dpidx = dpidx + h.shape[1]
else:
h = h * dpmask[dpidx:dpidx+h.shape[0]]
dpidx = dpidx + h.shape[0]
else:
h = h * self.dropout
rval += [h]
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
rval[-1] = h
return rval
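# ---------------------------------------------------------------------------
# Added layout sketch (illustration only, not part of the original module):
# for RecurrentMultiLayerShortPathInpAll, `state_below` is assumed to
# concatenate one input projection per transition layer, and
# slice_state_below carves out n_hids[0], n_hids[1], ... in order along the
# last axis. A tiny numpy illustration of that slicing convention:
def _demo_inp_all_slicing(n_hids=(3, 5)):
    sb = numpy.arange(sum(n_hids))    # stands in for one timestep's input
    st, slices = 0, []
    for n in n_hids:
        slices.append(sb[st:st + n])  # layer dx sees sb[st:st + n_hids[dx]]
        st += n
    return slices                     # slice lengths here: [3, 5]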
class RecurrentLayer(Layer):
"""
Standard recurrent layer with gates.
    See the arXiv version of our paper.
"""
def __init__(self, rng,
n_hids=500,
scale=.01,
sparsity = -1,
activation = TT.tanh,
activ_noise=0.,
weight_noise=False,
bias_fn='init_bias',
bias_scale = 0.,
dropout = 1.,
init_fn='sample_weights',
kind_reg = None,
grad_scale = 1.,
profile = 0,
gating = False,
reseting = False,
gater_activation = TT.nnet.sigmoid,
reseter_activation = TT.nnet.sigmoid,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: int
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type gating: bool
:param gating: If true, an update gate is used
:type reseting: bool
:param reseting: If true, a reset gate is used
        :type gater_activation: string or function
        :param gater_activation: The activation function of the update gate
        :type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset gate
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
if type(gater_activation) is str or type(gater_activation) is unicode:
gater_activation = eval(gater_activation)
if type(reseter_activation) is str or type(reseter_activation) is unicode:
reseter_activation = eval(reseter_activation)
self.scale = scale
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
self.gating = gating
self.reseting = reseting
self.gater_activation = gater_activation
self.reseter_activation = reseter_activation
assert rng is not None, "random number generator should not be empty!"
super(RecurrentLayer, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
if self.gating:
self.G_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G_%s"%self.name)
self.params.append(self.G_hh)
if self.reseting:
self.R_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R_%s"%self.name)
self.params.append(self.R_hh)
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
            self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
            self.noise_params = [self.nW_hh]
            # only create noise variables for the gates that actually exist
            if self.gating:
                self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
                self.noise_params.append(self.nG_hh)
            if self.reseting:
                self.nR_hh = theano.shared(self.R_hh.get_value()*0, name='noise_'+self.R_hh.name)
                self.noise_params.append(self.nR_hh)
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def step_fprop(self,
state_below,
mask = None,
state_before = None,
gater_below = None,
reseter_below = None,
use_noise=True,
no_noise_bias = False):
"""
Constructs the computational graph of this layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type gater_below: theano variable
:param gater_below: the input to the update gate
:type reseter_below: theano variable
:param reseter_below: the input to the reset gate
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hh = self.W_hh + self.nW_hh
if self.gating:
G_hh = self.G_hh + self.nG_hh
if self.reseting:
R_hh = self.R_hh + self.nR_hh
else:
W_hh = self.W_hh
if self.gating:
G_hh = self.G_hh
if self.reseting:
R_hh = self.R_hh
# Reset gate:
# optionally reset the hidden state.
if self.reseting and reseter_below:
reseter = self.reseter_activation(TT.dot(state_before, R_hh) +
reseter_below)
reseted_state_before = reseter * state_before
else:
reseted_state_before = state_before
# Feed the input to obtain potential new state.
preactiv = TT.dot(reseted_state_before, W_hh) + state_below
h = self.activation(preactiv)
        # Update gate:
        # optionally interpolate between the candidate state and the old one.
if self.gating and gater_below:
gater = self.gater_activation(TT.dot(state_before, G_hh) +
gater_below)
h = gater * h + (1-gater) * state_before
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
return h
def fprop(self,
state_below,
mask=None,
init_state=None,
gater_below=None,
reseter_below=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False
):
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, self.n_in))
if gater_below:
gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))
if reseter_below:
reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
else:
init_state = TT.alloc(floatX(0), self.n_hids)
# FIXME: Find a way to clean this up
if self.reseting and reseter_below:
if self.gating and gater_below:
if mask:
inps = [state_below, mask, gater_below, reseter_below]
fn = lambda x,y,g,r,z : self.step_fprop(x,y,z, gater_below=g, reseter_below=r, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, gater_below, reseter_below]
fn = lambda tx, tg,tr, ty: self.step_fprop(tx, None, ty, gater_below=tg,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if mask:
inps = [state_below, mask, reseter_below]
fn = lambda x,y,r,z : self.step_fprop(x,y,z, use_noise=use_noise,
reseter_below=r,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, reseter_below]
fn = lambda tx,tr,ty: self.step_fprop(tx, None, ty,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if self.gating and gater_below:
if mask:
inps = [state_below, mask, gater_below]
fn = lambda x,y,g,z : self.step_fprop(x,y,z, gater_below=g, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, gater_below]
fn = lambda tx, tg, ty: self.step_fprop(tx, None, ty, gater_below=tg,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = nsteps)
new_h = rval
self.out = rval
self.rval = rval
        self.updates = updates
return self.out
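# ---------------------------------------------------------------------------
# Added usage sketch (not part of the original module): a hypothetical,
# GRU-like configuration of RecurrentLayer with both gates enabled, assuming
# a working Theano/GroundHog environment. `state_below`, `gater_below` and
# `reseter_below` are the pre-computed input projections of dimensionality
# n_hids, passed flattened as (n_steps * batch_size, n_hids) and reshaped
# inside fprop.
def _demo_gated_recurrent_layer():
    rng = numpy.random.RandomState(123)
    layer = RecurrentLayer(rng, n_hids=8,
                           gating=True, reseting=True,
                           name='demo_gru')
    state_below = TT.matrix('state_below')
    gater_below = TT.matrix('gater_below')
    reseter_below = TT.matrix('reseter_below')
    h = layer.fprop(state_below,
                    gater_below=gater_below,
                    reseter_below=reseter_below,
                    batch_size=4,
                    use_noise=False)
    return theano.function([state_below, gater_below, reseter_below], h,
                           updates=layer.updates)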
class LSTMLayer(Layer):
"""
Standard LSTM Layer
"""
def __init__(self, rng,
n_hids=500,
scale=.01,
sparsity = -1,
activation = TT.tanh,
activ_noise=0.,
weight_noise=False,
bias_fn='init_bias',
bias_scale = 0.,
dropout = 1.,
init_fn='sample_weights',
kind_reg = None,
grad_scale = 1.,
profile = 0,
name=None,
**kwargs):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: int
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
self.scale = scale
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
assert rng is not None, "random number generator should not be empty!"
super(LSTMLayer, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hi = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Whi_%s"%self.name)
self.params = [self.W_hi]
self.W_ci = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wci_%s"%self.name)
self.params += [self.W_ci]
self.W_hf = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Whf_%s"%self.name)
self.params += [self.W_hf]
self.W_cf = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_cf]
self.W_hc = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_hc]
self.W_ho = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_ho]
self.W_co = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wcf_%s"%self.name)
self.params += [self.W_co]
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
            self.noise_params = [theano.shared(p.get_value()*0, name='noise_'+p.name) for p in self.params]
            # expose the noise variables under the attribute names step_fprop expects
            (self.nW_hi, self.nW_ci, self.nW_hf, self.nW_cf,
             self.nW_hc, self.nW_ho, self.nW_co) = self.noise_params
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def _get_slice_below(self, state_below, to='cell'):
if to == 'cell':
offset = 0
elif to == 'input':
offset = 1 * self.n_hids
elif to == 'output':
offset = 2 * self.n_hids
elif to == 'forget':
offset = 3 * self.n_hids
else:
            raise ValueError('Unknown gate/cell types')
if state_below.ndim == 3:
return state_below[:,:,offset:offset+self.n_hids]
if state_below.ndim == 2:
return state_below[:,offset:offset+self.n_hids]
return state_below[offset:offset+self.n_hids]
def _get_slice_before(self, state_before, fr='cell'):
if fr == 'cell':
offset = self.n_hids
elif fr == 'hidden':
offset = 0
else:
            raise ValueError('Unknown cell/gate types')
if state_before.ndim == 2:
return state_before[:,offset:offset+self.n_hids]
return state_before[offset:offset+self.n_hids]
def step_fprop(self,
state_below,
mask = None,
state_before = None,
use_noise=True,
no_noise_bias = False,
**kwargs):
"""
Constructs the computational graph of this layer.
:type state_below: theano variable
:param state_below: the input to the layer
:type mask: None or theano variable
:param mask: mask describing the length of each sequence in a
minibatch
:type state_before: theano variable
:param state_before: the previous value of the hidden state of the
layer
:type use_noise: bool
:param use_noise: flag saying if weight noise should be used in
computing the output of this layer
:type no_noise_bias: bool
:param no_noise_bias: flag saying if weight noise should be added to
the bias as well
"""
rval = []
if self.weight_noise and use_noise and self.noise_params:
W_hi = self.W_hi + self.nW_hi
W_ci = self.W_ci + self.nW_ci
W_hf = self.W_hf + self.nW_hf
W_cf = self.W_cf + self.nW_cf
W_hc = self.W_hc + self.nW_hc
W_ho = self.W_ho + self.nW_ho
W_co = self.W_co + self.nW_co
else:
W_hi = self.W_hi
W_ci = self.W_ci
W_hf = self.W_hf
W_cf = self.W_cf
W_hc = self.W_hc
W_ho = self.W_ho
W_co = self.W_co
# input gate
ig = TT.nnet.sigmoid(self._get_slice_below(state_below,'input') +
TT.dot(self._get_slice_before(state_before,'hidden'), W_hi) +
TT.dot(self._get_slice_before(state_before,'cell'), W_ci))
# forget gate
fg = TT.nnet.sigmoid(self._get_slice_below(state_below,'forget') +
TT.dot(self._get_slice_before(state_before,'hidden'), W_hf) +
TT.dot(self._get_slice_before(state_before,'cell'), W_cf))
# cell
cc = fg * self._get_slice_before(state_before,'cell') + \
ig * self.activation(self._get_slice_below(state_below,'cell') +
TT.dot(self._get_slice_before(state_before,'hidden'), W_hc))
# output gate
og = TT.nnet.sigmoid(self._get_slice_below(state_below,'output') +
TT.dot(self._get_slice_before(state_before,'hidden'), W_ho) +
TT.dot(cc, W_co))
# hidden state
hh = og * self.activation(cc)
if hh.ndim == 2:
h = TT.concatenate([hh, cc], axis=1)
else:
h = TT.concatenate([hh, cc], axis=0)
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
return h
def fprop(self,
state_below,
mask=None,
init_state=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False,
**kwargs
):
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, state_below.shape[-1]))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids * 2)
else:
init_state = TT.alloc(floatX(0), self.n_hids * 2)
if mask:
inps = [state_below, mask]
fn = lambda x,y,z : self.step_fprop(x,y,z, use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below]
fn = lambda tx, ty: self.step_fprop(tx, None, ty,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = nsteps)
new_h = rval
self.out = rval
self.rval = rval
self.updates = updates
return self.out
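# ---------------------------------------------------------------------------
# Added layout sketch (illustration only, not part of the original module):
# LSTMLayer keeps the hidden state and the cell concatenated as [h, c] along
# the last axis (hence init_state of size n_hids * 2), while `state_below` is
# assumed to pack the four input projections as [cell, input, output, forget],
# each of width n_hids, matching the offsets in _get_slice_below. A numpy
# illustration of both conventions:
def _demo_lstm_layout(n_hids=4):
    packed_in = numpy.arange(4 * n_hids)          # one timestep's input
    cell_in   = packed_in[0 * n_hids:1 * n_hids]  # to='cell'
    input_in  = packed_in[1 * n_hids:2 * n_hids]  # to='input'
    output_in = packed_in[2 * n_hids:3 * n_hids]  # to='output'
    forget_in = packed_in[3 * n_hids:4 * n_hids]  # to='forget'
    state = numpy.arange(2 * n_hids)              # previous [h, c]
    h_prev = state[:n_hids]                       # fr='hidden' (offset 0)
    c_prev = state[n_hids:]                       # fr='cell' (offset n_hids)
    return cell_in, input_in, output_in, forget_in, h_prev, c_prev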
class DoubleRecurrentLayer(Layer):
def __init__(self, rng,
n_hids=500,
scale=.01,
sparsity = -1,
activation = TT.tanh,
activ_noise=0.,
weight_noise=False,
bias_fn='init_bias',
bias_scale = 0.,
dropout = 1.,
init_fn='sample_weights',
kind_reg = None,
grad_scale = 1.,
profile = 0,
gater_activation = TT.nnet.sigmoid,
reseter_activation = TT.nnet.sigmoid,
gating=True,
reseting=True,
name=None):
"""
:type rng: numpy random generator
:param rng: numpy random generator
:type n_in: int
        :param n_in: number of input units
:type n_hids: int
:param n_hids: Number of hidden units on each layer of the MLP
:type activation: string/function or list of
:param activation: Activation function for the embedding layers. If
a list it needs to have a value for each layer. If not, the same
activation will be applied to all layers
:type scale: float or list of
:param scale: depending on the initialization function, it can be
the standard deviation of the Gaussian from which the weights
are sampled or the largest singular value. If a single value it
will be used for each layer, otherwise it has to have one value
for each layer
:type sparsity: int or list of
:param sparsity: if a single value, it will be used for each layer,
otherwise it has to be a list with as many values as layers. If
negative, it means the weight matrix is dense. Otherwise it
means this many randomly selected input units are connected to
an output unit
:type weight_noise: bool
:param weight_noise: If true, the model is used with weight noise
            (and the right shared variables are constructed, to keep track of the
noise)
:type dropout: float
:param dropout: the probability with which hidden units are dropped
from the hidden layer. If set to 1, dropout is not used
:type init_fn: string or function
:param init_fn: function used to initialize the weights of the
layer. We recommend using either `sample_weights_classic` or
`sample_weights` defined in the utils
:type bias_fn: string or function
:param bias_fn: function used to initialize the biases. We recommend
using `init_bias` defined in the utils
:type bias_scale: float
:param bias_scale: argument passed to `bias_fn`, depicting the scale
of the initial bias
:type grad_scale: float or theano scalar
:param grad_scale: factor with which the gradients with respect to
the parameters of this layer are scaled. It is used for
differentiating between the different parameters of a model.
        :type gater_activation: string or function
        :param gater_activation: The activation function of the update gate
        :type reseter_activation: string or function
        :param reseter_activation: The activation function of the reset gate
:type name: string
:param name: name of the layer (used to name parameters). NB: in
this library names are very important because certain parts of the
            code rely on names to disambiguate between variables, therefore
each layer should have a unique name.
"""
self.grad_scale = grad_scale
if type(init_fn) is str or type(init_fn) is unicode:
init_fn = eval(init_fn)
if type(bias_fn) is str or type(bias_fn) is unicode:
bias_fn = eval(bias_fn)
if type(activation) is str or type(activation) is unicode:
activation = eval(activation)
if type(gater_activation) is str or type(gater_activation) is unicode:
gater_activation = eval(gater_activation)
if type(reseter_activation) is str or type(reseter_activation) is unicode:
reseter_activation = eval(reseter_activation)
self.scale = scale
self.sparsity = sparsity
self.activation = activation
self.n_hids = n_hids
self.bias_scale = bias_scale
self.bias_fn = bias_fn
self.init_fn = init_fn
self.weight_noise = weight_noise
self.activ_noise = activ_noise
self.profile = profile
self.dropout = dropout
self.gater_activation = gater_activation
self.reseter_activation = reseter_activation
assert rng is not None, "random number generator should not be empty!"
super(DoubleRecurrentLayer, self).__init__(self.n_hids,
self.n_hids, rng, name)
self.trng = RandomStreams(self.rng.randint(int(1e6)))
self.params = []
self._init_params()
def _init_params(self):
self.W_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_%s"%self.name)
self.params = [self.W_hh]
self.G_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G_%s"%self.name)
self.params.append(self.G_hh)
self.R_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R_%s"%self.name)
self.params.append(self.R_hh)
self.Wrev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wrev_%s"%self.name)
self.params += [self.Wrev_hh]
self.Grev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Grev_%s"%self.name)
self.params.append(self.Grev_hh)
self.Rrev_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Rrev_%s"%self.name)
self.params.append(self.Rrev_hh)
self.Wgg_fwd = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wgg_fwd_%s"%self.name)
self.params += [self.Wgg_fwd]
self.Wgg_rev = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="Wgg_rev_%s"%self.name)
self.params += [self.Wgg_rev]
self.W2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W2_%s"%self.name)
self.params.append(self.W2_hh)
self.U2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="U2_%s"%self.name)
self.params.append(self.U2_hh)
self.V2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="V2_%s"%self.name)
self.params.append(self.V2_hh)
self.G2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="G2_%s"%self.name)
self.params.append(self.G2_hh)
self.R2_hh = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="R2_%s"%self.name)
self.params.append(self.R2_hh)
self.S_h = theano.shared(
getattr(numpy, theano.config.floatX)(0.01 * numpy.random.rand(self.n_hids)),
#sample_weights_classic(self.n_hids, 1, -1, 0.01, rng=self.rng),
name='S_%s'%self.name)
self.params.append(self.S_h)
self.W_att = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="W_att_%s"%self.name)
self.params.append(self.W_att)
self.U_att = theano.shared(
self.init_fn(self.n_hids,
self.n_hids,
self.sparsity,
self.scale,
rng=self.rng),
name="U_att_%s"%self.name)
self.params.append(self.U_att)
self.V_att = theano.shared(
getattr(numpy, theano.config.floatX)(0.01 * numpy.random.rand(self.n_hids)),
name='V_att_%s'%self.name)
self.params.append(self.V_att)
self.params_grad_scale = [self.grad_scale for x in self.params]
self.restricted_params = [x for x in self.params]
if self.weight_noise:
self.nW_hh = theano.shared(self.W_hh.get_value()*0, name='noise_'+self.W_hh.name)
self.nG_hh = theano.shared(self.G_hh.get_value()*0, name='noise_'+self.G_hh.name)
self.nR_hh = theano.shared(self.R_hh.get_value()*0, name='noise_'+self.R_hh.name)
self.noise_params = [self.nW_hh,self.nG_hh,self.nR_hh]
self.nW2_hh = theano.shared(self.W2_hh.get_value()*0, name='noise_'+self.W2_hh.name)
self.nG2_hh = theano.shared(self.G2_hh.get_value()*0, name='noise_'+self.G2_hh.name)
self.nR2_hh = theano.shared(self.R2_hh.get_value()*0, name='noise_'+self.R2_hh.name)
self.noise_params += [self.nW2_hh,self.nG2_hh,self.nR2_hh]
self.noise_params_shape_fn = [constant_shape(x.get_value().shape)
for x in self.noise_params]
def fprop(self,
state_below,
mask=None,
init_state=None,
gater_below=None,
reseter_below=None,
nsteps=None,
batch_size=None,
use_noise=True,
truncate_gradient=-1,
no_noise_bias = False
):
if theano.config.floatX=='float32':
floatX = numpy.float32
else:
floatX = numpy.float64
if nsteps is None:
nsteps = state_below.shape[0]
if batch_size and batch_size != 1:
nsteps = nsteps / batch_size
if batch_size is None and state_below.ndim == 3:
batch_size = state_below.shape[1]
if state_below.ndim == 2 and \
(not isinstance(batch_size,int) or batch_size > 1):
state_below = state_below.reshape((nsteps, batch_size, self.n_in))
if gater_below:
gater_below = gater_below.reshape((nsteps, batch_size, self.n_in))
if reseter_below:
reseter_below = reseter_below.reshape((nsteps, batch_size, self.n_in))
if not init_state:
if not isinstance(batch_size, int) or batch_size != 1:
init_state = TT.alloc(floatX(0), batch_size, self.n_hids)
else:
init_state = TT.alloc(floatX(0), self.n_hids)
if self.weight_noise and use_noise and self.noise_params:
W_hh = self.W_hh + self.nW_hh
G_hh = self.G_hh + self.nG_hh
R_hh = self.R_hh + self.nR_hh
W2_hh = self.W2_hh + self.nW2_hh
G2_hh = self.G2_hh + self.nG2_hh
R2_hh = self.R2_hh + self.nR2_hh
else:
W_hh = self.W_hh
G_hh = self.G_hh
R_hh = self.R_hh
W2_hh = self.W2_hh
G2_hh = self.G2_hh
R2_hh = self.R2_hh
Wrev_hh = self.Wrev_hh
Grev_hh = self.Grev_hh
Rrev_hh = self.Rrev_hh
Wgg_fwd = self.Wgg_fwd
Wgg_rev = self.Wgg_rev
U2_hh = self.U2_hh
V2_hh = self.V2_hh
W_att = self.W_att
U_att = self.U_att
V_att = self.V_att
S_h = self.S_h
def _scan1(state_below, mask, state_before, gater_below, reseter_below,
use_noise=True, no_noise_bias = False):
reseter = self.reseter_activation(TT.dot(state_before, R_hh) + reseter_below)
reseted_state_before = reseter * state_before
# Feed the input to obtain potential new state.
preactiv = TT.dot(reseted_state_before, W_hh) + state_below
h = self.activation(preactiv)
gater = self.gater_activation(TT.dot(state_before, G_hh) + gater_below)
h = gater * h + (1-gater) * state_before
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
return h
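# --- editor-added illustrative sketch (not part of the original layer) ---
# _scan1 above is a GRU-style step: a reset gate rescales the previous
# state before the candidate is computed, and an update gate mixes the
# candidate with the previous state. A minimal numpy version, assuming
# the input projections are already folded into `x` (as state_below,
# gater_below and reseter_below are here) and sigmoid gate activations:
def _gru_step_sketch(x, h_prev, W, G, R):
    import numpy as np
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    r = sigmoid(h_prev.dot(R) + x)               # reset gate
    h_tilde = np.tanh((r * h_prev).dot(W) + x)   # candidate state
    g = sigmoid(h_prev.dot(G) + x)               # update gate
    return g * h_tilde + (1.0 - g) * h_prev      # convex combination
# --- end editor-added sketch ---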
def _scan1rev(state_below, mask, state_before, gater_below, reseter_below,
use_noise=True, no_noise_bias = False):
reseter = self.reseter_activation(TT.dot(state_before, Rrev_hh) + reseter_below)
reseted_state_before = reseter * state_before
# Feed the input to obtain potential new state.
preactiv = TT.dot(reseted_state_before, Wrev_hh) + state_below
h = self.activation(preactiv)
gater = self.gater_activation(TT.dot(state_before, Grev_hh) + gater_below)
h = gater * h + (1-gater) * state_before
if self.activ_noise and use_noise:
h = h + self.trng.normal(h.shape, avg=0, std=self.activ_noise, dtype=h.dtype)
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (1-mask) * state_before
return h
if mask:
inps = [state_below, mask, gater_below, reseter_below]
fn = lambda x,y,g,r,z : _scan1(x,y,z, gater_below=g, reseter_below=r,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below, gater_below, reseter_below]
fn = lambda tx, tg,tr, ty: _scan1(tx, None, ty, gater_below=tg,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
rval, updates = theano.scan(fn,
sequences = inps,
outputs_info = [init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = nsteps)
rval_fwd = rval
if mask:
mask_rev = mask[::-1]
else:
mask_rev = None
state_below_rev = state_below[::-1]
gater_below_rev = gater_below[::-1]
reseter_below_rev = reseter_below[::-1]
if mask_rev:
inps = [state_below_rev, mask_rev, gater_below_rev, reseter_below_rev]
fn = lambda x,y,g,r,z : _scan1rev(x,y,z, gater_below=g, reseter_below=r,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
else:
inps = [state_below_rev, gater_below_rev, reseter_below_rev]
fn = lambda tx, tg,tr, ty: _scan1rev(tx, None, ty, gater_below=tg,
reseter_below=tr,
use_noise=use_noise,
no_noise_bias=no_noise_bias)
rval, updates_rev = theano.scan(fn,
sequences = inps,
outputs_info = [init_state],
name='layer_%s'%self.name,
profile=self.profile,
truncate_gradient = truncate_gradient,
n_steps = nsteps)
updates += updates_rev
rval_rev = rval[::-1]
state_below = TT.dot(rval_fwd, Wgg_fwd) + TT.dot(rval_rev, Wgg_rev)
state_below_att = TT.dot(state_below, W_att)
def _scan2(mask, state_before, beta_before,
use_noise=True, no_noise_bias = False):
# attention
state_before_att = TT.dot(state_before, U_att)
if state_below_att.ndim == 3:
att = TT.tanh(state_below_att + state_before_att[None,:,:])
else:
att = TT.tanh(state_below_att + state_before_att[None,:])
att = TT.exp(TT.dot(att, V_att))
att = att / att.sum(0, keepdims=True)
if state_below.ndim == 3:
real_below = (state_below * att[:,:,None]).sum(axis=0)
else:
real_below = (state_below * att[:,None]).sum(axis=0)
# reset gate
reseter = self.reseter_activation(TT.dot(state_before, R2_hh) +
TT.dot(real_below, U2_hh))
reseted_state_before = reseter * state_before
# Feed the input to obtain potential new state.
preactiv = TT.dot(reseted_state_before, U2_hh) + TT.dot(real_below, W2_hh)
h = self.activation(preactiv)
# update gate
gater = self.gater_activation(TT.dot(state_before, G2_hh) +
TT.dot(real_below, V2_hh))
h = gater * h + (floatX(1)-gater) * state_before
if mask is not None:
if h.ndim ==2 and mask.ndim==1:
mask = mask.dimshuffle(0,'x')
h = mask * h + (floatX(1)-mask) * state_before
# stopping probability
s = TT.nnet.sigmoid(TT.dot(h, S_h))
beta = beta_before * (floatX(1) - s)
return h, beta
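# --- editor-added illustrative sketch (not part of the original layer) ---
# _scan2 above pools the combined forward/backward states with a softmax
# attention over timesteps, then applies a GRU-style update and decays a
# "continue" probability beta by the per-step stopping probability s.
# A minimal numpy version of the attention pooling alone, assuming a 2-D
# `states` array (timesteps x hidden) and precomputed projections:
def _attention_pool_sketch(states_att, query_att, states, v):
    import numpy as np
    scores = np.tanh(states_att + query_att[None, :]).dot(v)  # one score per timestep
    att = np.exp(scores - scores.max())
    att = att / att.sum()                                     # softmax over timesteps
    return (states * att[:, None]).sum(axis=0)                # attention-weighted context
# --- end editor-added sketch ---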
if mask:
inps = [mask]
fn = lambda y,z,b : _scan2(y,z,b,use_noise=use_noise, no_noise_bias=no_noise_bias)
else:
inps = []
fn = lambda y,b: _scan2(None, y, b, use_noise=use_noise, no_noise_bias=no_noise_bias)
if not isinstance(batch_size, int) or batch_size != 1:
init_beta = TT.alloc(floatX(1), batch_size)
else:
init_beta = TT.alloc(floatX(1), 1)
rval, updates2 = theano.scan(fn,
sequences = inps,
outputs_info = [init_state, init_beta],
name='layer2_%s'%self.name,
profile=self.profile,
truncate_gradient=truncate_gradient,
n_steps = nsteps)
new_h = rval[0]
betas = rval[1]
updates += updates2
if new_h.ndim == 3:
self.out = new_h * betas[:,:,None]
else:
self.out = new_h * betas[:,None]
self.rval = rval
self.updates = updates
return self.out
|
kyunghyuncho/GroundHog
|
groundhog/layers/rec_layers.py
|
Python
|
bsd-3-clause
| 81,816
|
[
"Gaussian"
] |
73c61aba3b70edf356ba70416ab8043e97de5cb5567944d40e526756ab0ef97e
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Does scraping for all known versions of IE."""
import pywintypes
import sys
import time
import types
from drivers import keyboard
from drivers import mouse
from drivers import windowing
# Default version
version = "7.0.5730.1"
DEFAULT_PATH = r"c:\program files\internet explorer\iexplore.exe"
def GetBrowser(path):
"""Invoke the IE browser and return the process, frame, and content window.
Args:
path: full path to browser
Returns:
A tuple of (process handle, render pane)
"""
if not path: path = DEFAULT_PATH
(iewnd, ieproc, address_bar, render_pane, tab_window) = InvokeBrowser(path)
return (ieproc, iewnd, render_pane)
def InvokeBrowser(path):
"""Invoke the IE browser.
Args:
path: full path to browser
Returns:
A tuple of (main window, process handle, address bar,
render_pane, tab_window)
"""
# Invoke IE
(ieproc, iewnd) = windowing.InvokeAndWait(path)
# Get windows we'll need
for tries in xrange(10):
try:
address_bar = windowing.FindChildWindow(
iewnd, "WorkerW|Navigation Bar/ReBarWindow32/"
"Address Band Root/ComboBoxEx32/ComboBox/Edit")
render_pane = windowing.FindChildWindow(
iewnd, "TabWindowClass/Shell DocObject View")
tab_window = windowing.FindChildWindow(
iewnd, "CommandBarClass/ReBarWindow32/TabBandClass/DirectUIHWND")
except IndexError:
time.sleep(1)
continue
break
return (iewnd, ieproc, address_bar, render_pane, tab_window)
def Scrape(urls, outdir, size, pos, timeout=20, **kwargs):
"""Invoke a browser, send it to a series of URLs, and save its output.
Args:
urls: list of URLs to scrape
outdir: directory to place output
size: size of browser window to use
pos: position of browser window
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
None if success, else an error string
"""
path = r"c:\program files\internet explorer\iexplore.exe"
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
(iewnd, ieproc, address_bar, render_pane, tab_window) = (
InvokeBrowser(path) )
# Resize and reposition the frame
windowing.MoveAndSizeWindow(iewnd, pos, size, render_pane)
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
timedout = False
for url in urls:
# Double-click in the address bar, type the name, and press Enter
mouse.DoubleClickInWindow(address_bar)
keyboard.TypeString(url)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(
tab_window, (6, 8, 22, 24), timeout)
timedout = load_time < 0
if timedout:
break
# Scrape the page
image = windowing.ScrapeWindow(render_pane)
# Save to disk
if "filename" in kwargs:
if callable(kwargs["filename"]):
filename = kwargs["filename"](url)
else:
filename = kwargs["filename"]
else:
filename = windowing.URLtoFilename(url, outdir, ".bmp")
image.save(filename)
windowing.EndProcess(ieproc)
if timedout:
return "timeout"
def Time(urls, size, timeout, **kwargs):
"""Measure how long it takes to load each of a series of URLs
Args:
urls: list of URLs to time
size: size of browser window to use
timeout: amount of time to wait for page to load
kwargs: miscellaneous keyword args
Returns:
A list of tuples (url, time). "time" can be "crashed" or "timeout"
"""
if "path" in kwargs and kwargs["path"]: path = kwargs["path"]
else: path = DEFAULT_PATH
proc = None
# Visit each URL we're given
if type(urls) in types.StringTypes: urls = [urls]
ret = []
for url in urls:
try:
# Invoke the browser if necessary
if not proc:
(wnd, proc, address_bar, render_pane, tab_window) = InvokeBrowser(path)
# Resize and reposition the frame
windowing.MoveAndSizeWindow(wnd, (0,0), size, render_pane)
# Double-click in the address bar, type the name, and press Enter
mouse.DoubleClickInWindow(address_bar)
keyboard.TypeString(url)
keyboard.TypeString("\n")
# Wait for the page to finish loading
load_time = windowing.WaitForThrobber(
tab_window, (6, 8, 22, 24), timeout)
timedout = load_time < 0
if timedout:
load_time = "timeout"
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
load_time = "crashed"
proc = None
except pywintypes.error:
load_time = "crashed"
proc = None
ret.append( (url, load_time) )
# Send an alt-F4 to make the browser close; if this times out,
# we've probably got a crash
if proc:
keyboard.TypeString(r"{\4}", use_modifiers=True)
if not windowing.WaitForProcessExit(proc, timeout):
windowing.EndProcess(proc)
return ret
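# --- editor-added usage sketch (URL is hypothetical; launches IE, so wrapped) ---
# Time() returns a list of (url, load_time) pairs, where load_time is the
# numeric throbber wait result, "timeout", or "crashed":
def _time_usage_sketch():
  results = Time(["http://www.example.com"], (1024, 768), 20)
  for url, load_time in results:
    print "%s: %s" % (url, load_time)
# --- end editor-added sketch ---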
def main():
# We're being invoked rather than imported, so run some tests
path = r"c:\sitecompare\scrapes\ie7\7.0.5380.11"
windowing.PreparePath(path)
# Scrape three sites and save the results
Scrape(
["http://www.microsoft.com",
"http://www.google.com",
"http://www.sun.com"],
path, (1024, 768), (0, 0))
return 0
if __name__ == "__main__":
sys.exit(main())
|
Chilledheart/chromium
|
tools/site_compare/scrapers/ie/ie7.py
|
Python
|
bsd-3-clause
| 5,669
|
[
"VisIt"
] |
4b4fa900da4c2a9c31c03d2c8889755a9a7e967bfa01035723c48791b9999889
|
from tvtk.api import tvtk
from animation import Stop
class Object(object):
"""Base class for objects that can be placed into the scene."""
def _set_actor(self):
self.actor = tvtk.Actor(mapper=self.mapper)
self.actor.mapper.update()
def get_actor(self):
return self.actor
def default_animator(self):
"""Returns the default animator, which leaves the object still."""
return lambda obj, frame_no: Stop()
def update_properties(self, **props):
"""Updates the object properties, such as opacity, color, etc. (see VTK
documentation for further details)."""
properties = tvtk.Property(**props)
self.actor.property = properties
def _to_tuple(self, value):
if isinstance(value, (list, tuple)):
return value[:3]
elif isinstance(value, (int, float)):
return value, value, value
def transform(self, translate=None, scale=None, rotate=None):
"""Applies an affine transformation to the current object.
Keyword arguments are self explanatory. They can be a tuple or list
representing the three-dimensional vector or a single number to use
for each of the three components.
"""
transform = self.actor.user_transform or tvtk.Transform()
if translate is not None:
transform.translate(self._to_tuple(translate))
if scale is not None:
transform.scale(self._to_tuple(scale))
if rotate is not None:
rotate = self._to_tuple(rotate)
transform.rotate_x(rotate[0])
transform.rotate_y(rotate[1])
transform.rotate_z(rotate[2])
self.actor.user_transform = transform
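# --- editor-added usage sketch (the caller and its object are hypothetical) ---
# Each transform keyword accepts a 3-component tuple/list or a single scalar
# broadcast to all three axes; update_properties forwards its keyword
# arguments to tvtk.Property:
def _object_usage_sketch(obj):
    obj.transform(translate=(1.0, 0.0, 0.0))      # shift along x
    obj.transform(scale=2.0, rotate=(0, 0, 90))   # uniform scale, then rotate about z
    obj.update_properties(opacity=0.5, color=(1.0, 0.0, 0.0))
# --- end editor-added sketch ---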
class PolyObject(Object):
"""Base class for objects based on vtkPolyData."""
def _set_actor(self):
self.mapper = tvtk.PolyDataMapper()
self.mapper.set_input_data(self.poly_data)
Object._set_actor(self)
|
lukius/mlab-tools
|
mlab_tools/object.py
|
Python
|
mit
| 2,005
|
[
"VTK"
] |
63cf57d46b6769a16b6fcbc4c3de5e83dcd3139d71b40816d8dc25dc11773577
|
import pytest
import ert3
# The inverse cumulative normal distribution function evaluated at 0.995
CNORM_INV = 2.57582930
# The inverse cumulative uniform distribution function evaluated at 0.005
CUNI_INV = 0.005
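# Editor-added sanity check for the constants above; a minimal sketch that
# assumes scipy is available (it is not otherwise used by these tests):
def _quantile_constants_sketch():
    from scipy.stats import norm, uniform
    # 99.5% quantile of the standard normal; 0.5% quantile of U(0, 1)
    assert abs(norm.ppf(0.995) - CNORM_INV) < 1e-7
    assert abs(uniform.ppf(0.005) - CUNI_INV) < 1e-12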
def test_no_parameters():
with pytest.raises(ValueError):
ert3.algorithms.one_at_the_time([])
@pytest.mark.parametrize(
("distribution", "a", "b", "sens_low", "sens_high"),
(
(ert3.stats.Gaussian, 0, 1, -CNORM_INV, CNORM_INV),
(ert3.stats.Uniform, 0, 1, CUNI_INV, 1 - CUNI_INV),
),
)
def test_single_parameter(distribution, a, b, sens_low, sens_high):
single_dist = distribution(a, b, size=1)
evaluations = ert3.algorithms.one_at_the_time({"single": single_dist})
assert 2 == len(evaluations)
for idx, parameter_value in enumerate([sens_low, sens_high]):
evali = evaluations[idx]
assert ["single"] == list(evali.keys())
assert 1 == len(evali["single"].data)
for val in evali["single"].data:
assert parameter_value == pytest.approx(val)
def test_parameter_array():
size = 10
gauss_array = ert3.stats.Gaussian(0, 1, size=size)
evaluations = ert3.algorithms.one_at_the_time({"array": gauss_array})
assert 2 * size == len(evaluations)
for eidx, evali in enumerate(evaluations):
parameter_value = -CNORM_INV if eidx % 2 == 0 else CNORM_INV
assert ["array"] == list(evali.keys())
assert size == len(evali["array"].data)
for vidx, val in enumerate(evali["array"].data):
expected_value = parameter_value if vidx == eidx // 2 else 0
assert expected_value == pytest.approx(val)
def test_parameter_index():
index = ["a" * i + str(i) for i in range(5)]
gauss_index = ert3.stats.Gaussian(0, 1, index=index)
evaluations = ert3.algorithms.one_at_the_time({"indexed_gauss": gauss_index})
assert 2 * len(index) == len(evaluations)
for eidx, evali in enumerate(evaluations):
parameter_value = -CNORM_INV if eidx % 2 == 0 else CNORM_INV
assert ["indexed_gauss"] == list(evali.keys())
assert sorted(index) == sorted(evali["indexed_gauss"].index)
for kidx, key in enumerate(index):
expected_value = parameter_value if kidx == eidx // 2 else 0
assert expected_value == pytest.approx(evali["indexed_gauss"].data[key])
def test_multi_parameter_singletons():
expected_evaluations = [
{"a": [-CNORM_INV], "b": [0]},
{"a": [CNORM_INV], "b": [0]},
{"a": [0], "b": [-CNORM_INV]},
{"a": [0], "b": [CNORM_INV]},
]
records = {
"a": ert3.stats.Gaussian(0, 1, size=1),
"b": ert3.stats.Gaussian(0, 1, size=1),
}
evaluations = ert3.algorithms.one_at_the_time(records)
assert len(expected_evaluations) == len(evaluations)
for expected, result in zip(expected_evaluations, evaluations):
assert expected.keys() == result.keys()
for key in expected.keys():
assert len(expected[key]) == len(result[key].data)
for e, r in zip(expected[key], result[key].data):
assert e == pytest.approx(r)
def test_multi_parameter_doubles():
expected_evaluations = [
{"a": [-CNORM_INV, 0], "b": [0, 0]},
{"a": [CNORM_INV, 0], "b": [0, 0]},
{"a": [0, -CNORM_INV], "b": [0, 0]},
{"a": [0, CNORM_INV], "b": [0, 0]},
{"a": [0, 0], "b": [-CNORM_INV, 0]},
{"a": [0, 0], "b": [CNORM_INV, 0]},
{"a": [0, 0], "b": [0, -CNORM_INV]},
{"a": [0, 0], "b": [0, CNORM_INV]},
]
records = {
"a": ert3.stats.Gaussian(0, 1, size=2),
"b": ert3.stats.Gaussian(0, 1, size=2),
}
evaluations = ert3.algorithms.one_at_the_time(records)
assert len(expected_evaluations) == len(evaluations)
for expected, result in zip(expected_evaluations, evaluations):
assert expected.keys() == result.keys()
for key in expected.keys():
assert len(expected[key]) == len(result[key].data)
for e, r in zip(expected[key], result[key].data):
assert e == pytest.approx(r)
def test_uni_and_norm():
expected_evaluations = [
{"a": [-CNORM_INV, 0], "b": [0.5, 0.5]},
{"a": [CNORM_INV, 0], "b": [0.5, 0.5]},
{"a": [0, -CNORM_INV], "b": [0.5, 0.5]},
{"a": [0, CNORM_INV], "b": [0.5, 0.5]},
{"a": [0, 0], "b": [CUNI_INV, 0.5]},
{"a": [0, 0], "b": [1 - CUNI_INV, 0.5]},
{"a": [0, 0], "b": [0.5, CUNI_INV]},
{"a": [0, 0], "b": [0.5, 1 - CUNI_INV]},
]
records = {
"a": ert3.stats.Gaussian(0, 1, size=2),
"b": ert3.stats.Uniform(0, 1, size=2),
}
evaluations = ert3.algorithms.one_at_the_time(records)
assert len(expected_evaluations) == len(evaluations)
for expected, result in zip(expected_evaluations, evaluations):
assert expected.keys() == result.keys()
for key in expected.keys():
assert len(expected[key]) == len(result[key].data)
for e, r in zip(expected[key], result[key].data):
assert e == pytest.approx(r)
def test_multi_parameter_groups():
records = {}
size = 10
records["array"] = ert3.stats.Gaussian(0, 1, size=size)
index = ["a" * i + str(i) for i in range(5)]
records["indexed"] = ert3.stats.Gaussian(0, 1, index=index)
evaluations = ert3.algorithms.one_at_the_time(records)
assert 2 * (size + len(index)) == len(evaluations)
for eidx, evali in enumerate(evaluations):
parameter_value = -CNORM_INV if eidx % 2 == 0 else CNORM_INV
assert ["array", "indexed"] == sorted(evali.keys())
assert size == len(evali["array"].data)
for vidx, val in enumerate(evali["array"].data):
expected_value = (
parameter_value if eidx < 2 * size and vidx == eidx // 2 else 0
)
assert expected_value == pytest.approx(val)
assert sorted(index) == sorted(evali["indexed"].index)
for kidx, key in enumerate(index):
expected_value = (
parameter_value
if eidx >= 2 * size and kidx == (eidx - 2 * size) // 2
else 0
)
assert expected_value == pytest.approx(evali["indexed"].data[key])
@pytest.mark.parametrize(
("tail", "expected_evaluations"),
(
(
0.99,
[
{"a": [-CNORM_INV], "b": [0.5]},
{"a": [CNORM_INV], "b": [0.5]},
{"a": [0], "b": [CUNI_INV]},
{"a": [0], "b": [1 - CUNI_INV]},
],
),
(
0.75,
[
{"a": [-1.15034938], "b": [0.5]},
{"a": [1.15034938], "b": [0.5]},
{"a": [0], "b": [0.125]},
{"a": [0], "b": [1 - 0.125]},
],
),
(
0.5,
[
{"a": [-0.67448975], "b": [0.5]},
{"a": [0.67448975], "b": [0.5]},
{"a": [0], "b": [0.25]},
{"a": [0], "b": [1 - 0.25]},
],
),
),
)
def test_tail(tail, expected_evaluations):
records = {
"a": ert3.stats.Gaussian(0, 1, size=1),
"b": ert3.stats.Uniform(0, 1, size=1),
}
evaluations = ert3.algorithms.one_at_the_time(records, tail)
for expected, result in zip(expected_evaluations, evaluations):
for key in expected.keys():
for e, r in zip(expected[key], result[key].data):
assert e == pytest.approx(r)
|
joakim-hove/ert
|
tests/ert_tests/ert3/algorithms/test_one_at_the_time.py
|
Python
|
gpl-3.0
| 7,614
|
[
"Gaussian"
] |
7507f28ce5ec99ee8e0c90e866a2793f08845453ba60c6c96f4212b3d803aef5
|
#!/usr/bin/env python
###
# name : seqfiles.py
# description : Script for analyzing sequencing results
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# LICENSE:
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#Copyright (C) 2014 Martin K. M. Engqvist |
###
import os
import fnmatch
import difflib
import dna
from StringIO import StringIO
import pprint
import copy
import re
import string
import ABIreader #parser
#import scf #parser
#import abi #parser
#import ztr #parser
import fastq #parser
import fasta #parser
import NeedlemanWunsch as NW #alignment
import pyalign
#TODO
#I need to make my own bindings to muscle so that I do not have to depend on biopython
##### Json style data structure used for this script #####
# [{
# "reference":
# {
# "name": "reference_dna",
# "dna": "CACCGG",
# "aln_dna": "C-ACCG-G",
# },
# "samples":
# [
# {
# "name": "rrnb_primer1.seq",
# "dna": "CTACGTG",
# "orientation": "forward",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# },
# {
# "name": "rrnb_primer2.seq",
# "dna": "CACGTAG",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# }
# ],
# "contig":
# {
# "name": "rrnb_contig"
# "dna": "CTACGTG",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# "dna_missmatches": "list of dna missmatches",
# "aa_missmatches": "list of aa missmatches",
# }
#
#
# },
# {
# "reference":
# {
# "name": "referene_dna",
# "dna": "CACCGG",
# "aln_dna": "C-ACCG-G",
# },
# "samples":
# [
# {
# "name": "ccdb_primer1.seq",
# "dna": "CTACGTG",
# "orientation": "forward",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# },
# {
# "name": "ccdb_primer2.seq",
# "dna": "CACGTAG",
# "orientation": "reverse",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# }
# ],
# "contig":
# {
# "name": "ccdb_contig"
# "dna": "CTACGTG",
# "aln_dna": "CTA-CGTG",
# "sign_missmatches": " * * * ",
# "dna_missmatches": "list of dna missmatches"
# "aa_missmatches": "list of aa missmatches",
# }
#
#
# }
# ]
#######################################################
class SeqObj:
'''
Sequencing object for storing data related to a single Sanger sequencing run.
filepath is the complete path to the file, including the file name.
The file is parsed based on its file name ending.
'''
def __init__(self, filepath, name, primer, extension):
#self.filepath = string.replace(filepath, "\\", "/") #replace slashes and hold the input filepath
self.filepath = filepath #path to file
self.filename = self.filepath.split('/').pop() #entire filename
self.name = name #name of sequence
self.primer = primer #name of primer used to generate sequence
self.input_type = extension.upper() #type of input file
self.orientation = False #fw or rv orientation of the dna
self.seq = False #complete DNA sequence
self.qual_val = False #contains the qualifier values for the sequence (if derived from an .ab1, .fastq or .scf file)
self.trace = False #sequencing trace
self.RC = False #keep track of whether the sequence has been reverse-complemented
self.seq_clipped = False #clipped DNA sequence (removal of poor sequence (based on qual_val))
# x = '!"#$%%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~' #without escape characters: '!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~'
self.getInput() #open the file
return
def setName(self, name):
self.name = name #string
def getName(self):
return self.name #string
def setPrimer(self, primer):
self.primer = primer
def getPrimer(self):
return self.primer
def setOrientation(self, orientation):
self.orientation = orientation #string
def getOrientation(self):
return self.orientation #string
def setDNA(self, seq):
self.seq = seq #string
def getDNA(self):
return self.seq #string
def setQualVal(self, qual_val):
self.qual_val = qual_val #string
def getQualVal(self):
return self.qual_val #string
def setTrace(self, trace):
self.trace = trace #list [G, A, T, C]
def getTrace(self):
return self.trace #list [G, A, T, C]
def setRC(self, bool):
self.RC = bool #true or false
def getRC(self):
return self.RC #true or false
def setDNAClipped(self, seq):
self.seq_clipped = seq #string
def getDNAClipped(self):
return self.seq_clipped #string
def getInput(self):
'''
Open a single .seq, .fasta, .fastq, .ztr, .scf, .ab1 file (or even a text file with a DNA sequence) and set variables accordingly.
'''
#read the input
if self.input_type in ['TXT', 'SEQ', 'SEQ.CLIPPED', None] and self.filename not in ['allseqs.txt']:
f = open(self.filepath, 'r')
input = f.read()
f.close()
self.setDNA(input.replace('\n', ''))
elif self.input_type in ['AB1', 'ABI', 'ABIF']:
ab1 = ABIreader.Trace(self.filepath, trimming=True) #optionally ', trimming=True'
self.setDNA(ab1.seq)
self.setQualVal(ab1.qual_val)
self.setTrace([ab1.data['raw1'], ab1.data['raw2'], ab1.data['raw3'], ab1.data['raw4']]) #need to RC this too
# elif self.input_type == 'ZTR':
# print('Support for .ztr files has not yet been implemented')
# elif self.input_type == 'SCF':
# print('Support for .scf files has not yet been implemented')
elif self.input_type == 'FASTA':
id, seq = fasta.parseFile(self.filepath) #parse the fasta file. File should contain ONE entry
self.setDNA(seq)
elif self.input_type == 'FASTQ':
id, seq, id2, qual_val = fastq.parse(self.filepath) #parse the fastq file. File should contain ONE entry
self.setDNA(seq)
self.setQualVal(qual_val)
else:
print('"%s" is not a .txt, .seq, .scf, .fasta, .fastq, .abif, .ab1, .abi or .ztr file' % self.filename)
class SeqAnalysis:
'''
Sequencing analysis object for assembling and analysing data from many Sanger sequencing runs.
There should be one instance of this object for every physical real-world construct sequenced.
For example: If you sequence two clones of the same construct with three primers each,
then there should be one SeqAnalysis object for each of these clones.
Each of the SeqAnalysis objects will hold data from the three sequencing runs done on that clone.
'''
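# --- editor-added usage sketch (file names and primer names are hypothetical) ---
#   clone = SeqAnalysis('myclone')
#   clone.addSeq(SeqObj('/data/myclone_T7.ab1', 'myclone', 'T7', 'ab1'))
#   clone.addSeq(SeqObj('/data/myclone_T3.ab1', 'myclone', 'T3', 'ab1'))
#   clone.assemble()  #de novo, or against a reference if one was set with setReference()
# --- end editor-added sketch ---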
def __init__(self, name):
self.seqdata = {} #this is where all the sequence info gets stored
self.name = name #name of the construct that this SeqAnalysis object belongs to
self.reference = None #reference to a SeqObj that holds the reference sequence
self.aligned = False #keep track of whether sequences have been aligned or not
self.consensus = None #reference to a SeqObj that represents the consensus sequence of which this sequence is a part
self.sign_missmatches = None #sign (* * * *) mismatches vs the reference or consensus
self.seq_missmatches = None #nucleotide mismatches vs the reference or consensus
self.aa_missmatches = None #amino acid mismatches vs the reference or consensus
self.next = None #reference to a SeqObj that is sequentially before this one
self.previous = None #reference to a SeqObj that is sequentially after this one
def addSeq(self, seqobject):
'''
Adds a single SeqObj sequencing object to the data structure.
'''
#if a reference sequence has been added to the SeqAnalysis object, perpetuate that down to the single sequence
# if self.getReference() != None:
# seqobject.setReference(self.getReference())
#add the object to the data structure.
primer = seqobject.primer
if self.seqdata.get(primer) is None:
self.seqdata[primer] = {}
self.seqdata[primer][seqobject.input_type] = seqobject
## I should enforce that _ is an illegal character except for separating primer and construct names ##
#this workaround works for construct names with _ in it. But it fails when the primer name has _ in it....
### this is overly complicated to deal with filenames that has more than one '_' in the filename
# nameparts = dictionary['name'].split('_')
# nameparts.pop() #remove last
# name = ''
# for item in nameparts: #stitch them together
# name += '%s_' % item
###
#construct name and primer name MUST be separated by _ . Construct name CANNOT contain _ .
# name = seqobject.getName().split('_')[0] #get first part of name
def setSignMissmatch(self, string):
self.sign_missmatches = string #string
def getSignMissmatch(self):
return self.sign_missmatches #string
def setDNAMissmatch(self, string):
self.seq_missmatches = string #string
def getDNAMissmatch(self):
return self.seq_missmatches #string
def setAAMissmatch(self, string):
self.aa_missmatches = string #string
def getAAMissmatch(self):
return self.aa_missmatches #string
def setContig(self, seq):
self.contig = seq #string
def getContig(self):
return self.contig #string
def setNext(self, seqobj):
self.next = seqobj #SeqObj object
def getNext(self):
return self.next #SeqObj object
def setPrevious(self, seqobj):
self.previous = seqobj #SeqObj object
def getPrevious(self):
return self.previous #SeqObj object
def assemble(self):
if self.getReference() is None: #if no reference sequence is present, do de Novo assembly.
self.assemble_de_novo()
else:
self.assemble_to_ref() #if a reference sequence is present, align each sequence to it.
#check missmatches and set the associated variables
self.aligned = True
def assemble_de_novo(self):
'''
'''
#somewhere I need to pick which type of file to use if there are multiple. I'm gonna go with the AB1 data for now.
#i'm starting with the simple case of two sequences
seq1 = self.seqdata['p423-GPD-RV']['AB1'].getDNA()
seq2 = self.seqdata['p423-GPD-FW']['AB1'].getDNA()
print(self.getOverlap(seq1, seq2))
print(self.getOverlap(seq1, dna.RC(seq2)))
#### I may adapt the structure below #########
#take one seq, find the best match, combine them. Start over.
# for i in range(len(seqdata)): #for all of the sequence groups
# fw_list = []
# rv_list = []
# seqdata[i]['contig']['dna'] = ''
# samples = copy.deepcopy(seqdata[i]['samples']) #copy samples so that I can delete them as I go
# overlap_found = False #has overlap been found?
# whole_list = False #did we traverse the whole list?
# while overlap_found is False and whole_list is False:
# overlap_found = False #has overlap been found?
# whole_list = False #did we traverse the whole list?
# for n in range(len(samples)):
# for o in range(n, len(samples)):
# fw_dict = {}
# seq1 = seqdata[i]['samples'][0]['dna']
# seq2 = seqdata[i]['samples'][n]['dna']
# forward = findoverlap(seq1, seq2)
# if forward is not False:
# overlap_found = True
# fw_list.append(forward[2])
#
# seq1 = seqdata[i]['samples'][0]['dna']
# seq2 = seqdata[i]['samples'][n]['dna']
# reverse = findoverlap(seq1, dna.reversecomplement(seq2))
# if reverse is not False:
# overlap_found = True
# rv_list.append(forward[2])
# if overlap_found is True:
# if max(fw_list) >= max(rv_list):
# index, value = max(enumerate(fw_list))
# elif max(fw_list) < max(rv_list):
# index, value = max(enumerate(fw_list))
# whole_list = True
def assemble_to_ref(self):
'''
This method aligns many sequences to a reference, one sequence at a time, then makes sure that all gaps match.
'''
############# need to fix this method so that it works with the new data structure #############
#################################################################################################
temp_dictlist = []
for n in range(len(seqdata[index]['samples'])): #make sure each dna has an empty aln_dna entry
seqdata[index]['samples'][n]['aln_dna'] = ''
#make virtual FASTA file of two files and align. The first entry should be the reference.
for n in range(len(seqdata[index]['samples'])):
records = ''
if seqdata[index]['reference']['name'][0] != '>':
records += '>%s\n%s\n' % (seqdata[index]['reference']['name'], seqdata[index]['reference']['dna']) #reference
elif seqdata[index]['reference']['name'][0] == '>':
records += '%s\n%s\n' % (seqdata[index]['reference']['name'], seqdata[index]['reference']['dna'])
else:
print('Muscle name error')
if seqdata[index]['samples'][n]['name'][0] != '>':
records += '>%s\n%s\n' % (seqdata[index]['samples'][n]['name'], seqdata[index]['samples'][n]['dna']) #sample
elif seqdata[index]['samples'][n]['name'][0] == '>':
records += '%s\n%s\n' % (seqdata[index]['samples'][n]['name'], seqdata[index]['samples'][n]['dna'])
else:
print('Muscle name error')
records_handle = StringIO(records) #turn string into a handle
tempdata = records_handle.getvalue()
#for separate fasta entries
muscle_cline = MuscleCommandline()
stdout, stderr = muscle_cline(stdin=tempdata)
stdout = parse_fasta(stdout)
#sort so that ref is first
#is that needed? seems to be working fine
if n == 0:
seqdata[index]['reference']['aln_dna'] = stdout[0]['dna']
seqdata[index]['samples'][n]['aln_dna'] = stdout[1]['dna']
else:
#compare sequences with temp dictlist, check for - in one and not the other. Make changes to all.
temp_dictlist.append(stdout[0])
temp_dictlist.append(stdout[1])
i = 0
while i <= len(seqdata[index]['reference']['aln_dna']):
#check if reference sequences have the same spaces
if len(seqdata[index]['reference']['aln_dna']) == i and len(temp_dictlist[0]['dna']) == i:
i += 1
elif len(seqdata[index]['reference']['aln_dna']) == i and len(temp_dictlist[0]['dna']) > i: #for dealing with end of sequence
seqdata[index]['reference']['aln_dna'] = seqdata[index]['reference']['aln_dna'] + '-'
for x in range(len(seqdata[index]['samples'])): #make change in all sequences present
if seqdata[index]['samples'][x]['aln_dna'] != '':
seqdata[index]['samples'][x]['aln_dna'] += '-'
i = 0
elif len(temp_dictlist[0]['dna']) == i and len(seqdata[index]['reference']['aln_dna']) > i: #for dealing with end of sequence
for entry in temp_dictlist:
entry['dna'] = entry['dna'] + '-'
i = 0
elif seqdata[index]['reference']['aln_dna'][i] == '-' and temp_dictlist[0]['dna'][i] != '-': # if gap in old alignment, but not in new
for entry in temp_dictlist:
entry['dna'] = entry['dna'][:i] + '-' + entry['dna'][i:]
i = 0
elif seqdata[index]['reference']['aln_dna'][i] != '-' and temp_dictlist[0]['dna'][i] == '-': #if gap in new alignment, but not in old
seqdata[index]['reference']['aln_dna'] = seqdata[index]['reference']['aln_dna'][:i] + '-' + seqdata[index]['reference']['aln_dna'][i:]
for x in range(len(seqdata[index]['samples'])): #make change in all sequences present
if seqdata[index]['samples'][x]['aln_dna'] != '':
seqdata[index]['samples'][x]['aln_dna'] = seqdata[index]['samples'][x]['aln_dna'][:i] + '-' + seqdata[index]['samples'][x]['aln_dna'][i:]
i = 0
else:
i += 1
seqdata[index]['samples'][n]['aln_dna'] = temp_dictlist[1]['dna']
#I should probably add something to go through and check that I don't have '-' for all sequences at some position
def findoverlap(self, Seq1, Seq2, min_overlap=20):
"""Function for finding overlaps of two sequences.
Returns start of overlap on Seq1, start of overlap on Seq2, and length of overlap"""
Seq1 = Seq1.upper()
Seq1 = Seq1.replace('\n','')
Seq2 = Seq2.upper()
Seq2 = Seq2.replace('\n','')
seq_matcher = difflib.SequenceMatcher(None, Seq1, Seq2)
seq1_loc, seq2_loc, match_len = seq_matcher.find_longest_match(0, len(Seq1), 0, len(Seq2))
if match_len < min_overlap: #the match is shorter than the minimum specified
return False
else:
return seq1_loc, seq2_loc, match_len #return start of overlap on seq1, start of overlap on seq2, and overlap length
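# --- editor-added usage sketch (sequences are hypothetical) ---
# findoverlap wraps difflib's longest-common-block search; with the default
# min_overlap of 20, a 25-base shared block is reported like this:
def _findoverlap_sketch(analysis):
    shared = "AAACCCGGGTTTAAACCCGGGTTTA"      # 25 bases
    left = "GATTACA" * 5 + shared             # overlap at the end of seq1
    right = shared + "CATCATCAT"              # overlap at the start of seq2
    loc1, loc2, length = analysis.findoverlap(left, right)
    assert (loc1, loc2, length) == (35, 0, 25)
# --- end editor-added sketch ---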
##############################################################################################
def getOverlap(self, seq1, seq2):
'''
Find and return the maximum overlap length of two sequences.
This only works when using the NeedlemanWunsch algorithm to get the overlap (the algorithm is much too promiscuous otherwise).
If there is no overlap, return empty string.
'''
alignment = NW.PairwiseAlignment(seq1, seq2)
score = alignment.score
seq1aln = alignment.seq1aligned.upper()
seq2aln = alignment.seq2aligned.upper()
assert len(seq1aln) == len(seq2aln), 'Error, the sequences are not of the same length'
overlap = False
start = False
end = len(seq1aln)
length = False
first = False
#get start, end and length of overlap. Any number of N is tolerated and a double -- is tolerated.
for i in range(len(seq1aln)):
if overlap is False and seq1aln[i] in 'ATCGN' and seq2aln[i] in 'ATCGN': #first nucleotide of overlap
start = copy.copy(i)
overlap = True
elif overlap is True and (seq1aln[i] == '-' or seq2aln[i] == '-'):
if seq1aln[i+1:i+3] == '--' or seq2aln[i+1:i+3] == '--': #allow for two missing bases, but not more
end = copy.copy(i)
break
# a = open('test.txt', 'a') #open it for writing
# a.write(seq1aln)
# a.write(seq2aln)
# a.close()
#Find which sequence is the first (the leftmost)
#possible topologies:
#AAAAAAAAAAAAAATTTTT-----------
#--------------AAAAACCCCCCCCCCC
if (seq1aln[0] in 'ATCGN' and seq2aln[0] == '-') and (seq1aln[end+1] == '-' and seq2aln[end+1] in 'ATCGN'):
first = 1 #seq1 first
#--------------AAAAACCCCCCCCCCC
#AAAAAAAAAAAAAATTTTT-----------
elif (seq1aln[0] == '-' and seq2aln[0] in 'ATCGN') and (seq1aln[end+1] in 'ATCGN' and seq2aln[end+1] == '-'):
first = 2 #seq2 first
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
#------AAAAAAAAAAAAAATTTTT-----------
elif (seq1aln[0] in 'ATCGN' and seq2aln[0] == '-') and (seq1aln[end+1] in 'ATCGN' and seq2aln[end+1] == '-'):
overlap = False #mark this as not an overlap
#------AAAAAAAAAAAAAATTTTT-----------
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
elif (seq1aln[0] == '-' and seq2aln[0] in 'ATCGN') and (seq1aln[end+1] == '-' and seq2aln[end+1] in 'ATCGN'):
overlap = False #mark this as not an overlap
#CCCCCCAAAAAAAAAAAAAATTTTTCC---------
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
elif (seq1aln[0] in 'ATCGN' and seq2aln[0] in 'ATCGN') and (seq1aln[end+1] == '-' and seq2aln[end+1] in 'ATCGN'):
first = 1 #seq1 first
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
#CCCCCCAAAAAAAAAAAAAATTTTTCC---------
elif (seq1aln[0] in 'ATCGN' and seq2aln[0] in 'ATCGN') and (seq1aln[end+1] in 'ATCGN' and seq2aln[end+1] == '-'):
first = 2 #seq2 first
#-----CAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
elif (seq1aln[0] == '-' and seq2aln[0] in 'ATCGN') and (seq1aln[end+1] in 'ATCGN' and seq2aln[end+1] in 'ATCGN'):
first = 2 #seq2 first
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
#-----CAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
elif (seq1aln[0] in 'ATCGN' and seq2aln[0] == '-') and (seq1aln[end+1] in 'ATCGN' and seq2aln[end+1] in 'ATCGN'):
first = 1 #seq1 first
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
#CCCCCCAAAAAAAAAAAAAATTTTTCCCCCCCCCCC
elif (seq1aln[0] in 'ATCGN' and seq2aln[0] in 'ATCGN') and (seq1aln[-1] in 'ATCGN' and seq2aln[-1] in 'ATCGN'):
first = 1 #it does not matter which one is first, but let's pick seq1
if overlap is False:
return False, False, False, False
else:
return seq1aln, seq2aln, seq1aln[start:end], first #return aligned seq1 (str), seq2 (str), the overlap (str), and an integer (1 or 2) that indicates which sequence is first in the alignment; callers unpack four values, so the score computed above is not returned
def findFirst(self, alnscores):
'''
Find the leftmost sequence of alignment pairs.
'''
#find the leftmost sequence
keys = alnscores.keys() #seqs with overlaps to the right
leftmost = []
for i in keys:
present = False
for j in keys:
if i in alnscores[j]: #they cannot be present as the 'right' sequence of any other sequence
present = True
break
if present is False and i not in leftmost:
leftmost.append(i)
print('leftmost', leftmost)
return leftmost # a list
def sortSeqs(self):
'''
Align each sequence with each other sequence.
Save the alignment scores in a matrix where the index (in the list) for each SeqObj is used as the identifier.
'''
alnscores = {} #store the alignment overlaps
alnseqs = {} #store the alignment sequences
for group in self.seqdata:
for i in range(len(group['samples'])-1):
for j in range(i+1, len(group['samples'])):
seqobj1 = group['samples'][i]
seqobj2 = group['samples'][j]
alnseq1, alnseq2, overlap, first = self.getOverlap(seqobj1.getDNA(), seqobj2.getDNA()) #get the overlap and which sequence is first
# print('%s and %s' % (seqobj1.getName(), seqobj2.getName()), overlap)
if overlap is not False and first == 1:
if i in alnscores:
alnscores[i].update({j:len(overlap)})
alnseqs[i].update({j:(alnseq1, alnseq2)})
else:
alnscores[i] = {j:len(overlap)}
alnseqs[i] = {j:(alnseq1, alnseq2)}
elif overlap is not False and first == 2:
if j in alnscores:
alnscores[j].update({i:len(overlap)})
alnseqs[j].update({i:(alnseq2, alnseq1)})
else:
alnscores[j] = {i:len(overlap)}
alnseqs[j] = {i:(alnseq2, alnseq1)}
print(alnscores)
#get the leftmost (the first) sequence
leftmost = self.findFirst(alnscores)[0]
#determine the order of sequences
sequence = [leftmost]
keys = alnscores.keys()
while sequence[-1] in keys:
sequence.append(max(alnscores[sequence[-1]], key=alnscores[sequence[-1]].get)) #get the max value for the dictionary under the key
print('sequence', sequence)
def setReference(self, seq):
'''
Set a reference sequence.
'''
self.reference = seq
def getReference(self):
'''
Return the reference sequence.
'''
return self.reference
def printAlnScores(self):
pass
def setConsensus(self):
'''
Build a consensus sequence from sorted sequence reads.
'''
pass
################ Generating outputs ######################
def writeseqfiles(self, path):
'''Write all sequence entries from a list of dictionaries to textfile'''
a = open(path + 'allseqs.txt', 'w') #open it for writing
for i in range(len(self.seqdata)):
for n in range(len(self.seqdata[i]['samples'])):
a.write(self.seqdata[i]['samples'][n].getName())
a.write('\n')
a.write(self.seqdata[i]['samples'][n].getDNA())
a.write('\n')
a.close()
##########################################################
class SeqOverview:
'''
The SeqOverview object manages one or several instances of SeqAnalysis and provides means to interact with these.
It should be possible to load a whole folder of files into this class and have it organize the sequences by name and make
SeqAnalysis instances as appropriate.
The aim is to have a GUI interact with this class.
'''
def __init__(self):
self.data = {} #stores all the data in a dictionary
def addFile(self, filepath):
'''
Add a single file.
'''
#get the filename. The filename may only have one _ character,
#and that character MUST be used to separate the construct name from the primer name.
filename = re.split('[\\\/]', filepath)[-1] #the entire filename
extension = '.'.join(filename.split('.')[1:]) #get the extension
if extension.upper() in ['TXT', 'AB1', 'SCF', 'FASTQ', 'SEQ', 'SEQ.CLIPPED']: #here I determine which files are allowed
print('Adding: %s' % filename)
name, primer = filename.replace('.' + extension, '').split('_') #get construct and primer names
#check whether any other SeqAnalysis object with that name is already present, if not, make one.
instance = self.data.get(name)
if instance is None: #not present, so add new SeqAnalysis instance.
self.data[name] = SeqAnalysis(name)
self.data[name].addSeq(SeqObj(filepath, name, primer, extension)) #make a new SeqObj with the info and add it to the SeqAnalysis object.
elif instance is not None: #is present, so add sequence to the existing instance.
instance.addSeq(SeqObj(filepath, name, primer, extension)) #make a new SeqObj with the info and add it to the SeqAnalysis object.
else:
print('Skipping: %s' % filename)
def addFolder(self, path):
'''
Add an entire folder worth of files.
'''
file_list = sorted(os.listdir(path))
for filename in file_list:
filepath = path+filename
self.addFile(filepath)
def addList(self, path, file_list):
'''
Add a specified list of files from a path.
'''
for filename in file_list:
filepath = path+'/'+filename
self.addFile(filepath)
def RemoveN(Seq):
Seq = Seq.upper()
BeginningCounter = 0
EndCounter = len(Seq)
for i in range(int(len(Seq)/2)):
if 'N' in Seq[i]:
BeginningCounter = i+1
for i in range(int(len(Seq)/2)+1, len(Seq)):
if 'N' in Seq[i]:
EndCounter = i
break
Seq = Seq[BeginningCounter:EndCounter]
#print(Seq)
return Seq
#End of RemoveN function
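#Editor-added example: RemoveN clips to just after the last N in the first
#half of the read and just before the first N in the second half:
def _remove_n_example():
    assert RemoveN('NNATCGATCGATNCGATCG') == 'ATCGATCGAT'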
def join(Seq1, Seq2):
"""joins two sequences that have an overlap.
Seq1 is assumed to come before Seq2.
Both are assumed to be in the same orientation."""
vars = findoverlap(Seq1, Seq2) #vars contains the start of overlap on seq1, start of overlap on seq2, and length of overlap
#print(vars)
if vars == False:
print('No overlap')
return False
elif type(vars[0]) == int and type(vars[1]) == int and type(vars[2]) == int:
print('Overlap')
JointSeq = Seq1[0:vars[0]] + Seq2[vars[1]:len(Seq2)]
return JointSeq
else:
print('error while joining')
#align function
def align(Seq1, Seq2): #aligns two sequences that have an overlap
Seq1 = Seq1.replace('\n','')
Seq2 = Seq2.replace('\n','')
Seq1 = Seq1.upper()
Seq2 = Seq2.upper()
vars = findoverlap(Seq1, Seq2)
#print(vars)
if vars == False:
#print('No overlap')
return False
elif type(vars[0]) == int and type(vars[1]) == int and type(vars[2]) == int:
dash = '-'
dash = dash * (vars[0]-vars[1]) #pad seq2 with dashes so the overlaps line up (overlap starts at vars[0] on seq1 and vars[1] on seq2)
Seq2 = dash + Seq2
alnvar = ''
if len(Seq1) < len(Seq2):
Seq1 = Seq1 + (len(Seq2) - len(Seq1)) * '-' #fill up the back
#print(len(Seq1))
#print(len(Seq2))
elif len(Seq1) > len(Seq2):
Seq2 = Seq2 + (len(Seq1) - len(Seq2)) * '-' #fill up the back
#print(len(Seq1))
#print(len(Seq2))
for i in range(len(Seq1)):
if Seq1[i] == Seq2[i]:
alnvar = alnvar + '|'
elif Seq1[i] != Seq2[i]:
alnvar = alnvar + 'x'
else:
print('error making alnvar')
return (Seq1, alnvar, Seq2)
else:
print('error while aligning')
#end of align function
#Function for finding a certain sequence in a string and delete everything in front. Used for finding beginning of gene in seqfile.
#Currently uses 10 nucleotides for the search
#Also deletes everything after the stop codon of the RefSeq
def findstart(RefSeq, Seq):
RefSeq = RefSeq.upper()
Seq = Seq.upper()
for i in range(len(RefSeq)):
if RefSeq[i:(i+3)] == 'ATG':
RefSeq = RefSeq[i:len(RefSeq)]
#print(RefSeq[0:10])
break
for i in range(len(Seq)):
if RefSeq[0:10] == Seq[i:(i+10)]: #Find the start here
Seq = Seq[i:len(Seq)]
if RefSeq[len(RefSeq)-11:len(RefSeq)-1] == Seq[i:(i+10)]: #find the end. Had to add an extra -1 since last character was \n
Seq = Seq[0:(i+10)]
#print(Seq[0:10])
return(RefSeq, Seq)
#End of findstart function
#Function for comparing the sequencing reaction to a reference sequence, one triplet at a time
def tripletalign(RefSeq, Seq):
RefSeq = RefSeq.upper()
Seq = Seq.upper()
counter = 0
for i in range(len(Seq)):
if i%3==0 or i == 0:
#print(RefSeq[i:i+3])
#print(Seq[i:i+3])
if RefSeq[i:(i+3)] == Seq[i:(i+3)]:
counter = 0
pass
elif i+3>len(Seq): #to make sure it's a complete codon... i.e. leave out last codon if it is shorter than 3
pass
else:
if counter <= 5:
print('DNA pos %d, %s mutated to %s --- %s%d%s' % (i+3-1, RefSeq[i:(i+3)], Seq[i:(i+3)], dna.translate(RefSeq[i:(i+3)]), int((i+3)/3), dna.translate(Seq[i:(i+3)])))
counter += 1
else:
print('over %d consecutive mismatches, rest of construct is likely out of frame' % (counter-1))
break
print('\n')
#End of triplet align function
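#Editor-added example (hypothetical; assumes dna.translate returns one-letter
#amino acid codes). Comparing two 6-base ORFs codon by codon:
#  tripletalign('ATGGCT', 'ATGGAT')
#would report the second codon: DNA pos 5, GCT mutated to GAT --- A2D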
def check(seqdataentry):
#for entry in Sequences:
print(seqdataentry['contig']['name'])
dna = findstart(seqdataentry['reference']['dna'], seqdataentry['contig']['dna'])
tripletalign(dna[0], dna[1]) #(RefSeq, dna)
### Add a Seqalign function here that analyzes the current sequence vs refseq and spits out mismatches independent of codons...
#########
def getreference(path, index):
'''Get the reference sequence for each group of sequences'''
#open the RefSeq file
#RefSeq file should contain the gene of interest, starting with ATG
filepath = path + 'RefSeq.txt' #Open reference sequence file
f = open(filepath, 'r') #open content of current file
seqdata[index]['reference'] = dict(name = 'reference_sequence', dna = f.read())
f.close() #close current file
#######################################################
#I don't think this is needed any more
def group_sequences():
'''groups sequences based on name'''
Sequences = []
Currentsequence = []
filepath = path + '/allseqs.txt' #Open reference sequence file
f = open(filepath, 'r') #open content of current file
for line in f:
line = line.replace('\n', '')
if line[0] == '>':
if Currentsequence == []:
name, rest = line.split('_')
Currentsequence.append(name)
elif Currentsequence[0] in line:
Sequences.append(Currentsequence)
else:
Currentsequence = []
name, rest = line.split('_')
Currentsequence.append(name)
elif line[0] == 'A' or line[0] == 'T' or line[0] == 'C' or line[0] == 'G':
Currentsequence.append(line)
elif line[0] == 'N':
print('Sequence starts with N')
else:
print('Something is wrong')
f.close()
return Sequences
def analyze():
###### here I call the functions #########
alnresults = path + '/' + 'alnresults.txt' #get the path for the results file (output file)
f = open(alnresults, 'w') #open it for writing
results = path + '/' + 'results.txt' #get the path for the results file (output file)
a = open(results, 'w') #open it for writing
for i in range(len(seqdata)):
f.write('>%s\n' % seqdata[i]['reference']['name'])
f.write('%s\n' % seqdata[i]['reference']['aln_dna'])
n = 1
while n < len(seqdata[i]['samples']):
f.write('%s\n' % seqdata[i]['samples'][n]['name'])
f.write('%s\n' % seqdata[i]['samples'][n]['aln_dna'])
####### fix the assembly method
####### save it in 'contig'
assembled = join(entry[n], entry[i])
if assembled==False:
n = i
else:
entry[i] = ''
entry[n] = assembled
# for i in range(len(entry)):
# if i == 0:
# a.write('>' + entry[i])
# a.write('\n')
# else:
# a.write(entry[i])
# a.write('\n')
#
#check whether any mutations are present
#this is a shortcut for now
seqdata[i]['contig'] = {'name': seqdata[i]['samples'][0]['name']}
seqdata[i]['contig']['dna'] = seqdata[i]['samples'][0]['dna']
check(seqdata[i])
f.close()
a.close()
if __name__ == '__main__': #if script is run by itself and not loaded
import sys
assert len(sys.argv) == 2, 'Error, this script requires a path to a folder containing the sequencing files as an argument.'
print('Opening %s' % str(sys.argv[1]))
path = str(sys.argv[1]) #Path to folder that contains the sequences
# There are two modes of running the software. Either by aligning sequences to a reference,
# or by assembling the sequences by finding overlaps.
#One should choose what type of sequence should be loaded. ab1>scf>fastq>fasta
#In general, the sequences should be loaded and then clipped to remove bad sequence stretches.
#Otherwise these would really mess up the process of finding overlaps.
#Then align to reference sequence OR assemble based on overlaps.
#Then mismatches and N's need to be found. The process should depend on which mode is used.
#If the sequences are aligned to reference, find mismatches and N's throughout the sequence.
#If the sequences are assembled, then find mismatches in the overlap regions.
# FindSingleGeneMissmatch = 'Y' #Test for one gene whether missmatches occur or not. It is expected that RefSeq starts with ATG
x = SeqOverview()
x.addFolder(path)
#assemble the sequences
# for key in x.keys():
# x[key].assemble()
|
openpaul/DNApy
|
seqfiles5.py
|
Python
|
gpl-3.0
| 33,689
|
[
"Biopython"
] |
6df362f6f7246b87c05f010325a84eebba09d5c3e351dc604753159b2b1cf1a3
|
#!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.onset
from __future__ import print_function
import pytest
from contextlib2 import nullcontext as dnr
# Disable cache
import os
try:
os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
pass
import warnings
import numpy as np
import librosa
from test_core import srand
__EXAMPLE_FILE = os.path.join("tests", "data", "test1_22050.wav")
@pytest.fixture(scope="module")
def ysr():
return librosa.load(__EXAMPLE_FILE)
@pytest.mark.parametrize(
"feature", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]
)
@pytest.mark.parametrize("n_fft", [512, 2048])
@pytest.mark.parametrize("hop_length", [256, 512])
@pytest.mark.parametrize("lag", [1, 2])
@pytest.mark.parametrize("max_size", [1, 2])
@pytest.mark.parametrize("detrend", [False, True])
@pytest.mark.parametrize("center", [False, True])
@pytest.mark.parametrize("aggregate", [None, np.mean, np.max])
def test_onset_strength_audio(
ysr, feature, n_fft, hop_length, lag, max_size, detrend, center, aggregate
):
y, sr = ysr
oenv = librosa.onset.onset_strength(
y=y,
sr=sr,
S=None,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length,
lag=lag,
max_size=max_size,
)
assert oenv.ndim == 1
S = librosa.feature.melspectrogram(y=y, n_fft=n_fft, hop_length=hop_length)
target_shape = S.shape[-1]
if not detrend:
assert np.all(oenv >= 0)
assert oenv.shape[-1] == target_shape
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badlag(ysr):
y, sr = ysr
librosa.onset.onset_strength(y=y, sr=sr, lag=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badmax(ysr):
y, sr = ysr
librosa.onset.onset_strength(y=y, sr=sr, max_size=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_noinput():
librosa.onset.onset_strength(y=None, S=None)
@pytest.fixture(scope="module")
def melspec_sr(ysr):
y, sr = ysr
S = librosa.feature.melspectrogram(y=y, sr=sr)
return S, sr
@pytest.mark.parametrize(
"feature", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]
)
@pytest.mark.parametrize("n_fft", [512, 2048])
@pytest.mark.parametrize("hop_length", [256, 512])
@pytest.mark.parametrize("detrend", [False, True])
@pytest.mark.parametrize("center", [False, True])
@pytest.mark.parametrize("aggregate", [None, np.mean, np.max])
def test_onset_strength_spectrogram(
melspec_sr, feature, n_fft, hop_length, detrend, center, aggregate
):
S, sr = melspec_sr
oenv = librosa.onset.onset_strength(
y=None,
sr=sr,
S=S,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length,
)
assert oenv.ndim == 1
target_shape = S.shape[-1]
if not detrend:
assert np.all(oenv >= 0)
assert oenv.shape[-1] == target_shape
@pytest.mark.parametrize("lag", [1, 2, 3])
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
def test_onset_strength_multi_noagg(melspec_sr, lag, aggregate):
S, sr = melspec_sr
# We only test with max_size=1 here to make the sub-band slicing test simple
odf_multi = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, aggregate=False
)
odf_mean = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, aggregate=aggregate
)
# With no aggregation, output shape should = input shape
assert odf_multi.shape == S.shape
# Result should average out to the same as mean aggregation
assert np.allclose(odf_mean, aggregate(odf_multi, axis=0))
@pytest.fixture(scope="module")
def channels(melspec_sr):
S, _ = melspec_sr
return np.linspace(0, S.shape[0], num=5, dtype=int)
@pytest.mark.parametrize("lag", [1, 2, 3])
def test_onset_strength_multi(melspec_sr, lag, channels):
S, sr = melspec_sr
# We only test with max_size=1 here to make the sub-band slicing test simple
odf_multi = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, channels=channels
)
assert len(odf_multi) == len(channels) - 1
for i, (s, t) in enumerate(zip(channels, channels[1:])):
odf_single = librosa.onset.onset_strength(S=S[s:t], lag=lag, max_size=1)
assert np.allclose(odf_single, odf_multi[i])
@pytest.fixture(scope="module", params=[64, 512, 2048])
def hop(request):
return request.param
@pytest.fixture(scope="module", params=[False, True], ids=["audio", "oenv"])
def oenv(ysr, hop, request):
if request.param:
y, sr = ysr
return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)
else:
return None
@pytest.mark.parametrize("bt", [False, True])
@pytest.mark.parametrize("normalize", [False, True])
def test_onset_detect_real(ysr, oenv, hop, bt, normalize):
y, sr = ysr
onsets = librosa.onset.onset_detect(
y=y,
sr=sr,
onset_envelope=oenv,
hop_length=hop,
backtrack=bt,
normalize=normalize,
)
if bt:
assert np.all(onsets >= 0)
else:
assert np.all(onsets > 0)
assert np.all(onsets < len(y) * sr // hop)
if oenv is not None:
assert np.all(onsets < len(oenv))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_detect_nosignal():
librosa.onset.onset_detect(y=None, onset_envelope=None)
@pytest.mark.parametrize("sr", [4000])
@pytest.mark.parametrize("y", [np.zeros(4000), np.ones(4000), -np.ones(4000)])
@pytest.mark.parametrize("hop_length", [64, 512, 2048])
def test_onset_detect_const(y, sr, hop_length):
# Disable padding here
onsets = librosa.onset.onset_detect(
y=y, sr=sr, onset_envelope=None, hop_length=hop_length,
)
# We'll allow one onset at the start of the signal for these examples:
# when y is all-ones, zero-padding induces an onset at the beginning of the
# signal
assert len(onsets) == 0 or (y[0] != 0 and len(onsets) == 1)
@pytest.mark.parametrize(
"units, ctx",
[
("frames", dnr()),
("time", dnr()),
("samples", dnr()),
("bad units", pytest.raises(librosa.ParameterError)),
],
)
@pytest.mark.parametrize("hop_length", [512, 1024])
def test_onset_units(ysr, hop_length, units, ctx):
y, sr = ysr
with ctx:
b1 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length)
b2 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length, units=units)
t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)
if units == "time":
t2 = b2
elif units == "samples":
t2 = librosa.samples_to_time(b2, sr=sr)
elif units == "frames":
t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)
assert np.allclose(t1, t2)
@pytest.fixture(scope="module", params=[False, True], ids=["oenv", "rms"])
def energy(ysr, hop, request):
y, sr = ysr
if request.param:
return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)
else:
return librosa.feature.rms(y=y, hop_length=hop)
def test_onset_backtrack(ysr, oenv, hop, energy):
y, sr = ysr
onsets = librosa.onset.onset_detect(
y=y, sr=sr, onset_envelope=oenv, hop_length=hop, backtrack=False
)
# Test backtracking
onsets_bt = librosa.onset.onset_backtrack(onsets, energy)
# Make sure there are no negatives
assert np.all(onsets_bt >= 0)
# And that we never roll forward
assert np.all(onsets_bt <= onsets)
# And that the detected peaks are actually minima
assert np.all(energy[onsets_bt] <= energy[np.maximum(0, onsets_bt - 1)])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_noagg():
S = np.zeros((3, 3))
librosa.onset.onset_strength(S=S, aggregate=False)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badref():
S = np.zeros((3, 3))
librosa.onset.onset_strength(S=S, ref=S[:, :2])
def test_onset_strength_multi_ref():
srand()
# Make a random positive spectrum
S = 1 + np.abs(np.random.randn(1025, 10))
# Test with a null reference
null_ref = np.zeros_like(S)
onsets = librosa.onset.onset_strength_multi(
S=S, ref=null_ref, aggregate=False, center=False
)
# since the reference is zero everywhere, S - ref = S
# past the setup phase (first frame)
assert np.allclose(onsets[:, 1:], S[:, 1:])
def test_onset_detect_inplace_normalize():
# This test will fail if the in-place normalization modifies
# the input onset envelope
oenv_in = np.ones(50)
oenv_in[10] = 2
oenv_orig = oenv_in.copy()
librosa.onset.onset_detect(onset_envelope=oenv_in, normalize=True)
assert np.allclose(oenv_in, oenv_orig) and oenv_in is not oenv_orig
|
librosa/librosa
|
tests/test_onset.py
|
Python
|
isc
| 9,085
|
[
"Brian"
] |
26715e2acdac22144dd94f47db7f17cf58f8b28e5c8f9224932fd9ca4b6b9607
|
#!/usr/bin/env python
#============================================================================
# P Y I B E X
# File : test_thickPaving.py
# Author : Benoit Desrochers
# Copyright : Benoit Desrochers
# License : See the LICENSE file
# Created : Dec 28, 2015
#============================================================================
import unittest
import pyibex
from pyibex import Interval, IntervalVector, LargestFirst, Function, SepFwdBwd
from pyibex.sepvisitor import SepToVibes
from pyibex.paving import SepPaving
import math
from vibes import vibes
class TestSepPaving(unittest.TestCase):
def test_constructor_01(self):
X0 = IntervalVector(2, [-5, 5])
A = SepPaving(X0)
sep = SepFwdBwd(Function("x", "y", "(x^2+y^2)"), Interval(-1e-10, 4))
# sep = SepFwdBwd(Function("x", "y", "(x^2+y^2)"), Interval(-1, 4))
A.Sivia(sep, 0.1)
vibes.beginDrawing()
A.visit(SepToVibes("Titi"))
vibes.endDrawing()
if __name__ == '__main__':
unittest.main()
|
benEnsta/pyIbex
|
pyibex/tests/test_SepPaving.py
|
Python
|
lgpl-3.0
| 1,117
|
[
"VisIt"
] |
fe1ad6bd6272ea90e26061446755ba6d77f8921f0398183a717206e06ca4ba7a
|
#!/usr/bin/env python
# coding: utf-8
# # 4 - Medium Level Example - Debugging your Scene with Custom Objects
# ### Fixed Tilt 2-up with Torque Tube + CLEAN Routine + CustomObject
#
# This journal has examples of various things, some of which have been covered before and some in more depth:
#
# <ul>
# <li> Running a fixed_tilt simulation beginning to end. </li>
# <li> Creating a 2-up module with torque-tube, and detailed geometry of spacings in xgap, ygap and zgap. </li>
# <li> Calculating the tracker angle for a specific time, in case you want to use that value to model a fixed_tilt setup. </li>
# <li> Loading and cleaning results, particularly important when using setups with torquetubes / ygaps. </li>
# <li> Adding a "Custom Object" or **marker** at the Origin of the Scene, to do a visual sanity-check of the geometry. </li>
# </ul>
#
# It will look something like this (without the marker in this visualization):
#
# 
#
# ### STEPS:
#
# <ol type='1'>
# <li> <a href='#step1'> Specify Working Folder and Import Program </a></li>
# <li> <a href='#step2'> Specify all variables </a></li>
# <li> <a href='#step3'> Create the Radiance Object and generate the Sky </a></li>
# <li> <a href='#step4'> Calculating tracker angle/geometry for a specific timestamp </a></li>
# <li> <a href='#step5'> Making the Module & the Scene, Visualize and run Analysis </a></li>
# <li> <a href='#step6'> Calculate Bifacial Ratio (clean results) </a></li>
# <li> <a href='#step7'> Add Custom Elements to your Scene Example: Marker at 0,0 position </a></li>
# </ol>
# <a id='step1'></a>
# ### 1. Specify Working Folder and Import Program
#
# In[1]:
import os
from pathlib import Path
testfolder = Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_04'
# Another option using a relative address; for some operating systems you might need '/' instead of '\'
# testfolder = os.path.abspath(r'..\..\bifacial_radiance\TEMP')
print ("Your simulation will be stored in %s" % testfolder)
if not os.path.exists(testfolder):
os.makedirs(testfolder)
import bifacial_radiance
import numpy as np
import pandas as pd
# <a id='step2'></a>
# ### 2. Specify all variables for the module and scene
#
# Below find a list of all of the possible parameters for makeModule.
# scene and simulation parameters are also organized below.
# This simulation will be a complete simulation in terms of parameters that you can modify.
#
# The below routine creates a HEXAGONAL torque tube, for a 2-UP configuration of a specific module size. Parameters for the module, the torque tube, and the scene are below.
# This is being run with gendaylit, for one specific timestamp
# In[2]:
simulationname = 'tutorial_4'
## SceneDict Parameters
gcr = 0.33 # ground cover ratio, = collector width (sceney) / pitch
albedo = 0.28 #'concrete' # ground albedo
hub_height = 2.35 # we could also pass clearance_height.
azimuth_ang = 90 # Modules will be facing East.
lat = 37.5
lon = -77.6
nMods = 4 # doing a smaller array for better visualization on this example.
nRows = 2
# MakeModule Parameters
module_type='test-module'
x = 1.996 # landscape, since x > y. Remember that orientation has been deprecated.
y = 0.991
tilt = 10
numpanels = 2 # doing a 2-up system!
# Gaps:
xgap = 0.05 # distance between modules in the row.
ygap = 0.15 # distance between the 2 modules along the collector slope.
zgap = 0.175 # if there is a torquetube, this is the distance between the torquetube and the modules.
# If there is no torque tube, zgap is the distance between the module and the axis of rotation (relevant for
# tracking systems).
# TorqueTube Parameters
tubetype = 'Hex'
diameter = 0.15
material = 'Metal_Grey' # IT's NOT GRAY, IT's GREY.
# <a id='step3'></a>
# ### 3. Create the Radiance Object and generate the Sky
# In[3]:
demo = bifacial_radiance.RadianceObj(simulationname, path=str(testfolder)) # Create a RadianceObj 'object'
demo.setGround(albedo) # input albedo number or material name like 'concrete'. To see options, run this without any input.
epwfile = demo.getEPW(lat,lon) # pull TMY data for any global lat/lon
metdata = demo.readWeatherFile(epwfile, coerce_year=2001) # read in the EPW weather data from above
timestamp = metdata.datetime.index(pd.to_datetime('2001-06-17 13:0:0 -5')) # Make this timezone aware, use -5 for EST.
demo.gendaylit(timestamp) # Mid-day, June 17th
# <a id='step4'></a>
# ## 4. Calculating tracker angle/geometry for a specific timestamp
#
# This trick is useful if you are trying to use the fixed-tilt steps in bifacial_radiance to model a tracker for one specific point in time (if you take a picture of a tracker, it looks fixed, right? Well then).
#
# We assigned a 10 degree tilt at the beginning, but if we were to model a tracker as a fixed-tilt element because we are interested in only one point in time, this routine will tell us what tilt to use. *Please note that to model a tracker as fixed tilt, we suggest passing a hub_height, otherwise you will have to calculate the clearance_height manually.*
#
# <div class="alert alert-warning">
# Details: you might have noticed this in the previous tutorial when looking at the tracker dictionary, but this is how bifacial_radiance handles tracking: if the tracker has a N-S axis azimuth, the surface azimuth of the modules is always set to 90, with a tilt that is either positive (early morning, facing East) or negative (afternoon, facing West).
# </div>
#
# In[4]:
# Some tracking parameters that won't be needed after getting this angle:
axis_azimuth = 180
axis_tilt = 0
limit_angle = 60
backtrack = True
tilt = demo.getSingleTimestampTrackerAngle(metdata, timestamp, gcr, axis_azimuth, axis_tilt,limit_angle, backtrack)
print ("\n NEW Calculated Tilt: %s " % tilt)
# <a id='step5'></a>
# ### 5. Making the Module & the Scene, Visualize and run Analysis
# In[5]:
# Making module with all the variables
module = demo.makeModule(name=module_type,x=x,y=y,bifi=1,
zgap=zgap, ygap=ygap, xgap=xgap, numpanels=numpanels)
module.addTorquetube(diameter=diameter, material=material, tubetype=tubetype,
visible=True, axisofrotation=True)
# create a scene with all the variables.
# Specifying the pitch automatically with the collector width (sceney) returned by the module object.
# Height has been deprecated as an input. Pass clearance_height or hub_height in the sceneDict.
sceneDict = {'tilt':tilt,'pitch': np.round(module.sceney / gcr,3),
'hub_height':hub_height,'azimuth':azimuth_ang,
'module_type':module_type, 'nMods': nMods, 'nRows': nRows}
scene = demo.makeScene(module=module, sceneDict=sceneDict) #makeScene creates a .rad file of the Scene
octfile = demo.makeOct(demo.getfilelist()) # makeOct combines all of the ground, sky and object files into a .oct file.
# At this point you should be able to go into a command window (cmd.exe) and check the geometry. It should look like the image at the beginning of the journal. Example:
#
# #### rvu -vf views\front.vp -e .01 -pe 0.02 -vp -2 -12 14.5 tutorial_4.oct
#
#
# In[6]:
## Comment the line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .01 tutorial_4.oct
# And then proceed happily with your analysis:
# In[7]:
analysis = bifacial_radiance.AnalysisObj(octfile, demo.name) # return an analysis object including the scan dimensions for back irradiance
sensorsy = 200 # setting this very high to see a detailed profile of the irradiance, including
# the shadow of the torque tube on the rear side of the module.
frontscan, backscan = analysis.moduleAnalysis(scene, modWanted=2, rowWanted=1, sensorsy=sensorsy)
frontDict, backDict = analysis.analysis(octfile, demo.name, frontscan, backscan) # compare the back vs front irradiance
# print('"Annual" bifacial ratio average: %0.3f' %( sum(analysis.Wm2Back) / sum(analysis.Wm2Front) ) )
# See the comment below for why this line is commented out.
# <a id='step6'></a>
#
# ### 6. Calculate Bifacial Ratio (clean results)
#
# Although we could calculate a bifacial ratio average at this point, the value would be misleading: some of the generated sensors fall on the torque tube, the sky, and/or the ground, because the scene includes a torque tube and a ygap. To calculate the real bifacial ratio average, we must use the cleaning routines.
#
# In[8]:
resultFile='results/irr_tutorial_4.csv'
results_loaded = bifacial_radiance.load.read1Result(resultFile)
print("Printing the dataframe containing the results just calculated in %s: " % resultFile)
results_loaded
# In[9]:
print("Looking at only 1 sensor in the middle -- position 100 out of the 200 sensors sampled:")
results_loaded.loc[100]
# As an example, we can see above that sensor 100 falls on the hex tube and on the sky. We need to remove these readings to calculate the real bifacial gain from the irradiance falling on the modules. To do this we use cleanResult from the load.py module in bifacial_radiance. This finds the invalid materials and sets the irradiance values for those materials to NaN.
#
# This might take some time in the current version.
# In[10]:
# Cleaning Results:
# remove invalid materials and sets the irradiance values to NaN
clean_results = bifacial_radiance.load.cleanResult(results_loaded)
# In[11]:
print("Sampling the same location as before to see what the results are now:")
clean_results.loc[100]
# In[12]:
print('CORRECT Annual bifacial ratio average: %0.3f' %( clean_results['Wm2Back'].sum() / clean_results['Wm2Front'].sum() ))
print ("\n(If we had not done the cleaning routine, the bifacial ratio would have been ", "calculated to %0.3f <-- THIS VALUE IS WRONG)" %( sum(analysis.Wm2Back) / sum(analysis.Wm2Front) ))
# <a id='step7'></a>
# ### 7. Add Custom Elements to your Scene Example: Marker at 0,0 position
# This shows how to add a custom element, in this case a Cube, that will be placed in the center of your already created scene to mark the 0,0 location.
#
# This can be added at any point after makeScene has been run once. Notice that if this extra element is in the scene and any analysis sensors fall on it, they will measure irradiance at this element and not at the modules.
# We are going to create a "MyMarker.rad" file in the objects folder, right after we make the Module.
# This is a prism (so we use 'genbox'), that is black from the ground.rad list of materials ('black')
# We are naming it 'CenterMarker'
# Its sides are going to be 0.5x0.5x0.5 m
# and We are going to leave its bottom surface coincident with the plane z=0, but going to center on X and Y.
# In[13]:
name='MyMarker'
text='! genbox black CenterMarker 0.1 0.1 4 | xform -t -0.05 -0.05 0'
customObject = demo.makeCustomObject(name,text)
# This should have created a MyMarker.rad object on your objects folder.
#
# But creating the object does not automatically add it to the scene. So let's now add the customObject to the Scene. We are not going to translate it or anything because we want it at the center, but you can pass translation, rotation, and any other XFORM command from Radiance.
#
# I am passing a rotation of 0 because xform has to have some transformation argument (I think); otherwise it gets confused.
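#
# (Illustrative only: any Radiance xform text works in that argument. For
# example, a hypothetical 2 m shift along X would be
# demo.appendtoScene(scene.radfiles, customObject, '!xform -t 2 0 0') )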
# In[14]:
demo.appendtoScene(scene.radfiles, customObject, '!xform -rz 0')
# makeOct combines all of the ground, sky and object files into a .oct file.
octfile = demo.makeOct(demo.getfilelist())
# appendtoScene appended the name of the custom object we created, and the xform transformation we included as text, to the scene's .rad file. Then makeOct merged this new scene with the ground and sky files.
#
# At this point you should be able to go into a command window (cmd.exe) and check the geometry, and the marker should be there. Example:
#
# #### rvu -vf views\front.vp -e .01 tutorial_4.oct
#
# In[15]:
## Comment the line below to run rvu from the Jupyter notebook instead of your terminal.
## Simulation will stop until you close the rvu window
#!rvu -vf views\front.vp -e .01 tutorial_4.oct
# If you ran the getTrackerAngle detour and appended the marker, it should look like this:
#
#
# 
#
# If you do an analysis and any of the sensors hits the Box object we just created, the list of materials in the result.csv file should say something with "CenterMarker" on it.
#
# #### See more examples of the use of makeCustomObject and appendtoScene on the Bifacial Carport/Canopies Tutorial
|
NREL/bifacial_radiance
|
docs/tutorials/4 - Medium Level Example - Debugging your Scene with Custom Objects (Fixed Tilt 2-up with Torque Tube + CLEAN Routine + CustomObject).py
|
Python
|
bsd-3-clause
| 12,836
|
[
"EPW"
] |
2db723fb9b6283b5cff374619cb43c8aff47dac68a230c501df4ecd34f252a16
|
# -*- coding: utf-8 -*-
import itertools
import os
import re
import urllib
import logging
import datetime
import urlparse
from collections import OrderedDict
import warnings
import pytz
from flask import request
from django.core.urlresolvers import reverse
from modularodm import Q
from modularodm import fields
from modularodm.validators import MaxLengthValidator
from modularodm.exceptions import ValidationTypeError
from modularodm.exceptions import ValidationValueError
from api.base.utils import absolute_reverse
from framework import status
from framework.mongo import ObjectId
from framework.mongo import StoredObject
from framework.addons import AddonModelMixin
from framework.auth import get_user, User, Auth
from framework.auth import signals as auth_signals
from framework.exceptions import PermissionsError
from framework.guid.model import GuidStoredObject
from framework.auth.utils import privacy_info_handle
from framework.analytics import tasks as piwik_tasks
from framework.mongo.utils import to_mongo, to_mongo_key, unique_on
from framework.analytics import (
get_basic_counters, increment_user_activity_counters
)
from framework.sentry import log_exception
from framework.transactions.context import TokuTransaction
from framework.utils import iso8601format
from website import language, mails, settings, tokens
from website.util import web_url_for
from website.util import api_url_for
from website.util import sanitize
from website.exceptions import (
NodeStateError,
InvalidSanctionApprovalToken, InvalidSanctionRejectionToken,
)
from website.citations.utils import datetime_to_csl
from website.identifiers.model import IdentifierMixin
from website.util.permissions import expand_permissions
from website.util.permissions import CREATOR_PERMISSIONS, DEFAULT_CONTRIBUTOR_PERMISSIONS, ADMIN
from website.project.metadata.schemas import OSF_META_SCHEMAS
from website.project import signals as project_signals
logger = logging.getLogger(__name__)
VIEW_PROJECT_URL_TEMPLATE = settings.DOMAIN + '{node_id}/'
def has_anonymous_link(node, auth):
"""check if the node is anonymous to the user
:param Node node: Node which the user wants to visit
:param str link: any view-only link in the current url
:return bool anonymous: Whether the node is anonymous to the user or not
"""
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
if not view_only_link:
return False
if node.is_public:
return False
return any(
link.anonymous
for link in node.private_links_active
if link.key == view_only_link
)
class MetaSchema(StoredObject):
_id = fields.StringField(default=lambda: str(ObjectId()))
name = fields.StringField()
schema = fields.DictionaryField()
category = fields.StringField()
# Version of the Knockout metadata renderer to use (e.g. if data binds
# change)
metadata_version = fields.IntegerField()
# Version of the schema to use (e.g. if questions, responses change)
schema_version = fields.IntegerField()
def ensure_schemas(clear=True):
"""Import meta-data schemas from JSON to database, optionally clearing
database first.
:param clear: Clear schema database before import
"""
if clear:
try:
MetaSchema.remove()
except AttributeError:
if not settings.DEBUG_MODE:
raise
for schema in OSF_META_SCHEMAS:
try:
MetaSchema.find_one(
Q('name', 'eq', schema['name']) &
Q('schema_version', 'eq', schema['schema_version'])
)
except:
schema['name'] = schema['name'].replace(' ', '_')
schema_obj = MetaSchema(**schema)
schema_obj.save()
class MetaData(GuidStoredObject):
_id = fields.StringField(primary=True)
target = fields.AbstractForeignField(backref='metadata')
data = fields.DictionaryField()
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
def validate_comment_reports(value, *args, **kwargs):
for key, val in value.iteritems():
if not User.load(key):
raise ValidationValueError('Keys must be user IDs')
if not isinstance(val, dict):
raise ValidationTypeError('Values must be dictionaries')
if 'category' not in val or 'text' not in val:
raise ValidationValueError(
'Values must include `category` and `text` keys'
)
class Comment(GuidStoredObject):
_id = fields.StringField(primary=True)
user = fields.ForeignField('user', required=True, backref='commented')
node = fields.ForeignField('node', required=True, backref='comment_owner')
target = fields.AbstractForeignField(required=True, backref='commented')
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
date_modified = fields.DateTimeField(auto_now=datetime.datetime.utcnow)
modified = fields.BooleanField()
is_deleted = fields.BooleanField(default=False)
content = fields.StringField()
# Dictionary field mapping user IDs to dictionaries of report details:
# {
# 'icpnw': {'category': 'hate', 'message': 'offensive'},
# 'cdi38': {'category': 'spam', 'message': 'godwins law'},
# }
reports = fields.DictionaryField(validate=validate_comment_reports)
@classmethod
def create(cls, auth, **kwargs):
comment = cls(**kwargs)
comment.save()
comment.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': comment.node.parent_id,
'node': comment.node._id,
'user': comment.user._id,
'comment': comment._id,
},
auth=auth,
save=False,
)
comment.node.save()
return comment
def edit(self, content, auth, save=False):
self.content = content
self.modified = True
self.node.add_log(
NodeLog.COMMENT_UPDATED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def delete(self, auth, save=False):
self.is_deleted = True
self.node.add_log(
NodeLog.COMMENT_REMOVED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def undelete(self, auth, save=False):
self.is_deleted = False
self.node.add_log(
NodeLog.COMMENT_ADDED,
{
'project': self.node.parent_id,
'node': self.node._id,
'user': self.user._id,
'comment': self._id,
},
auth=auth,
save=False,
)
if save:
self.save()
def report_abuse(self, user, save=False, **kwargs):
"""Report that a comment is abuse.
:param User user: User submitting the report
:param bool save: Save changes
:param dict kwargs: Report details
:raises: ValueError if the user submitting abuse is the same as the
user who posted the comment
"""
if user == self.user:
raise ValueError
self.reports[user._id] = kwargs
if save:
self.save()
def unreport_abuse(self, user, save=False):
"""Revoke report of abuse.
:param User user: User who submitted the report
:param bool save: Save changes
:raises: ValueError if user has not reported comment as abuse
"""
try:
self.reports.pop(user._id)
except KeyError:
raise ValueError('User has not reported comment as abuse')
if save:
self.save()
@unique_on(['params.node', '_id'])
class NodeLog(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date = fields.DateTimeField(default=datetime.datetime.utcnow, index=True)
action = fields.StringField(index=True)
params = fields.DictionaryField()
should_hide = fields.BooleanField(default=False)
was_connected_to = fields.ForeignField('node', list=True)
user = fields.ForeignField('user', backref='created')
foreign_user = fields.StringField()
DATE_FORMAT = '%m/%d/%Y %H:%M UTC'
# Log action constants -- NOTE: templates stored in log_templates.mako
CREATED_FROM = 'created_from'
PROJECT_CREATED = 'project_created'
PROJECT_REGISTERED = 'project_registered'
PROJECT_DELETED = 'project_deleted'
NODE_CREATED = 'node_created'
NODE_FORKED = 'node_forked'
NODE_REMOVED = 'node_removed'
POINTER_CREATED = 'pointer_created'
POINTER_FORKED = 'pointer_forked'
POINTER_REMOVED = 'pointer_removed'
WIKI_UPDATED = 'wiki_updated'
WIKI_DELETED = 'wiki_deleted'
WIKI_RENAMED = 'wiki_renamed'
CONTRIB_ADDED = 'contributor_added'
CONTRIB_REMOVED = 'contributor_removed'
CONTRIB_REORDERED = 'contributors_reordered'
PERMISSIONS_UPDATED = 'permissions_updated'
MADE_PRIVATE = 'made_private'
MADE_PUBLIC = 'made_public'
TAG_ADDED = 'tag_added'
TAG_REMOVED = 'tag_removed'
EDITED_TITLE = 'edit_title'
EDITED_DESCRIPTION = 'edit_description'
UPDATED_FIELDS = 'updated_fields'
FILE_MOVED = 'addon_file_moved'
FILE_COPIED = 'addon_file_copied'
FOLDER_CREATED = 'folder_created'
FILE_ADDED = 'file_added'
FILE_UPDATED = 'file_updated'
FILE_REMOVED = 'file_removed'
FILE_RESTORED = 'file_restored'
ADDON_ADDED = 'addon_added'
ADDON_REMOVED = 'addon_removed'
COMMENT_ADDED = 'comment_added'
COMMENT_REMOVED = 'comment_removed'
COMMENT_UPDATED = 'comment_updated'
MADE_CONTRIBUTOR_VISIBLE = 'made_contributor_visible'
MADE_CONTRIBUTOR_INVISIBLE = 'made_contributor_invisible'
EXTERNAL_IDS_ADDED = 'external_ids_added'
EMBARGO_APPROVED = 'embargo_approved'
EMBARGO_CANCELLED = 'embargo_cancelled'
EMBARGO_COMPLETED = 'embargo_completed'
EMBARGO_INITIATED = 'embargo_initiated'
RETRACTION_APPROVED = 'retraction_approved'
RETRACTION_CANCELLED = 'retraction_cancelled'
RETRACTION_INITIATED = 'retraction_initiated'
REGISTRATION_APPROVAL_CANCELLED = 'registration_cancelled'
REGISTRATION_APPROVAL_INITIATED = 'registration_initiated'
REGISTRATION_APPROVAL_APPROVED = 'registration_approved'
def __repr__(self):
return ('<NodeLog({self.action!r}, params={self.params!r}) '
'with id {self._id!r}>').format(self=self)
@property
def node(self):
"""Return the :class:`Node` associated with this log."""
return (
Node.load(self.params.get('node')) or
Node.load(self.params.get('project'))
)
@property
def tz_date(self):
'''Return the timezone-aware date.
'''
# Date should always be defined, but a few logs in production are
# missing dates; return None and log error if date missing
if self.date:
return self.date.replace(tzinfo=pytz.UTC)
logger.error('Date missing on NodeLog {}'.format(self._primary_key))
@property
def formatted_date(self):
'''Return the timezone-aware, ISO-formatted string representation of
this log's date.
'''
if self.tz_date:
return self.tz_date.isoformat()
def resolve_node(self, node):
"""A single `NodeLog` record may be attached to multiple `Node` records
(parents, forks, registrations, etc.), so the node that the log refers
to may not be the same as the node the user is viewing. Use
`resolve_node` to determine the relevant node to use for permission
checks.
:param Node node: Node being viewed
"""
if self.node == node or self.node in node.nodes:
return self.node
if node.is_fork_of(self.node) or node.is_registration_of(self.node):
return node
for child in node.nodes:
if child.is_fork_of(self.node) or child.is_registration_of(self.node):
return child
return False
def can_view(self, node, auth):
node_to_check = self.resolve_node(node)
if node_to_check:
return node_to_check.can_view(auth)
return False
def _render_log_contributor(self, contributor, anonymous=False):
user = User.load(contributor)
if not user:
return None
if self.node:
fullname = user.display_full_name(node=self.node)
else:
fullname = user.fullname
return {
'id': privacy_info_handle(user._primary_key, anonymous),
'fullname': privacy_info_handle(fullname, anonymous, name=True),
'registered': user.is_registered,
}
class Tag(StoredObject):
_id = fields.StringField(primary=True, validate=MaxLengthValidator(128))
def __repr__(self):
return '<Tag() with id {self._id!r}>'.format(self=self)
@property
def url(self):
return '/search/?tags={}'.format(self._id)
class Pointer(StoredObject):
"""A link to a Node. The Pointer delegates all but a few methods to its
contained Node. Forking and registration are overridden such that the
link is cloned, but its contained Node is not.
"""
#: Whether this is a pointer or not
primary = False
_id = fields.StringField()
node = fields.ForeignField('node', backref='_pointed')
_meta = {'optimistic': True}
def _clone(self):
if self.node:
clone = self.clone()
clone.node = self.node
clone.save()
return clone
def fork_node(self, *args, **kwargs):
return self._clone()
def register_node(self, *args, **kwargs):
return self._clone()
def use_as_template(self, *args, **kwargs):
return self._clone()
def resolve(self):
return self.node
def __getattr__(self, item):
"""Delegate attribute access to the node being pointed to."""
# Prevent backref lookups from being overriden by proxied node
try:
return super(Pointer, self).__getattr__(item)
except AttributeError:
pass
if self.node:
return getattr(self.node, item)
raise AttributeError(
'Pointer object has no attribute {0}'.format(
item
)
)
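# Illustrative note (added, not in the original source): because of this
# delegation, a hypothetical Pointer `p` wrapping node `n` answers `p.title`
# with `n.title`, while attributes defined on Pointer itself (e.g.
# `primary = False`) are found before __getattr__ is ever consulted.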
def get_pointer_parent(pointer):
"""Given a `Pointer` object, return its parent node.
"""
# The `parent_node` property of the `Pointer` schema refers to the parents
# of the pointed-at `Node`, not the parents of the `Pointer`; use the
# back-reference syntax to find the parents of the `Pointer`.
parent_refs = pointer.node__parent
assert len(parent_refs) == 1, 'Pointer must have exactly one parent.'
return parent_refs[0]
def validate_category(value):
"""Validator for Node#category. Makes sure that the value is one of the
categories defined in CATEGORY_MAP.
"""
if value not in Node.CATEGORY_MAP.keys():
raise ValidationValueError('Invalid value for category.')
return True
def validate_title(value):
"""Validator for Node#title. Makes sure that the value exists and is not
above 200 characters.
"""
if value is None or not value.strip():
raise ValidationValueError('Title cannot be blank.')
if len(value) > 200:
raise ValidationValueError('Title cannot exceed 200 characters.')
return True
def validate_user(value):
if value != {}:
user_id = value.iterkeys().next()
if User.find(Q('_id', 'eq', user_id)).count() != 1:
raise ValidationValueError('User does not exist.')
return True
class NodeUpdateError(Exception):
def __init__(self, reason, key, *args, **kwargs):
super(NodeUpdateError, self).__init__(*args, **kwargs)
self.key = key
self.reason = reason
class Node(GuidStoredObject, AddonModelMixin, IdentifierMixin):
#: Whether this is a pointer or not
primary = True
# Node fields that trigger an update to Solr on save
SOLR_UPDATE_FIELDS = {
'title',
'category',
'description',
'visible_contributor_ids',
'tags',
'is_fork',
'is_registration',
'retraction',
'embargo',
'is_public',
'is_deleted',
'wiki_pages_current',
'is_retracted',
}
# Maps category identifier => Human-readable representation for use in
# titles, menus, etc.
# Use an OrderedDict so that menu items show in the correct order
CATEGORY_MAP = OrderedDict([
('', 'Uncategorized'),
('project', 'Project'),
('hypothesis', 'Hypothesis'),
('methods and measures', 'Methods and Measures'),
('procedure', 'Procedure'),
('instrumentation', 'Instrumentation'),
('data', 'Data'),
('analysis', 'Analysis'),
('communication', 'Communication'),
('other', 'Other'),
])
WRITABLE_WHITELIST = [
'title',
'description',
'category',
]
_id = fields.StringField(primary=True)
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow, index=True)
# Privacy
is_public = fields.BooleanField(default=False, index=True)
# User mappings
permissions = fields.DictionaryField()
visible_contributor_ids = fields.StringField(list=True)
# Project Organization
is_dashboard = fields.BooleanField(default=False, index=True)
is_folder = fields.BooleanField(default=False, index=True)
# Expanded: Dictionary field mapping user IDs to expand state of this node:
# {
# 'icpnw': True,
# 'cdi38': False,
# }
expanded = fields.DictionaryField(default={}, validate=validate_user)
is_deleted = fields.BooleanField(default=False, index=True)
deleted_date = fields.DateTimeField(index=True)
is_registration = fields.BooleanField(default=False, index=True)
registered_date = fields.DateTimeField(index=True)
registered_user = fields.ForeignField('user', backref='registered')
registered_schema = fields.ForeignField('metaschema', backref='registered')
registered_meta = fields.DictionaryField()
registration_approval = fields.ForeignField('registrationapproval')
retraction = fields.ForeignField('retraction')
embargo = fields.ForeignField('embargo')
is_fork = fields.BooleanField(default=False, index=True)
forked_date = fields.DateTimeField(index=True)
title = fields.StringField(validate=validate_title)
description = fields.StringField()
category = fields.StringField(validate=validate_category, index=True)
# One of 'public', 'private'
# TODO: Add validator
comment_level = fields.StringField(default='private')
wiki_pages_current = fields.DictionaryField()
wiki_pages_versions = fields.DictionaryField()
# Dictionary field mapping node wiki page to sharejs private uuid.
# {<page_name>: <sharejs_id>}
wiki_private_uuids = fields.DictionaryField()
file_guid_to_share_uuids = fields.DictionaryField()
creator = fields.ForeignField('user', backref='created')
contributors = fields.ForeignField('user', list=True, backref='contributed')
users_watching_node = fields.ForeignField('user', list=True, backref='watched')
logs = fields.ForeignField('nodelog', list=True, backref='logged')
tags = fields.ForeignField('tag', list=True, backref='tagged')
# Tags for internal use
system_tags = fields.StringField(list=True)
nodes = fields.AbstractForeignField(list=True, backref='parent')
forked_from = fields.ForeignField('node', backref='forked', index=True)
registered_from = fields.ForeignField('node', backref='registrations', index=True)
# The node (if any) used as a template for this node's creation
template_node = fields.ForeignField('node', backref='template_node', index=True)
piwik_site_id = fields.StringField()
# Dictionary field mapping user id to a list of nodes in node.nodes which the user has subscriptions for
# {<User.id>: [<Node._id>, <Node2._id>, ...] }
child_node_subscriptions = fields.DictionaryField(default=dict)
_meta = {
'optimistic': True,
}
def __init__(self, *args, **kwargs):
super(Node, self).__init__(*args, **kwargs)
if kwargs.get('_is_loaded', False):
return
if self.creator:
self.contributors.append(self.creator)
self.set_visible(self.creator, visible=True, log=False)
# Add default creator permissions
for permission in CREATOR_PERMISSIONS:
self.add_permission(self.creator, permission, save=False)
def __repr__(self):
return ('<Node(title={self.title!r}, category={self.category!r}) '
'with _id {self._id!r}>').format(self=self)
# For Django compatibility
@property
def pk(self):
return self._id
@property
def category_display(self):
"""The human-readable representation of this node's category."""
return self.CATEGORY_MAP[self.category]
@property
def sanction(self):
sanction = self.registration_approval or self.embargo
if sanction:
return sanction
elif self.parent_node:
return self.parent_node.sanction
else:
return None
@property
def is_pending_registration(self):
if not self.is_registration:
return False
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_pending_registration
return False
return self.registration_approval.pending_approval
@property
def is_registration_approved(self):
if self.registration_approval is None:
if self.parent_node:
return self.parent_node.is_registration_approved
return False
return self.registration_approval.is_approved
@property
def is_retracted(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_retracted
return False
return self.retraction.is_approved
@property
def is_pending_retraction(self):
if self.retraction is None:
if self.parent_node:
return self.parent_node.is_pending_retraction
return False
return self.retraction.pending_approval
@property
def embargo_end_date(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.embargo_end_date
return False
return self.embargo.embargo_end_date
@property
def is_pending_embargo(self):
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo
return False
return self.embargo.pending_approval
@property
def is_pending_embargo_for_existing_registration(self):
""" Returns True if Node has an Embargo pending approval for an
existing registrations. This is used specifically to ensure
registrations pre-dating the Embargo feature do not get deleted if
their respective Embargo request is rejected.
"""
if self.embargo is None:
if self.parent_node:
return self.parent_node.is_pending_embargo_for_existing_registration
return False
return self.embargo.pending_registration
@property
def private_links(self):
return self.privatelink__shared
@property
def private_links_active(self):
return [x for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_active(self):
return [x.key for x in self.private_links if not x.is_deleted]
@property
def private_link_keys_deleted(self):
return [x.key for x in self.private_links if x.is_deleted]
def path_above(self, auth):
parents = self.parents
return '/' + '/'.join([p.title if p.can_view(auth) else '-- private project --' for p in reversed(parents)])
@property
def ids_above(self):
parents = self.parents
return {p._id for p in parents}
def can_edit(self, auth=None, user=None):
"""Return if a user is authorized to edit this node.
Must specify one of (`auth`, `user`).
:param Auth auth: Auth object to check
:param User user: User object to check
:returns: Whether user has permission to edit this node.
"""
if not auth and not user:
raise ValueError('Must pass either `auth` or `user`')
if auth and user:
raise ValueError('Cannot pass both `auth` and `user`')
user = user or auth.user
if auth:
is_api_node = auth.api_node == self
else:
is_api_node = False
return (
(user and self.has_permission(user, 'write'))
or is_api_node
)
def active_contributors(self, include=lambda n: True):
for contrib in self.contributors:
if contrib.is_active and include(contrib):
yield contrib
def is_admin_parent(self, user):
if self.has_permission(user, 'admin', check_parent=False):
return True
if self.parent_node:
return self.parent_node.is_admin_parent(user)
return False
def can_view(self, auth):
if not auth and not self.is_public:
return False
return (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read')) or
auth.private_key in self.private_link_keys_active or
self.is_admin_parent(auth.user)
)
def is_expanded(self, user=None):
"""Return if a user is has expanded the folder in the dashboard view.
Must specify one of (`auth`, `user`).
:param User user: User object to check
:returns: Boolean if the folder is expanded.
"""
if user._id in self.expanded:
return self.expanded[user._id]
else:
return False
def expand(self, user=None):
self.expanded[user._id] = True
self.save()
def collapse(self, user=None):
self.expanded[user._id] = False
self.save()
def is_derived_from(self, other, attr):
derived_from = getattr(self, attr)
while True:
if derived_from is None:
return False
if derived_from == other:
return True
derived_from = getattr(derived_from, attr)
def is_fork_of(self, other):
return self.is_derived_from(other, 'forked_from')
def is_registration_of(self, other):
return self.is_derived_from(other, 'registered_from')
@property
def forks(self):
"""List of forks of this node"""
return list(self.node__forked.find(Q('is_deleted', 'eq', False) &
Q('is_registration', 'ne', True)))
def add_permission(self, user, permission, save=False):
"""Grant permission to a user.
:param str permission: Permission to grant
:param bool save: Save changes
:raises: ValueError if user already has permission
"""
if user._id not in self.permissions:
self.permissions[user._id] = [permission]
else:
if permission in self.permissions[user._id]:
raise ValueError('User already has permission {0}'.format(permission))
self.permissions[user._id].append(permission)
if save:
self.save()
def remove_permission(self, user, permission, save=False):
"""Revoke permission from a user.
:param User user: User to revoke permission from
:param str permission: Permission to revoke
:param bool save: Save changes
:raises: ValueError if user does not have permission
"""
try:
self.permissions[user._id].remove(permission)
except (KeyError, ValueError):
raise ValueError('User does not have permission {0}'.format(permission))
if save:
self.save()
def clear_permission(self, user, save=False):
"""Clear all permissions for a user.
:param User user: User to revoke permission from
:param bool save: Save changes
:raises: ValueError if user not in permissions
"""
try:
self.permissions.pop(user._id)
except KeyError:
raise ValueError(
'User {0} not in permissions list for node {1}'.format(
user._id, self._id,
)
)
if save:
self.save()
def set_permissions(self, user, permissions, save=False):
self.permissions[user._id] = permissions
if save:
self.save()
def has_permission(self, user, permission, check_parent=True):
"""Check whether user has permission.
:param User user: User to test
:param str permission: Required permission
:returns: User has required permission
"""
if user is None:
logger.warn('User is ``None``.')
return False
if permission in self.permissions.get(user._id, []):
return True
if permission == 'read' and check_parent:
return self.is_admin_parent(user)
return False
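# Illustrative note (added, not in the original source): permissions are kept
# per user id, e.g. {'abc12': ['read', 'write']}; only 'read' checks cascade
# to an admin permission on a parent via is_admin_parent.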
def can_read_children(self, user):
"""Checks if the given user has read permissions on any child nodes
that are not registrations or deleted
"""
if self.has_permission(user, 'read'):
return True
for node in self.nodes:
if not node.primary or node.is_deleted:
continue
if node.can_read_children(user):
return True
return False
def get_permissions(self, user):
"""Get list of permissions for user.
:param User user: User to check
:returns: List of permissions
:raises: ValueError if user not found in permissions
"""
return self.permissions.get(user._id, [])
def adjust_permissions(self):
for key in self.permissions.keys():
if key not in self.contributors:
self.permissions.pop(key)
@property
def visible_contributors(self):
return [
User.load(_id)
for _id in self.visible_contributor_ids
]
@property
def parents(self):
if self.parent_node:
return [self.parent_node] + self.parent_node.parents
return []
@property
def admin_contributor_ids(self, contributors=None):
contributor_ids = self.contributors._to_primary_keys()
admin_ids = set()
for parent in self.parents:
admins = [
user for user, perms in parent.permissions.iteritems()
if 'admin' in perms
]
admin_ids.update(set(admins).difference(contributor_ids))
return admin_ids
@property
def admin_contributors(self):
return sorted(
[User.load(_id) for _id in self.admin_contributor_ids],
key=lambda user: user.family_name,
)
def get_visible(self, user):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
return user._id in self.visible_contributor_ids
def update_visible_ids(self, save=False):
"""Update the order of `visible_contributor_ids`. Updating on making
a contributor visible is more efficient than recomputing order on
accessing `visible_contributors`.
"""
self.visible_contributor_ids = [
contributor._id
for contributor in self.contributors
if contributor._id in self.visible_contributor_ids
]
if save:
self.save()
def set_visible(self, user, visible, log=True, auth=None, save=False):
if not self.is_contributor(user):
raise ValueError(u'User {0} not in contributors'.format(user))
if visible and user._id not in self.visible_contributor_ids:
self.visible_contributor_ids.append(user._id)
self.update_visible_ids(save=False)
elif not visible and user._id in self.visible_contributor_ids:
if len(self.visible_contributor_ids) == 1:
raise ValueError('Must have at least one visible contributor')
self.visible_contributor_ids.remove(user._id)
else:
return
message = (
NodeLog.MADE_CONTRIBUTOR_VISIBLE
if visible
else NodeLog.MADE_CONTRIBUTOR_INVISIBLE
)
if log:
self.add_log(
message,
params={
'parent': self.parent_id,
'node': self._id,
'contributors': [user._id],
},
auth=auth,
save=False,
)
if save:
self.save()
def can_comment(self, auth):
if self.comment_level == 'public':
return auth.logged_in and (
self.is_public or
(auth.user and self.has_permission(auth.user, 'read'))
)
return self.is_contributor(auth.user)
def update(self, fields, auth=None, save=True):
if self.is_registration:
raise NodeUpdateError(reason="Registered content cannot be updated")
values = {}
for key, value in fields.iteritems():
if key not in self.WRITABLE_WHITELIST:
continue
with warnings.catch_warnings():
try:
# This is in place because historically projects and components
# lived on different ElasticSearch indexes, and at the time of Node.save
# there is no reliable way to check what the old Node.category
# value was. When the category changes it is possible to have duplicate/dead
# search entries, so always delete the ES doc on category change
# TODO: consolidate Node indexes into a single index, refactor search
if key == 'category':
self.delete_search_entry()
###############
values[key] = {
'old': getattr(self, key),
'new': value,
}
setattr(self, key, value)
except AttributeError:
raise NodeUpdateError(reason="Invalid value for attribute '{0}'".format(key), key=key)
except warnings.Warning:
raise NodeUpdateError(reason="Attribute '{0}' doesn't exist on the Node class".format(key), key=key)
if save:
updated = self.save()
else:
updated = []
for key in values:
values[key]['new'] = getattr(self, key)
self.add_log(NodeLog.UPDATED_FIELDS,
params={
'node': self._id,
'updated_fields': {
key: {
'old': values[key]['old'],
'new': values[key]['new']
}
for key in values
}
},
auth=auth)
return updated
def save(self, *args, **kwargs):
update_piwik = kwargs.pop('update_piwik', True)
self.adjust_permissions()
first_save = not self._is_loaded
if first_save and self.is_dashboard:
existing_dashboards = self.creator.node__contributed.find(
Q('is_dashboard', 'eq', True)
)
if existing_dashboards.count() > 0:
raise NodeStateError("Only one dashboard allowed per user.")
is_original = not self.is_registration and not self.is_fork
if 'suppress_log' in kwargs.keys():
suppress_log = kwargs['suppress_log']
del kwargs['suppress_log']
else:
suppress_log = False
saved_fields = super(Node, self).save(*args, **kwargs)
if first_save and is_original and not suppress_log:
# TODO: This logic also exists in self.use_as_template()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
self.add_addon(addon.short_name, auth=None, log=False)
# Define log fields for non-component project
log_action = NodeLog.PROJECT_CREATED
log_params = {
'node': self._primary_key,
}
if getattr(self, 'parent', None):
# Append log to parent
self.parent.nodes.append(self)
self.parent.save()
log_params.update({'parent_node': self.parent._primary_key})
# Add log with appropriate fields
self.add_log(
log_action,
params=log_params,
auth=Auth(user=self.creator),
log_date=self.date_created,
save=True,
)
# Only update Solr if at least one stored field has changed, and if
# public or privacy setting has changed
need_update = bool(self.SOLR_UPDATE_FIELDS.intersection(saved_fields))
if not self.is_public:
if first_save or 'is_public' not in saved_fields:
need_update = False
if self.is_folder or self.archiving:
need_update = False
if need_update:
self.update_search()
# This method checks what has changed.
if settings.PIWIK_HOST and update_piwik:
piwik_tasks.update_node(self._id, saved_fields)
# Return expected value for StoredObject::save
return saved_fields
######################################
# Methods that return a new instance #
######################################
def use_as_template(self, auth, changes=None, top_level=True):
"""Create a new project, using an existing project as a template.
:param auth: The user to be assigned as creator
:param changes: A dictionary of changes, keyed by node id, which
override the attributes of the template project or its
children.
:return: The `Node` instance created.
"""
changes = changes or dict()
# build the dict of attributes to change for the new node
try:
attributes = changes[self._id]
# TODO: explicitly define attributes which may be changed.
except (AttributeError, KeyError):
attributes = dict()
new = self.clone()
# clear permissions, which are not cleared by the clone method
new.permissions = {}
new.visible_contributor_ids = []
# Clear quasi-foreign fields
new.wiki_pages_current = {}
new.wiki_pages_versions = {}
new.wiki_private_uuids = {}
new.file_guid_to_share_uuids = {}
# set attributes which may be overridden by `changes`
new.is_public = False
new.description = None
# apply `changes`
for attr, val in attributes.iteritems():
setattr(new, attr, val)
# set attributes which may NOT be overridden by `changes`
new.creator = auth.user
new.template_node = self
new.add_contributor(contributor=auth.user, permissions=CREATOR_PERMISSIONS, log=False, save=False)
new.is_fork = False
new.is_registration = False
new.piwik_site_id = None
# If the title hasn't been changed, apply the default prefix (once)
if (new.title == self.title
and top_level
and language.TEMPLATED_FROM_PREFIX not in new.title):
new.title = ''.join((language.TEMPLATED_FROM_PREFIX, new.title, ))
# Slight hack - date_created is a read-only field.
new._fields['date_created'].__set__(
new,
datetime.datetime.utcnow(),
safe=True
)
new.save(suppress_log=True)
# Log the creation
new.add_log(
NodeLog.CREATED_FROM,
params={
'node': new._primary_key,
'template_node': {
'id': self._primary_key,
'url': self.url,
},
},
auth=auth,
log_date=new.date_created,
save=False,
)
# add mandatory addons
# TODO: This logic also exists in self.save()
for addon in settings.ADDONS_AVAILABLE:
if 'node' in addon.added_default:
new.add_addon(addon.short_name, auth=None, log=False)
# deal with the children of the node, if any
new.nodes = [
x.use_as_template(auth, changes, top_level=False)
for x in self.nodes
if x.can_view(auth)
]
new.save()
return new
############
# Pointers #
############
def add_pointer(self, node, auth, save=True):
"""Add a pointer to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
"""
# Fail if node already in nodes / pointers. Note: cast node and nodes
# to primary keys to test for conflicts with both nodes and pointers
# contained in `self.nodes`.
if node._id in self.node_ids:
raise ValueError(
'Pointer to node {0} already in list'.format(node._id)
)
# If a folder, prevent more than one pointer to that folder. This will prevent infinite loops on the Dashboard.
# Also, no pointers to the dashboard project, which could cause loops as well.
already_pointed = node.pointed
if node.is_folder and len(already_pointed) > 0:
raise ValueError(
'Pointer to folder {0} already exists. Only one pointer to any given folder allowed'.format(node._id)
)
if node.is_dashboard:
raise ValueError(
'Pointer to dashboard ({0}) not allowed.'.format(node._id)
)
# Append pointer
pointer = Pointer(node=node)
pointer.save()
self.nodes.append(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_CREATED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
return pointer
def rm_pointer(self, pointer, auth):
"""Remove a pointer.
:param Pointer pointer: Pointer to remove
:param Auth auth: Consolidated authorization
"""
if pointer not in self.nodes:
raise ValueError
# Remove `Pointer` object; will also remove self from `nodes` list of
# parent node
Pointer.remove_one(pointer)
# Add log
self.add_log(
action=NodeLog.POINTER_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
@property
def node_ids(self):
return [
node._id if node.primary else node.node._id
for node in self.nodes
]
@property
def nodes_primary(self):
return [
node
for node in self.nodes
if node.primary
]
def node_and_primary_descendants(self):
"""Return an iterator for a node and all of its primary (non-pointer) descendants.
:param node Node: target Node
"""
return itertools.chain([self], self.get_descendants_recursive(lambda n: n.primary))
@property
def depth(self):
return len(self.parents)
def next_descendants(self, auth, condition=lambda auth, node: True):
"""
Recursively find the first set of descendants under a given node that meet a given condition
returns a list of [(node, [children]), ...]
"""
ret = []
for node in self.nodes:
if condition(auth, node):
# base case
ret.append((node, []))
else:
ret.append((node, node.next_descendants(auth, condition)))
ret = [item for item in ret if item[1] or condition(auth, item[0])] # prune empty branches
return ret
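# Illustrative shape (added, with hypothetical nodes): for children [B, C]
# where only C meets `condition`, this returns [(C, [])], plus
# (B, <matching descendants>) only if something deeper under B matched;
# otherwise B is pruned.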
def get_descendants_recursive(self, include=lambda n: True):
for node in self.nodes:
if include(node):
yield node
if node.primary:
for descendant in node.get_descendants_recursive(include):
if include(descendant):
yield descendant
def get_aggregate_logs_queryset(self, auth):
ids = [self._id] + [n._id
for n in self.get_descendants_recursive()
if n.can_view(auth)]
query = Q('__backrefs.logged.node.logs', 'in', ids)
return NodeLog.find(query).sort('-_id')
@property
def nodes_pointer(self):
return [
node
for node in self.nodes
if not node.primary
]
@property
def has_pointers_recursive(self):
"""Recursively checks whether the current node or any of its nodes
contains a pointer.
"""
if self.nodes_pointer:
return True
for node in self.nodes_primary:
if node.has_pointers_recursive:
return True
return False
@property
def pointed(self):
return getattr(self, '_pointed', [])
def pointing_at(self, pointed_node_id):
"""This node is pointed at another node.
:param Node pointed_node_id: The node id of the node being pointed at.
:return: pointer_id
"""
for pointer in self.nodes_pointer:
node_id = pointer.node._id
if node_id == pointed_node_id:
return pointer._id
return None
def get_points(self, folders=False, deleted=False, resolve=True):
ret = []
for each in self.pointed:
pointer_node = get_pointer_parent(each)
if not folders and pointer_node.is_folder:
continue
if not deleted and pointer_node.is_deleted:
continue
if resolve:
ret.append(pointer_node)
else:
ret.append(each)
return ret
def resolve(self):
return self
def fork_pointer(self, pointer, auth, save=True):
"""Replace a pointer with a fork. If the pointer points to a project,
fork the project and replace the pointer with a new pointer pointing
to the fork. If the pointer points to a component, fork the component
and add it to the current node.
:param Pointer pointer:
:param Auth auth:
:param bool save:
:return: Forked node
"""
# Fail if pointer not contained in `nodes`
try:
index = self.nodes.index(pointer)
except ValueError:
raise ValueError('Pointer {0} not in list'.format(pointer._id))
# Get pointed node
node = pointer.node
# Fork into current node and replace pointer with forked component
forked = node.fork_node(auth)
if forked is None:
raise ValueError('Could not fork node')
self.nodes[index] = forked
# Add log
self.add_log(
NodeLog.POINTER_FORKED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'pointer': {
'id': pointer.node._id,
'url': pointer.node.url,
'title': pointer.node.title,
'category': pointer.node.category,
},
},
auth=auth,
save=False,
)
# Optionally save changes
if save:
self.save()
# Garbage-collect pointer. Note: Must save current node before
# removing pointer, else remove will fail when trying to remove
# backref from self to pointer.
Pointer.remove_one(pointer)
# Return forked content
return forked
def get_recent_logs(self, n=10):
"""Return a list of the n most recent logs, in reverse chronological
order.
:param int n: Number of logs to retrieve
"""
        return list(reversed(self.logs))[:n]
@property
def date_modified(self):
        """The most recent datetime when this node was modified, based on
        the logs.
        """
try:
return self.logs[-1].date
except IndexError:
return self.date_created
def set_title(self, title, auth, save=False):
"""Set the title of this Node and log it.
:param str title: The new title.
:param auth: All the auth information including user, API key.
"""
        # Called so validation does not have to wait until save.
validate_title(title)
original_title = self.title
self.title = title
self.add_log(
action=NodeLog.EDITED_TITLE,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'title_new': self.title,
'title_original': original_title,
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def set_description(self, description, auth, save=False):
"""Set the description and log the event.
:param str description: The new description
        :param auth: All the auth information including user, API key.
:param bool save: Save self after updating.
"""
original = self.description
self.description = description
self.add_log(
action=NodeLog.EDITED_DESCRIPTION,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'description_new': self.description,
'description_original': original
},
auth=auth,
save=False,
)
if save:
self.save()
return None
def update_search(self):
from website import search
try:
search.search.update_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_search_entry(self):
from website import search
try:
search.search.delete_node(self)
except search.exceptions.SearchUnavailableError as e:
logger.exception(e)
log_exception()
def delete_registration_tree(self, save=False):
self.is_deleted = True
if not getattr(self.embargo, 'for_existing_registration', False):
self.registered_from = None
if save:
self.save()
self.update_search()
for child in self.nodes_primary:
child.delete_registration_tree(save=save)
def remove_node(self, auth, date=None):
"""Marks a node as deleted.
TODO: Call a hook on addons
Adds a log to the parent node if applicable
:param auth: an instance of :class:`Auth`.
:param date: Date node was removed
:type date: `datetime.datetime` or `None`
"""
# TODO: rename "date" param - it's shadowing a global
if self.is_dashboard:
raise NodeStateError("Dashboards may not be deleted.")
if not self.can_edit(auth):
raise PermissionsError('{0!r} does not have permission to modify this {1}'.format(auth.user, self.category or 'node'))
        # If this is a folder, remove all the folders that this is pointing at.
if self.is_folder:
for pointed in self.nodes_pointer:
if pointed.node.is_folder:
pointed.node.remove_node(auth=auth)
if [x for x in self.nodes_primary if not x.is_deleted]:
raise NodeStateError("Any child components must be deleted prior to deleting this project.")
# After delete callback
for addon in self.get_addons():
message = addon.after_delete(self, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
log_date = date or datetime.datetime.utcnow()
# Add log to parent
if self.node__parent:
self.node__parent[0].add_log(
NodeLog.NODE_REMOVED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
else:
self.add_log(
NodeLog.PROJECT_DELETED,
params={
'project': self._primary_key,
},
auth=auth,
log_date=log_date,
save=True,
)
self.is_deleted = True
self.deleted_date = date
self.save()
auth_signals.node_deleted.send(self)
return True
def fork_node(self, auth, title='Fork of '):
"""Recursively fork a node.
:param Auth auth: Consolidated authorization
:param str title: Optional text to prepend to forked title
:return: Forked node
"""
user = auth.user
# Non-contributors can't fork private nodes
if not (self.is_public or self.has_permission(user, 'read')):
raise PermissionsError('{0!r} does not have permission to fork node {1!r}'.format(user, self._id))
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
if original.is_deleted:
raise NodeStateError('Cannot fork deleted node.')
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
forked = original.clone()
forked.logs = self.logs
forked.tags = self.tags
# Recursively fork child nodes
for node_contained in original.nodes:
if not node_contained.is_deleted:
forked_node = None
try: # Catch the potential PermissionsError above
forked_node = node_contained.fork_node(auth=auth, title='')
except PermissionsError:
pass # If this exception is thrown omit the node from the result set
if forked_node is not None:
forked.nodes.append(forked_node)
forked.title = title + forked.title
forked.is_fork = True
forked.is_registration = False
forked.forked_date = when
forked.forked_from = original
forked.creator = user
forked.piwik_site_id = None
# Forks default to private status
forked.is_public = False
# Clear permissions before adding users
forked.permissions = {}
forked.visible_contributor_ids = []
forked.add_contributor(
contributor=user,
permissions=CREATOR_PERMISSIONS,
log=False,
save=False
)
forked.add_log(
action=NodeLog.NODE_FORKED,
params={
'parent_node': original.parent_id,
'node': original._primary_key,
'registration': forked._primary_key,
},
auth=auth,
log_date=when,
save=False,
)
forked.save()
# After fork callback
for addon in original.get_addons():
_, message = addon.after_fork(original, forked, user)
if message:
status.push_status_message(message, kind='info', trust=True)
return forked
def register_node(self, schema, auth, template, data, parent=None):
"""Make a frozen copy of a node.
:param schema: Schema object
:param auth: All the auth information including user, API key.
:param template: Template name
:param data: Form data
:param parent Node: parent registration of registration to be created
"""
        # NOTE: Admins can register child nodes even if they don't have write access to them
if not self.can_edit(auth=auth) and not self.is_admin_parent(user=auth.user):
raise PermissionsError(
'User {} does not have permission '
'to register this node'.format(auth.user._id)
)
if self.is_folder:
raise NodeStateError("Folders may not be registered")
template = urllib.unquote_plus(template)
template = to_mongo(template)
when = datetime.datetime.utcnow()
original = self.load(self._primary_key)
# Note: Cloning a node copies its `wiki_pages_current` and
# `wiki_pages_versions` fields, but does not clone the underlying
# database objects to which these dictionaries refer. This means that
# the cloned node must pass itself to its wiki objects to build the
# correct URLs to that content.
if original.is_deleted:
raise NodeStateError('Cannot register deleted node.')
registered = original.clone()
registered.is_registration = True
registered.registered_date = when
registered.registered_user = auth.user
registered.registered_schema = schema
registered.registered_from = original
if not registered.registered_meta:
registered.registered_meta = {}
registered.registered_meta[template] = data
registered.contributors = self.contributors
registered.forked_from = self.forked_from
registered.creator = self.creator
registered.logs = self.logs
registered.tags = self.tags
registered.piwik_site_id = None
registered.save()
if parent:
registered.parent_node = parent
# After register callback
for addon in original.get_addons():
_, message = addon.after_register(original, registered, auth.user)
if message:
status.push_status_message(message, kind='info', trust=False)
for node_contained in original.nodes:
if not node_contained.is_deleted:
child_registration = node_contained.register_node(
schema, auth, template, data, parent=registered
)
if child_registration and not child_registration.primary:
registered.nodes.append(child_registration)
registered.save()
if settings.ENABLE_ARCHIVER:
project_signals.after_create_registration.send(self, dst=registered, user=auth.user)
return registered
def remove_tag(self, tag, auth, save=True):
if tag in self.tags:
self.tags.remove(tag)
self.add_log(
action=NodeLog.TAG_REMOVED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_tag(self, tag, auth, save=True):
if tag not in self.tags:
new_tag = Tag.load(tag)
if not new_tag:
new_tag = Tag(_id=tag)
new_tag.save()
self.tags.append(new_tag)
self.add_log(
action=NodeLog.TAG_ADDED,
params={
'parent_node': self.parent_id,
'node': self._primary_key,
'tag': tag,
},
auth=auth,
save=False,
)
if save:
self.save()
def add_log(self, action, params, auth, foreign_user=None, log_date=None, save=True):
user = auth.user if auth else None
params['node'] = params.get('node') or params.get('project')
log = NodeLog(
action=action,
user=user,
foreign_user=foreign_user,
params=params,
)
if log_date:
log.date = log_date
log.save()
self.logs.append(log)
if save:
self.save()
if user:
increment_user_activity_counters(user._primary_key, action, log.date)
return log
@property
def url(self):
return '/{}/'.format(self._primary_key)
def web_url_for(self, view_name, _absolute=False, _guid=False, *args, **kwargs):
return web_url_for(view_name, pid=self._primary_key, _absolute=_absolute, _guid=_guid, *args, **kwargs)
def api_url_for(self, view_name, _absolute=False, *args, **kwargs):
return api_url_for(view_name, pid=self._primary_key, _absolute=_absolute, *args, **kwargs)
@property
def absolute_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return urlparse.urljoin(settings.DOMAIN, self.url)
@property
def display_absolute_url(self):
url = self.absolute_url
if url is not None:
return re.sub(r'https?:', '', url).strip('/')
@property
def api_v2_url(self):
return reverse('nodes:node-detail', kwargs={'node_id': self._id})
@property
def absolute_api_v2_url(self):
return absolute_reverse('nodes:node-detail', kwargs={'node_id': self._id})
# used by django and DRF
def get_absolute_url(self):
return self.absolute_api_v2_url
@property
def api_url(self):
if not self.url:
logger.error('Node {0} has a parent that is not a project'.format(self._id))
return None
return '/api/v1{0}'.format(self.deep_url)
@property
def deep_url(self):
return '/project/{}/'.format(self._primary_key)
@property
def csl(self): # formats node information into CSL format for citation parsing
"""a dict in CSL-JSON schema
For details on this schema, see:
https://github.com/citation-style-language/schema#csl-json-schema
"""
csl = {
'id': self._id,
'title': sanitize.unescape_entities(self.title),
'author': [
contributor.csl_name # method in auth/model.py which parses the names of authors
for contributor in self.visible_contributors
],
'publisher': 'Open Science Framework',
'type': 'webpage',
'URL': self.display_absolute_url,
}
doi = self.get_identifier_value('doi')
if doi:
csl['DOI'] = doi
if self.logs:
csl['issued'] = datetime_to_csl(self.logs[-1].date)
return csl
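    # For reference, a node with one visible contributor and a DOI would yield
    # a CSL-JSON dict shaped roughly like this (all values hypothetical):
    #
    #     {
    #         'id': 'abc12',
    #         'title': 'My Project',
    #         'author': [{'family': 'Doe', 'given': 'Jane'}],
    #         'publisher': 'Open Science Framework',
    #         'type': 'webpage',
    #         'URL': 'osf.io/abc12',
    #         'DOI': '10.9999/example',
    #         'issued': {'date-parts': [[2015, 1, 1]]},
    #     }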
def author_list(self, and_delim='&'):
author_names = [
author.biblio_name
for author in self.visible_contributors
if author
]
if len(author_names) < 2:
return ' {0} '.format(and_delim).join(author_names)
if len(author_names) > 7:
author_names = author_names[:7]
author_names.append('et al.')
return ', '.join(author_names)
return u'{0}, {1} {2}'.format(
', '.join(author_names[:-1]),
and_delim,
author_names[-1]
)
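    # Behavior sketch (hypothetical names): a single author returns 'Doe, J.';
    # two to seven authors return an Oxford-comma list such as
    # 'Doe, J., & Roe, R.'; more than seven are truncated to the first seven
    # names followed by ', et al.'.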
@property
def templated_list(self):
return [
x
for x in self.node__template_node
if not x.is_deleted
]
@property
def parent_node(self):
"""The parent node, if it exists, otherwise ``None``. Note: this
property is named `parent_node` rather than `parent` to avoid a
conflict with the `parent` back-reference created by the `nodes`
field on this schema.
"""
try:
if not self.node__parent[0].is_deleted:
return self.node__parent[0]
except IndexError:
pass
return None
@parent_node.setter
def parent_node(self, parent):
parent.nodes.append(self)
parent.save()
@property
def root(self):
if self.parent_node:
return self.parent_node.root
else:
return self
@property
def archiving(self):
job = self.archive_job
return job and not job.done and not job.archive_tree_finished()
@property
def archive_job(self):
return self.archivejob__active[0] if self.archivejob__active else None
@property
def registrations(self):
return self.node__registrations.find(Q('archiving', 'eq', False))
@property
def watch_url(self):
return os.path.join(self.api_url, "watch/")
@property
def parent_id(self):
if self.node__parent:
return self.node__parent[0]._primary_key
return None
@property
def project_or_component(self):
return 'project' if self.category == 'project' else 'component'
def is_contributor(self, user):
return (
user is not None
and (
user._id in self.contributors
)
)
def add_addon(self, addon_name, auth, log=True, *args, **kwargs):
"""Add an add-on to the node. Do nothing if the addon is already
enabled.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool log: Add a log after adding the add-on
:return: A boolean, whether the addon was added
"""
ret = AddonModelMixin.add_addon(self, addon_name, auth=auth,
*args, **kwargs)
if ret and log:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save() # TODO: here, or outside the conditional? @mambocab
return ret
def delete_addon(self, addon_name, auth, _force=False):
"""Delete an add-on from the node.
:param str addon_name: Name of add-on
:param Auth auth: Consolidated authorization object
:param bool _force: For migration testing ONLY. Do not set to True
in the application, or else projects will be allowed to delete
mandatory add-ons!
:return bool: Add-on was deleted
"""
ret = super(Node, self).delete_addon(addon_name, auth, _force)
if ret:
config = settings.ADDONS_AVAILABLE_DICT[addon_name]
self.add_log(
action=NodeLog.ADDON_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'addon': config.full_name,
},
auth=auth,
save=False,
)
self.save()
# TODO: save here or outside the conditional? @mambocab
return ret
def callback(self, callback, recursive=False, *args, **kwargs):
"""Invoke callbacks of attached add-ons and collect messages.
:param str callback: Name of callback method to invoke
:param bool recursive: Apply callback recursively over nodes
:return list: List of callback messages
"""
messages = []
for addon in self.get_addons():
method = getattr(addon, callback)
message = method(self, *args, **kwargs)
if message:
messages.append(message)
if recursive:
for child in self.nodes:
if not child.is_deleted:
messages.extend(
child.callback(
callback, recursive, *args, **kwargs
)
)
return messages
def replace_contributor(self, old, new):
for i, contrib in enumerate(self.contributors):
if contrib._primary_key == old._primary_key:
self.contributors[i] = new
# Remove unclaimed record for the project
if self._primary_key in old.unclaimed_records:
del old.unclaimed_records[self._primary_key]
old.save()
for permission in self.get_permissions(old):
self.add_permission(new, permission)
self.permissions.pop(old._id)
if old._id in self.visible_contributor_ids:
self.visible_contributor_ids[self.visible_contributor_ids.index(old._id)] = new._id
return True
return False
def remove_contributor(self, contributor, auth, log=True):
"""Remove a contributor from this node.
:param contributor: User object, the contributor to be removed
:param auth: All the auth information including user, API key.
"""
# remove unclaimed record if necessary
if self._primary_key in contributor.unclaimed_records:
del contributor.unclaimed_records[self._primary_key]
self.contributors.remove(contributor._id)
self.clear_permission(contributor)
if contributor._id in self.visible_contributor_ids:
self.visible_contributor_ids.remove(contributor._id)
if not self.visible_contributor_ids:
return False
# Node must have at least one registered admin user
# TODO: Move to validator or helper
admins = [
user for user in self.contributors
if self.has_permission(user, 'admin')
and user.is_registered
]
if not admins:
return False
# Clear permissions for removed user
self.permissions.pop(contributor._id, None)
# After remove callback
for addon in self.get_addons():
message = addon.after_remove_contributor(self, contributor, auth)
if message:
status.push_status_message(message, kind='info', trust=True)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributor': contributor._id,
},
auth=auth,
save=False,
)
self.save()
        # Send signal to remove this user from project subscriptions
auth_signals.contributor_removed.send(contributor, node=self)
return True
def remove_contributors(self, contributors, auth=None, log=True, save=False):
results = []
removed = []
for contrib in contributors:
outcome = self.remove_contributor(
contributor=contrib, auth=auth, log=False,
)
results.append(outcome)
removed.append(contrib._id)
if log:
self.add_log(
action=NodeLog.CONTRIB_REMOVED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': removed,
},
auth=auth,
save=False,
)
if save:
self.save()
if False in results:
return False
return True
def manage_contributors(self, user_dicts, auth, save=False):
"""Reorder and remove contributors.
:param list user_dicts: Ordered list of contributors represented as
dictionaries of the form:
{'id': <id>, 'permission': <One of 'read', 'write', 'admin'>, 'visible': bool}
:param Auth auth: Consolidated authentication information
:param bool save: Save changes
:raises: ValueError if any users in `users` not in contributors or if
no admin contributors remaining
"""
with TokuTransaction():
users = []
user_ids = []
permissions_changed = {}
visibility_removed = []
to_retain = []
to_remove = []
for user_dict in user_dicts:
user = User.load(user_dict['id'])
if user is None:
raise ValueError('User not found')
if user not in self.contributors:
raise ValueError(
'User {0} not in contributors'.format(user.fullname)
)
permissions = expand_permissions(user_dict['permission'])
if set(permissions) != set(self.get_permissions(user)):
self.set_permissions(user, permissions, save=False)
permissions_changed[user._id] = permissions
# visible must be added before removed to ensure they are validated properly
if user_dict['visible']:
self.set_visible(user,
visible=True,
auth=auth)
else:
visibility_removed.append(user)
users.append(user)
user_ids.append(user_dict['id'])
for user in visibility_removed:
self.set_visible(user,
visible=False,
auth=auth)
for user in self.contributors:
if user._id in user_ids:
to_retain.append(user)
else:
to_remove.append(user)
# TODO: Move to validator or helper @jmcarp
admins = [
user for user in users
if self.has_permission(user, 'admin')
and user.is_registered
]
if users is None or not admins:
raise ValueError(
'Must have at least one registered admin contributor'
)
if to_retain != users:
self.add_log(
action=NodeLog.CONTRIB_REORDERED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': [
user._id
for user in users
],
},
auth=auth,
save=False,
)
if to_remove:
self.remove_contributors(to_remove, auth=auth, save=False)
self.contributors = users
if permissions_changed:
self.add_log(
action=NodeLog.PERMISSIONS_UPDATED,
params={
'project': self.parent_id,
'node': self._id,
'contributors': permissions_changed,
},
auth=auth,
save=False,
)
# Update list of visible IDs
self.update_visible_ids()
if save:
self.save()
with TokuTransaction():
if to_remove or permissions_changed and ['read'] in permissions_changed.values():
project_signals.write_permissions_revoked.send(self)
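    # Illustrative sketch (identifiers hypothetical): reordering two
    # contributors while demoting one to read-only might look like:
    #
    #     node.manage_contributors(
    #         [
    #             {'id': 'abc12', 'permission': 'admin', 'visible': True},
    #             {'id': 'def34', 'permission': 'read', 'visible': False},
    #         ],
    #         auth=auth,
    #         save=True,
    #     )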
def add_contributor(self, contributor, permissions=None, visible=True,
auth=None, log=True, save=False):
"""Add a contributor to the project.
:param User contributor: The contributor to be added
:param list permissions: Permissions to grant to the contributor
:param bool visible: Contributor is visible in project dashboard
:param Auth auth: All the auth information including user, API key
:param bool log: Add log to self
:param bool save: Save after adding contributor
:returns: Whether contributor was added
"""
MAX_RECENT_LENGTH = 15
# If user is merged into another account, use master account
contrib_to_add = contributor.merged_by if contributor.is_merged else contributor
if contrib_to_add not in self.contributors:
self.contributors.append(contrib_to_add)
if visible:
self.set_visible(contrib_to_add, visible=True, log=False)
# Add default contributor permissions
permissions = permissions or DEFAULT_CONTRIBUTOR_PERMISSIONS
for permission in permissions:
self.add_permission(contrib_to_add, permission, save=False)
# Add contributor to recently added list for user
if auth is not None:
user = auth.user
if contrib_to_add in user.recently_added:
user.recently_added.remove(contrib_to_add)
user.recently_added.insert(0, contrib_to_add)
while len(user.recently_added) > MAX_RECENT_LENGTH:
user.recently_added.pop()
if log:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [contrib_to_add._primary_key],
},
auth=auth,
save=False,
)
if save:
self.save()
project_signals.contributor_added.send(self, contributor=contributor)
return True
        # Permissions must be overridden if changed when a contributor is
        # added to a parent of a child node they already belong to.
elif contrib_to_add in self.contributors and permissions is not None:
self.set_permissions(contrib_to_add, permissions)
if save:
self.save()
return False
else:
return False
def add_contributors(self, contributors, auth=None, log=True, save=False):
"""Add multiple contributors
:param list contributors: A list of dictionaries of the form:
{
'user': <User object>,
'permissions': <Permissions list, e.g. ['read', 'write']>,
'visible': <Boolean indicating whether or not user is a bibliographic contributor>
}
:param auth: All the auth information including user, API key.
:param log: Add log to self
:param save: Save after adding contributor
"""
for contrib in contributors:
self.add_contributor(
contributor=contrib['user'], permissions=contrib['permissions'],
visible=contrib['visible'], auth=auth, log=False, save=False,
)
if log and contributors:
self.add_log(
action=NodeLog.CONTRIB_ADDED,
params={
'project': self.parent_id,
'node': self._primary_key,
'contributors': [
contrib['user']._id
for contrib in contributors
],
},
auth=auth,
save=False,
)
if save:
self.save()
def add_unregistered_contributor(self, fullname, email, auth,
permissions=None, save=False):
"""Add a non-registered contributor to the project.
:param str fullname: The full name of the person.
:param str email: The email address of the person.
:param Auth auth: Auth object for the user adding the contributor.
:returns: The added contributor
:raises: DuplicateEmailError if user with given email is already in the database.
"""
# Create a new user record
contributor = User.create_unregistered(fullname=fullname, email=email)
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
try:
contributor.save()
except ValidationValueError: # User with same email already exists
contributor = get_user(email=email)
# Unregistered users may have multiple unclaimed records, so
# only raise error if user is registered.
if contributor.is_registered or self.is_contributor(contributor):
raise
contributor.add_unclaimed_record(node=self, referrer=auth.user,
given_name=fullname, email=email)
contributor.save()
self.add_contributor(
contributor, permissions=permissions, auth=auth,
log=True, save=False,
)
self.save()
return contributor
def set_privacy(self, permissions, auth=None, log=True, save=True):
"""Set the permissions for this node.
:param permissions: A string, either 'public' or 'private'
:param auth: All the auth information including user, API key.
:param bool log: Whether to add a NodeLog for the privacy change.
"""
if permissions == 'public' and not self.is_public:
if self.is_registration:
if self.is_pending_embargo:
raise NodeStateError("A registration with an unapproved embargo cannot be made public")
if self.embargo_end_date and not self.is_pending_embargo:
self.embargo.state = Embargo.REJECTED
self.embargo.save()
self.is_public = True
elif permissions == 'private' and self.is_public:
if self.is_registration and not self.is_pending_embargo:
raise NodeStateError("Public registrations must be retracted, not made private.")
else:
self.is_public = False
else:
return False
# After set permissions callback
for addon in self.get_addons():
message = addon.after_set_privacy(self, permissions)
if message:
status.push_status_message(message, kind='info', trust=False)
if log:
action = NodeLog.MADE_PUBLIC if permissions == 'public' else NodeLog.MADE_PRIVATE
self.add_log(
action=action,
params={
'project': self.parent_id,
'node': self._primary_key,
},
auth=auth,
save=False,
)
if save:
self.save()
return True
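    # Usage sketch (assuming `node` and `auth` are in scope): making a private
    # project public, logging and saving in one call:
    #
    #     node.set_privacy('public', auth=auth)  # returns True on state change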
# TODO: Move to wiki add-on
def get_wiki_page(self, name=None, version=None, id=None):
from website.addons.wiki.model import NodeWikiPage
if name:
name = (name or '').strip()
key = to_mongo_key(name)
try:
if version and (isinstance(version, int) or version.isdigit()):
id = self.wiki_pages_versions[key][int(version) - 1]
elif version == 'previous':
id = self.wiki_pages_versions[key][-2]
elif version == 'current' or version is None:
id = self.wiki_pages_current[key]
else:
return None
except (KeyError, IndexError):
return None
return NodeWikiPage.load(id)
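    # Version resolution sketch (page name hypothetical):
    #
    #     node.get_wiki_page('home')                      # current version
    #     node.get_wiki_page('home', version='previous')  # next-to-last
    #     node.get_wiki_page('home', version=2)           # explicit version 2
    #
    # Unknown names or versions return None rather than raising.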
# TODO: Move to wiki add-on
def update_node_wiki(self, name, content, auth):
"""Update the node's wiki page with new content.
        :param name: A string, the page's name, e.g. ``"home"``.
:param content: A string, the posted content.
:param auth: All the auth information including user, API key.
"""
from website.addons.wiki.model import NodeWikiPage
name = (name or '').strip()
key = to_mongo_key(name)
if key not in self.wiki_pages_current:
if key in self.wiki_pages_versions:
version = len(self.wiki_pages_versions[key]) + 1
else:
version = 1
else:
current = NodeWikiPage.load(self.wiki_pages_current[key])
current.is_current = False
version = current.version + 1
current.save()
new_page = NodeWikiPage(
page_name=name,
version=version,
user=auth.user,
is_current=True,
node=self,
content=content
)
new_page.save()
# check if the wiki page already exists in versions (existed once and is now deleted)
if key not in self.wiki_pages_versions:
self.wiki_pages_versions[key] = []
self.wiki_pages_versions[key].append(new_page._primary_key)
self.wiki_pages_current[key] = new_page._primary_key
self.add_log(
action=NodeLog.WIKI_UPDATED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': new_page.page_name,
'page_id': new_page._primary_key,
'version': new_page.version,
},
auth=auth,
log_date=new_page.date,
save=False,
)
self.save()
# TODO: Move to wiki add-on
def rename_node_wiki(self, name, new_name, auth):
"""Rename the node's wiki page with new name.
:param name: A string, the page's name, e.g. ``"My Page"``.
:param new_name: A string, the new page's name, e.g. ``"My Renamed Page"``.
:param auth: All the auth information including user, API key.
"""
# TODO: Fix circular imports
from website.addons.wiki.exceptions import (
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
)
name = (name or '').strip()
key = to_mongo_key(name)
new_name = (new_name or '').strip()
new_key = to_mongo_key(new_name)
page = self.get_wiki_page(name)
if key == 'home':
raise PageCannotRenameError('Cannot rename wiki home page')
if not page:
raise PageNotFoundError('Wiki page not found')
if (new_key in self.wiki_pages_current and key != new_key) or new_key == 'home':
raise PageConflictError(
'Page already exists with name {0}'.format(
new_name,
)
)
# rename the page first in case we hit a validation exception.
old_name = page.page_name
page.rename(new_name)
# TODO: merge historical records like update (prevents log breaks)
# transfer the old page versions/current keys to the new name.
if key != new_key:
self.wiki_pages_versions[new_key] = self.wiki_pages_versions[key]
del self.wiki_pages_versions[key]
self.wiki_pages_current[new_key] = self.wiki_pages_current[key]
del self.wiki_pages_current[key]
if key in self.wiki_private_uuids:
self.wiki_private_uuids[new_key] = self.wiki_private_uuids[key]
del self.wiki_private_uuids[key]
self.add_log(
action=NodeLog.WIKI_RENAMED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
'old_page': old_name,
'version': page.version,
},
auth=auth,
save=False,
)
self.save()
def delete_node_wiki(self, name, auth):
name = (name or '').strip()
key = to_mongo_key(name)
page = self.get_wiki_page(key)
del self.wiki_pages_current[key]
self.add_log(
action=NodeLog.WIKI_DELETED,
params={
'project': self.parent_id,
'node': self._primary_key,
'page': page.page_name,
'page_id': page._primary_key,
},
auth=auth,
save=False,
)
self.save()
def get_stats(self, detailed=False):
if detailed:
raise NotImplementedError(
'Detailed stats exist, but are not yet implemented.'
)
else:
return get_basic_counters('node:%s' % self._primary_key)
# TODO: Deprecate this; it duplicates much of what serialize_project already
# does
def serialize(self, auth=None):
"""Dictionary representation of node that is nested within a NodeLog's
representation.
"""
# TODO: incomplete implementation
return {
'id': str(self._primary_key),
'category': self.category_display,
'node_type': self.project_or_component,
'url': self.url,
# TODO: Titles shouldn't contain escaped HTML in the first place
'title': sanitize.unescape_entities(self.title),
'path': self.path_above(auth),
'api_url': self.api_url,
'is_public': self.is_public,
'is_registration': self.is_registration,
}
def _initiate_retraction(self, user, justification=None):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param justification: Justification, if given, for retraction
"""
retraction = Retraction(
initiated_by=user,
justification=justification or None, # make empty strings None
state=Retraction.UNAPPROVED
)
retraction.save() # Save retraction so it has a primary key
self.retraction = retraction
self.save() # Set foreign field reference Node.retraction
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
retraction.add_authorizer(admin)
retraction.save() # Save retraction approval state
return retraction
def retract_registration(self, user, justification=None, save=True):
"""Retract public registration. Instantiate new Retraction object
and associate it with the respective registration.
"""
if not self.is_registration or (not self.is_public and not (self.embargo_end_date or self.is_pending_embargo)):
raise NodeStateError('Only public or embargoed registrations may be retracted.')
if self.root is not self:
raise NodeStateError('Retraction of non-parent registrations is not permitted.')
retraction = self._initiate_retraction(user, justification)
self.registered_from.add_log(
action=NodeLog.RETRACTION_INITIATED,
params={
'node': self._id,
'retraction_id': retraction._id,
},
auth=Auth(user),
)
self.retraction = retraction
if save:
self.save()
def _is_embargo_date_valid(self, end_date):
today = datetime.datetime.utcnow()
if (end_date - today) >= settings.EMBARGO_END_DATE_MIN:
if (end_date - today) <= settings.EMBARGO_END_DATE_MAX:
return True
return False
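    # Sanity-check sketch: with a hypothetical EMBARGO_END_DATE_MIN of two
    # days and EMBARGO_END_DATE_MAX of four years, a date one day out or five
    # years out is rejected, while one month out passes:
    #
    #     node._is_embargo_date_valid(
    #         datetime.datetime.utcnow() + datetime.timedelta(days=30))  # True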
def _initiate_embargo(self, user, end_date, for_existing_registration=False):
"""Initiates the retraction process for a registration
:param user: User who initiated the retraction
:param end_date: Date when the registration should be made public
"""
embargo = Embargo(
initiated_by=user,
end_date=datetime.datetime.combine(end_date, datetime.datetime.min.time()),
for_existing_registration=for_existing_registration
)
embargo.save() # Save embargo so it has a primary key
self.embargo = embargo
self.save() # Set foreign field reference Node.embargo
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
embargo.add_authorizer(admin)
embargo.save() # Save embargo's approval_state
return embargo
def embargo_registration(self, user, end_date, for_existing_registration=False):
"""Enter registration into an embargo period at end of which, it will
be made public
:param user: User initiating the embargo
:param end_date: Date when the registration should be made public
:raises: NodeStateError if Node is not a registration
:raises: PermissionsError if user is not an admin for the Node
:raises: ValidationValueError if end_date is not within time constraints
"""
if not self.is_registration:
raise NodeStateError('Only registrations may be embargoed')
if not self.has_permission(user, 'admin'):
raise PermissionsError('Only admins may embargo a registration')
if not self._is_embargo_date_valid(end_date):
raise ValidationValueError('Embargo end date must be more than one day in the future')
embargo = self._initiate_embargo(user, end_date, for_existing_registration=for_existing_registration)
self.registered_from.add_log(
action=NodeLog.EMBARGO_INITIATED,
params={
'node': self._id,
'embargo_id': embargo._id,
},
auth=Auth(user),
save=True,
)
if self.is_public:
self.set_privacy('private', Auth(user))
def _initiate_approval(self, user):
end_date = datetime.datetime.now() + settings.REGISTRATION_APPROVAL_TIME
approval = RegistrationApproval(
initiated_by=user,
end_date=end_date,
)
approval.save() # Save approval so it has a primary key
self.registration_approval = approval
self.save() # Set foreign field reference Node.registration_approval
admins = [contrib for contrib in self.contributors if self.has_permission(contrib, 'admin') and contrib.is_active]
for admin in admins:
approval.add_authorizer(admin)
approval.save() # Save approval's approval_state
return approval
def require_approval(self, user):
        if not self.is_registration:
            raise NodeStateError('Only registrations may require approval')
        if not self.has_permission(user, 'admin'):
            raise PermissionsError('Only admins may require approval of a registration')
approval = self._initiate_approval(user)
self.registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_INITIATED,
params={
'node': self._id,
'registration_approval_id': approval._id,
},
auth=Auth(user),
save=True,
)
# TODO make private?
@Node.subscribe('before_save')
def validate_permissions(schema, instance):
"""Ensure that user IDs in `contributors` and `permissions` match.
"""
node = instance
contributor_ids = set([user._id for user in node.contributors])
permission_ids = set(node.permissions.keys())
mismatched_contributors = contributor_ids.difference(permission_ids)
if mismatched_contributors:
raise ValidationValueError(
'Contributors {0} missing from `permissions` on node {1}'.format(
', '.join(mismatched_contributors),
node._id,
)
)
mismatched_permissions = permission_ids.difference(contributor_ids)
if mismatched_permissions:
raise ValidationValueError(
'Permission keys {0} missing from `contributors` on node {1}'.format(
                ', '.join(mismatched_permissions),
node._id,
)
)
@Node.subscribe('before_save')
def validate_visible_contributors(schema, instance):
"""Ensure that user IDs in `contributors` and `visible_contributor_ids`
match.
"""
node = instance
for user_id in node.visible_contributor_ids:
if user_id not in node.contributors:
raise ValidationValueError(
('User {0} is in `visible_contributor_ids` but not in '
'`contributors` on node {1}').format(
user_id,
node._id,
)
)
class WatchConfig(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
node = fields.ForeignField('Node', backref='watched')
digest = fields.BooleanField(default=False)
immediate = fields.BooleanField(default=False)
def __repr__(self):
return '<WatchConfig(node="{self.node}")>'.format(self=self)
class PrivateLink(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
date_created = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
key = fields.StringField(required=True)
name = fields.StringField()
is_deleted = fields.BooleanField(default=False)
anonymous = fields.BooleanField(default=False)
nodes = fields.ForeignField('node', list=True, backref='shared')
creator = fields.ForeignField('user', backref='created')
@property
def node_ids(self):
node_ids = [node._id for node in self.nodes]
return node_ids
def node_scale(self, node):
# node may be None if previous node's parent is deleted
if node is None or node.parent_id not in self.node_ids:
return -40
else:
offset = 20 if node.parent_node is not None else 0
return offset + self.node_scale(node.parent_node)
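    # Worked example: the recursion adds 20 for each ancestor that is itself
    # in this link, bottoming out at -40 once a node's parent falls outside
    # the link (or is None). A linked top-level node thus scores -40, its
    # linked child -20, a linked grandchild 0, and so on.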
def to_json(self):
return {
"id": self._id,
"date_created": iso8601format(self.date_created),
"key": self.key,
"name": self.name,
"creator": {'fullname': self.creator.fullname, 'url': self.creator.profile_url},
"nodes": [{'title': x.title, 'url': x.url, 'scale': str(self.node_scale(x)) + 'px', 'category': x.category}
for x in self.nodes if not x.is_deleted],
"anonymous": self.anonymous
}
class Sanction(StoredObject):
"""Sanction object is a generic way to track approval states"""
abstract = True
UNAPPROVED = 'unapproved'
APPROVED = 'approved'
REJECTED = 'rejected'
DISPLAY_NAME = 'Sanction'
SHORT_NAME = 'sanction'
    APPROVAL_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to approve this {DISPLAY_NAME}.'
    APPROVAL_INVALID_TOKEN_MESSAGE = 'Invalid approval token provided for this {DISPLAY_NAME}.'
    REJECTION_NOT_AUTHORIZED_MESSAGE = 'This user is not authorized to reject this {DISPLAY_NAME}.'
    REJECTION_INVALID_TOKEN_MESSAGE = 'Invalid rejection token provided for this {DISPLAY_NAME}.'
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
initiation_date = fields.DateTimeField(auto_now_add=datetime.datetime.utcnow)
end_date = fields.DateTimeField(default=None)
# Sanction subclasses must have an initiated_by field
# initiated_by = fields.ForeignField('user', backref='initiated')
    # Expanded: Dictionary field mapping admin IDs to their approval status and relevant tokens:
# {
# 'b3k97': {
# 'has_approved': False,
# 'approval_token': 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN',
# 'rejection_token': 'TwozClTFOic2PYxHDStby94bCQMwJy'}
# }
approval_state = fields.DictionaryField()
# One of 'unapproved', 'approved', or 'rejected'
state = fields.StringField(default='unapproved')
def __repr__(self):
return '<Sanction(end_date={self.end_date}) with _id {self._id}>'.format(self=self)
@property
def pending_approval(self):
return self.state == Sanction.UNAPPROVED
@property
def is_approved(self):
return self.state == Sanction.APPROVED
@property
def is_rejected(self):
return self.state == Sanction.REJECTED
def _validate_authorizer(self, user):
return True
def add_authorizer(self, user, approved=False, save=False):
valid = self._validate_authorizer(user)
if valid and user._id not in self.approval_state:
self.approval_state[user._id] = {
'has_approved': approved,
'approval_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'approve_{}'.format(self.SHORT_NAME)
}
),
'rejection_token': tokens.encode(
{
'user_id': user._id,
'sanction_id': self._id,
'action': 'reject_{}'.format(self.SHORT_NAME)
}
),
}
if save:
self.save()
return True
return False
def remove_authorizer(self, user):
if user._id not in self.approval_state:
return False
del self.approval_state[user._id]
self.save()
return True
def _on_approve(self, user, token):
if all(authorizer['has_approved'] for authorizer in self.approval_state.values()):
self.state = Sanction.APPROVED
self._on_complete(user)
def _on_reject(self, user, token):
"""Early termination of a Sanction"""
raise NotImplementedError('Sanction subclasses must implement an #_on_reject method')
def _on_complete(self, user):
"""When a Sanction has unanimous approval"""
raise NotImplementedError('Sanction subclasses must implement an #_on_complete method')
def approve(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
try:
if self.approval_state[user._id]['approval_token'] != token:
raise InvalidSanctionApprovalToken(self.APPROVAL_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
raise PermissionsError(self.APPROVAL_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.approval_state[user._id]['has_approved'] = True
self._on_approve(user, token)
def reject(self, user, token):
"""Cancels sanction if user is admin and token verifies."""
try:
if self.approval_state[user._id]['rejection_token'] != token:
raise InvalidSanctionRejectionToken(self.REJECTION_INVALID_TOKEN_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
except KeyError:
            raise PermissionsError(self.REJECTION_NOT_AUTHORIZED_MESSAGE.format(DISPLAY_NAME=self.DISPLAY_NAME))
self.state = Sanction.REJECTED
self._on_reject(user, token)
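    # Flow sketch (token values copied from the illustration above): after
    # add_authorizer(admin), the admin's approval_state entry holds an
    # 'approval_token' and a 'rejection_token'; the matching token must be
    # presented back to approve or reject:
    #
    #     sanction.approve(admin, 'Pew7wj1Puf7DENUPFPnXSwa1rf3xPN')
    #     sanction.reject(admin, 'TwozClTFOic2PYxHDStby94bCQMwJy')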
def forcibly_reject(self):
self.state = Sanction.REJECTED
def _notify_authorizer(self, user):
pass
def _notify_non_authorizer(self, user):
pass
def ask(self, group):
for contrib in group:
if contrib._id in self.approval_state:
self._notify_authorizer(contrib)
else:
self._notify_non_authorizer(contrib)
class EmailApprovableSanction(Sanction):
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = None
VIEW_URL_TEMPLATE = ''
APPROVE_URL_TEMPLATE = ''
REJECT_URL_TEMPLATE = ''
    # Store a persistent copy of urls for use when needed outside of a request context.
    # This field gets automagically updated whenever the model's approval_state is modified
    # and the model is saved.
# {
# 'abcde': {
# 'approve': [APPROVAL_URL],
# 'reject': [REJECT_URL],
# }
# }
stashed_urls = fields.DictionaryField(default=dict)
@staticmethod
def _format_or_empty(template, context):
if context:
return template.format(**context)
return ''
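    # Behavior sketch: given a template like '{node_id}/?token={token}' and a
    # context dict, the formatted string is returned; a None or empty context
    # yields ''. This lets the *_url helpers below degrade gracefully when a
    # user has no stashed token.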
def _view_url(self, user_id):
return self._format_or_empty(self.VIEW_URL_TEMPLATE, self._view_url_context(user_id))
def _view_url_context(self, user_id):
return None
def _approval_url(self, user_id):
return self._format_or_empty(self.APPROVE_URL_TEMPLATE, self._approval_url_context(user_id))
def _approval_url_context(self, user_id):
return None
def _rejection_url(self, user_id):
return self._format_or_empty(self.REJECT_URL_TEMPLATE, self._rejection_url_context(user_id))
def _rejection_url_context(self, user_id):
return None
def _send_approval_request_email(self, user, template, context):
mails.send_mail(
user.username,
template,
user=user,
**context
)
def _email_template_context(self, user, is_authorizer=False):
return {}
def _notify_authorizer(self, authorizer):
context = self._email_template_context(authorizer, is_authorizer=True)
if self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(authorizer, self.AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def _notify_non_authorizer(self, user):
context = self._email_template_context(user)
if self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE:
self._send_approval_request_email(user, self.NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE, context)
else:
raise NotImplementedError
def add_authorizer(self, user, **kwargs):
super(EmailApprovableSanction, self).add_authorizer(user, **kwargs)
self.stashed_urls[user._id] = {
'view': self._view_url(user._id),
'approve': self._approval_url(user._id),
'reject': self._rejection_url(user._id)
}
self.save()
class Embargo(EmailApprovableSanction):
"""Embargo object for registrations waiting to go public."""
COMPLETED = 'completed'
DISPLAY_NAME = 'Embargo'
SHORT_NAME = 'embargo'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_EMBARGO_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='embargoed')
for_existing_registration = fields.BooleanField(default=False)
@property
def is_completed(self):
return self.state == self.COMPLETED
@property
def embargo_end_date(self):
if self.state == self.APPROVED:
return self.end_date
return False
    # NOTE(hrybacki): Old, private registrations are grandfathered in and are
    # not required to be made public or embargoed. This field differentiates
    # them from new registrations entering an embargo period, which should
    # not show up in any search-related fields.
@property
def pending_registration(self):
return not self.for_existing_registration and self.pending_approval
def __repr__(self):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
return ('<Embargo(parent_registration={0}, initiated_by={1}, '
'end_date={2}) with _id {3}>').format(
parent_registration,
self.initiated_by,
self.end_date,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('embargo', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'embargo_end_date': self.end_date,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'embargo_end_date': self.end_date,
}
def _validate_authorizer(self, user):
registration = Node.find_one(Q('embargo', 'eq', self))
return registration.has_permission(user, ADMIN)
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(user),
)
# Remove backref to parent project if embargo was for a new registration
if not self.for_existing_registration:
parent_registration.registered_from = None
# Delete parent registration if it was created at the time the embargo was initiated
if not self.for_existing_registration:
parent_registration.is_deleted = True
parent_registration.save()
def disapprove_embargo(self, user, token):
"""Cancels retraction if user is admin and token verifies."""
self.reject(user, token)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('embargo', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_APPROVED,
params={
'node': parent_registration._id,
'embargo_id': self._id,
},
auth=Auth(self.initiated_by),
)
        self.state = self.COMPLETED
self.save()
def approve_embargo(self, user, token):
"""Add user to approval list if user is admin and token verifies."""
self.approve(user, token)
class Retraction(EmailApprovableSanction):
"""Retraction object for public registrations."""
DISPLAY_NAME = 'Retraction'
SHORT_NAME = 'retraction'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_RETRACTION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='initiated')
justification = fields.StringField(default=None, validate=MaxLengthValidator(2048))
def __repr__(self):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
return ('<Retraction(parent_registration={0}, initiated_by={1}) '
'with _id {2}>').format(
parent_registration,
self.initiated_by,
self._id
)
def _view_url_context(self, user_id):
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('retraction', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = settings.RETRACTION_PENDING_TIME.days * 24
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _on_reject(self, user, token):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_CANCELLED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(user),
save=True,
)
def _on_complete(self, user):
parent_registration = Node.find_one(Q('retraction', 'eq', self))
parent_registration.registered_from.add_log(
action=NodeLog.RETRACTION_APPROVED,
params={
'node': parent_registration._id,
'retraction_id': self._id,
},
auth=Auth(self.initiated_by),
)
# Remove any embargoes associated with the registration
if parent_registration.embargo_end_date or parent_registration.is_pending_embargo:
parent_registration.embargo.state = self.REJECTED
parent_registration.registered_from.add_log(
action=NodeLog.EMBARGO_CANCELLED,
params={
'node': parent_registration._id,
'embargo_id': parent_registration.embargo._id,
},
auth=Auth(self.initiated_by),
)
parent_registration.embargo.save()
# Ensure retracted registration is public
if not parent_registration.is_public:
parent_registration.set_privacy('public')
parent_registration.update_search()
# Retraction status is inherited from the root project, so we
# need to recursively update search for every descendant node
        # so that retracted subprojects/components don't appear in search
for node in parent_registration.get_descendants_recursive():
node.update_search()
        self.state = self.APPROVED
self.save()
def approve_retraction(self, user, token):
self.approve(user, token)
def disapprove_retraction(self, user, token):
self.reject(user, token)
class RegistrationApproval(EmailApprovableSanction):
DISPLAY_NAME = 'Registration Approval'
SHORT_NAME = 'approval'
AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_ADMIN
NON_AUTHORIZER_NOTIFY_EMAIL_TEMPLATE = mails.PENDING_REGISTRATION_NON_ADMIN
VIEW_URL_TEMPLATE = VIEW_PROJECT_URL_TEMPLATE
APPROVE_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
REJECT_URL_TEMPLATE = settings.DOMAIN + 'project/{node_id}/?token={token}'
initiated_by = fields.ForeignField('user', backref='registration_approved')
def _view_url_context(self, user_id):
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id
}
def _approval_url_context(self, user_id):
approval_token = self.approval_state.get(user_id, {}).get('approval_token')
if approval_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': approval_token,
}
def _rejection_url_context(self, user_id):
rejection_token = self.approval_state.get(user_id, {}).get('rejection_token')
if rejection_token:
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'node_id': registration._id,
'token': rejection_token,
}
def _email_template_context(self, user, is_authorizer=False, urls=None):
urls = urls or self.stashed_urls.get(user._id, {})
registration_link = urls.get('view', self._view_url(user._id))
if is_authorizer:
approval_link = urls.get('approve', '')
disapproval_link = urls.get('reject', '')
approval_time_span = (24 * settings.REGISTRATION_APPROVAL_TIME.days) + (settings.REGISTRATION_APPROVAL_TIME.seconds / 60)
registration = Node.find_one(Q('registration_approval', 'eq', self))
return {
'is_initiator': self.initiated_by == user,
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
'approval_link': approval_link,
'disapproval_link': disapproval_link,
'approval_time_span': approval_time_span,
'project_name': registration.title,
}
else:
return {
'initiated_by': self.initiated_by.fullname,
'registration_link': registration_link,
}
def _add_success_logs(self, node, user):
src = node.registered_from
src.add_log(
action=NodeLog.PROJECT_REGISTERED,
params={
'parent_node': src.parent_id,
'node': src._primary_key,
'registration': node._primary_key,
},
auth=Auth(user),
save=False
)
src.save()
def _on_complete(self, user):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
auth = Auth(self.initiated_by)
register.set_privacy('public', auth, log=False)
for child in register.get_descendants_recursive(lambda n: n.primary):
child.set_privacy('public', auth, log=False)
# Accounts for system actions where no `User` performs the final approval
auth = Auth(user) if user else None
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_APPROVED,
params={
'node': registered_from._id,
'registration_approval_id': self._id,
},
auth=auth,
)
for node in register.root.node_and_primary_descendants():
self._add_success_logs(node, user)
node.update_search() # update search if public
self.state = self.APPROVED
self.save()
def _on_reject(self, user, token):
register = Node.find_one(Q('registration_approval', 'eq', self))
registered_from = register.registered_from
register.delete_registration_tree(save=True)
registered_from.add_log(
action=NodeLog.REGISTRATION_APPROVAL_CANCELLED,
params={
'node': register._id,
'registration_approval_id': self._id,
},
auth=Auth(user),
)
"""Read and write notebooks as regular .py files.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import re
from .rwbase import NotebookReader, NotebookWriter
from .nbbase import (
new_code_cell, new_text_cell, new_worksheet,
new_notebook, new_heading_cell, nbformat, nbformat_minor,
)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
_encoding_declaration_re = re.compile(r"^#.*coding[:=]\s*([-\w.]+)")
class PyReaderError(Exception):
pass
class PyReader(NotebookReader):
def reads(self, s, **kwargs):
return self.to_notebook(s,**kwargs)
def to_notebook(self, s, **kwargs):
lines = s.splitlines()
cells = []
cell_lines = []
kwargs = {}
state = u'codecell'
for line in lines:
if line.startswith(u'# <nbformat>') or _encoding_declaration_re.match(line):
pass
elif line.startswith(u'# <codecell>'):
cell = self.new_cell(state, cell_lines, **kwargs)
if cell is not None:
cells.append(cell)
state = u'codecell'
cell_lines = []
kwargs = {}
elif line.startswith(u'# <htmlcell>'):
cell = self.new_cell(state, cell_lines, **kwargs)
if cell is not None:
cells.append(cell)
state = u'htmlcell'
cell_lines = []
kwargs = {}
elif line.startswith(u'# <markdowncell>'):
cell = self.new_cell(state, cell_lines, **kwargs)
if cell is not None:
cells.append(cell)
state = u'markdowncell'
cell_lines = []
kwargs = {}
# VERSIONHACK: plaintext -> raw
elif line.startswith(u'# <rawcell>') or line.startswith(u'# <plaintextcell>'):
cell = self.new_cell(state, cell_lines, **kwargs)
if cell is not None:
cells.append(cell)
state = u'rawcell'
cell_lines = []
kwargs = {}
elif line.startswith(u'# <headingcell'):
cell = self.new_cell(state, cell_lines, **kwargs)
if cell is not None:
cells.append(cell)
cell_lines = []
m = re.match(r'# <headingcell level=(?P<level>\d)>',line)
if m is not None:
state = u'headingcell'
kwargs = {}
kwargs['level'] = int(m.group('level'))
else:
state = u'codecell'
kwargs = {}
cell_lines = []
else:
cell_lines.append(line)
if cell_lines and state == u'codecell':
cell = self.new_cell(state, cell_lines)
if cell is not None:
cells.append(cell)
ws = new_worksheet(cells=cells)
nb = new_notebook(worksheets=[ws])
return nb
def new_cell(self, state, lines, **kwargs):
if state == u'codecell':
input = u'\n'.join(lines)
input = input.strip(u'\n')
if input:
return new_code_cell(input=input)
elif state == u'htmlcell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'html',source=text)
elif state == u'markdowncell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'markdown',source=text)
elif state == u'rawcell':
text = self._remove_comments(lines)
if text:
return new_text_cell(u'raw',source=text)
elif state == u'headingcell':
text = self._remove_comments(lines)
level = kwargs.get('level',1)
if text:
return new_heading_cell(source=text,level=level)
def _remove_comments(self, lines):
new_lines = []
for line in lines:
if line.startswith(u'#'):
new_lines.append(line[2:])
else:
new_lines.append(line)
text = u'\n'.join(new_lines)
text = text.strip(u'\n')
return text
def split_lines_into_blocks(self, lines):
if len(lines) == 1:
yield lines[0]
            return  # end the generator (raising StopIteration breaks under PEP 479)
import ast
source = '\n'.join(lines)
code = ast.parse(source)
starts = [x.lineno-1 for x in code.body]
for i in range(len(starts)-1):
yield '\n'.join(lines[starts[i]:starts[i+1]]).strip('\n')
yield '\n'.join(lines[starts[-1]:]).strip('\n')
class PyWriter(NotebookWriter):
def writes(self, nb, **kwargs):
lines = [u'# -*- coding: utf-8 -*-']
lines.extend([
u'# <nbformat>%i.%i</nbformat>' % (nbformat, nbformat_minor),
u'',
])
for ws in nb.worksheets:
for cell in ws.cells:
if cell.cell_type == u'code':
input = cell.get(u'input')
if input is not None:
lines.extend([u'# <codecell>',u''])
lines.extend(input.splitlines())
lines.append(u'')
elif cell.cell_type == u'html':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <htmlcell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'markdown':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <markdowncell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'raw':
input = cell.get(u'source')
if input is not None:
lines.extend([u'# <rawcell>',u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
elif cell.cell_type == u'heading':
input = cell.get(u'source')
level = cell.get(u'level',1)
if input is not None:
lines.extend([u'# <headingcell level=%s>' % level,u''])
lines.extend([u'# ' + line for line in input.splitlines()])
lines.append(u'')
lines.append('')
return unicode('\n'.join(lines))
_reader = PyReader()
_writer = PyWriter()
reads = _reader.reads
read = _reader.read
to_notebook = _reader.to_notebook
write = _writer.write
writes = _writer.writes
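# Hypothetical round-trip sketch (not part of the original module; the import
# path depends on how this package is installed):
#
#     from nbformat.v3 import nbpy
#     with open('notebook.py') as f:
#         nb = nbpy.reads(f.read())      # parse '# <codecell>' markers etc.
#     src = nbpy.writes(nb)              # serialize back to the .py format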
|
Carreau/gistpynb
|
nbformat/v3/nbpy.py
|
Python
|
apache-2.0
| 7,569
|
[
"Brian"
] |
87ea991c2a2bd4d65f695bfdc05d33f9a71a799422e7e420949edfbdaea8a96f
|
"""Mayavi/traits GUI for averaging two sets of KIT marker points"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
import numpy as np
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import confirm, error, FileDialog, OK, YES
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
cached_property, Instance, Property, Array, Bool,
Button, Enum, File, Float, List, Str)
from traitsui.api import View, Item, HGroup, VGroup, CheckListEditor
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except Exception:
from ..utils import trait_wraith
HasTraits = HasPrivateTraits = object
cached_property = on_trait_change = MayaviScene = MlabSceneModel = \
Array = Bool = Button = Enum = File = Float = Instance = Int = \
List = Property = Str = View = Item = HGroup = VGroup = \
CheckListEditor = NoButtons = SceneEditor = trait_wraith
from ..transforms import apply_trans, rotation, translation
from ..coreg import fit_matched_points
from ..io.kit import read_mrk
from ..io.meas_info import _write_dig_points
from ._viewer import HeadViewController, headview_borders, PointObject
backend_is_wx = False # is there a way to determine this?
if backend_is_wx:
mrk_wildcard = ['Supported Files (*.sqd, *.mrk, *.txt, *.pickled)|'
'*.sqd;*.mrk;*.txt;*.pickled',
'Sqd marker file (*.sqd;*.mrk)|*.sqd;*.mrk',
'Text marker file (*.txt)|*.txt',
'Pickled markers (*.pickled)|*.pickled']
mrk_out_wildcard = ["Tab separated values file (*.txt)|*.txt"]
else:
mrk_wildcard = ["*.sqd;*.mrk;*.txt;*.pickled"]
mrk_out_wildcard = "*.txt"
out_ext = '.txt'
use_editor_v = CheckListEditor(cols=1, values=[(i, str(i)) for i in range(5)])
use_editor_h = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
mrk_view_editable = View(
VGroup('file',
Item('name', show_label=False, style='readonly'),
HGroup(
Item('use', editor=use_editor_v, enabled_when="enabled",
style='custom'),
'points',
),
HGroup(Item('clear', enabled_when="can_save", show_label=False),
Item('save_as', enabled_when="can_save",
show_label=False)),
))
mrk_view_basic = View(
VGroup('file',
Item('name', show_label=False, style='readonly'),
Item('use', editor=use_editor_h, enabled_when="enabled",
style='custom'),
HGroup(Item('clear', enabled_when="can_save", show_label=False),
Item('edit', show_label=False),
Item('save_as', enabled_when="can_save",
show_label=False)),
))
mrk_view_edit = View(VGroup('points'))
class MarkerPoints(HasPrivateTraits):
"""Represent 5 marker points"""
points = Array(float, (5, 3))
can_save = Property(depends_on='points')
save_as = Button()
view = View(VGroup('points',
Item('save_as', enabled_when='can_save')))
@cached_property
def _get_can_save(self):
return np.any(self.points)
def _save_as_fired(self):
dlg = FileDialog(action="save as", wildcard=mrk_out_wildcard,
default_filename=self.name,
default_directory=self.dir)
dlg.open()
if dlg.return_code != OK:
return
path, ext = os.path.splitext(dlg.path)
        if ext != '' and ext != out_ext:
            raise ValueError("The extension '%s' is not supported." % ext)
path = path + out_ext
if os.path.exists(path):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?" % path, "Overwrite File?")
if answer != YES:
return
self.save(path)
def save(self, path):
"""Save the marker points
Parameters
----------
path : str
Path to the file to write. The kind of file to write is determined
based on the extension: '.txt' for tab separated text file,
'.pickled' for pickled file.
"""
_write_dig_points(path, self.points)
class MarkerPointSource(MarkerPoints):
"""MarkerPoints subclass for source files"""
file = File(filter=mrk_wildcard, exists=True)
name = Property(Str, depends_on='file')
dir = Property(Str, depends_on='file')
use = List(list(range(5)), desc="Which points to use for the interpolated "
"marker.")
enabled = Property(Bool, depends_on=['points', 'use'])
clear = Button(desc="Clear the current marker data")
edit = Button(desc="Edit the marker coordinates manually")
view = mrk_view_basic
@cached_property
def _get_enabled(self):
return np.any(self.points)
@cached_property
def _get_dir(self):
if self.file:
return os.path.dirname(self.file)
@cached_property
def _get_name(self):
if self.file:
return os.path.basename(self.file)
@on_trait_change('file')
def load(self, fname):
if not fname:
self.reset_traits(['points'])
return
try:
pts = read_mrk(fname)
except Exception as err:
error(None, str(err), "Error Reading mrk")
self.reset_traits(['points'])
else:
self.points = pts
def _clear_fired(self):
self.reset_traits(['file', 'points', 'use'])
def _edit_fired(self):
self.edit_traits(view=mrk_view_edit)
class MarkerPointDest(MarkerPoints):
"""MarkerPoints subclass that serves for derived points"""
src1 = Instance(MarkerPointSource)
src2 = Instance(MarkerPointSource)
name = Property(Str, depends_on='src1.name,src2.name')
dir = Property(Str, depends_on='src1.dir,src2.dir')
points = Property(Array(float, (5, 3)),
depends_on=['method', 'src1.points', 'src1.use',
'src2.points', 'src2.use'])
enabled = Property(Bool, depends_on=['points'])
method = Enum('Transform', 'Average', desc="Transform: estimate a rotation"
"/translation from mrk1 to mrk2; Average: use the average "
"of the mrk1 and mrk2 coordinates for each point.")
view = View(VGroup(Item('method', style='custom'),
Item('save_as', enabled_when='can_save',
show_label=False)))
@cached_property
def _get_dir(self):
return self.src1.dir
@cached_property
def _get_name(self):
n1 = self.src1.name
n2 = self.src2.name
if not n1:
if n2:
return n2
else:
return ''
elif not n2:
return n1
if n1 == n2:
return n1
i = 0
l1 = len(n1) - 1
        l2 = len(n2) - 1
while n1[i] == n2[i]:
if i == l1:
return n1
elif i == l2:
return n2
i += 1
return n1[:i]
@cached_property
def _get_enabled(self):
return np.any(self.points)
@cached_property
def _get_points(self):
# in case only one or no source is enabled
if not (self.src1 and self.src1.enabled):
if (self.src2 and self.src2.enabled):
return self.src2.points
else:
return np.zeros((5, 3))
elif not (self.src2 and self.src2.enabled):
return self.src1.points
# Average method
if self.method == 'Average':
if len(np.union1d(self.src1.use, self.src2.use)) < 5:
error(None, "Need at least one source for each point.",
"Marker Average Error")
return np.zeros((5, 3))
pts = (self.src1.points + self.src2.points) / 2.
for i in np.setdiff1d(self.src1.use, self.src2.use):
pts[i] = self.src1.points[i]
for i in np.setdiff1d(self.src2.use, self.src1.use):
pts[i] = self.src2.points[i]
return pts
# Transform method
idx = np.intersect1d(self.src1.use, self.src2.use, assume_unique=True)
if len(idx) < 3:
error(None, "Need at least three shared points for trans"
"formation.", "Marker Interpolation Error")
return np.zeros((5, 3))
src_pts = self.src1.points[idx]
tgt_pts = self.src2.points[idx]
est = fit_matched_points(src_pts, tgt_pts, out='params')
rot = np.array(est[:3]) / 2.
tra = np.array(est[3:]) / 2.
if len(self.src1.use) == 5:
trans = np.dot(translation(*tra), rotation(*rot))
pts = apply_trans(trans, self.src1.points)
elif len(self.src2.use) == 5:
trans = np.dot(translation(* -tra), rotation(* -rot))
pts = apply_trans(trans, self.src2.points)
else:
trans1 = np.dot(translation(*tra), rotation(*rot))
pts = apply_trans(trans1, self.src1.points)
trans2 = np.dot(translation(* -tra), rotation(* -rot))
for i in np.setdiff1d(self.src2.use, self.src1.use):
pts[i] = apply_trans(trans2, self.src2.points[i])
return pts
class CombineMarkersModel(HasPrivateTraits):
mrk1_file = Instance(File)
mrk2_file = Instance(File)
mrk1 = Instance(MarkerPointSource)
mrk2 = Instance(MarkerPointSource)
mrk3 = Instance(MarkerPointDest)
clear = Button(desc="Clear the current marker data")
# stats
distance = Property(Str, depends_on=['mrk1.points', 'mrk2.points'])
def _clear_fired(self):
self.mrk1.clear = True
self.mrk2.clear = True
self.mrk3.reset_traits(['method'])
def _mrk1_default(self):
mrk = MarkerPointSource()
return mrk
def _mrk1_file_default(self):
return self.mrk1.trait('file')
def _mrk2_default(self):
mrk = MarkerPointSource()
return mrk
def _mrk2_file_default(self):
return self.mrk2.trait('file')
def _mrk3_default(self):
mrk = MarkerPointDest(src1=self.mrk1, src2=self.mrk2)
return mrk
@cached_property
def _get_distance(self):
if (self.mrk1 is None or self.mrk2 is None or
(not np.any(self.mrk1.points)) or
(not np.any(self.mrk2.points))):
return ""
ds = np.sqrt(np.sum((self.mrk1.points - self.mrk2.points) ** 2, 1))
desc = '\t'.join('%.1f mm' % (d * 1000) for d in ds)
return desc
class CombineMarkersPanel(HasTraits):
"""Has two marker points sources and interpolates to a third one"""
model = Instance(CombineMarkersModel, ())
# model references for UI
mrk1 = Instance(MarkerPointSource)
mrk2 = Instance(MarkerPointSource)
mrk3 = Instance(MarkerPointDest)
distance = Str
# Visualization
scene = Instance(MlabSceneModel)
scale = Float(5e-3)
mrk1_obj = Instance(PointObject)
mrk2_obj = Instance(PointObject)
mrk3_obj = Instance(PointObject)
trans = Array()
view = View(VGroup(VGroup(Item('mrk1', style='custom'),
Item('mrk1_obj', style='custom'),
show_labels=False,
label="Source Marker 1", show_border=True),
VGroup(Item('mrk2', style='custom'),
Item('mrk2_obj', style='custom'),
show_labels=False,
label="Source Marker 2", show_border=True),
VGroup(Item('distance', style='readonly'),
label='Stats', show_border=True),
VGroup(Item('mrk3', style='custom'),
Item('mrk3_obj', style='custom'),
show_labels=False,
label="New Marker", show_border=True),
))
def _mrk1_default(self):
return self.model.mrk1
def _mrk2_default(self):
return self.model.mrk2
def _mrk3_default(self):
return self.model.mrk3
def __init__(self, *args, **kwargs):
super(CombineMarkersPanel, self).__init__(*args, **kwargs)
m = self.model
m.sync_trait('distance', self, 'distance', mutual=False)
self.mrk1_obj = PointObject(scene=self.scene, color=(155, 55, 55),
point_scale=self.scale)
self.sync_trait('trans', self.mrk1_obj, mutual=False)
m.mrk1.sync_trait('points', self.mrk1_obj, 'points', mutual=False)
m.mrk1.sync_trait('enabled', self.mrk1_obj, 'visible',
mutual=False)
self.mrk2_obj = PointObject(scene=self.scene, color=(55, 155, 55),
point_scale=self.scale)
self.sync_trait('trans', self.mrk2_obj, mutual=False)
m.mrk2.sync_trait('points', self.mrk2_obj, 'points', mutual=False)
m.mrk2.sync_trait('enabled', self.mrk2_obj, 'visible',
mutual=False)
self.mrk3_obj = PointObject(scene=self.scene, color=(150, 200, 255),
point_scale=self.scale)
self.sync_trait('trans', self.mrk3_obj, mutual=False)
m.mrk3.sync_trait('points', self.mrk3_obj, 'points', mutual=False)
m.mrk3.sync_trait('enabled', self.mrk3_obj, 'visible', mutual=False)
class CombineMarkersFrame(HasTraits):
"""GUI for interpolating between two KIT marker files
Parameters
----------
mrk1, mrk2 : str
        Path to pre- and post-measurement marker files (*.sqd) or empty string.
"""
model = Instance(CombineMarkersModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
panel = Instance(CombineMarkersPanel)
def _headview_default(self):
return HeadViewController(scene=self.scene, system='ALS')
def _panel_default(self):
return CombineMarkersPanel(model=self.model, scene=self.scene)
view = View(HGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical'),
VGroup(headview_borders,
Item('panel', style="custom"),
show_labels=False),
show_labels=False,
),
width=1100, resizable=True,
buttons=NoButtons)
|
jniediek/mne-python
|
mne/gui/_marker_gui.py
|
Python
|
bsd-3-clause
| 14,997
|
[
"Mayavi"
] |
4fd459317dba3675ac68d59d842a828a2d4ff369e32d15208b8c00d0ca55862c
|
import requests
import re
from bs4 import BeautifulSoup
from contextlib import closing
from cloudbot import hook
# This will match ANY we url including youtube, reddit, twitch, etc... Some additional work needs to go into
# not sending the web request etc if the match also matches an existing web regex.
blacklist = re.compile(r'.*(reddit\.com|redd\.it|youtube\.com|youtu\.be|spotify\.com|twitter\.com|twitch\.tv|amazon\.co|amzn\.com|steamcommunity\.com|steampowered\.com|newegg\.com).*', re.I)
url_re = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
opt_in = []
traditional = [
(1024 ** 5, 'PB'),
(1024 ** 4, 'TB'),
(1024 ** 3, 'GB'),
(1024 ** 2, 'MB'),
(1024 ** 1, 'KB'),
(1024 ** 0, 'B'),
]
def bytesto(bytes, system=traditional):
    """Convert a byte count into a human-readable size string."""
bytes = int(bytes)
for factor, suffix in system:
if bytes >= factor:
break
amount = int(bytes/factor)
return str(amount) + suffix
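# Example (hypothetical value): bytesto(123456789) -> '117MB'; the amount is
# truncated to an integer by design.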
@hook.regex(url_re)
def print_url_title(match, chan):
if chan not in opt_in:
return
if re.search(blacklist, match.group()):
return
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Linux; Android 4.0.4; Galaxy Nexus Build/IMM76B) AppleWebKit/535.19 (KHTML, like Gecko) Chrome/18.0.1025.133 Mobile Safari/535.19'
}
with closing(requests.get(match.group(), headers = HEADERS, stream = True)) as r:
if not r.encoding:
content = r.headers['content-type']
size = bytesto(r.headers['content-length'])
out = "Content Type: \x02{}\x02 Size: \x02{}\x02".format(content, size)
return out
html = BeautifulSoup(r.text)
title = html.title.text.strip()
out = "Title: \x02{}\x02".format(title)
return out
|
bharaths/CloudBot
|
plugins/link_announcer.py
|
Python
|
gpl-3.0
| 1,826
|
[
"Galaxy"
] |
1cc606411afa0ab9d5c18c1311a9040ad1716c399759a338414259da9ea99580
|
################################################################
#
# kim_compare_lammps
#
################################################################
#
# Copyright 2018 the potfit development team
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall
# be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE
# AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# https://www.potfit.net/
#
#################################################################
import logging
import sys
import tempfile
from asap import asap_run
from atom_config import atom_config
from compare import compare
from lammps import lammps_run
from pathlib import Path
from potfit import potfit_run
logger = logging.getLogger('kim_compare_lammps')
class testrun(object):
def __init__(self, potfit, lammps, have_asap, model, run_index, basedir):
self.potfit = potfit
self.model = model
self.config = atom_config(model)
self.basedir = basedir / 'run_{:03d}'.format(run_index)
Path.mkdir(self.basedir, exist_ok=True)
self.have_asap = have_asap
if self.have_asap:
self.asapdir = self.basedir / 'asap'
Path.mkdir(self.asapdir, exist_ok=True)
logger.info('Generating asap input data ...')
self.asap = asap_run(model, self.config, self.asapdir)
self.lammpsdir = self.basedir / 'lammps'
Path.mkdir(self.lammpsdir, exist_ok=True)
logger.info('Generating LAMMPS input data ...')
self.lammps = lammps_run(lammps, model, self.config, self.lammpsdir)
self.potfitdir = self.basedir / 'potfit'
Path.mkdir(self.potfitdir, exist_ok=True)
logger.info('Generating potfit input data ...')
self.potfit = potfit_run(potfit, model, self.config, self.potfitdir)
def run(self):
res = True
energies = []
forces = []
if self.have_asap:
try:
logger.info('Running ASAP calculation ...')
energy_a, forces_a = self.asap.run()
energies.append(energy_a)
forces.append(forces_a)
            except Exception:
                logger.exception('ASAP calculation failed')
try:
logger.info('Running LAMMPS calculation ...')
energy_l, forces_l = self.lammps.run()
energies.append(energy_l)
forces.append(forces_l)
        except Exception:
            logger.exception('LAMMPS calculation failed')
try:
logger.info('Running potfit calculation ...')
energy_p, forces_p = self.potfit.run()
energies.append(energy_p)
forces.append(forces_p)
        except Exception:
            logger.exception('potfit calculation failed')
try:
compare(energies, forces).run()
except Exception as e:
logger.error(e)
res = False
finally:
if self.have_asap:
self.asap.cleanup()
self.lammps.cleanup()
self.potfit.cleanup()
return res
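# Hypothetical usage sketch from the kim_compare_lammps driver (argument
# values here are assumptions, not taken from the real driver):
#
#   run = testrun(potfit_binary, lammps_binary, have_asap=False,
#                 model='ex_model_Ar_P_LJ', run_index=0,
#                 basedir=Path('/tmp/kim_compare'))
#   success = run.run()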
if __name__ == '__main__':
print('Please do not run this script directly, use kim_compare_lammps.py instead!')
sys.exit(-1)
|
potfit/potfit
|
util/kim/kim_compare_lammps/testrun.py
|
Python
|
gpl-2.0
| 3,676
|
[
"LAMMPS"
] |
95b154dedfa5401af209b6d50af6c117f382353359cf164d84bd5d953fd0a00d
|
#! /usr/bin/python
"""
Runs Lastz
Written for Lastz v. 1.01.88.
usage: lastz_wrapper.py [options]
--ref_name: The reference name to change all output matches to
--ref_source: Whether the reference is cached or from the history
--source_select: Whether to used pre-set or cached reference file
--input1: The name of the reference file if using history or reference base name if using cached
--input2: The reads file to align
--ref_sequences: The number of sequences in the reference file if using one from history
--pre_set_options: Which of the pre set options to use, if using pre-sets
--strand: Which strand of the read to search, if specifying all parameters
--seed: Seeding settings, if specifying all parameters
--gfextend: Whether to perform gap-free extension of seed hits to HSPs (high scoring segment pairs), if specifying all parameters
--chain: Whether to perform chaining of HSPs, if specifying all parameters
--transition: Number of transitions to allow in each seed hit, if specifying all parameters
--O: Gap opening penalty, if specifying all parameters
--E: Gap extension penalty, if specifying all parameters
--X: X-drop threshold, if specifying all parameters
--Y: Y-drop threshold, if specifying all parameters
--K: Threshold for HSPs, if specifying all parameters
--L: Threshold for gapped alignments, if specifying all parameters
--entropy: Whether to involve entropy when filtering HSPs, if specifying all parameters
--identity_min: Minimum identity (don't report matches under this identity)
--identity_max: Maximum identity (don't report matches above this identity)
--coverage: The minimum coverage value (don't report matches covering less than this)
--out_format: The format of the output file (sam, diffs, or tabular (general))
--output: The name of the output file
--num_threads: The number of threads to run
--lastzSeqsFileDir: Directory of local lastz_seqs.loc file
"""
import optparse, os, subprocess, shutil, sys, tempfile, threading
from Queue import Queue
from galaxy import eggs
import pkg_resources
pkg_resources.require( 'bx-python' )
from bx.seq.twobit import *
from bx.seq.fasta import FastaReader
def stop_err( msg ):
sys.stderr.write( "%s" % msg )
sys.exit()
class LastzJobRunner( object ):
"""
Lastz job runner backed by a pool of "num_threads" worker threads. FIFO scheduling
"""
def __init__( self, num_threads, commands ):
"""Start the job runner with "num_threads" worker threads"""
# start workers
self.queue = Queue()
for command in commands:
self.queue.put( command )
self.threads = []
for i in range( num_threads ):
worker = threading.Thread( target=self.run_next )
worker.start()
self.threads.append( worker )
for worker in self.threads:
worker.join()
def run_next( self ):
"""Run the next command, waiting until one is available if necessary"""
while not self.queue.empty():
command = self.queue.get()
self.run_job( command )
def run_job( self, command ):
try:
proc = subprocess.Popen( args=command, shell=True )
proc.wait()
except Exception, e:
stop_err( "Error executing command (%s) - %s" % ( str( command ), str( e ) ) )
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '', '--ref_name', dest='ref_name', help='The reference name to change all output matches to' )
parser.add_option( '', '--ref_source', dest='ref_source', help='Whether the reference is cached or from the history' )
parser.add_option( '', '--ref_sequences', dest='ref_sequences', help='Number of sequences in the reference dataset' )
parser.add_option( '', '--source_select', dest='source_select', help='Whether to used pre-set or cached reference file' )
parser.add_option( '', '--input1', dest='input1', help='The name of the reference file if using history or reference base name if using cached' )
parser.add_option( '', '--input2', dest='input2', help='The reads file to align' )
parser.add_option( '', '--pre_set_options', dest='pre_set_options', help='Which of the pre set options to use, if using pre-sets' )
parser.add_option( '', '--strand', dest='strand', help='Which strand of the read to search, if specifying all parameters' )
parser.add_option( '', '--seed', dest='seed', help='Seeding settings, if specifying all parameters' )
parser.add_option( '', '--transition', dest='transition', help='Number of transitions to allow in each seed hit, if specifying all parameters' )
parser.add_option( '', '--gfextend', dest='gfextend', help='Whether to perform gap-free extension of seed hits to HSPs (high scoring segment pairs), if specifying all parameters' )
parser.add_option( '', '--chain', dest='chain', help='Whether to perform chaining of HSPs, if specifying all parameters' )
parser.add_option( '', '--O', dest='O', help='Gap opening penalty, if specifying all parameters' )
parser.add_option( '', '--E', dest='E', help='Gap extension penalty, if specifying all parameters' )
parser.add_option( '', '--X', dest='X', help='X-drop threshold, if specifying all parameters' )
parser.add_option( '', '--Y', dest='Y', help='Y-drop threshold, if specifying all parameters' )
parser.add_option( '', '--K', dest='K', help='Threshold for HSPs, if specifying all parameters' )
parser.add_option( '', '--L', dest='L', help='Threshold for gapped alignments, if specifying all parameters' )
parser.add_option( '', '--entropy', dest='entropy', help='Whether to involve entropy when filtering HSPs, if specifying all parameters' )
parser.add_option( '', '--identity_min', dest='identity_min', help="Minimum identity (don't report matches under this identity)" )
parser.add_option( '', '--identity_max', dest='identity_max', help="Maximum identity (don't report matches above this identity)" )
parser.add_option( '', '--coverage', dest='coverage', help="The minimum coverage value (don't report matches covering less than this)" )
parser.add_option( '', '--out_format', dest='format', help='The format of the output file (sam, diffs, or tabular (general))' )
parser.add_option( '', '--output', dest='output', help='The output file' )
parser.add_option( '', '--num_threads', dest='num_threads', help='The number of threads to run' )
parser.add_option( '', '--lastzSeqsFileDir', dest='lastzSeqsFileDir', help='Directory of local lastz_seqs.loc file' )
( options, args ) = parser.parse_args()
# If the reference sequences are from the history, temporary input files will be created
# ( 1 for each sequence ), and we'll keep track of them for later removal from disk ( by closing them )
tmp_in_file_names = []
# Each thread will create a temporary file to which it writes the output from lastz
tmp_out_file_names = []
# Execution of lastz based on job splitting
commands = []
if options.ref_name != 'None':
ref_name = '[nickname=%s]' % options.ref_name
else:
ref_name = ''
# Prepare for commonly-used preset options
if options.source_select == 'pre_set':
set_options = '--%s' % options.pre_set_options
# Prepare for user-specified options
else:
set_options = '--%s --%s --gapped --strand=%s --seed=%s --%s O=%s E=%s X=%s Y=%s K=%s L=%s --%s' % \
( options.gfextend, options.chain, options.strand, options.seed,
options.transition, options.O, options.E, options.X,
options.Y, options.K, options.L, options.entropy )
# Specify input2 and add [fullnames] modifier if output format is diffs
if options.format == 'diffs':
input2 = '%s[fullnames]' % options.input2
else:
input2 = options.input2
if options.format == 'tabular':
# Change output format to general if it's tabular and add field names for tabular output
format = 'general-'
tabular_fields = ':score,name1,strand1,size1,start1,zstart1,end1,length1,text1,name2,strand2,size2,start2,zstart2,end2,start2+,zstart2+,end2+,length2,text2,diff,cigar,identity,coverage,gaprate,diagonal,shingle'
elif options.format == 'sam':
# We currently ALWAYS suppress SAM headers.
format = 'sam-'
tabular_fields = ''
else:
format = options.format
tabular_fields = ''
if options.ref_source == 'history':
# Reference is a fasta dataset from the history, so split job across number of sequences in the dataset
try:
# Ensure there is at least 1 sequence in the dataset ( this may not be necessary ).
error_msg = "The reference dataset is missing metadata, click the pencil icon in the history item and 'auto-detect' the metadata attributes."
ref_sequences = int( options.ref_sequences )
if ref_sequences < 1:
stop_err( error_msg )
except:
stop_err( error_msg )
seqs = 0
fasta_reader = FastaReader( open( options.input1 ) )
while True:
# Read the next sequence from the reference dataset
seq = fasta_reader.next()
if not seq:
break
seqs += 1
# Create a temporary file to contain the current sequence as input to lastz
tmp_in = tempfile.NamedTemporaryFile( prefix=seq.name, suffix='.fasta' )
tmp_in_name = tmp_in.name
tmp_in.close()
tmp_in = file(tmp_in_name,'w+b')
# Keep track of our list of temporary input files so we can remove them later by closing them
tmp_in_file_names.append( tmp_in_name )
# Write the current sequence to the temporary input file
tmp_in.write( '>%s\n%s\n' % ( seq.name, seq.text ) )
tmp_in.close()
# Create a 2nd temporary file to contain the output from lastz execution on the current sequence
tmp_out = tempfile.NamedTemporaryFile( prefix='%s_out' % seq.name )
tmp_out_name = tmp_out.name
tmp_out.close()
# Keep track of our list of temporary output files so we can merge them into our output dataset
tmp_out_file_names.append( tmp_out_name )
# Generate the command line for calling lastz on the current sequence
command = 'lastz %s%s %s %s --ambiguousn --nolaj --identity=%s..%s --coverage=%s --format=%s%s > %s' % \
( tmp_in_name, ref_name, input2, set_options, options.identity_min,
options.identity_max, options.coverage, format, tabular_fields, tmp_out_name )
# Append the command line to our list of commands for sending to the LastzJobRunner queue
commands.append( command )
# Make sure the value of sequences in the metadata is the
# same as the number of sequences read from the dataset ( this may not be necessary ).
if ref_sequences != seqs:
        stop_err( "The value of metadata.sequences (%d) differs from the number of sequences read from the reference (%d)." % ( ref_sequences, seqs ) )
else:
# Reference is a locally cached 2bit file, split job across number of chroms in 2bit file
tbf = TwoBitFile( open( options.input1, 'r' ) )
for chrom in tbf.keys():
# Create a temporary file to contain the output from lastz execution on the current chrom
tmp_out = tempfile.NamedTemporaryFile( prefix='%s_out' % chrom )
tmp_out_name = tmp_out.name
tmp_out.close()
# Keep track of our list of temporary output files so we can merge them into our output dataset
tmp_out_file_names.append( tmp_out_name )
command = 'lastz %s/%s%s %s %s --ambiguousn --nolaj --identity=%s..%s --coverage=%s --format=%s%s >> %s' % \
( options.input1, chrom, ref_name, input2, set_options, options.identity_min,
options.identity_max, options.coverage, format, tabular_fields, tmp_out_name )
commands.append( command )
job_runner = LastzJobRunner( int( options.num_threads ), commands )
# Merge all of the output from lastz ( currently in temporary files ) into our output dataset
command = 'cat %s >> %s' % ( ' '.join( tmp_out_file_names ), options.output )
proc = subprocess.Popen( args=command, shell=True )
proc.wait()
# Remove all temporary files from disk by closing them
for name in tmp_in_file_names:
try:
os.remove( name )
except:
pass
for name in tmp_out_file_names:
try:
os.remove( name )
except:
pass
if __name__=="__main__": __main__()
|
volpino/Yeps-EURAC
|
tools/sr_mapping/lastz_wrapper.py
|
Python
|
mit
| 12,944
|
[
"Galaxy"
] |
580a4b06a87e361ac957835272bd042c782aca61f834a4a2a20aab12eddab1c7
|
# -*- coding: utf-8 -*-
"""General functions supporting pRF fitting."""
# Part of pyprf_feature library
# Copyright (C) 2018 Marian Schneider, Ingo Marquardt
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import os
import numpy as np
import scipy as sp
import nibabel as nb
def load_nii(strPathIn, varSzeThr=5000.0):
"""
Load nii file.
Parameters
----------
strPathIn : str
Path to nii file to load.
varSzeThr : float
If the nii file is larger than this threshold (in MB), the file is
loaded volume-by-volume in order to prevent memory overflow. Default
threshold is 1000 MB.
Returns
-------
aryNii : np.array
Array containing nii data. 32 bit floating point precision.
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
Notes
-----
If the nii file is larger than the specified threshold (`varSzeThr`), the
file is loaded volume-by-volume in order to prevent memory overflow. The
reason for this is that nibabel imports data at float64 precision, which
can lead to a memory overflow even for relatively small files.
"""
# Load nii file (this does not load the data into memory yet):
objNii = nb.load(strPathIn)
# Get size of nii file:
varNiiSze = os.path.getsize(strPathIn)
# Convert to MB:
varNiiSze = np.divide(float(varNiiSze), 1000000.0)
# Load volume-by-volume or all at once, depending on file size:
if np.greater(varNiiSze, float(varSzeThr)):
# Load large nii file
print(('---------Large file size ('
+ str(np.around(varNiiSze))
+ ' MB), reading volume-by-volume'))
# Get image dimensions:
tplSze = objNii.shape
# Create empty array for nii data:
aryNii = np.zeros(tplSze, dtype=np.float32)
# Loop through volumes:
for idxVol in range(tplSze[3]):
aryNii[..., idxVol] = np.asarray(
objNii.dataobj[..., idxVol]).astype(np.float32)
else:
# Load small nii file
        # (the nii object was already opened above)
# Load data into array:
aryNii = np.asarray(objNii.dataobj).astype(np.float32)
# Get headers:
objHdr = objNii.header
# Get 'affine':
aryAff = objNii.affine
# Output nii data (as numpy array), header, and 'affine':
return aryNii, objHdr, aryAff
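# Hypothetical usage (path is illustrative):
#   aryNii, objHdr, aryAff = load_nii('/data/sub-01_func.nii.gz')
#   aryNii.shape  # e.g. (64, 64, 32, 300) for a 4D time series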
def load_res_prm(lstFunc, lstFlsMsk=None):
"""Load result parameters from multiple nii files, with optional mask.
Parameters
----------
lstFunc : list,
list of str with file names of 3D or 4D nii files
lstFlsMsk : list, optional
list of str with paths to 3D nii files that can act as mask/s
Returns
-------
lstPrmAry : list
The list will contain as many numpy arrays as masks were provided.
Each array is 2D with shape [nr voxel in mask, nr nii files in lstFunc]
objHdr : header object
Header of nii file.
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
"""
# load parameter/functional maps into a list
lstPrm = []
for ind, path in enumerate(lstFunc):
aryFnc = load_nii(path)[0].astype(np.float32)
if aryFnc.ndim == 3:
lstPrm.append(aryFnc)
        # handle cases where nii array is 4D; in this case split the array up
        # into 3D arrays and append those
elif aryFnc.ndim == 4:
for indAx in range(aryFnc.shape[-1]):
lstPrm.append(aryFnc[..., indAx])
# load mask/s if available
if lstFlsMsk is not None:
lstMsk = [None] * len(lstFlsMsk)
for ind, path in enumerate(lstFlsMsk):
aryMsk = load_nii(path)[0].astype(np.bool)
lstMsk[ind] = aryMsk
else:
print('------------No masks were provided')
if lstFlsMsk is None:
# if no mask was provided we just flatten all parameter array in list
# and return resulting list
lstPrmAry = [ary.flatten() for ary in lstPrm]
else:
# if masks are available, we loop over masks and then over parameter
# maps to extract selected voxels and parameters
lstPrmAry = [None] * len(lstFlsMsk)
for indLst, aryMsk in enumerate(lstMsk):
# prepare array that will hold parameter values of selected voxels
aryPrmSel = np.empty((np.sum(aryMsk), len(lstPrm)),
dtype=np.float32)
# loop over different parameter maps
for indAry, aryPrm in enumerate(lstPrm):
# get voxels specific to this mask
aryPrmSel[:, indAry] = aryPrm[aryMsk, ...]
# put array away in list, if only one parameter map was provided
# the output will be squeezed
lstPrmAry[indLst] = aryPrmSel
# also get header object and affine array
# we simply take it for the first functional nii file, cause that is the
# only file that has to be provided by necessity
objHdr, aryAff = load_nii(lstFunc[0])[1:]
return lstPrmAry, objHdr, aryAff
def export_nii(ary2dNii, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp, aryAff,
hdrMsk, outFormat='3D'):
"""
Export nii file(s).
Parameters
----------
ary2dNii : numpy array
Numpy array with results to be exported to nii.
lstNiiNames : list
List that contains strings with the complete file names.
    aryLgcMsk : numpy array
        1D numpy array containing logical values, one per voxel of the
        flattened volume. If `True`, the voxel was inside the mask and is
        represented in the results. This array is necessary to put results
        back into the original volume dimensions.
aryLgcVar : np.array
1D numpy array containing logical values. One value per voxel after
mask has been applied. If `True`, the variance and mean of the voxel's
time course are greater than the provided thresholds in all runs and
the voxel is included in the output array (`aryFunc`). If `False`, the
variance or mean of the voxel's time course is lower than threshold in
at least one run and the voxel has been excluded from the output
(`aryFunc`). This is to avoid problems in the subsequent model fitting.
This array is necessary to put results into original dimensions after
model fitting.
tplNiiShp : tuple
Tuple that describes the 3D shape of the output volume
aryAff : np.array
Array containing 'affine', i.e. information about spatial positioning
of nii data.
hdrMsk : nibabel-header-object
Nii header of mask.
outFormat : string, either '3D' or '4D'
        String specifying whether images will be saved as separate 3D nii
        files or one 4D nii file.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
[2] Depending on whether outFormat is '3D' or '4D' images will be saved as
    separate 3D nii files or one 4D nii file.
"""
# Number of voxels that were included in the mask:
varNumVoxMsk = np.sum(aryLgcMsk)
# Number of maps in ary2dNii
varNumMaps = ary2dNii.shape[-1]
    # Place voxels based on low-variance exclusion:
aryPrfRes01 = np.zeros((varNumVoxMsk, varNumMaps), dtype=np.float32)
for indMap in range(varNumMaps):
aryPrfRes01[aryLgcVar, indMap] = ary2dNii[:, indMap]
# Total number of voxels:
varNumVoxTlt = (tplNiiShp[0] * tplNiiShp[1] * tplNiiShp[2])
# Place voxels based on mask-exclusion:
aryPrfRes02 = np.zeros((varNumVoxTlt, aryPrfRes01.shape[-1]),
dtype=np.float32)
for indDim in range(aryPrfRes01.shape[-1]):
aryPrfRes02[aryLgcMsk, indDim] = aryPrfRes01[:, indDim]
# Reshape pRF finding results into original image dimensions:
aryPrfRes = np.reshape(aryPrfRes02,
[tplNiiShp[0],
tplNiiShp[1],
tplNiiShp[2],
aryPrfRes01.shape[-1]])
if outFormat == '3D':
# Save nii results:
for idxOut in range(0, aryPrfRes.shape[-1]):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes[..., idxOut],
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[idxOut]
nb.save(niiOut, strTmp)
elif outFormat == '4D':
# adjust header
hdrMsk.set_data_shape(aryPrfRes.shape)
# Create nii object for results:
niiOut = nb.Nifti1Image(aryPrfRes,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = lstNiiNames[0]
nb.save(niiOut, strTmp)
def joinRes(lstPrfRes, varPar, idxPos, inFormat='1D'):
"""Join results from different processing units (here cores).
Parameters
----------
lstPrfRes : list
Output of results from parallelization.
varPar : integer, positive
Number of cores that were used during parallelization
idxPos : integer, positive
        Position within each per-core result list at which the array of
        interest is stored.
inFormat : string
Specifies whether input will be 1d or 2d.
Returns
-------
aryOut : numpy array
Numpy array with results collected from different cores
"""
if inFormat == '1D':
# initialize output array
aryOut = np.zeros((0,))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.append(aryOut, lstPrfRes[idxRes][idxPos])
elif inFormat == '2D':
# initialize output array
aryOut = np.zeros((0, lstPrfRes[0][idxPos].shape[-1]))
# gather arrays from different processing units
for idxRes in range(0, varPar):
aryOut = np.concatenate((aryOut, lstPrfRes[idxRes][idxPos]),
axis=0)
return aryOut
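# Sketch of the expected input layout (hypothetical): with varPar=2 cores and
# idxPos=1, lstPrfRes looks like [[idx0, aryA, ...], [idx1, aryB, ...]] and
# joinRes(lstPrfRes, 2, 1, inFormat='2D') concatenates aryA and aryB along
# axis 0, in core order.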
def cmp_res_R2(lstRat, lstNiiNames, strPathOut, strPathMdl, lgcSveMdlTc=True,
lgcDel=False, strNmeExt=''):
""""Compare results for different exponents and create winner nii.
Parameters
----------
lstRat : list
List of floats containing the ratios that were tested for surround
suppression.
lstNiiNames : list
List of names of the different pRF maps (e.g. xpos, ypos, SD)
strPathOut : string
Path to the parent directory where the results should be saved.
strPathMdl : string
Path to the parent directory where pRF models should be saved.
    lgcSveMdlTc : boolean
        Should model time courses be saved as npy file?
    lgcDel : boolean
        Should in-between results (in form of nii files) be deleted?
strNmeExt : string
        Extra name suffix appended to the experiment name. If undesired,
        provide an empty string.
Notes
-----
[1] This function does not return any arrays but instead saves to disk.
"""
print('---Compare results for different ratios')
# Extract the index position for R2 and Betas map in lstNiiNames
indPosR2 = [ind for ind, item in enumerate(lstNiiNames) if 'R2' in item]
indPosBetas = [ind for ind, item in enumerate(lstNiiNames) if 'Betas' in
item]
# Check that only one index was found
msgError = 'More than one nii file was provided that could serve as R2 map'
assert len(indPosR2) == 1, msgError
assert len(indPosBetas) == 1, msgError
# turn list int index
indPosR2 = indPosR2[0]
indPosBetas = indPosBetas[0]
# Get the names of the nii files with in-between results
lstCmpRes = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
# If ratio is marked with 1.0, set empty string to find results.
# 1.0 is the key for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from nii file names and output path
lstPthNames = [strPathOut + strNii + strNmeExt + strExpSve + '.nii.gz'
for strNii in lstNiiNames]
# Append list to list that contains nii names for all exponents
lstCmpRes.append(lstPthNames)
print('------Find ratio that yielded highest R2 per voxel')
# Initialize winner R2 map with R2 values from fit without surround
aryWnrR2 = load_nii(lstCmpRes[0][indPosR2])[0]
# Initialize ratio map with 1 where no-surround model was fit, otherwise 0
aryRatMap = np.zeros(aryWnrR2.shape)
aryRatMap[np.nonzero(aryWnrR2)] = 1.0
# Loop over R2 maps to establish which exponents wins
# Skip the first ratio, since this is the reference ratio (no surround)
# and is reflected already in the initialized arrays - aryWnrR2 & aryRatMap
for indRat, lstMaps in zip(lstRat[1:], lstCmpRes[1:]):
# Load R2 map for this particular exponent
aryTmpR2 = load_nii(lstMaps[indPosR2])[0]
# Load beta values for this particular exponent
aryTmpBetas = load_nii(lstMaps[indPosBetas])[0]
# Get logical that tells us where current R2 map is greater than
# previous ones
aryLgcWnr = np.greater(aryTmpR2, aryWnrR2)
# Get logical that tells us where the beta parameter estimate for the
# centre is positive and the estimate for the surround is negative
aryLgcCtrSur1 = np.logical_and(np.greater(aryTmpBetas[..., 0], 0.0),
np.less(aryTmpBetas[..., 1], 0.0))
# Get logical that tells us where the absolute beta parameter estimate
# for the surround is less than beta parameter estimate for the center
aryLgcCtrSur2 = np.less(np.abs(aryTmpBetas[..., 1]),
np.abs(aryTmpBetas[..., 0]))
# Combine the two logicals
aryLgcCtrSur = np.logical_and(aryLgcCtrSur1, aryLgcCtrSur2)
# Combine logical for winner R2 and center-surround conditions
aryLgcWnr = np.logical_and(aryLgcWnr, aryLgcCtrSur)
# Replace values of R2, where current R2 map was greater
aryWnrR2[aryLgcWnr] = np.copy(aryTmpR2[aryLgcWnr])
# Remember the index of the exponent that gave rise to this new R2
aryRatMap[aryLgcWnr] = indRat
# Initialize list with winner maps. The winner maps are initialized with
# the same shape as the maps that the last tested ratio maps had.
lstRatMap = []
for strPthMaps in lstCmpRes[-1]:
lstRatMap.append(np.zeros(nb.load(strPthMaps).shape))
# Compose other maps by assigning map value from the map that resulted from
# the exponent that won for particular voxel
for indRat, lstMaps in zip(lstRat, lstCmpRes):
# Find out where this exponent won in terms of R2
lgcWinnerMap = [aryRatMap == indRat][0]
# Loop over all the maps
for indMap, _ in enumerate(lstMaps):
# Load map for this particular ratio
aryTmpMap = load_nii(lstMaps[indMap])[0]
# Handle exception: beta map will be 1D, if from ratio 1.0
# In this case we want to make it 2D. In particular, the second
# set of beta weights should be all zeros, so that later when
# forming the model time course, the 2nd predictors contributes 0
if indRat == 1.0 and indMap == indPosBetas:
aryTmpMap = np.concatenate((aryTmpMap,
np.zeros(aryTmpMap.shape)),
axis=-1)
# Load current winner map from array
aryCrrWnrMap = np.copy(lstRatMap[indMap])
# Assign values in temporary map to current winner map for voxels
# where this ratio won
aryCrrWnrMap[lgcWinnerMap] = np.copy(aryTmpMap[lgcWinnerMap])
lstRatMap[indMap] = aryCrrWnrMap
print('------Export results as nii')
# Save winner maps as nii files
# Get header and affine array
hdrMsk, aryAff = load_nii(lstMaps[indPosR2])[1:]
# Loop over all the maps
for indMap, aryMap in enumerate(lstRatMap):
# Create nii object for results:
niiOut = nb.Nifti1Image(aryMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + lstNiiNames[indMap] + strNmeExt + \
'.nii.gz'
nb.save(niiOut, strTmp)
# Save map with best ratios as nii
niiOut = nb.Nifti1Image(aryRatMap,
aryAff,
header=hdrMsk
)
# Save nii:
strTmp = strPathOut + '_supsur' + '_Ratios' + strNmeExt + '.nii.gz'
nb.save(niiOut, strTmp)
if lgcSveMdlTc:
print('------Save model time courses/parameters/responses for ' +
'centre and surround, across all ratios')
        # Get the names of the npy files with in-between model responses
lstCmpMdlRsp = []
for indRat in range(len(lstRat)):
# Get strExpSve
strExpSve = '_' + str(lstRat[indRat])
            # If ratio is marked with 1.0, set empty string to find results.
# This is the code for fitting without a surround.
if lstRat[indRat] == 1.0:
strExpSve = ''
# Create full path names from npy file names and output path
lstPthNames = [strPathMdl + strNpy + strNmeExt + strExpSve + '.npy'
for strNpy in ['', '_params', '_mdlRsp']]
# Append list to list that contains nii names for all exponents
lstCmpMdlRsp.append(lstPthNames)
        # Load tc/parameters/responses for different ratios; for now skip the
        # "1.0" ratio because its tc/parameters/responses differ in shape
lstPrfTcSur = []
lstMdlParamsSur = []
lstMdlRspSur = []
for indNpy, lstNpy in enumerate(lstCmpMdlRsp[1:]):
lstPrfTcSur.append(np.load(lstNpy[0]))
lstMdlParamsSur.append(np.load(lstNpy[1]))
lstMdlRspSur.append(np.load(lstNpy[2]))
# Turn into arrays
aryPrfTcSur = np.stack(lstPrfTcSur, axis=2)
aryMdlParamsSur = np.stack(lstMdlParamsSur, axis=2)
aryMdlRspSur = np.stack(lstMdlRspSur, axis=2)
# Now handle the "1.0" ratio
# Load the tc/parameters/responses of the "1.0" ratio
aryPrfTc = np.load(lstCmpMdlRsp[0][0])
aryMdlParams = np.load(lstCmpMdlRsp[0][1])
aryMdlRsp = np.load(lstCmpMdlRsp[0][2])
# Make 2nd row of time courses all zeros so they get no weight in lstsq
aryPrfTc = np.concatenate((aryPrfTc, np.zeros(aryPrfTc.shape)), axis=1)
# Make 2nd row of parameters the same as first row
aryMdlParams = np.stack((aryMdlParams, aryMdlParams), axis=1)
# Make 2nd row of responses all zeros so they get no weight in lstsq
aryMdlRsp = np.stack((aryMdlRsp, np.zeros(aryMdlRsp.shape)), axis=1)
# Add the "1.0" ratio to tc/parameters/responses of other ratios
aryPrfTcSur = np.concatenate((np.expand_dims(aryPrfTc, axis=2),
aryPrfTcSur), axis=2)
aryMdlParamsSur = np.concatenate((np.expand_dims(aryMdlParams, axis=2),
aryMdlParamsSur), axis=2)
aryMdlRspSur = np.concatenate((np.expand_dims(aryMdlRsp, axis=2),
aryMdlRspSur), axis=2)
# Save parameters/response for centre and surround, for all ratios
np.save(strPathMdl + '_supsur' + '', aryPrfTcSur)
np.save(strPathMdl + '_supsur' + '_params', aryMdlParamsSur)
np.save(strPathMdl + '_supsur' + '_mdlRsp', aryMdlRspSur)
    # Delete all the in-between results, if desired by user; skip the "1.0" ratio
if lgcDel:
lstCmpRes = [item for sublist in lstCmpRes[1:] for item in sublist]
print('------Delete in-between results')
for strMap in lstCmpRes[:]:
os.remove(strMap)
if lgcSveMdlTc:
lstCmpMdlRsp = [item for sublist in lstCmpMdlRsp[1:] for item in
sublist]
for strMap in lstCmpMdlRsp[:]:
os.remove(strMap)
def map_crt_to_pol(aryXCrds, aryYrds):
"""Remap coordinates from cartesian to polar
Parameters
----------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
Returns
-------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
"""
aryRad = np.sqrt(aryXCrds**2+aryYrds**2)
aryTht = np.arctan2(aryYrds, aryXCrds)
return aryTht, aryRad
def map_pol_to_crt(aryTht, aryRad):
"""Remap coordinates from polar to cartesian
Parameters
----------
aryTht : 1D numpy array
Angle of coordinates
aryRad : 1D numpy array
Radius of coordinates.
Returns
-------
aryXCrds : 1D numpy array
Array with x coordinate values.
aryYrds : 1D numpy array
Array with y coordinate values.
"""
aryXCrds = aryRad * np.cos(aryTht)
aryYrds = aryRad * np.sin(aryTht)
return aryXCrds, aryYrds
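# Round-trip sketch (hypothetical values): for x=3, y=4,
#   aryTht, aryRad = map_crt_to_pol(np.array([3.]), np.array([4.]))  # rad=5.
#   map_pol_to_crt(aryTht, aryRad)  # recovers (3., 4.) up to float precision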
def find_near_pol_ang(aryEmpPlrAng, aryExpPlrAng):
"""Return index of nearest expected polar angle.
Parameters
----------
aryEmpPlrAng : 1D numpy array
Empirically found polar angle estimates
aryExpPlrAng : 1D numpy array
Theoretically expected polar angle estimates
Returns
-------
    aryIdx : 1D numpy array
        Indices of the nearest theoretically expected polar angle.
    aryDst : 1D numpy array
        Distances to the nearest theoretically expected polar angle.
"""
dist = np.abs(np.subtract(aryEmpPlrAng[:, None],
aryExpPlrAng[None, :]))
return np.argmin(dist, axis=-1), np.min(dist, axis=-1)
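# Example (hypothetical values): with expected angles [0, pi/2],
#   find_near_pol_ang(np.array([0.1]), np.array([0., np.pi / 2.]))
# returns (array([0]), array([0.1])): index of and distance to nearest angle.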
def rmp_rng(aryVls, varNewMin, varNewMax, varOldThrMin=None,
varOldAbsMax=None):
"""Remap values in an array from one range to another.
Parameters
----------
aryVls : 1D numpy array
Array with values that need to be remapped.
varNewMin : float
Desired minimum value of new, remapped array.
varNewMax : float
Desired maximum value of new, remapped array.
    varOldThrMin : float
        Theoretical minimum of the old distribution. Can be specified if this
        theoretical minimum does not occur in the empirical distribution but
        should be considered nonetheless.
    varOldAbsMax : float
        Theoretical maximum of the old distribution. Can be specified if this
        theoretical maximum does not occur in the empirical distribution but
        should be considered nonetheless.
Returns
-------
aryVls : 1D numpy array
Array with remapped values.
"""
if varOldThrMin is None:
varOldMin = aryVls.min()
else:
varOldMin = varOldThrMin
if varOldAbsMax is None:
varOldMax = aryVls.max()
else:
varOldMax = varOldAbsMax
aryNewVls = np.empty((aryVls.shape), dtype=aryVls.dtype)
for ind, val in enumerate(aryVls):
aryNewVls[ind] = (((val - varOldMin) * (varNewMax - varNewMin)) /
(varOldMax - varOldMin)) + varNewMin
return aryNewVls
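# Example (hypothetical values): rmp_rng(np.array([0., 5., 10.]), 0., 1.)
# returns array([0., 0.5, 1.]); with varOldThrMin/varOldAbsMax set, the old
# range is taken from those bounds instead of the empirical min/max.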
def rmp_deg_pixel_xys(vecX, vecY, vecPrfSd, tplPngSize,
varExtXmin, varExtXmax, varExtYmin, varExtYmax):
"""Remap x, y, sigma parameters from degrees to pixel.
Parameters
----------
vecX : 1D numpy array
Array with possible x parametrs in degree
vecY : 1D numpy array
Array with possible y parametrs in degree
vecPrfSd : 1D numpy array
Array with possible sd parametrs in degree
tplPngSize : tuple, 2
Pixel dimensions of the visual space in pixel (width, height).
varExtXmin : float
Extent of visual space from centre in negative x-direction (width)
varExtXmax : float
Extent of visual space from centre in positive x-direction (width)
varExtYmin : float
Extent of visual space from centre in negative y-direction (height)
varExtYmax : float
Extent of visual space from centre in positive y-direction (height)
Returns
-------
vecX : 1D numpy array
Array with possible x parametrs in pixel
vecY : 1D numpy array
Array with possible y parametrs in pixel
vecPrfSd : 1D numpy array
Array with possible sd parametrs in pixel
"""
# Remap modelled x-positions of the pRFs:
vecXpxl = rmp_rng(vecX, 0.0, (tplPngSize[0] - 1), varOldThrMin=varExtXmin,
varOldAbsMax=varExtXmax)
# Remap modelled y-positions of the pRFs:
vecYpxl = rmp_rng(vecY, 0.0, (tplPngSize[1] - 1), varOldThrMin=varExtYmin,
varOldAbsMax=varExtYmax)
# We calculate the scaling factor from degrees of visual angle to
# pixels separately for the x- and the y-directions (the two should
# be the same).
varDgr2PixX = np.divide(tplPngSize[0], (varExtXmax - varExtXmin))
varDgr2PixY = np.divide(tplPngSize[1], (varExtYmax - varExtYmin))
# Check whether varDgr2PixX and varDgr2PixY are similar:
strErrMsg = 'ERROR. The ratio of X and Y dimensions in ' + \
'stimulus space (in degrees of visual angle) and the ' + \
'ratio of X and Y dimensions in the upsampled visual space' + \
'do not agree'
assert 0.5 > np.absolute((varDgr2PixX - varDgr2PixY)), strErrMsg
# Convert prf sizes from degrees of visual angles to pixel
vecPrfSdpxl = np.multiply(vecPrfSd, varDgr2PixX)
# Return new values in column stack.
# Since values are now in pixel, they should be integer
return np.column_stack((vecXpxl, vecYpxl, vecPrfSdpxl)).astype(np.int32)
def crt_2D_gauss(varSizeX, varSizeY, varPosX, varPosY, varSd):
"""Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
        Height of the visual field.
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1] mathworld.wolfram.com/GaussianFunction.html
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# create x and y in meshgrid:
aryX, aryY = sp.mgrid[0:varSizeX, 0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
(2.0 * np.square(varSd))
)
aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))
return aryGauss
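# Sketch (hypothetical values): the kernel is normalised, so it sums to ~1
# when it fits well inside the field:
#   aryG = crt_2D_gauss(64, 64, 32., 32., 5.)
#   np.sum(aryG)  # approximately 1.0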
def cnvl_2D_gauss(idxPrc, aryMdlParamsChnk, arySptExpInf, tplPngSize, queOut,
strCrd='crt'):
"""Spatially convolve input with 2D Gaussian model.
Parameters
----------
idxPrc : int
Process ID of the process calling this function (for CPU
multi-threading). In GPU version, this parameter is 0 (just one thread
on CPU).
aryMdlParamsChnk : 2d numpy array, shape [n_models, n_model_params]
Array with the model parameter combinations for this chunk.
arySptExpInf : 3d numpy array, shape [n_x_pix, n_y_pix, n_conditions]
All spatial conditions stacked along second axis.
tplPngSize : tuple, 2.
Pixel dimensions of the visual space (width, height).
    queOut : multiprocessing.queues.Queue
        Queue to put the results on. If this is None, the caller is not using
        multiprocessing and the result array is returned directly.
    strCrd : string, either 'crt' or 'pol'
        Whether model parameters are provided in cartesian or polar
        coordinates.
Returns
-------
    data : 2d numpy array, shape [n_models, n_conditions]
        Convolved model time courses.
Reference
---------
[1]
"""
# Number of combinations of model parameters in the current chunk:
varChnkSze = aryMdlParamsChnk.shape[0]
# Number of conditions / time points of the input data
varNumLstAx = arySptExpInf.shape[-1]
# Output array with results of convolution:
aryOut = np.zeros((varChnkSze, varNumLstAx))
# Loop through combinations of model parameters:
for idxMdl in range(0, varChnkSze):
# Spatial parameters of current model:
if strCrd == 'pol':
# Position was given in polar coordinates
varTmpEcc = aryMdlParamsChnk[idxMdl, 0]
varTmpPlrAng = aryMdlParamsChnk[idxMdl, 1]
            # Convert from polar to cartesian coordinates
varTmpX = varTmpEcc * np.cos(varTmpPlrAng) + tplPngSize[0]/2.
varTmpY = varTmpEcc * np.sin(varTmpPlrAng) + tplPngSize[1]/2.
elif strCrd == 'crt':
varTmpX = aryMdlParamsChnk[idxMdl, 0]
varTmpY = aryMdlParamsChnk[idxMdl, 1]
# Standard deviation does not depend on coordinate system
varTmpSd = aryMdlParamsChnk[idxMdl, 2]
# Create pRF model (2D):
aryGauss = crt_2D_gauss(tplPngSize[0],
tplPngSize[1],
varTmpX,
varTmpY,
varTmpSd)
# Multiply pixel-time courses with Gaussian pRF models:
aryCndTcTmp = np.multiply(arySptExpInf, aryGauss[:, :, None])
# Calculate sum across x- and y-dimensions - the 'area under the
# Gaussian surface'.
aryCndTcTmp = np.sum(aryCndTcTmp, axis=(0, 1))
        # Put the current model's time course into the function's output
        # array:
aryOut[idxMdl, :] = aryCndTcTmp
if queOut is None:
# if user is not using multiprocessing, return the array directly
return aryOut
else:
        # Prepend the process ID to the output (so that the pRF model time
        # courses can be put back into the correct order after the
        # parallelised computation):
lstOut = [idxPrc,
aryOut]
# Put output to queue:
queOut.put(lstOut)
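# Hedged usage sketch: a single-process call (queOut=None) with illustrative
# shapes; arySptExpInf would normally hold the stimulus frames:
#
#     aryMdl = np.array([[32.0, 32.0, 5.0]])   # one cartesian model (x, y, sd)
#     arySpt = np.random.rand(64, 64, 10)      # hypothetical spatial conditions
#     aryTc = cnvl_2D_gauss(0, aryMdl, arySpt, (64, 64), None, strCrd='crt')
#     # aryTc has shape (1, 10): one time course per model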
class cls_set_config(object):
"""
Set config parameters from dictionary into local namespace.
Parameters
----------
dicCnfg : dict
Dictionary containing parameter names (as keys) and parameter values
(as values). For example, `dicCnfg['varTr']` contains a float, such as
`2.94`.
"""
def __init__(self, dicCnfg):
"""Set config parameters from dictionary into local namespace."""
self.__dict__.update(dicCnfg)
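# Hedged usage sketch: lifting a config dictionary into attribute access:
#
#     objCnfg = cls_set_config({'varTr': 2.94, 'varNumVol': 400})
#     # objCnfg.varTr -> 2.94, objCnfg.varNumVol -> 400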
|
MSchnei/py_pRF_motion
|
pyprf_feature/analysis/utils_general.py
|
Python
|
gpl-3.0
| 31,893
|
[
"Gaussian"
] |
9fbfcbb9df9a2b0225e419a9c066ec1eb237f6fb060ff161db731c23e994a011
|
#!/usr/bin/env python
"""
Converts SAM data to sorted BAM data.
usage: sam_to_bam.py [options]
--input1: SAM file to be converted
--threads: number of threads to use
--index: path of the indexed reference genome
--ref_file: Reference file if choosing from history
--output1: output dataset in bam format
"""
import optparse, os, sys, subprocess, tempfile, shutil
def stop_err( msg ):
sys.stderr.write( '%s\n' % msg )
sys.exit()
def __main__():
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '', '--input1', dest='input1', help='The input SAM dataset' )
parser.add_option( '', '--threads', dest='threads', help='Number of threads' )
parser.add_option( '', '--index', dest='index', help='The path of the indexed reference genome' )
parser.add_option( '', '--ref_file', dest='ref_file', help='The reference dataset from the history' )
parser.add_option( '', '--output1', dest='output1', help='The output BAM dataset' )
( options, args ) = parser.parse_args()
# output version # of tool
try:
tmp = tempfile.NamedTemporaryFile().name
tmp_stdout = open( tmp, 'wb' )
proc = subprocess.Popen( args='samtools 2>&1', shell=True, stdout=tmp_stdout )
tmp_stdout.close()
returncode = proc.wait()
stdout = None
for line in open( tmp_stdout.name, 'rb' ):
if line.lower().find( 'version' ) >= 0:
stdout = line.strip()
break
if stdout:
sys.stdout.write( 'samtools %s\n' % stdout )
else:
raise Exception
except:
sys.stdout.write( 'Could not determine /opt/installed/samtools version\n' )
tmp_dir = tempfile.mkdtemp( dir='.' )
print tmp_dir
if not options.ref_file or options.ref_file == 'None':
        # We're using locally cached reference sequences ( e.g., /galaxy/data/equCab2/sam_index/equCab2.fa ).
# The indexes for /galaxy/data/equCab2/sam_index/equCab2.fa will be contained in
# a file named /galaxy/data/equCab2/sam_index/equCab2.fa.fai
fai_index_file_path = '%s.fai' % options.index
if not os.path.exists( fai_index_file_path ):
#clean up temp files
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
stop_err( 'Indexed genome %s not present, request it by reporting this error.' % options.index )
else:
try:
# Create indexes for history reference ( e.g., ~/database/files/000/dataset_1.dat ) using /opt/Genomics/ohsu/dnapipeline/samtools-0.1.19/samtools faidx, which will:
# - index reference sequence in the FASTA format or extract subsequence from indexed reference sequence
# - if no region is specified, faidx will index the file and create <ref.fasta>.fai on the disk
# - if regions are specified, the subsequences will be retrieved and printed to stdout in the FASTA format
# - the input file can be compressed in the RAZF format.
# IMPORTANT NOTE: a real weakness here is that we are creating indexes for the history dataset
# every time we run this tool. It would be nice if we could somehow keep track of user's specific
# index files so they could be re-used.
fai_index_file_base = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
            # At this point, fai_index_file_base will look something like /tmp/dataset_13.dat
os.symlink( options.ref_file, fai_index_file_base )
fai_index_file_path = '%s.fai' % fai_index_file_base
command = 'samtools faidx %s' % fai_index_file_base
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=command, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
if os.path.getsize( fai_index_file_path ) == 0:
raise Exception, 'Index file empty, there may be an error with your reference file or settings.'
except Exception, e:
#clean up temp files
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
stop_err( 'Error creating indexes from reference (%s), %s' % ( options.ref_file, str( e ) ) )
try:
# Extract all alignments from the input SAM file to BAM format ( since no region is specified, all the alignments will be extracted ).
tmp_aligns_file = tempfile.NamedTemporaryFile( dir=tmp_dir )
tmp_aligns_file_name = tmp_aligns_file.name
tmp_aligns_file.close()
command = 'samtools view -bt %s -@ %s -o %s -S %s' % ( fai_index_file_path, options.threads, tmp_aligns_file_name, options.input1 )
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
print command
proc = subprocess.Popen( args=command, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
#clean up temp files
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
stop_err( 'Error extracting alignments from (%s), %s' % ( options.input1, str( e ) ) )
try:
# Sort alignments by leftmost coordinates. File <out.prefix>.bam will be created. This command
# may also create temporary files <out.prefix>.%d.bam when the whole alignment cannot be fitted
# into memory ( controlled by option -m ).
tmp_sorted_aligns_file = tempfile.NamedTemporaryFile( dir=tmp_dir )
tmp_sorted_aligns_file_name = tmp_sorted_aligns_file.name
tmp_sorted_aligns_file.close()
command = 'samtools sort -@ %s -l 5 %s %s' % ( options.threads, tmp_aligns_file_name, tmp_sorted_aligns_file_name )
tmp = tempfile.NamedTemporaryFile( dir=tmp_dir ).name
tmp_stderr = open( tmp, 'wb' )
proc = subprocess.Popen( args=command, shell=True, cwd=tmp_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
tmp_stderr.close()
# get stderr, allowing for case where it's very large
tmp_stderr = open( tmp, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
tmp_stderr.close()
if returncode != 0:
raise Exception, stderr
except Exception, e:
#clean up temp files
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
stop_err( 'Error sorting alignments from (%s), %s' % ( tmp_aligns_file_name, str( e ) ) )
    # Move the sorted BAM file to our output dataset location
sorted_bam_file = '%s.bam' % tmp_sorted_aligns_file_name
shutil.move( sorted_bam_file, options.output1 )
#clean up temp files
if os.path.exists( tmp_dir ):
shutil.rmtree( tmp_dir )
# check that there are results in the output file
if os.path.getsize( options.output1 ) > 0:
sys.stdout.write( 'SAM file converted to BAM' )
else:
stop_err( 'Error creating sorted version of BAM file.' )
if __name__=="__main__": __main__()
|
jhl667/galaxy_tools
|
tools/sam_to_bam/sam_to_bam.py
|
Python
|
apache-2.0
| 8,364
|
[
"Galaxy"
] |
13b52255e77ce53312f8e05d224b508fcb344bcd3e1e535229c5b88354cb7467
|
from insights.tests import context_wrap
from insights.parsers import tomcat_xml
from insights.parsers.tomcat_xml import TomcatWebXml
from insights.parsers.tomcat_xml import TomcatServerXml
import doctest
web_xml_content = """
<?xml version="1.0" encoding="ISO-8859-1"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<web-app xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
version="2.5">
<!-- ======================== Introduction ============================== -->
<!-- This document defines default values for *all* web applications -->
<!-- loaded into this instance of Tomcat. As each application is -->
<!-- deployed, this file is processed, followed by the -->
<!-- "/WEB-INF/web.xml" deployment descriptor from your own -->
<!-- applications. -->
<!-- -->
<!-- WARNING: Do not configure application-specific resources here! -->
<!-- They should go in the "/WEB-INF/web.xml" file in your application. -->
<!-- ================== Built In Servlet Definitions ==================== -->
<!-- The default servlet for all web applications, that serves static -->
<!-- resources. It processes all requests that are not mapped to other -->
<!-- servlets with servlet mappings (defined either here or in your own -->
<!-- web.xml file. This servlet supports the following initialization -->
<!-- parameters (default values are in square brackets): -->
<!-- -->
<!-- debug Debugging detail level for messages logged -->
<!-- by this servlet. [0] -->
<!-- -->
<!-- fileEncoding Encoding to be used to read static resources -->
<!-- [platform default] -->
<!-- -->
<!-- input Input buffer size (in bytes) when reading -->
<!-- resources to be served. [2048] -->
<!-- -->
<!-- listings Should directory listings be produced if there -->
<!-- is no welcome file in this directory? [false] -->
<!-- WARNING: Listings for directories with many -->
<!-- entries can be slow and may consume -->
<!-- significant proportions of server resources. -->
<!-- -->
<!-- output Output buffer size (in bytes) when writing -->
<!-- resources to be served. [2048] -->
<!-- -->
<!-- readonly Is this context "read only", so HTTP -->
<!-- commands like PUT and DELETE are -->
<!-- rejected? [true] -->
<!-- -->
<!-- readmeFile File name to display with the directory -->
<!-- contents. [null] -->
<!-- -->
<!-- sendfileSize If the connector used supports sendfile, this -->
<!-- represents the minimal file size in KB for -->
<!-- which sendfile will be used. Use a negative -->
<!-- value to always disable sendfile. [48] -->
<!-- -->
<!-- useAcceptRanges Should the Accept-Ranges header be included -->
<!-- in responses where appropriate? [true] -->
<!-- -->
<!-- For directory listing customization. Checks localXsltFile, then -->
<!-- globalXsltFile, then defaults to original behavior. -->
<!-- -->
<!-- localXsltFile Make directory listings an XML doc and -->
<!-- pass the result to this style sheet residing -->
<!-- in that directory. This overrides -->
<!-- globalXsltFile[null] -->
<!-- -->
<!-- globalXsltFile Site wide configuration version of -->
  <!--                       localXsltFile. This argument must be either  -->
  <!--                       an absolute or relative (to either           -->
  <!--                       CATALINA_BASE/conf or CATALINA_HOME/conf)    -->
<!-- path that points to the location below -->
<!-- CATALINA_BASE/conf (checked first) or -->
  <!--                       $CATALINA_HOME/conf (checked second).        -->
<!-- -->
<!-- -->
<servlet>
<servlet-name>default</servlet-name>
<servlet-class>org.apache.catalina.servlets.DefaultServlet</servlet-class>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>listings</param-name>
<param-value>false</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
<!-- This servlet has been deprecated due to security concerns. Servlets -->
<!-- should be explicitly mapped in web.xml -->
<!-- -->
<!-- The "invoker" servlet, which executes anonymous servlet classes -->
<!-- that have not been defined in a web.xml file. Traditionally, this -->
<!-- servlet is mapped to the URL pattern "/servlet/*", but you can map -->
<!-- it to other patterns as well. The extra path info portion of such a -->
<!-- request must be the fully qualified class name of a Java class that -->
<!-- implements Servlet (or extends HttpServlet), or the servlet name -->
<!-- of an existing servlet definition. This servlet supports the -->
<!-- following initialization parameters (default values are in square -->
<!-- brackets): -->
<!-- -->
<!-- debug Debugging detail level for messages logged -->
<!-- by this servlet. [0] -->
<!--
<servlet>
<servlet-name>invoker</servlet-name>
<servlet-class>
org.apache.catalina.servlets.InvokerServlet
</servlet-class>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<load-on-startup>2</load-on-startup>
</servlet>
-->
<!-- The JSP page compiler and execution servlet, which is the mechanism -->
<!-- used by Tomcat to support JSP pages. Traditionally, this servlet -->
<!-- is mapped to the URL pattern "*.jsp". This servlet supports the -->
<!-- following initialization parameters (default values are in square -->
<!-- brackets): -->
<!-- -->
<!-- checkInterval If development is false and checkInterval is -->
<!-- greater than zero, background compilations are -->
<!-- enabled. checkInterval is the time in seconds -->
<!-- between checks to see if a JSP page (and its -->
<!-- dependent files) needs to be recompiled. [0] -->
<!-- -->
<!-- classdebuginfo Should the class file be compiled with -->
<!-- debugging information? [true] -->
<!-- -->
<!-- classpath What class path should I use while compiling -->
<!-- generated servlets? [Created dynamically -->
<!-- based on the current web application] -->
<!-- -->
<!-- compiler Which compiler Ant should use to compile JSP -->
<!-- pages. See the jasper documentation for more -->
<!-- information. -->
<!-- -->
<!-- compilerSourceVM Compiler source VM. [1.5] -->
<!-- -->
<!-- compilerTargetVM Compiler target VM. [1.5] -->
<!-- -->
<!-- development Is Jasper used in development mode? If true, -->
<!-- the frequency at which JSPs are checked for -->
<!-- modification may be specified via the -->
<!-- modificationTestInterval parameter. [true] -->
<!-- -->
<!-- displaySourceFragment -->
<!-- Should a source fragment be included in -->
<!-- exception messages? [true] -->
<!-- -->
<!-- dumpSmap Should the SMAP info for JSR45 debugging be -->
<!-- dumped to a file? [false] -->
<!-- False if suppressSmap is true -->
<!-- -->
<!-- enablePooling Determines whether tag handler pooling is -->
<!-- enabled. This is a compilation option. It will -->
<!-- not alter the behaviour of JSPs that have -->
<!-- already been compiled. [true] -->
<!-- -->
<!-- engineOptionsClass Allows specifying the Options class used to -->
<!-- configure Jasper. If not present, the default -->
<!-- EmbeddedServletOptions will be used. -->
<!-- -->
<!-- errorOnUseBeanInvalidClassAttribute -->
<!-- Should Jasper issue an error when the value of -->
<!-- the class attribute in an useBean action is -->
<!-- not a valid bean class? [true] -->
<!-- -->
<!-- fork Tell Ant to fork compiles of JSP pages so that -->
<!-- a separate JVM is used for JSP page compiles -->
<!-- from the one Tomcat is running in. [true] -->
<!-- -->
<!-- genStrAsCharArray Should text strings be generated as char -->
<!-- arrays, to improve performance in some cases? -->
<!-- [false] -->
<!-- -->
<!-- ieClassId The class-id value to be sent to Internet -->
<!-- Explorer when using <jsp:plugin> tags. -->
<!-- [clsid:8AD9C840-044E-11D1-B3E9-00805F499D93] -->
<!-- -->
<!-- javaEncoding Java file encoding to use for generating java -->
<!-- source files. [UTF8] -->
<!-- -->
<!-- keepgenerated Should we keep the generated Java source code -->
<!-- for each page instead of deleting it? [true] -->
<!-- -->
<!-- mappedfile Should we generate static content with one -->
<!-- print statement per input line, to ease -->
<!-- debugging? [true] -->
<!-- -->
<!-- modificationTestInterval -->
<!-- Causes a JSP (and its dependent files) to not -->
<!-- be checked for modification during the -->
<!-- specified time interval (in seconds) from the -->
<!-- last time the JSP was checked for -->
<!-- modification. A value of 0 will cause the JSP -->
<!-- to be checked on every access. -->
<!-- Used in development mode only. [4] -->
<!-- -->
<!-- scratchdir What scratch directory should we use when -->
<!-- compiling JSP pages? [default work directory -->
<!-- for the current web application] -->
<!-- -->
<!-- suppressSmap Should the generation of SMAP info for JSR45 -->
<!-- debugging be suppressed? [false] -->
<!-- -->
<!-- trimSpaces Should white spaces in template text between -->
<!-- actions or directives be trimmed? [false] -->
<!-- -->
<!-- xpoweredBy Determines whether X-Powered-By response -->
<!-- header is added by generated servlet [false] -->
<!-- -->
<!-- If you wish to use Jikes to compile JSP pages: -->
<!-- Please see the "Using Jikes" section of the Jasper-HowTo -->
<!-- page in the Tomcat documentation. -->
<servlet>
<servlet-name>jsp</servlet-name>
<servlet-class>org.apache.jasper.servlet.JspServlet</servlet-class>
<init-param>
<param-name>fork</param-name>
<param-value>false</param-value>
</init-param>
<init-param>
<param-name>xpoweredBy</param-name>
<param-value>false</param-value>
</init-param>
<init-param>
<param-name>development</param-name>
<param-value>false</param-value>
</init-param>
<load-on-startup>3</load-on-startup>
</servlet>
<!-- NOTE: An SSI Filter is also available as an alternative SSI -->
<!-- implementation. Use either the Servlet or the Filter but NOT both. -->
<!-- -->
<!-- Server Side Includes processing servlet, which processes SSI -->
<!-- directives in HTML pages consistent with similar support in web -->
<!-- servers like Apache. Traditionally, this servlet is mapped to the -->
<!-- URL pattern "*.shtml". This servlet supports the following -->
<!-- initialization parameters (default values are in square brackets): -->
<!-- -->
<!-- buffered Should output from this servlet be buffered? -->
<!-- (0=false, 1=true) [0] -->
<!-- -->
<!-- debug Debugging detail level for messages logged -->
<!-- by this servlet. [0] -->
<!-- -->
<!-- expires The number of seconds before a page with SSI -->
<!-- directives will expire. [No default] -->
<!-- -->
<!-- isVirtualWebappRelative -->
<!-- Should "virtual" paths be interpreted as -->
<!-- relative to the context root, instead of -->
<!-- the server root? (0=false, 1=true) [0] -->
<!-- -->
<!-- inputEncoding The encoding to assume for SSI resources if -->
<!-- one is not available from the resource. -->
<!-- [Platform default] -->
<!-- -->
<!-- outputEncoding The encoding to use for the page that results -->
<!-- from the SSI processing. [UTF-8] -->
<!--
<servlet>
<servlet-name>ssi</servlet-name>
<servlet-class>
org.apache.catalina.ssi.SSIServlet
</servlet-class>
<init-param>
<param-name>buffered</param-name>
<param-value>1</param-value>
</init-param>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>expires</param-name>
<param-value>666</param-value>
</init-param>
<init-param>
<param-name>isVirtualWebappRelative</param-name>
<param-value>0</param-value>
</init-param>
<load-on-startup>4</load-on-startup>
</servlet>
-->
<!-- Common Gateway Includes (CGI) processing servlet, which supports -->
<!-- execution of external applications that conform to the CGI spec -->
<!-- requirements. Typically, this servlet is mapped to the URL pattern -->
<!-- "/cgi-bin/*", which means that any CGI applications that are -->
<!-- executed must be present within the web application. This servlet -->
<!-- supports the following initialization parameters (default values -->
<!-- are in square brackets): -->
<!-- -->
<!-- cgiPathPrefix The CGI search path will start at -->
<!-- webAppRootDir + File.separator + this prefix. -->
<!-- [WEB-INF/cgi] -->
<!-- -->
<!-- debug Debugging detail level for messages logged -->
<!-- by this servlet. [0] -->
<!-- -->
<!-- executable Name of the executable used to run the -->
<!-- script. [perl] -->
<!-- -->
<!-- parameterEncoding Name of parameter encoding to be used with -->
<!-- CGI servlet. -->
<!-- [System.getProperty("file.encoding","UTF-8")] -->
<!-- -->
<!-- passShellEnvironment Should the shell environment variables (if -->
<!-- any) be passed to the CGI script? [false] -->
<!-- -->
<!-- stderrTimeout The time (in milliseconds) to wait for the -->
<!-- reading of stderr to complete before -->
<!-- terminating the CGI process. [2000] -->
<!--
<servlet>
<servlet-name>cgi</servlet-name>
<servlet-class>org.apache.catalina.servlets.CGIServlet</servlet-class>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>cgiPathPrefix</param-name>
<param-value>WEB-INF/cgi</param-value>
</init-param>
<load-on-startup>5</load-on-startup>
</servlet>
-->
<!-- ================ Built In Servlet Mappings ========================= -->
<!-- The servlet mappings for the built in servlets defined above. Note -->
<!-- that, by default, the CGI and SSI servlets are *not* mapped. You -->
<!-- must uncomment these mappings (or add them to your application's own -->
<!-- web.xml deployment descriptor) to enable these services -->
<!-- The mapping for the default servlet -->
<servlet-mapping>
<servlet-name>default</servlet-name>
<url-pattern>/</url-pattern>
</servlet-mapping>
<!-- The mapping for the deprecated invoker servlet -->
<!--
<servlet-mapping>
<servlet-name>invoker</servlet-name>
<url-pattern>/servlet/*</url-pattern>
</servlet-mapping>
-->
<!-- The mapping for the JSP servlet -->
<servlet-mapping>
<servlet-name>jsp</servlet-name>
<url-pattern>*.jsp</url-pattern>
</servlet-mapping>
<servlet-mapping>
<servlet-name>jsp</servlet-name>
<url-pattern>*.jspx</url-pattern>
</servlet-mapping>
<!-- The mapping for the SSI servlet -->
<!--
<servlet-mapping>
<servlet-name>ssi</servlet-name>
<url-pattern>*.shtml</url-pattern>
</servlet-mapping>
-->
<!-- The mapping for the CGI Gateway servlet -->
<!--
<servlet-mapping>
<servlet-name>cgi</servlet-name>
<url-pattern>/cgi-bin/*</url-pattern>
</servlet-mapping>
-->
<!-- ================== Built In Filter Definitions ===================== -->
<!-- NOTE: An SSI Servlet is also available as an alternative SSI -->
<!-- implementation. Use either the Servlet or the Filter but NOT both. -->
<!-- -->
<!-- Server Side Includes processing filter, which processes SSI -->
<!-- directives in HTML pages consistent with similar support in web -->
<!-- servers like Apache. Traditionally, this filter is mapped to the -->
<!-- URL pattern "*.shtml", though it can be mapped to "*" as it will -->
<!-- selectively enable/disable SSI processing based on mime types. For -->
<!-- this to work you will need to uncomment the .shtml mime type -->
<!-- definition towards the bottom of this file. -->
<!-- The contentType init param allows you to apply SSI processing to JSP -->
<!-- pages, javascript, or any other content you wish. This filter -->
<!-- supports the following initialization parameters (default values are -->
<!-- in square brackets): -->
<!-- -->
<!-- contentType A regex pattern that must be matched before -->
<!-- SSI processing is applied. -->
<!-- [text/x-server-parsed-html(;.*)?] -->
<!-- -->
<!-- debug Debugging detail level for messages logged -->
<!-- by this servlet. [0] -->
<!-- -->
<!-- expires The number of seconds before a page with SSI -->
<!-- directives will expire. [No default] -->
<!-- -->
<!-- isVirtualWebappRelative -->
<!-- Should "virtual" paths be interpreted as -->
<!-- relative to the context root, instead of -->
<!-- the server root? (0=false, 1=true) [0] -->
<!--
<filter>
<filter-name>ssi</filter-name>
<filter-class>
org.apache.catalina.ssi.SSIFilter
</filter-class>
<init-param>
<param-name>contentType</param-name>
<param-value>text/x-server-parsed-html(;.*)?</param-value>
</init-param>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>expires</param-name>
<param-value>666</param-value>
</init-param>
<init-param>
<param-name>isVirtualWebappRelative</param-name>
<param-value>0</param-value>
</init-param>
</filter>
-->
<!-- ==================== Built In Filter Mappings ====================== -->
<!-- The mapping for the SSI Filter -->
<!--
<filter-mapping>
<filter-name>ssi</filter-name>
<url-pattern>*.shtml</url-pattern>
</filter-mapping>
-->
<!-- ==================== Default Session Configuration ================= -->
<!-- You can set the default session timeout (in minutes) for all newly -->
<!-- created sessions by modifying the value below. -->
<session-config>
<session-timeout>30</session-timeout>
</session-config>
<!-- ===================== Default MIME Type Mappings =================== -->
<!-- When serving static resources, Tomcat will automatically generate -->
<!-- a "Content-Type" header based on the resource's filename extension, -->
<!-- based on these mappings. Additional mappings can be added here (to -->
<!-- apply to all web applications), or in your own application's web.xml -->
<!-- deployment descriptor. -->
<mime-mapping>
<extension>abs</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ai</extension>
<mime-type>application/postscript</mime-type>
</mime-mapping>
<mime-mapping>
<extension>aif</extension>
<mime-type>audio/x-aiff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>aifc</extension>
<mime-type>audio/x-aiff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>aiff</extension>
<mime-type>audio/x-aiff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>aim</extension>
<mime-type>application/x-aim</mime-type>
</mime-mapping>
<mime-mapping>
<extension>art</extension>
<mime-type>image/x-jg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>asf</extension>
<mime-type>video/x-ms-asf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>asx</extension>
<mime-type>video/x-ms-asf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>au</extension>
<mime-type>audio/basic</mime-type>
</mime-mapping>
<mime-mapping>
<extension>avi</extension>
<mime-type>video/x-msvideo</mime-type>
</mime-mapping>
<mime-mapping>
<extension>avx</extension>
<mime-type>video/x-rad-screenplay</mime-type>
</mime-mapping>
<mime-mapping>
<extension>bcpio</extension>
<mime-type>application/x-bcpio</mime-type>
</mime-mapping>
<mime-mapping>
<extension>bin</extension>
<mime-type>application/octet-stream</mime-type>
</mime-mapping>
<mime-mapping>
<extension>bmp</extension>
<mime-type>image/bmp</mime-type>
</mime-mapping>
<mime-mapping>
<extension>body</extension>
<mime-type>text/html</mime-type>
</mime-mapping>
<mime-mapping>
<extension>cdf</extension>
<mime-type>application/x-cdf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>cer</extension>
<mime-type>application/x-x509-ca-cert</mime-type>
</mime-mapping>
<mime-mapping>
<extension>class</extension>
<mime-type>application/java</mime-type>
</mime-mapping>
<mime-mapping>
<extension>cpio</extension>
<mime-type>application/x-cpio</mime-type>
</mime-mapping>
<mime-mapping>
<extension>csh</extension>
<mime-type>application/x-csh</mime-type>
</mime-mapping>
<mime-mapping>
<extension>css</extension>
<mime-type>text/css</mime-type>
</mime-mapping>
<mime-mapping>
<extension>dib</extension>
<mime-type>image/bmp</mime-type>
</mime-mapping>
<mime-mapping>
<extension>doc</extension>
<mime-type>application/msword</mime-type>
</mime-mapping>
<mime-mapping>
<extension>dtd</extension>
<mime-type>application/xml-dtd</mime-type>
</mime-mapping>
<mime-mapping>
<extension>dv</extension>
<mime-type>video/x-dv</mime-type>
</mime-mapping>
<mime-mapping>
<extension>dvi</extension>
<mime-type>application/x-dvi</mime-type>
</mime-mapping>
<mime-mapping>
<extension>eps</extension>
<mime-type>application/postscript</mime-type>
</mime-mapping>
<mime-mapping>
<extension>etx</extension>
<mime-type>text/x-setext</mime-type>
</mime-mapping>
<mime-mapping>
<extension>exe</extension>
<mime-type>application/octet-stream</mime-type>
</mime-mapping>
<mime-mapping>
<extension>gif</extension>
<mime-type>image/gif</mime-type>
</mime-mapping>
<mime-mapping>
<extension>gtar</extension>
<mime-type>application/x-gtar</mime-type>
</mime-mapping>
<mime-mapping>
<extension>gz</extension>
<mime-type>application/x-gzip</mime-type>
</mime-mapping>
<mime-mapping>
<extension>hdf</extension>
<mime-type>application/x-hdf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>hqx</extension>
<mime-type>application/mac-binhex40</mime-type>
</mime-mapping>
<mime-mapping>
<extension>htc</extension>
<mime-type>text/x-component</mime-type>
</mime-mapping>
<mime-mapping>
<extension>htm</extension>
<mime-type>text/html</mime-type>
</mime-mapping>
<mime-mapping>
<extension>html</extension>
<mime-type>text/html</mime-type>
</mime-mapping>
<mime-mapping>
<extension>hqx</extension>
<mime-type>application/mac-binhex40</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ief</extension>
<mime-type>image/ief</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jad</extension>
<mime-type>text/vnd.sun.j2me.app-descriptor</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jar</extension>
<mime-type>application/java-archive</mime-type>
</mime-mapping>
<mime-mapping>
<extension>java</extension>
<mime-type>text/plain</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jnlp</extension>
<mime-type>application/x-java-jnlp-file</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jpe</extension>
<mime-type>image/jpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jpeg</extension>
<mime-type>image/jpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jpg</extension>
<mime-type>image/jpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>js</extension>
<mime-type>text/javascript</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jsf</extension>
<mime-type>text/plain</mime-type>
</mime-mapping>
<mime-mapping>
<extension>jspf</extension>
<mime-type>text/plain</mime-type>
</mime-mapping>
<mime-mapping>
<extension>kar</extension>
<mime-type>audio/x-midi</mime-type>
</mime-mapping>
<mime-mapping>
<extension>latex</extension>
<mime-type>application/x-latex</mime-type>
</mime-mapping>
<mime-mapping>
<extension>m3u</extension>
<mime-type>audio/x-mpegurl</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mac</extension>
<mime-type>image/x-macpaint</mime-type>
</mime-mapping>
<mime-mapping>
<extension>man</extension>
<mime-type>application/x-troff-man</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mathml</extension>
<mime-type>application/mathml+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>me</extension>
<mime-type>application/x-troff-me</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mid</extension>
<mime-type>audio/x-midi</mime-type>
</mime-mapping>
<mime-mapping>
<extension>midi</extension>
<mime-type>audio/x-midi</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mif</extension>
<mime-type>application/x-mif</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mov</extension>
<mime-type>video/quicktime</mime-type>
</mime-mapping>
<mime-mapping>
<extension>movie</extension>
<mime-type>video/x-sgi-movie</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mp1</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mp2</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mp3</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mp4</extension>
<mime-type>video/mp4</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpa</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpe</extension>
<mime-type>video/mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpeg</extension>
<mime-type>video/mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpega</extension>
<mime-type>audio/x-mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpg</extension>
<mime-type>video/mpeg</mime-type>
</mime-mapping>
<mime-mapping>
<extension>mpv2</extension>
<mime-type>video/mpeg2</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ms</extension>
<mime-type>application/x-wais-source</mime-type>
</mime-mapping>
<mime-mapping>
<extension>nc</extension>
<mime-type>application/x-netcdf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>oda</extension>
<mime-type>application/oda</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Database -->
<extension>odb</extension>
<mime-type>application/vnd.oasis.opendocument.database</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Chart -->
<extension>odc</extension>
<mime-type>application/vnd.oasis.opendocument.chart</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Formula -->
<extension>odf</extension>
<mime-type>application/vnd.oasis.opendocument.formula</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Drawing -->
<extension>odg</extension>
<mime-type>application/vnd.oasis.opendocument.graphics</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Image -->
<extension>odi</extension>
<mime-type>application/vnd.oasis.opendocument.image</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Master Document -->
<extension>odm</extension>
<mime-type>application/vnd.oasis.opendocument.text-master</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Presentation -->
<extension>odp</extension>
<mime-type>application/vnd.oasis.opendocument.presentation</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Spreadsheet -->
<extension>ods</extension>
<mime-type>application/vnd.oasis.opendocument.spreadsheet</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Text -->
<extension>odt</extension>
<mime-type>application/vnd.oasis.opendocument.text</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ogg</extension>
<mime-type>application/ogg</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Drawing Template -->
        <extension>otg</extension>
<mime-type>application/vnd.oasis.opendocument.graphics-template</mime-type>
</mime-mapping>
<mime-mapping>
<!-- HTML Document Template -->
<extension>oth</extension>
<mime-type>application/vnd.oasis.opendocument.text-web</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Presentation Template -->
<extension>otp</extension>
<mime-type>application/vnd.oasis.opendocument.presentation-template</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Spreadsheet Template -->
<extension>ots</extension>
        <mime-type>application/vnd.oasis.opendocument.spreadsheet-template</mime-type>
</mime-mapping>
<mime-mapping>
<!-- OpenDocument Text Template -->
<extension>ott</extension>
<mime-type>application/vnd.oasis.opendocument.text-template</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pbm</extension>
<mime-type>image/x-portable-bitmap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pct</extension>
<mime-type>image/pict</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pdf</extension>
<mime-type>application/pdf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pgm</extension>
<mime-type>image/x-portable-graymap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pic</extension>
<mime-type>image/pict</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pict</extension>
<mime-type>image/pict</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pls</extension>
<mime-type>audio/x-scpls</mime-type>
</mime-mapping>
<mime-mapping>
<extension>png</extension>
<mime-type>image/png</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pnm</extension>
<mime-type>image/x-portable-anymap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pnt</extension>
<mime-type>image/x-macpaint</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ppm</extension>
<mime-type>image/x-portable-pixmap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ppt</extension>
<mime-type>application/vnd.ms-powerpoint</mime-type>
</mime-mapping>
<mime-mapping>
<extension>pps</extension>
<mime-type>application/vnd.ms-powerpoint</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ps</extension>
<mime-type>application/postscript</mime-type>
</mime-mapping>
<mime-mapping>
<extension>psd</extension>
<mime-type>image/x-photoshop</mime-type>
</mime-mapping>
<mime-mapping>
<extension>qt</extension>
<mime-type>video/quicktime</mime-type>
</mime-mapping>
<mime-mapping>
<extension>qti</extension>
<mime-type>image/x-quicktime</mime-type>
</mime-mapping>
<mime-mapping>
<extension>qtif</extension>
<mime-type>image/x-quicktime</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ras</extension>
<mime-type>image/x-cmu-raster</mime-type>
</mime-mapping>
<mime-mapping>
<extension>rdf</extension>
<mime-type>application/rdf+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>rgb</extension>
<mime-type>image/x-rgb</mime-type>
</mime-mapping>
<mime-mapping>
<extension>rm</extension>
<mime-type>application/vnd.rn-realmedia</mime-type>
</mime-mapping>
<mime-mapping>
<extension>roff</extension>
<mime-type>application/x-troff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>rtf</extension>
<mime-type>application/rtf</mime-type>
</mime-mapping>
<mime-mapping>
<extension>rtx</extension>
<mime-type>text/richtext</mime-type>
</mime-mapping>
<mime-mapping>
<extension>sh</extension>
<mime-type>application/x-sh</mime-type>
</mime-mapping>
<mime-mapping>
<extension>shar</extension>
<mime-type>application/x-shar</mime-type>
</mime-mapping>
<!--
<mime-mapping>
<extension>shtml</extension>
<mime-type>text/x-server-parsed-html</mime-type>
</mime-mapping>
-->
<mime-mapping>
<extension>smf</extension>
<mime-type>audio/x-midi</mime-type>
</mime-mapping>
<mime-mapping>
<extension>sit</extension>
<mime-type>application/x-stuffit</mime-type>
</mime-mapping>
<mime-mapping>
<extension>snd</extension>
<mime-type>audio/basic</mime-type>
</mime-mapping>
<mime-mapping>
<extension>src</extension>
<mime-type>application/x-wais-source</mime-type>
</mime-mapping>
<mime-mapping>
<extension>sv4cpio</extension>
<mime-type>application/x-sv4cpio</mime-type>
</mime-mapping>
<mime-mapping>
<extension>sv4crc</extension>
<mime-type>application/x-sv4crc</mime-type>
</mime-mapping>
<mime-mapping>
<extension>svg</extension>
<mime-type>image/svg+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>svgz</extension>
<mime-type>image/svg+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>swf</extension>
<mime-type>application/x-shockwave-flash</mime-type>
</mime-mapping>
<mime-mapping>
<extension>t</extension>
<mime-type>application/x-troff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tar</extension>
<mime-type>application/x-tar</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tcl</extension>
<mime-type>application/x-tcl</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tex</extension>
<mime-type>application/x-tex</mime-type>
</mime-mapping>
<mime-mapping>
<extension>texi</extension>
<mime-type>application/x-texinfo</mime-type>
</mime-mapping>
<mime-mapping>
<extension>texinfo</extension>
<mime-type>application/x-texinfo</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tif</extension>
<mime-type>image/tiff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tiff</extension>
<mime-type>image/tiff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tr</extension>
<mime-type>application/x-troff</mime-type>
</mime-mapping>
<mime-mapping>
<extension>tsv</extension>
<mime-type>text/tab-separated-values</mime-type>
</mime-mapping>
<mime-mapping>
<extension>txt</extension>
<mime-type>text/plain</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ulw</extension>
<mime-type>audio/basic</mime-type>
</mime-mapping>
<mime-mapping>
<extension>ustar</extension>
<mime-type>application/x-ustar</mime-type>
</mime-mapping>
<mime-mapping>
<extension>vxml</extension>
<mime-type>application/voicexml+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xbm</extension>
<mime-type>image/x-xbitmap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xht</extension>
<mime-type>application/xhtml+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xhtml</extension>
<mime-type>application/xhtml+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xls</extension>
<mime-type>application/vnd.ms-excel</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xml</extension>
<mime-type>application/xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xpm</extension>
<mime-type>image/x-xpixmap</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xsl</extension>
<mime-type>application/xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xslt</extension>
<mime-type>application/xslt+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xul</extension>
<mime-type>application/vnd.mozilla.xul+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>xwd</extension>
<mime-type>image/x-xwindowdump</mime-type>
</mime-mapping>
<mime-mapping>
<extension>vsd</extension>
<mime-type>application/x-visio</mime-type>
</mime-mapping>
<mime-mapping>
<extension>wav</extension>
<mime-type>audio/x-wav</mime-type>
</mime-mapping>
<mime-mapping>
<!-- Wireless Bitmap -->
<extension>wbmp</extension>
<mime-type>image/vnd.wap.wbmp</mime-type>
</mime-mapping>
<mime-mapping>
<!-- WML Source -->
<extension>wml</extension>
<mime-type>text/vnd.wap.wml</mime-type>
</mime-mapping>
<mime-mapping>
<!-- Compiled WML -->
<extension>wmlc</extension>
<mime-type>application/vnd.wap.wmlc</mime-type>
</mime-mapping>
<mime-mapping>
<!-- WML Script Source -->
<extension>wmls</extension>
<mime-type>text/vnd.wap.wmlscript</mime-type>
</mime-mapping>
<mime-mapping>
<!-- Compiled WML Script -->
<extension>wmlscriptc</extension>
<mime-type>application/vnd.wap.wmlscriptc</mime-type>
</mime-mapping>
<mime-mapping>
<extension>wmv</extension>
<mime-type>video/x-ms-wmv</mime-type>
</mime-mapping>
<mime-mapping>
<extension>wrl</extension>
<mime-type>x-world/x-vrml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>wspolicy</extension>
<mime-type>application/wspolicy+xml</mime-type>
</mime-mapping>
<mime-mapping>
<extension>Z</extension>
<mime-type>application/x-compress</mime-type>
</mime-mapping>
<mime-mapping>
<extension>z</extension>
<mime-type>application/x-compress</mime-type>
</mime-mapping>
<mime-mapping>
<extension>zip</extension>
<mime-type>application/zip</mime-type>
</mime-mapping>
<!-- ==================== Default Welcome File List ===================== -->
<!-- When a request URI refers to a directory, the default servlet looks -->
<!-- for a "welcome file" within that directory and, if present, -->
<!-- to the corresponding resource URI for display. If no welcome file -->
<!-- is present, the default servlet either serves a directory listing, -->
<!-- or returns a 404 status, depending on how it is configured. -->
<!-- -->
<!-- If you define welcome files in your own application's web.xml -->
<!-- deployment descriptor, that list *replaces* the list configured -->
<!-- here, so be sure that you include any of the default values that -->
<!-- you wish to include. -->
<welcome-file-list>
<welcome-file>index.html</welcome-file>
<welcome-file>index.htm</welcome-file>
<welcome-file>index.jsp</welcome-file>
</welcome-file-list>
</web-app>
""".strip()
web_xml_content_missing_timeout = """
<?xml version="1.0" encoding="ISO-8859-1"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://java.sun.com/xml/ns/javaee http://java.sun.com/xml/ns/javaee/web-app_2_5.xsd"
version="2.5">
<servlet>
<servlet-name>default</servlet-name>
<servlet-class>org.apache.catalina.servlets.DefaultServlet</servlet-class>
<init-param>
<param-name>debug</param-name>
<param-value>0</param-value>
</init-param>
<init-param>
<param-name>listings</param-name>
<param-value>false</param-value>
</init-param>
<load-on-startup>1</load-on-startup>
</servlet>
</web-app>
"""
def test_get_tmo():
result = TomcatWebXml(context_wrap(web_xml_content))
assert result.get("session-timeout") == 30
def test_get_tmo_missing_timeout():
result = TomcatWebXml(context_wrap(web_xml_content_missing_timeout))
assert result.get("session-timeout") is None
server_xml_content = """
<?xml version='1.0' encoding='utf-8'?>
<Server port="8005" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<Listener className="org.apache.catalina.core.JasperListener" />
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<GlobalNamingResources>
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<!-- A "Service" is a collection of one or more "Connectors" that share
a single "Container" Note: A "Service" is not itself a "Container",
so you may not define subcomponents such as "Valves" at this level.
Documentation at /docs/config/service.html
-->
<Service name="Catalina">
<Connector port="8080" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443" />
<Connector port="8443" protocol="HTTP/1.1" SSLEnabled="true"
maxThreads="150" scheme="https" secure="true"
clientAuth="want"
sslProtocols="TLSv1.2,TLSv1.1,TLSv1"
keystoreFile="conf/keystore"
truststoreFile="conf/keystore"
keystorePass="oXQ8LfAGsf97KQxwwPta2X3vnUv7P5QM"
keystoreType="PKCS12"
ciphers="SSL_RSA_WITH_3DES_EDE_CBC_SHA,
TLS_RSA_WITH_AES_256_CBC_SHA,
TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA"
truststorePass="oXQ8LfAGsf97KQxwwPta2X3vnUv7P5QM" />
<!-- Define an AJP 1.3 Connector on port 8009 -->
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />
<Engine name="Catalina" defaultHost="localhost">
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true"
xmlValidation="false" xmlNamespaceAware="false">
</Host>
</Engine>
</Service>
</Server>
"""
def test_tomcat_server_xml():
result = TomcatServerXml(context_wrap(server_xml_content))
engines = result.get_elements(".//Service/Engine")
assert len(engines) == 1
assert engines[0].get('name') == "Catalina"
def test_web_xml_doc_examples():
env = {
'TomcatWebXml': TomcatWebXml,
'web_xml': TomcatWebXml(context_wrap(web_xml_content, path='/usr/share/tomcat/web.xml')),
'TomcatServerXml': TomcatServerXml,
'server_xml': TomcatServerXml(context_wrap(server_xml_content, path='/usr/share/tomcat/server.xml'))
}
failed, total = doctest.testmod(tomcat_xml, globs=env)
assert failed == 0
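# Hedged usage note: these tests are normally collected and run with pytest,
# e.g.  pytest insights/parsers/tests/test_tomcat_xml.py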
|
RedHatInsights/insights-core
|
insights/parsers/tests/test_tomcat_xml.py
|
Python
|
apache-2.0
| 55,650
|
[
"NetCDF"
] |
1de98b93a165bd29916b9f49e40830dd6177aba73af9782552e554a6fb9e25b3
|
"""P65 Intermediate Representation
Classes for representing the Intermediate nodes upon which the
assembler passes operate."""
# Copyright 2002 Michael C. Martin.
# You may use, modify, and distribute this file under the BSD
# license: See LICENSE.txt for details.
from __future__ import nested_scopes
import Ophis.Errors as Err
class Node:
"""The default IR Node
Instances of Node always have the three fields ppt(Program Point),
nodetype(a string), and data (a list)."""
def __init__(self, ppt, nodetype, *data):
self.ppt = ppt
self.nodetype = nodetype
self.data = list(data)
def accept(self, asmpass, env=None):
"""Implements the Visitor pattern for an assembler pass.
Calls the routine 'asmpass.visitTYPE(self, env)' where
TYPE is the value of self.nodetype."""
Err.currentpoint = self.ppt
routine = getattr(asmpass, "visit"+self.nodetype, asmpass.visitUnknown)
routine(self, env)
def __str__(self):
if self.nodetype != "SEQUENCE":
return str(self.ppt)+": "+self.nodetype+" - "+" ".join(map(str, self.data))
else:
return "\n".join(map(str, self.data))
def __repr__(self):
args = [self.ppt, self.nodetype] + self.data
return "Node(" + ", ".join(map(repr, args)) + ")"
NullNode = Node("<none>", "None")
def SequenceNode(ppt, nodelist):
return Node(ppt, "SEQUENCE", *nodelist)
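# Hedged usage sketch (program points are plain strings here for brevity):
#
#     n = SequenceNode("<demo>", [Node("<demo>", "Label", "start"),
#                                 Node("<demo>", "Byte", 42)])
#     print n    # one line per child node, via Node.__str__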
class Expr:
"""Base class for P65 expressions
All expressions have a field called "data" and a boolean field
called "hardcoded". An expression is hardcoded if it has no
symbolic values in it."""
def __init__(self, data):
self.data = data
self.hardcoded = 0
def __str__(self):
return "<UNKNOWN: "+`self.data`+">"
def valid(self, env=None, PCvalid=0):
"""Returns true if the the expression can be successfully
evaluated in the specified environment."""
return 0
def value(self, env=None):
"Evaluates this expression in the given environment."
return None
class ConstantExpr(Expr):
"Represents a numeric constant"
def __init__(self, data):
self.data = data
self.hardcoded = 1
def __str__(self):
return str(self.data)
def valid(self, env=None, PCvalid=0):
return 1
def value(self, env=None):
return self.data
class LabelExpr(Expr):
"Represents a symbolic constant"
def __init__(self, data):
self.data = data
self.hardcoded = 0
def __str__(self):
return self.data
def valid(self, env=None, PCvalid=0):
return (env is not None) and self.data in env
def value(self, env=None):
return env[self.data]
class PCExpr(Expr):
"Represents the current program counter: ^"
def __init__(self):
self.hardcoded = 0
def __str__(self):
return "^"
def valid(self, env=None, PCvalid=0):
return env is not None and PCvalid
def value(self, env=None):
return env.getPC()
class HighByteExpr(Expr):
"Represents the expression >{data}"
def __init__(self, data):
self.data = data
self.hardcoded = data.hardcoded
def __str__(self):
return ">"+str(self.data)
def valid(self, env=None, PCvalid=0):
return self.data.valid(env, PCvalid)
def value(self, env=None):
val = self.data.value(env)
return (val >> 8) & 0xff
class LowByteExpr(Expr):
"Represents the expression <{data}"
def __init__(self, data):
self.data = data
self.hardcoded = data.hardcoded
def __str__(self):
return "<"+str(self.data)
def valid(self, env=None, PCvalid=0):
return self.data.valid(env, PCvalid)
def value(self, env=None):
val = self.data.value(env)
return val & 0xff
class SequenceExpr(Expr):
"""Represents an interleaving of operands (of type Expr) and
operators (of type String). Subclasses must provide a routine
operate(self, firstarg, op, secondarg) that evaluates the
operator."""
def __init__(self, data):
"""Constructor for Sequence Expressions. Results will be
        screwy if the data input isn't a list with types
[Expr, str, Expr, str, Expr, str, ... Expr, str, Expr]."""
self.data = data
self.operands = [x for x in data if isinstance(x, Expr)]
self.operators = [x for x in data if type(x)==str]
for i in self.operands:
if not i.hardcoded:
self.hardcoded = 0
break
else:
self.hardcoded = 1
def __str__(self):
return "["+" ".join(map(str, self.data))+"]"
def valid(self, env=None, PCvalid=0):
for i in self.operands:
if not i.valid(env, PCvalid):
return 0
return 1
def value(self, env=None):
subs = map((lambda x: x.value(env)), self.operands)
result = subs[0]
index = 1
for op in self.operators:
result = self.operate(result, op, subs[index])
index += 1
return result
def operate(self, start, op, other):
if op=="*": return start * other
if op=="/": return start // other
if op=="+": return start + other
if op=="-": return start - other
if op=="&": return start & other
if op=="|": return start | other
if op=="^": return start ^ other
|
cacciatc/happiNES-dev
|
tools/assembler/Ophis-1.0/lib/Ophis/IR.py
|
Python
|
lgpl-2.1
| 4,970
|
[
"VisIt"
] |
b822dd2cd79da1e7eaf752209759d1526d6da24d514a19aab17341ad1d695199
|
"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import atexit
import os
import tarfile
import tempfile
import time
from . import types as t
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_ROOT,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
# improve performance by disabling uid/gid lookups
tarfile.pwd = None
tarfile.grp = None
# this bin symlink map must exactly match the contents of the bin directory
# it is necessary for payload creation to reconstruct the bin directory when running ansible-test from an installed version of ansible
ANSIBLE_BIN_SYMLINK_MAP = {
'ansible': '../lib/ansible/cli/scripts/ansible_cli_stub.py',
'ansible-config': 'ansible',
'ansible-connection': '../lib/ansible/cli/scripts/ansible_connection_cli_stub.py',
'ansible-console': 'ansible',
'ansible-doc': 'ansible',
'ansible-galaxy': 'ansible',
'ansible-inventory': 'ansible',
'ansible-playbook': 'ansible',
'ansible-pull': 'ansible',
'ansible-test': '../test/lib/ansible_test/_data/cli/ansible_test_cli_stub.py',
'ansible-vault': 'ansible',
}
def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
"""Create a payload for delegation."""
if args.explain:
return
files = list(data_context().ansible_source)
if not ANSIBLE_SOURCE_ROOT:
# reconstruct the bin directory which is not available when running from an ansible install
files.extend(create_temporary_bin_files(args))
if not data_context().content.is_ansible:
# exclude unnecessary files when not testing ansible itself
files = [f for f in files if
is_subdir(f[1], 'bin/') or
is_subdir(f[1], 'lib/ansible/') or
(is_subdir(f[1], 'test/lib/ansible_test/') and not is_subdir(f[1], 'test/lib/ansible_test/tests/'))]
if not isinstance(args, (ShellConfig, IntegrationConfig)):
# exclude built-in ansible modules when they are not needed
files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
if data_context().content.collection:
# include collections content for testing
files.extend((os.path.join(data_context().content.root, path), os.path.join(data_context().content.collection.directory, path))
for path in data_context().content.all_files())
# these files need to be migrated to the ansible-test data directory
hack_files_to_keep = (
'test/integration/integration.cfg',
'test/integration/integration_config.yml',
'test/integration/inventory',
'test/integration/network-integration.cfg',
'test/integration/target-prefixes.network',
'test/integration/windows-integration.cfg',
)
# temporary solution to include files not yet present in the ansible-test data directory
files.extend([(os.path.join(ANSIBLE_ROOT, path), path) for path in hack_files_to_keep])
for callback in data_context().payload_callbacks:
callback(files)
# maintain predictable file order
files = sorted(files)
display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
start = time.time()
with tarfile.TarFile.gzopen(dst_path, mode='w', compresslevel=4) as tar:
for src, dst in files:
display.info('%s -> %s' % (src, dst), verbosity=4)
tar.add(src, dst)
duration = time.time() - start
payload_size_bytes = os.path.getsize(dst_path)
display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
"""Create a temporary ansible bin directory populated using the symlink map."""
if args.explain:
temp_path = '/tmp/ansible-tmp-bin'
else:
temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
atexit.register(remove_tree, temp_path)
for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
path = os.path.join(temp_path, name)
        os.symlink(dest, path)  # targets are relative paths, so these must be symlinks, not hard links
return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
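# Editor's sketch (hypothetical helper): listing the members of a payload
# archive produced by create_payload, using only the stdlib tarfile API.
def _list_payload(dst_path):  # type: (str) -> t.List[str]
    with tarfile.open(dst_path, 'r:gz') as tar:
        return [member.name for member in tar.getmembers()]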
|
aperigault/ansible
|
test/lib/ansible_test/_internal/payload.py
|
Python
|
gpl-3.0
| 4,646
|
[
"Galaxy"
] |
00f828dcea6b1fe9187470f6d3ec71a52391ce72ad65219c416037cc0573cf51
|
from wsgiref.simple_server import make_server
__author__ = 'pahaz'
def application(environ, start_response):
assert environ.get('PATH_INFO') is not None, "environ['PATH_INFO'] is None"
status = "200 OK"
headers = [('Content-type', 'text/html; charset=utf-8')]
body = """<!DOCTYPE html>
<h1>Example-mini-application</h1>
"""
start_response(status, headers)
return [body.encode('utf-8')]
def run(host='', port=31338):
print("It's work! Visit http://{host}:{port}/".format(
host=host or 'localhost',
port=port))
httpd = make_server(host, port, application)
httpd.serve_forever()
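def smoke_test():
    # Editor's sketch (illustrative): call the WSGI app in-process with a
    # minimal environ dict; no socket or server is needed.
    captured = {}
    def start(status, headers):
        captured['status'] = status
    body = application({'PATH_INFO': '/'}, start)
    assert captured['status'] == "200 OK"
    assert b'Example-mini-application' in b''.join(body)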
if __name__ == "__main__":
run()
|
pahaz/homework-simple-python-web-application
|
main.py
|
Python
|
gpl-3.0
| 682
|
[
"VisIt"
] |
db0c0ba262deb117690f8e6f0e3b2a1cd0b785570b4e20a63aa07f5096332759
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import json
import os
from monty.json import MontyDecoder
from pymatgen.analysis.defects.dilute_solution_model import *
import random
import sympy
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
with open(
os.path.join(test_dir, 'mp1048_defect_formation_energies.json')) as fp:
formation_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1048_raw_defect_energies.json')) as fp:
raw_energy_dict = json.load(fp, cls=MontyDecoder)
with open(os.path.join(test_dir, 'mp1487_raw_defect_energies.json')) as fp:
mp1487_raw_energy_dict = json.load(fp, cls=MontyDecoder)
# TODO (from SP): You MUST redo this entire test. The whole test is
# monstrously slow. It takes more than 10 mins to get through this test alone.
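# Note (editor's addition): random.randint(0, 10) is inclusive at both ends, so
# the skip condition below is False only for 0 and 10 -- each test class runs
# with probability 2/11 per invocation.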
@unittest.skipIf(random.randint(0, 10) % 10 != 0,
"random skip.")
class DiluteSolutionModelTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = raw_energy_dict['bulk_energy']
self.asites = raw_energy_dict['antisites']
self.vac = raw_energy_dict['vacancies']
self.struct = raw_energy_dict['structure']
self.T = 600
self.trial_mu = formation_energy_dict[str(self.T)]['chemical_potential']
def test_formation_energies_without_chem_pot(self):
"""
Should generate formation energies without input chempot
"""
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_formation_energies_with_chem_pot(self):
energies, chem_pot = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='energy')
self.assertIsNotNone(energies)
self.assertIsNotNone(chem_pot)
def test_plot_data_without_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
def test_plot_data_with_chem_pot(self):
conc_data, en_data, mu_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(conc_data)
self.assertIsNotNone(en_data)
self.assertIsNotNone(mu_data)
for key, value in conc_data.items():
self.assertIsNotNone(value)
for key, value in mu_data.items():
self.assertIsNotNone(value)
for key, value in en_data.items():
self.assertIsNotNone(value)
# print(plot_data['y'])
@unittest.skipIf(random.randint(0, 10) % 10 != 0,
"random skip.")
class SoluteSiteFinderTest(unittest.TestCase):
def setUp(self):
"""
Setup mandatory inputs for dilute_solution_model
"""
self.e0 = mp1487_raw_energy_dict['bulk_energy']
self.asites = mp1487_raw_energy_dict['antisites']
self.vac = mp1487_raw_energy_dict['vacancies']
self.solutes = mp1487_raw_energy_dict['solutes']
self.struct = mp1487_raw_energy_dict['structure']
self.T = 1000
def test_plot_data_without_chem_pot(self):
plot_data = solute_site_preference_finder(
self.struct, self.e0, self.T, self.vac, self.asites, self.solutes,
solute_concen=0.01)
self.assertIsNotNone(plot_data)
def still_wait_plot_data_with_chem_pot(self):
plot_data = dilute_solution_model(
self.struct, self.e0, self.vac, self.asites, self.T,
trial_chem_pot=self.trial_mu, generate='plot')
self.assertIsNotNone(plot_data)
for key, value in plot_data.items():
self.assertIsNotNone(value)
if __name__ == "__main__":
unittest.main()
|
xhqu1981/pymatgen
|
pymatgen/analysis/defects/tests/test_dilute_solution_model.py
|
Python
|
mit
| 4,601
|
[
"pymatgen"
] |
b6d35758868a544b76fb374ca285132e68dc2f50a92694d0170f83ddff8b7196
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mesh Preview Generator.
Examples
--------
$ ./script/gen_mesh_prev.py meshes/2d/
"""
from optparse import OptionParser
import sys
sys.path.append('.')
import os
import vtk
from sfepy.discrete.fem import Mesh
def gen_shot(vtk_filename, png_filename):
"""
Generate PNG image of the FE mesh.
Parameters
----------
vtk_filename : str
The input mesh filename (file in VTK format).
png_filename : str
The name of the output PNG file.
"""
reader = vtk.vtkUnstructuredGridReader()
reader.SetFileName(vtk_filename)
reader.Update()
bnd = reader.GetOutput().GetPoints().GetBounds()
surface0 = vtk.vtkDataSetSurfaceFilter()
surface0.SetInput(reader.GetOutput())
surface0.Update()
if abs(bnd[5] - bnd[4]) > 1.0e-12:
tr = vtk.vtkTransform()
tr.RotateWXYZ(45,1,1,1)
trFilter = vtk.vtkTransformPolyDataFilter()
trFilter.SetTransform(tr)
trFilter.SetInputConnection(surface0.GetOutputPort())
trFilter.Update()
surface = trFilter
else:
surface = surface0
ca,cb = surface.GetOutput().GetCellData().GetScalars().GetRange()
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.667, 0.667)
lut.SetSaturationRange(0.0, 1.0)
lut.SetValueRange(0.8, 1.0)
lut.SetAlphaRange(1.0, 1.0)
lut.SetTableRange(ca,cb)
gf = vtk.vtkGraphicsFactory()
gf.SetOffScreenOnlyMode(1)
gf.SetUseMesaClasses(1)
ifa = vtk.vtkImagingFactory()
ifa.SetUseMesaClasses(1)
mapper = vtk.vtkPolyDataMapper()
mapper.SetLookupTable(lut)
mapper.SetScalarRange(ca,cb);
mapper.SetInput(surface.GetOutput())
mapper.SetScalarModeToUseCellData()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInput(surface.GetOutput())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.GetProperty().SetRepresentationToWireframe()
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetOffScreenRendering(1)
renWin.AddRenderer(ren)
ren.AddActor(actor)
ren.AddActor(actor2)
renWin.Render()
image = vtk.vtkWindowToImageFilter()
image.SetInput(renWin)
image.Update()
base, _ = os.path.splitext(vtk_filename)
writer = vtk.vtkPNGWriter()
writer.SetFileName(png_filename)
writer.SetInput(image.GetOutput())
writer.Write()
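# Editor's note (illustrative): gen_shot() can also be called standalone on any
# legacy-format unstructured-grid VTK file, e.g.
#   gen_shot('meshes/2d/square.vtk', 'square.png')   # paths are hypothetical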
usage = '%prog [options] mesh_dir\n' + __doc__.rstrip()
def main():
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
mesh_dir = args[0]
mesh_files = []
for (dirpath, dirnames, filenames) in os.walk(mesh_dir):
for ii in filenames:
_, ext = os.path.splitext(ii)
if ext.lower() in ['.mesh', '.vtk']:
mesh_files.append(dirpath + os.path.sep + ii)
for ii in mesh_files:
base, ext = os.path.splitext(ii)
fname_out = base + '.png'
if ext == '.mesh':
fname_in = 'aux.vtk'
mesh = Mesh.from_file(ii)
mesh.write(fname_in, io='auto')
else:
fname_in = ii
print('writing %s...' % fname_out)
gen_shot(fname_in, fname_out)
if __name__ == "__main__":
main()
|
RexFuzzle/sfepy
|
script/gen_mesh_prev.py
|
Python
|
bsd-3-clause
| 3,376
|
[
"VTK"
] |
875c55cd103fe130064b12ed894753e8c07579b3bfeb9e69911111b459c4178e
|
"""
desispec.zfind.redmonster
=========================
Classes for use with the redmonster package.
"""
from __future__ import division, absolute_import
import os
import numpy as np
import time
from desispec.zfind import ZfindBase
from desispec.interpolation import resample_flux
from desispec.log import get_logger
class RedMonsterZfind(ZfindBase):
"""Class documentation goes here.
"""
def __init__(self, wave, flux, ivar, R=None, dloglam=1e-4, objtype=None,
zrange_galaxy=(0.0, 1.6), zrange_qso=(0.0, 3.5), zrange_star=(-0.005, 0.005),nproc=1,npoly=2):
"""Uses Redmonster to classify and find redshifts.
See :class:`desispec.zfind.zfind.ZfindBase` class for inputs/outputs.
optional:
objtype : list or string of template object types to try
[ELG, LRG, QSO, GALAXY, STAR]
TODO: document redmonster specific output variables
"""
from redmonster.physics.zfinder import ZFinder
from redmonster.physics.zfitter import ZFitter
from redmonster.physics.zpicker2 import ZPicker
log=get_logger()
#- RedMonster templates don't quite go far enough into the blue,
#- so chop off some data
ii, = np.where(wave>3965)
wave = wave[ii]
flux = flux[:, ii]
ivar = ivar[:, ii]
#- Resample inputs to a loglam grid
start = round(np.log10(wave[0]), 4)+dloglam
stop = round(np.log10(wave[-1]), 4)
nwave = int((stop-start)/dloglam)
loglam = start + np.arange(nwave)*dloglam
nspec = flux.shape[0]
self.flux = np.empty((nspec, nwave))
self.ivar = np.empty((nspec, nwave))
for i in range(nspec):
self.flux[i], self.ivar[i] = resample_flux(10**loglam, wave, flux[i], ivar[i])
self.dloglam = dloglam
self.loglam = loglam
self.wave = 10**loglam
self.nwave = nwave
self.nspec = nspec
#- Standardize objtype, converting ELG,LRG -> GALAXY, make upper case
templatetypes = set()
if objtype is None:
templatetypes = set(['GALAXY', 'STAR', 'QSO'])
else:
if isinstance(objtype, str):
objtype = [objtype,]
objtype = [x.upper() for x in objtype]
for x in objtype:
if x in ['ELG', 'LRG']:
templatetypes.add('GALAXY')
elif x in ['QSO', 'GALAXY', 'STAR']:
templatetypes.add(x)
else:
raise ValueError('Unknown objtype '+x)
#- list of (templatename, zmin, zmax) to fix
self.template_dir = os.getenv('REDMONSTER_TEMPLATES_DIR')
self.templates = list()
for x in templatetypes:
if x == 'GALAXY':
self.templates.append(('ndArch-ssp_em_galaxy-v000.fits', zrange_galaxy[0], zrange_galaxy[1]))
elif x == 'STAR':
self.templates.append(('ndArch-spEigenStar-55734.fits', zrange_star[0], zrange_star[1]))
elif x == 'QSO':
self.templates.append(('ndArch-QSO-V003.fits', zrange_qso[0], zrange_qso[1]))
else:
raise ValueError("Bad template type "+x)
#- Find and refine best redshift per template
self.zfinders = list()
self.zfitters = list()
for template, zmin, zmax in self.templates:
start=time.time()
zfind = ZFinder(os.path.join(self.template_dir, template), npoly=npoly, zmin=zmin, zmax=zmax,nproc=nproc)
zfind.zchi2(self.flux, self.loglam, self.ivar, npixstep=2)
stop=time.time()
log.debug("Time to find the redshifts of %d fibers for template %s =%f sec"%(self.flux.shape[0],template,stop-start))
start=time.time()
zfit = ZFitter(zfind.zchi2arr, zfind.zbase)
zfit.z_refine2()
stop=time.time()
log.debug("Time to refine the redshift fit of %d fibers for template %s =%f sec"%(zfit.z.shape[0],template,stop-start))
for ifiber in range(zfit.z.shape[0]) :
log.debug("(after z_refine2) fiber #%d %s chi2s=%s zs=%s"%(ifiber,template,zfit.chi2vals[ifiber],zfit.z[ifiber]))
self.zfinders.append(zfind)
self.zfitters.append(zfit)
#- Create wrapper object needed for zpicker
specobj = _RedMonsterSpecObj(self.wave, self.flux, self.ivar)
flags = list()
for i in range(len(self.zfitters)):
flags.append(self.zfinders[i].zwarning.astype(int) | \
self.zfitters[i].zwarning.astype(int))
#- Zpicker
self.zpicker = ZPicker(specobj, self.zfinders, self.zfitters, flags)
#- Fill in outputs
self.spectype = np.asarray([self.zpicker.type[i][0] for i in range(nspec)])
self.subtype = np.asarray([repr(self.zpicker.subtype[i][0]) for i in range(nspec)])
self.z = np.array([self.zpicker.z[i][0] for i in range(nspec)])
self.zerr = np.array([self.zpicker.z_err[i][0] for i in range(nspec)])
self.zwarn = np.array([int(self.zpicker.zwarning[i]) for i in range(nspec)])
self.model = self.zpicker.models[:,0]
for ifiber in range(self.z.size):
log.debug("(after zpicker) fiber #%d z=%s"%(ifiber,self.z[ifiber]))
#- This is a container class needed by Redmonster zpicker
class _RedMonsterSpecObj(object):
def __init__(self, wave, flux, ivar, dof=None):
"""
Create an object with .wave, .flux, .ivar, and .dof attributes;
these are needed by RedMonster as input
"""
nspec, nwave = flux.shape
self.wave = wave
self.flux = flux
self.ivar = ivar
self.npix = flux.shape[-1]
if dof is None:
self.dof = np.ones(nspec) * nwave
else:
self.dof = dof
#- Leftover BOSS-isms
self.plate = self.mjd = self.fiberid = 0
self.hdr = None
self.plugmap = None
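# --- Editor's note (illustrative, not part of desispec) ---------------------
# The loglam grid built above has constant fractional wavelength spacing, so
# each pixel corresponds to a fixed velocity step dv = c * ln(10) * dloglam.
def _velocity_step(dloglam=1e-4):
    return 2.99792458e5 * np.log(10) * dloglam  # km/s; ~69.03 for the default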
|
timahutchinson/desispec
|
py/desispec/zfind/redmonster.py
|
Python
|
bsd-3-clause
| 6,150
|
[
"Galaxy"
] |
c1f6e71bbda18af43d40fbbac1f38e3a442a1838566451422ef14a7ad79d0b80
|
import datetime
import netCDF4
import numpy
import os
import re
from dateutil import relativedelta
from ece2cmor3 import cmor_target, cmor_utils
def get_table_path(tab_id=None):
directory = os.path.join(os.path.dirname(cmor_target.__file__), "resources", "tables")
return os.path.join(directory, "CMIP6_" + tab_id + ".json") if tab_id else directory
def is_lfs_ref(filename):
    # Compare raw bytes so this also works on Python 3, and close the handle.
    with open(filename, "rb") as f:
        return f.read(35) == b"version https://git-lfs.github.com/"
class nemo_output_factory(object):
def __init__(self):
self.lons = None
self.lats = None
self.gridtype = None
self.startdate = None
self.enddate = None
self.frequency = None
self.depthaxis = None
self.layers = 0
def make_grid(self, nlons_, nlats_, gridtype_, nlayers=0):
self.lons = numpy.fromfunction(lambda i, j: (i * 360 + 0.5) / ((nlons_ + nlats_ - j) + 2), (nlons_, nlats_),
dtype=numpy.float64)
self.lats = numpy.fromfunction(lambda i, j: (j * 180 + 0.5) / ((nlats_ + nlons_ - i) + 2) - 90,
(nlons_, nlats_), dtype=numpy.float64)
self.gridtype = gridtype_
self.depthaxis = gridtype_.replace("grid", "depth")
self.layers = nlayers
def set_timeframe(self, startdate_, enddate_, frequency_):
self.startdate = startdate_
self.enddate = enddate_
expr = re.compile("^[1-9]([hdmy])$")
if re.match(expr, frequency_):
self.frequency = frequency_
else:
raise Exception("Invalid frequency argument given: ", frequency_)
def get_path(self, dir_, prefix_):
joinchar = '_'
startstr = cmor_utils.date2str(self.startdate)
stopstr = cmor_utils.date2str(self.enddate)
filename = joinchar.join([prefix_, self.frequency, startstr, stopstr, self.gridtype]) + ".nc"
return os.path.join(dir_, filename)
def get_times(self):
fnum = int(self.frequency[0])
funit = self.frequency[1]
period = None
if funit == 'h':
period = relativedelta.relativedelta(hours=+fnum)
elif funit == 'd':
period = relativedelta.relativedelta(days=+fnum)
elif funit == 'm':
period = relativedelta.relativedelta(months=+fnum)
elif funit == 'y':
period = relativedelta.relativedelta(years=+fnum)
else:
raise Exception("Unknown period: ", period)
d = datetime.datetime.combine(self.startdate, datetime.time())
dstop = datetime.datetime.combine(self.enddate, datetime.time())
tims = []
while d < dstop:
tims.append(d)
d = d + period
return tims
def write_variables(self, path_, prefix_, vars_):
filepath = self.get_path(path_, prefix_)
root = netCDF4.Dataset(filepath, "w")
root.createDimension("time_counter")
root.createDimension("y", self.lons.shape[0])
root.createDimension("x", self.lons.shape[1])
root.createDimension("axis_nbounds", 2)
tims = self.get_times()
z = None
if self.depthaxis and self.layers:
z = "depth" + self.depthaxis
root.createDimension(z, self.layers)
varlat = root.createVariable("nav_lat", "f8", ("y", "x",))
varlat.standard_name = "latitude"
varlat.long_name = "Latitude"
varlat.units = "degrees north"
varlat.nav_model = self.gridtype
varlat[:, :] = self.lats
varlon = root.createVariable("nav_lon", "f8", ("y", "x",))
varlon.standard_name = "longitude"
varlon.long_name = "Longitude"
varlon.units = "degrees east"
varlon.nav_model = self.gridtype
varlon[:, :] = self.lons
if z:
varz = root.createVariable(z, "f8", (z,))
varz.long_name = "Vertical " + self.depthaxis.upper() + " levels"
varz.units = "m"
varz.positive = "down"
varz.bounds = z + "_bounds"
maxdepth = 6000.
nz = self.layers
step = maxdepth / nz
zarray = numpy.arange(0.1 * step, maxdepth, step)
varz[:] = zarray
varzbnd = root.createVariable(z + "_bounds", "f8", (z, "axis_nbounds",))
toparray = numpy.zeros(nz)
botarray = numpy.zeros(nz)
for i in range(0, nz - 1):
mid = 0.5 * (zarray[i] + zarray[i + 1])
toparray[i + 1] = mid
botarray[i] = mid
botarray[nz - 1] = zarray[nz - 1] + 0.5 * (zarray[nz - 1] - zarray[nz - 2])
varzbnd[:, 0] = toparray
varzbnd[:, 1] = botarray
vartimc = root.createVariable("time_centered", "f8", ("time_counter",))
vartimc.standard_name = "time"
vartimc.long_name = "Time axis"
vartimc.calendar = "gregorian"
vartimc.origin = "1950-01-01 00:00:00.0"
vartimc.units = "seconds since " + vartimc.origin
vartimc.bounds = "time_centered_bounds"
vartim = root.createVariable("time_counter", "f8", ("time_counter",))
vartim.axis = "T"
vartim.standard_name = "time"
vartim.long_name = "Time axis"
vartim.calendar = "gregorian"
vartim.origin = "1950-01-01 00:00:00.0"
vartim.units = "seconds since " + vartim.origin
vartim.bounds = "time_counter_bounds"
vartimcbnd = root.createVariable("time_centered_bounds", "f8", ("time_counter", "axis_nbounds",))
vartimbnd = root.createVariable("time_counter_bounds", "f8", ("time_counter", "axis_nbounds",))
timarray = netCDF4.date2num(tims, units=vartimc.units, calendar=vartimc.calendar)
vartim[:] = timarray
vartimc[:] = timarray
n = len(timarray)
bndlarray = numpy.zeros(n)
bndrarray = numpy.zeros(n)
bndlarray[0] = timarray[0] - 0.5 * (timarray[1] - timarray[0])
for i in range(0, n - 1):
mid = 0.5 * (timarray[i] + timarray[i + 1])
bndlarray[i + 1] = mid
bndrarray[i] = mid
bndrarray[n - 1] = timarray[n - 1] + 0.5 * (timarray[n - 1] - timarray[n - 2])
vartimbnd[:, 0] = bndlarray
vartimbnd[:, 1] = bndrarray
vartimcbnd[:, 0] = bndlarray
vartimcbnd[:, 1] = bndrarray
for v in vars_:
atts = v.copy()
name = atts.pop("name")
dims = atts.pop("dims")
func = atts.pop("function")
if name:
if dims == 2:
var = root.createVariable(name, "f8", ("time_counter", "y", "x",))
elif dims == 3:
var = root.createVariable(name, "f8", ("time_counter", z, "y", "x",))
else:
raise Exception("Writing a variable with ", dims, "dimensions is not supported")
else:
raise Exception("Variable must have a name to be included in netcdf file")
for k in atts:
setattr(var, k, atts[k])
if func:
if dims == 2:
var[:, :, :] = numpy.fromfunction(numpy.vectorize(func),
(len(tims), self.lons.shape[1], self.lons.shape[0]),
dtype=numpy.float64)
elif dims == 3:
var[:, :, :, :] = numpy.fromfunction(numpy.vectorize(func),
(len(tims), self.layers, self.lons.shape[1],
self.lons.shape[0]), dtype=numpy.float64)
else:
raise Exception("Variables with dimensions %d are not supported" % dims)
else:
if dims == 2:
var[:, :, :] = numpy.zeros((len(tims), self.lons.shape[1], self.lons.shape[0]))
elif dims == 3:
var[:, :, :, :] = numpy.zeros((len(tims), self.layers, self.lons.shape[1], self.lons.shape[0]))
else:
raise Exception("Variables with dimensions %d are not supported" % dims)
root.close()
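# Editor's sketch (hypothetical values): typical use of the factory above --
# a 10x8 T-grid with daily data for January 2000 and one zero-filled 2D field.
def _example_usage(tmpdir='/tmp'):
    fac = nemo_output_factory()
    fac.make_grid(10, 8, 'grid_T')
    fac.set_timeframe(datetime.date(2000, 1, 1), datetime.date(2000, 2, 1), '1d')
    fac.write_variables(tmpdir, 'exp', [{'name': 'tos', 'dims': 2, 'function': None}])
    return fac.get_path(tmpdir, 'exp')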
|
goord/ece2cmor3
|
test/test_utils.py
|
Python
|
apache-2.0
| 8,331
|
[
"NetCDF"
] |
b403f453a41b2c4aa4a754a773735b6988874fedd63361c8e78346d31746554e
|
'''
Expresses the spherical harmonics as perturbations from
spherical symmetry, as used in my research.
Created on 30 Jan 2013
@author: chris
'''
import numpy as np
from my_real_sph_harm import my_real_sph_harm
# import scipy.misc as sc
def main():
l = 4
m = 2
r, theta, phi = pert_sphere(l,m)
print np.shape(r), np.shape(theta), np.shape(phi)
print np.min(r)
cos = np.cos
sin = np.sin
x = r*sin(theta)*cos(phi)
y = r*sin(theta)*sin(phi)
z = r*cos(theta)
from mayavi import mlab
mlab.figure(1, bgcolor=(1, 1, 1), fgcolor=(0, 0, 0), size=(400, 300))
mlab.clf()
print np.shape(theta), np.shape(phi)
s = my_real_sph_harm(l, m, theta, phi)
print np.shape(s)
mlab.mesh(x, y, z, scalars = r, colormap='jet')
mlab.axes()
mlab.show()
def pert_sphere(l,m):
pi = np.pi
thetass, phiss = np.mgrid[0:pi:101j, 0:2*pi:202j]
#print np.shape(thetass), np.shape(phiss)
#print "max theta", thetass.max()
r_0 = 6.35620e-2
Ep = 0.2
s = my_real_sph_harm(l, m, thetass, phiss)
print np.min(s)
print np.max(s)
s /= s.max()
rss = r_0*(1 + s*Ep)
return rss, thetass, phiss
if __name__ == '__main__':
main()
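# Editor's note (illustrative): pert_sphere() returns the surface
#   r(theta, phi) = r_0 * (1 + Ep * Y_lm / max(Y_lm)),
# i.e. a sphere of radius r_0 perturbed by a normalised real spherical
# harmonic, with peak fractional deviation Ep (0.2 above).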
|
chrisjdavie/ws_cross_project
|
general_maths/sph_harms/pert_sphere.py
|
Python
|
mit
| 1,287
|
[
"Mayavi"
] |
95eccf1a9f3d8592c95b936e1b16d898e140a9f915db2662ac5a95a7bea7a551
|
"""
HPHF
"""
import numpy as np
from frankenstein.sgscf import sphf
class HPHF(sphf.SPHF):
def __init__(self, pymol, **kwargs):
self.eta = 1
sphf.SPHF.__init__(self, pymol)
self.__dict__.update(kwargs)
if np.abs(self.eta) != 1:
raise ValueError("eta must be either 1 or -1!")
self.spin_proj = 0 if self.eta == 1 else 1
self.ngrid = 2
self.grid = "eq"
if __name__ == "__main__":
import sys
try:
geom = sys.argv[1]
basis = sys.argv[2]
except:
print("Usage: geom, basis")
sys.exit(1)
from frankenstein.tools.pyscf_utils import get_pymol
from pyscf import scf
pymol = get_pymol(geom, basis, verbose=3)
pymol.verbose = 4
mf = scf.RHF(pymol)
mf.kernel()
e0 = mf.e_tot
dm0 = mf.make_rdm1()
mo_coeff0 = np.asarray([mf.mo_coeff, mf.mo_coeff.copy()])
from uhf import UHF
mf = UHF(pymol)
mf.guess_mix = 0.3
mf.kernel(mo_coeff0=mo_coeff0)
mo_coeff0 = mf.mo_coeff
if mf.S2 < 0.05:
from frankenstein.tools.scf_utils import homo_lumo_mix
homo_lumo_mix(mo_coeff0[0], mf.no[0], 0.3)
mf = HPHF(pymol)
mf.kernel(mo_coeff0=mo_coeff0)
e1 = mf.e_tot
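# Editor's note (based on the constructor above): eta = +1 maps to
# spin_proj = 0 and eta = -1 to spin_proj = 1, and HPHF uses a fixed two-point
# spin-rotation grid (ngrid = 2).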
|
hongzhouye/frankenstein
|
sgscf/hphf.py
|
Python
|
bsd-3-clause
| 1,250
|
[
"PyMOL",
"PySCF"
] |
91242a844c33540bb3fe6e6123c5a2ecabd237ac9cd5321e6011a9eadc1f4463
|
import numpy as np
import matplotlib.pyplot as plt
def compute_pose_iter(beta, X, Y, W, iters):
for iter in range(iters):
residuals = abs(np.dot(X, beta) - Y)
res_scale = 6.9460 * np.median(residuals)
W = residuals / res_scale
W.shape = (W.shape[0],1)
outliers = (W > 1)*1
W[ outliers.nonzero() ] = 0
good_values = (W != 0)*1
# calculate robust weights for 'good' points
# Note that if you supply your own regression weight vector,
# the final weight is the product of the robust weight and
# the regression weight.
tmp = 1 - np.power(W[ good_values.nonzero() ], 2)
W[ good_values.nonzero() ] = np.power(tmp, 2)
XW = np.tile(W, (1, 3)) * X
a = np.dot(XW.T, X)
b = np.dot(XW.T, Y)
# get the least-squares solution to a linear matrix equation
beta = np.linalg.lstsq(a,b)[0]
return beta, X, Y, W
m = 5
input = np.array([
[1, 6],
[2, 5],
[3, 7],
[4, 10],
[5, 12]
])
X = np.matrix([np.ones(m), input[:,0]]).T
y = np.matrix(input[:,1]).T
betaHat = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(y)
print(betaHat)
plt.figure(1)
xx = np.linspace(0, 5, 2)
yy = np.array(betaHat[0] + betaHat[1] * xx)
plt.plot(xx, yy.T, color='b')
plt.scatter(input[:,0], input[:,1], color='r')
for i in range(0,10):
abs_residual = abs(1 / (y - X.dot(betaHat)))
W = np.squeeze(np.array([abs_residual]))
#print(W.shape)
W = np.diag(W)
#print(W.shape)
betaHat = np.linalg.inv(X.T.dot(W.dot(X))).dot(X.T.dot(W).dot(y))
print(betaHat)
plt.figure(1)
xx = np.linspace(0, 5, 2)
yy = np.array(betaHat[0] + betaHat[1] * xx)
plt.plot(xx, yy.T, color='g')
plt.scatter(input[:,0], input[:,1], color='r')
plt.show()
# coefficients of the model
a1, a2, a3 = 0.1, -0.2, 4.0
# ground truth
A_gt = [a1, a2, a3]
print 'A_gt = ', A_gt
# create a coordinate matrix
nx = np.linspace(-1, 1, 41)
ny = np.linspace(-1, 1, 41)
x, y = np.meshgrid(nx, ny)
# make the estimation
z = a1*x + a2*y + a3
#######################################################
# CASE 1: data is corrupted by gaussian noise #
# Regular linear least squares method is used #
#######################################################
# let's add some gaussian noise
z_noise = z + 0.1*np.random.standard_normal(z.shape)
# non-robust least squares estimation
# X*A = Z
x_fl = x.flatten()
y_fl = y.flatten()
z_ones = np.ones([x.size,1])
X = np.hstack((np.reshape(x_fl, ([len(x_fl),1])), np.reshape(y_fl, ([len(y_fl),1])), z_ones))
Z = np.zeros(z_noise.shape)
Z[:] = z_noise
Z_fl = Z.flatten()
Z = np.reshape(Z_fl, ([len(Z_fl),1]))
A_lsq = np.linalg.lstsq(X,Z)[0]
z_least_squares = np.dot(X, A_lsq)
z_least_squares = np.reshape(z_least_squares, z.shape)
lsq_non_robust_noise = np.hstack((z, z_noise, z_least_squares))
# plt.figure()
# plt.title('Non-robust estimate (corrupted only by noise)')
# plt.imshow(lsq_non_robust_noise)
# plt.clim(z.min(), z.max())
#
# plt.show()
############################################################
# CASE 2: data is corrupted by gaussian noise AND outliers #
# Regular linear least squares method is used #
############################################################
# create outliers
outlier_prop = 0.3
outlier_IND = np.random.permutation(x.size)
outlier_IND = outlier_IND[0:np.floor(x.size * outlier_prop)]
z_noise_outlier = np.zeros(z_noise.shape)
z_noise_outlier[:] = z_noise
z_noise_outlier_flt = z_noise_outlier.flatten()
z_noise_outlier_flt[outlier_IND] = z_noise_outlier_flt[outlier_IND] + 10*np.random.standard_normal(z_noise_outlier_flt[outlier_IND].shape)
z_noise_outlier = np.reshape(z_noise_outlier_flt, z.shape)
# non-robust least squares estimation
Z = np.zeros(z_noise_outlier.shape)
Z[:] = z_noise_outlier
Z_fl = Z.flatten()
Z = np.reshape(Z_fl, ([len(Z_fl),1]))
A_lsq_outlier = np.linalg.lstsq(X,Z)[0]
z_lsq_outlier = np.dot(X, A_lsq_outlier)
z_lsq_outlier = np.reshape(z_lsq_outlier, z.shape)
lsq_non_robust_outlier = np.hstack((z, z_noise_outlier, z_lsq_outlier))
# plt.figure()
# plt.title('Non-robust estimate (corrupted by noise AND outliers)')
# plt.imshow(lsq_non_robust_outlier)
# plt.clim(z.min(), z.max())
#
# plt.show()
############################################################
# CASE 3: data is corrupted by gaussian noise AND outliers #
# Robust least squares method is used #
############################################################
# robust least sqaures (starting with the least squares solution)
A_robust = A_lsq_outlier
n_robust_it = 10
# iterate till the fit converges
for robust_it in range(n_robust_it):
mess = ''
# compute absolute value of residuals (fit minus data)
abs_resid = abs(np.dot(X, A_robust) - Z)
mess += 'residual shape ' + str(abs_resid.shape)
# compute the scaling factor for the standardization of residuals
# using the median absolute deviation of the residuals
# 6.9460 is a tuning constant (4.685/0.6745)
abs_res_scale = 6.9460 * np.median(abs_resid)
# standardize residuals
w = abs_resid / abs_res_scale
mess += ' w shape ' + str(w.shape)
# compute the robust bisquare weights excluding outliers
outliers = (w > 1)*1
w[ outliers.nonzero() ] = 0
good_values = (w != 0)*1
# calculate robust weights for 'good' points
# Note that if you supply your own regression weight vector,
# the final weight is the product of the robust weight and the regression weight.
tmp = 1 - np.power(w[ good_values.nonzero() ], 2)
w[ good_values.nonzero() ] = np.power(tmp, 2)
# get weighted X'es
mess += ' w shape ' + str(w.shape) + ' w_tile ' + str(np.tile(w, (1, 3)).shape) + ' X ' + str(X.shape)
XW = np.tile(w, (1, 3)) * X
mess += ' XW shape ' + str(XW.shape)
a = np.dot(XW.T, X)
b = np.dot(XW.T, Z)
# get the least-squares solution to a linear matrix equation
A_robust = np.linalg.lstsq(a,b)[0]
mess += ' A ' + str(A_robust.shape)
z_robust = np.dot(X, A_robust)
z_robust = np.reshape(z_robust, z.shape)
mess += ' Z ' + str(z_robust.shape)
print(mess)
lsq_robust = np.hstack((z, z_noise_outlier, z_robust))
plt.figure()
plt.title('Robust estimate (corrupted by noise AND outliers)')
plt.imshow(lsq_robust)
plt.clim(z.min(), z.max())
plt.show()
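# Editor's aside (illustrative): the bisquare weighting used in the loop above,
# in isolation. Residuals are standardized by (4.685 / 0.6745) * median(|r|)
# (the 6.9460 tuning constant); weights fall smoothly to zero at the cutoff,
# so points beyond it are ignored entirely.
def bisquare_weights(residuals):
    scale = 6.9460 * np.median(np.abs(residuals))
    u = np.abs(residuals) / scale
    w = np.zeros_like(u, dtype=float)
    inside = u < 1
    w[inside] = (1 - u[inside]**2)**2
    return w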
############################################################
# CASE 4: data is corrupted by gaussian noise AND outliers #
# Robust least squares method is used #
############################################################
beta = A_lsq_outlier
Y = Z
W= []
iters = 10
[beta,X,Y,W] = compute_pose_iter(beta, X, Y, W, iters)
z_robust = np.dot(X, beta)
z_robust = np.reshape(z_robust, z.shape)
lsq_robust = np.hstack((z, z_noise_outlier, z_robust))
plt.figure()
plt.title('Robust estimate (corrupted by noise AND outliers)')
plt.imshow(lsq_robust)
plt.clim(z.min(), z.max())
plt.show()
|
CoffeRobot/fato
|
pose_estimation/src/lsq.py
|
Python
|
bsd-3-clause
| 6,923
|
[
"Gaussian"
] |
4871a93420f8e89889ac65cb3b8f78be9bcdb6598263399616bd9f24384e9675
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""An internal module to handle OAuth 2.0 Authorization.
There are three ways you may obtain an access token:
- Authorization Code Grant
- Implicit Grant
- Client Credentials Grant
Each OAuth 2.0 grant uses your app credentials to start an
authorization process with Uber. Upon successful authorization,
a Session is created, which stores the OAuth 2.0 credentials.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from random import SystemRandom
from requests import codes
from requests import post
from string import ascii_letters
from string import digits
from urlparse import parse_qs
from urlparse import urlparse
from uber_rides.errors import ClientError
from uber_rides.errors import UberIllegalState
from uber_rides.session import OAuth2Credential
from uber_rides.session import Session
from uber_rides.utils import auth
from uber_rides.utils.request import build_url
class OAuth2(object):
"""The parent class for all OAuth 2.0 grant types."""
def __init__(self, client_id, scopes):
"""Initialize OAuth 2.0 Class.
Parameters
client_id (str)
Your app's Client ID.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
"""
self.client_id = client_id
self.scopes = scopes
def _build_authorization_request_url(
self,
response_type,
redirect_url,
state=None
):
"""Form URL to request an auth code or access token.
Parameters
response_type (str)
Either 'code' (Authorization Code Grant) or
'token' (Implicit Grant)
redirect_url (str)
The URL that the Uber server will redirect the user to after
finishing authorization. The redirect must be HTTPS-based and
match the URL you registered your application with. Localhost
URLs are permitted and can be either HTTP or HTTPS.
state (str)
Optional CSRF State token to send to server.
Returns
(str)
The fully constructed authorization request URL.
Raises
UberIllegalState (ApiError)
Raised if response_type parameter is invalid.
"""
if response_type not in auth.VALID_RESPONSE_TYPES:
message = '{} is not a valid response type.'
raise UberIllegalState(message.format(response_type))
args = {
'redirect_uri': redirect_url,
'state': state,
'scope': ' '.join(self.scopes),
'response_type': response_type,
'client_id': self.client_id,
}
return build_url(auth.AUTH_HOST, auth.AUTHORIZE_PATH, args)
def _extract_query(self, redirect_url):
"""Extract query parameters from a url.
Parameters
redirect_url (str)
The full URL that the Uber server redirected to after
the user authorized your app.
Returns
(dict)
A dictionary of query parameters.
"""
qs = urlparse(redirect_url)
# Implicit Grant redirect_urls have data after fragment identifier (#)
# All other redirect_urls return data after query identifier (?)
qs = qs.fragment if isinstance(self, ImplicitGrant) else qs.query
query_params = parse_qs(qs)
query_params = {qp: query_params[qp][0] for qp in query_params}
return query_params
class AuthorizationCodeGrant(OAuth2):
"""Class for Authorization Code Grant type.
The authorization code grant type is used to obtain both access
tokens and refresh tokens and is optimized for confidential clients.
It involves a two-step authorization process. The first step is having
the user authorize your app. The second involves getting an OAuth 2.0
access token from Uber.
"""
def __init__(self, client_id, scopes, client_secret, redirect_url):
"""Initialize AuthorizationCodeGrant Class.
Parameters
client_id (str)
Your app's Client ID.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
client_secret (str)
Your app's Client Secret.
redirect_url (str)
The URL that the Uber server will redirect the user to after
finishing authorization. The redirect must be HTTPS-based and
match the URL you registered your application with. Localhost
URLs are permitted and can be either HTTP or HTTPS.
"""
super(AuthorizationCodeGrant, self).__init__(client_id, scopes)
self.redirect_url = redirect_url
self.state_token = self._generate_state_token()
self.client_secret = client_secret
def _generate_state_token(self, length=32):
"""Generate CSRF State Token.
CSRF State Tokens are passed as a parameter in the authorization
URL and are checked when receiving responses from the Uber Auth
server to prevent request forgery.
"""
choices = ascii_letters + digits
return ''.join(SystemRandom().choice(choices) for _ in range(length))
def get_authorization_url(self):
"""Start the Authorization Code Grant process.
This function starts the OAuth 2.0 authorization process and builds an
authorization URL. You should redirect your user to this URL, where
they can grant your application access to their Uber account.
Returns
(str)
The fully constructed authorization request URL.
Tell the user to visit this URL and approve your app.
"""
return self._build_authorization_request_url(
response_type=auth.CODE_RESPONSE_TYPE,
redirect_url=self.redirect_url,
state=self.state_token,
)
def _verify_query(self, query_params):
"""Verify response from the Uber Auth server.
Parameters
query_params (dict)
Dictionary of query parameters attached to your redirect URL
after user approved your app and was redirected.
Returns
authorization_code (str)
Code received when user grants your app access. Use this code
to request an access token.
Raises
UberIllegalState (ApiError)
Thrown if the redirect URL was missing parameters or if the
given parameters were not valid.
"""
error_message = None
# Check CSRF State Token against returned state token from GET request
received_state_token = query_params.get('state')
if received_state_token is None:
error_message = 'Bad Request. Missing state parameter.'
raise UberIllegalState(error_message)
if self.state_token is None:
error_message = 'Missing CSRF State Token in session.'
raise UberIllegalState(error_message)
if self.state_token != received_state_token:
error_message = 'CSRF Error. Expected {}, got {}'
error_message = error_message.format(
self.state_token,
received_state_token,
)
raise UberIllegalState(error_message)
# Verify either 'code' or 'error' parameter exists
error = query_params.get('error')
authorization_code = query_params.get(auth.CODE_RESPONSE_TYPE)
if error and authorization_code:
error_message = (
'Code and Error query params code and error '
'can not both be set.'
)
raise UberIllegalState(error_message)
if error is None and authorization_code is None:
error_message = 'Neither query parameter code or error is set.'
raise UberIllegalState(error_message)
if error:
raise UberIllegalState(error)
return authorization_code
def get_session(self, redirect_url):
"""Complete the Authorization Code Grant process.
The redirect URL received after the user has authorized
your application contains an authorization code. Use this
authorization code to request an access token.
Parameters
redirect_url (str)
The full URL that the Uber server redirected to after
the user authorized your app.
Returns
(Session)
A Session object with OAuth 2.0 credentials.
"""
query_params = self._extract_query(redirect_url)
authorization_code = self._verify_query(query_params)
response = _request_access_token(
grant_type=auth.AUTHORIZATION_CODE_GRANT,
client_id=self.client_id,
client_secret=self.client_secret,
code=authorization_code,
redirect_url=self.redirect_url,
)
oauth2credential = OAuth2Credential.make_from_response(
response=response,
grant_type=auth.AUTHORIZATION_CODE_GRANT,
client_id=self.client_id,
client_secret=self.client_secret,
redirect_url=self.redirect_url,
)
return Session(oauth2credential=oauth2credential)
class ImplicitGrant(OAuth2):
"""Class for Implicit Grant type.
The implicit grant type is used to obtain access tokens and is optimized
for public clients under a particular redirect URI. It does not
refresh access tokens.
Unlike the authorization code grant type, in which the client makes
separate requests for authorization and access token, the client
receives the access token as the result of the authorization request.
"""
def __init__(self, client_id, scopes, redirect_url):
"""Initialize ImplicitGrant Class.
Parameters
client_id (str)
Your app's Client ID.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
redirect_url (str)
The URL that the Uber server will redirect the user to after
finishing authorization. The redirect must be HTTPS-based and
match the URL you registered your application with. Localhost
URLs are permitted and can be either HTTP or HTTPS.
"""
super(ImplicitGrant, self).__init__(client_id, scopes)
self.redirect_url = redirect_url
def get_authorization_url(self):
"""Build URL for authorization request.
Returns
(str)
The fully constructed authorization request URL.
"""
return self._build_authorization_request_url(
response_type=auth.TOKEN_RESPONSE_TYPE,
redirect_url=self.redirect_url,
)
def get_session(self, redirect_url):
"""Create Session to store credentials.
Parameters
redirect_url (str)
The full URL that the Uber server redirected to after
the user authorized your app.
Returns
(Session)
A Session object with OAuth 2.0 credentials.
Raises
UberIllegalState (APIError)
Raised if redirect URL contains an error.
"""
query_params = self._extract_query(redirect_url)
error = query_params.get('error')
if error:
raise UberIllegalState(error)
# convert space delimited string to set
scopes = query_params.get('scope')
scopes_set = {scope for scope in scopes.split()}
oauth2credential = OAuth2Credential(
client_id=self.client_id,
redirect_url=self.redirect_url,
access_token=query_params.get('access_token'),
expires_in_seconds=query_params.get('expires_in'),
scopes=scopes_set,
grant_type=auth.IMPLICIT_GRANT,
)
return Session(oauth2credential=oauth2credential)
class ClientCredentialGrant(OAuth2):
"""Class for Client Credential Grant type.
The client credential grant type is used to request an access token using
only its client credentials.
The client credentials grant type must only be used by confidential
clients or when the client is requesting access to protected resources
under its control.
"""
def __init__(self, client_id, scopes, client_secret):
"""Initialize ClientCredential Class.
Parameters
client_id (str)
Your app's Client ID.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'}) Keep this list minimal so
users feel safe granting your app access to their information.
client_secret (str)
Your app's Client Secret. This must be kept confidential.
"""
super(ClientCredentialGrant, self).__init__(client_id, scopes)
self.client_secret = client_secret
def get_session(self):
"""Create Session to store credentials.
Returns
(Session)
A Session object with OAuth 2.0 credentials.
"""
response = _request_access_token(
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_id=self.client_id,
client_secret=self.client_secret,
scopes=self.scopes,
)
oauth2credential = OAuth2Credential.make_from_response(
response=response,
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_id=self.client_id,
client_secret=self.client_secret,
)
return Session(oauth2credential=oauth2credential)
def _request_access_token(
grant_type,
client_id=None,
client_secret=None,
scopes=None,
code=None,
redirect_url=None,
refresh_token=None
):
"""Make an HTTP POST to request an access token.
Parameters
grant_type (str)
            Either 'client_credentials' (Client Credentials Grant)
or 'authorization_code' (Authorization Code Grant).
client_id (str)
Your app's Client ID.
client_secret (str)
Your app's Client Secret.
scopes (set)
Set of permission scopes to request.
(e.g. {'profile', 'history'})
code (str)
The authorization code to switch for an access token.
Only used in Authorization Code Grant.
redirect_url (str)
The URL that the Uber server will redirect to.
refresh_token (str)
Refresh token used to get a new access token.
Only used for Authorization Code Grant.
Returns
(requests.Response)
Successful HTTP response from a 'POST' to request
an access token.
Raises
ClientError (APIError)
Thrown if there was an HTTP error.
"""
url = build_url(auth.AUTH_HOST, auth.ACCESS_TOKEN_PATH)
if isinstance(scopes, set):
scopes = ' '.join(scopes)
args = {
'grant_type': grant_type,
'client_id': client_id,
'client_secret': client_secret,
'scope': scopes,
'code': code,
'redirect_uri': redirect_url,
'refresh_token': refresh_token,
}
response = post(url=url, data=args)
if response.status_code == codes.ok:
return response
message = 'Failed to request access token: {}.'
message = message.format(response.reason)
raise ClientError(response, message)
def refresh_access_token(credential):
"""Use a refresh token to request a new access token.
    Not supported for access tokens obtained via Implicit Grant.
Parameters
credential (OAuth2Credential)
An authorized user's OAuth 2.0 credentials.
Returns
(Session)
A new Session object with refreshed OAuth 2.0 credentials.
Raises
UberIllegalState (APIError)
Raised if OAuth 2.0 grant type does not support
refresh tokens.
"""
if credential.grant_type == auth.AUTHORIZATION_CODE_GRANT:
response = _request_access_token(
grant_type=auth.REFRESH_TOKEN,
client_id=credential.client_id,
client_secret=credential.client_secret,
redirect_url=credential.redirect_url,
refresh_token=credential.refresh_token,
)
oauth2credential = OAuth2Credential.make_from_response(
response=response,
grant_type=credential.grant_type,
client_id=credential.client_id,
client_secret=credential.client_secret,
redirect_url=credential.redirect_url,
)
return Session(oauth2credential=oauth2credential)
elif credential.grant_type == auth.CLIENT_CREDENTIAL_GRANT:
response = _request_access_token(
grant_type=auth.CLIENT_CREDENTIAL_GRANT,
client_id=credential.client_id,
client_secret=credential.client_secret,
scopes=credential.scopes,
)
oauth2credential = OAuth2Credential.make_from_response(
response=response,
grant_type=credential.grant_type,
client_id=credential.client_id,
client_secret=credential.client_secret,
)
return Session(oauth2credential=oauth2credential)
message = '{} Grant Type does not support Refresh Tokens.'
message = message.format(credential.grant_type)
raise UberIllegalState(message)
def revoke_access_token(credential):
"""Revoke an access token.
All future requests with the access token will be invalid.
Parameters
credential (OAuth2Credential)
An authorized user's OAuth 2.0 credentials.
Raises
ClientError (APIError)
Thrown if there was an HTTP error.
"""
url = build_url(auth.AUTH_HOST, auth.REVOKE_PATH)
args = {
'token': credential.access_token,
'client_id': credential.client_id,
'client_secret': credential.client_secret,
}
response = post(url=url, params=args)
if response.status_code == codes.ok:
return
message = 'Failed to revoke access token: {}.'
message = message.format(response.reason)
raise ClientError(response, message)
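def _example_authorization_url():
    """Editor's sketch (hypothetical credentials): build the URL that starts
    the Authorization Code flow. Only URL construction happens locally."""
    grant = AuthorizationCodeGrant(
        client_id='CLIENT_ID',
        scopes={'profile', 'history'},
        client_secret='CLIENT_SECRET',
        redirect_url='https://localhost/callback',
    )
    return grant.get_authorization_url()  # direct the user to this URL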
|
ianmabie/uberpy
|
venv/lib/python2.7/site-packages/uber_rides/auth.py
|
Python
|
mit
| 20,155
|
[
"VisIt"
] |
47c59b7918ec9837e794cd588229643cec57bacd82c4dd509cd74a2b39df2bed
|
# creates: metric.png
import numpy as np
import matplotlib
matplotlib.use('Agg')
import pylab as plt
from math import pi, cos
# Special points in the BZ of a simple cubic cell
G = pi * np.array([0., 0., 0.])
R = pi * np.array([1., 1., 1.])
X = pi * np.array([1., 0., 0.])
M = pi * np.array([1., 1., 0.])
# The path for the band plot
path = [X, G, R, X, M, G]
textpath = [r'$X$', r'$\Gamma$', r'$R$', r'$X$', r'$M$', r'$\Gamma$']
# Make band data
qvec = []
lines = [0]
previous = path[0]
for next in path[1:]:
Npoints = int(round(20 * np.linalg.norm(next - previous)))
lines.append(lines[-1] + Npoints)
for t in np.linspace(0, 1, Npoints):
qvec.append((1 - t) * previous + t * next)
previous = next
vasp = [1 / max(np.linalg.norm(q), 1e-6)**2 for q in qvec]
gpaw = [( 1 + cos(qx) + cos(qy) + cos(qz) +
cos(qx) * cos(qy) + cos(qx) * cos(qz) + cos(qy) * cos(qz) +
cos(qx) * cos(qy) * cos(qz)) / 8. for qx, qy, qz in qvec]
# Plot band data
fig = plt.figure(1, figsize=(5, 3), dpi=90)
fig.subplots_adjust(left=.1, right=.95)
lim = [0, lines[-1], 0, 1.25]
plt.plot(vasp, 'k:', label='VASP')
plt.plot(gpaw, 'k-', label='GPAW')
for q in lines:
plt.plot([q, q], lim[2:], 'k-')
plt.xticks(lines, textpath)
plt.yticks([0, 1], [r'$1$', r'$w+1$'])
plt.axis(lim)
# The pad keyword to legend was deprecated in MPL v. 0.98.4
if matplotlib.__version__ < '0.98.4':
kwpad = {'pad': 0.1, 'axespad': 0.06}
else:
kwpad = {'borderpad': 0.2, 'borderaxespad': 0.06}
plt.legend(loc='upper right', **kwpad)
plt.title('Special metric for density changes')
plt.savefig('metric.png', dpi=90)
#plt.show()
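# Editor's note (illustrative): the GPAW expression above is the expanded form
# of a separable product, which makes its zone-boundary zeros explicit:
#   w(q) = (1 + cos qx) * (1 + cos qy) * (1 + cos qz) / 8
def gpaw_metric(qx, qy, qz):
    return (1 + cos(qx)) * (1 + cos(qy)) * (1 + cos(qz)) / 8.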
|
qsnake/gpaw
|
doc/documentation/densitymix/metric.py
|
Python
|
gpl-3.0
| 1,634
|
[
"GPAW",
"VASP"
] |
0450756255f19ff92e8aa2fe52f08b1ab36f1df298e840d9de56fa4cf3b50bee
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import base64
import json
import logging
from google.appengine.api import users
from google.appengine.ext import ndb, deferred
from framework.consts import get_base_url
from framework.plugin_loader import get_config
from framework.utils import now
from mcfw.consts import DEBUG, MISSING
from mcfw.exceptions import HttpBadRequestException
from mcfw.properties import object_factory
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.api import messaging, system
from plugins.rogerthat_api.to import UserDetailsTO, MemberTO
from plugins.rogerthat_api.to.messaging import AttachmentTO, Message
from plugins.rogerthat_api.to.messaging.flow import FLOW_STEP_MAPPING
from plugins.rogerthat_api.to.messaging.forms import SignTO, SignFormTO, FormTO, SignWidgetResultTO
from plugins.rogerthat_api.to.messaging.service_callback_results import TYPE_FLOW, FlowCallbackResultTypeTO, \
FlowMemberResultCallbackResultTO
from plugins.tff_backend.bizz import get_tf_token_api_key, get_grid_api_key
from plugins.tff_backend.bizz.agreements import create_hosting_agreement_pdf
from plugins.tff_backend.bizz.email import send_emails_to_support
from plugins.tff_backend.bizz.gcs import upload_to_gcs
from plugins.tff_backend.bizz.intercom_helpers import tag_intercom_users, IntercomTags
from plugins.tff_backend.bizz.iyo.utils import get_username
from plugins.tff_backend.bizz.messages import send_message_and_email
from plugins.tff_backend.bizz.nodes.stats import assign_nodes_to_user
from plugins.tff_backend.bizz.odoo import create_odoo_quotation, update_odoo_quotation, QuotationState, \
confirm_odoo_quotation, get_nodes_from_odoo
from plugins.tff_backend.bizz.rogerthat import put_user_data, create_error_message
from plugins.tff_backend.bizz.todo import update_hoster_progress
from plugins.tff_backend.bizz.todo.hoster import HosterSteps
from plugins.tff_backend.bizz.user import get_tff_profile
from plugins.tff_backend.configuration import TffConfiguration
from plugins.tff_backend.consts.hoster import REQUIRED_TOKEN_COUNT_TO_HOST
from plugins.tff_backend.dal.node_orders import get_node_order
from plugins.tff_backend.exceptions.hoster import OrderAlreadyExistsException, InvalidContentTypeException
from plugins.tff_backend.models.hoster import NodeOrder, NodeOrderStatus, ContactInfo
from plugins.tff_backend.models.investor import InvestmentAgreement
from plugins.tff_backend.plugin_consts import KEY_NAME, KEY_ALGORITHM, NAMESPACE, FLOW_HOSTER_SIGNATURE_RECEIVED, \
FLOW_SIGN_HOSTING_AGREEMENT
from plugins.tff_backend.to.nodes import NodeOrderTO, CreateNodeOrderTO
from plugins.tff_backend.utils import get_step_value, get_step
from plugins.tff_backend.utils.app import create_app_user_by_email, get_app_user_tuple
@returns()
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
flow_params=unicode)
def order_node(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag, result_key,
flush_id, flush_message_flow_id, service_identity, user_details, flow_params):
order_key = NodeOrder.create_key()
deferred.defer(_order_node, order_key, user_details.email, user_details.app_id, steps)
def _order_node(order_key, user_email, app_id, steps):
logging.info('Receiving order of Zero-Node')
app_user = create_app_user_by_email(user_email, app_id)
overview_step = get_step(steps, 'message_overview')
if overview_step and overview_step.answer_id == u"button_use":
api_key = get_grid_api_key()
user_data_keys = ['name', 'email', 'phone', 'billing_address', 'address', 'shipping_name', 'shipping_email',
'shipping_phone', 'shipping_address']
user_data = system.get_user_data(api_key, user_email, app_id, user_data_keys)
billing_info = ContactInfo(name=user_data['name'],
email=user_data['email'],
phone=user_data['phone'],
address=user_data['billing_address'] or user_data['address'])
if user_data['shipping_name']:
shipping_info = ContactInfo(name=user_data['shipping_name'],
email=user_data['shipping_email'],
phone=user_data['shipping_phone'],
address=user_data['shipping_address'])
else:
shipping_info = billing_info
updated_user_data = None
else:
name = get_step_value(steps, 'message_name')
email = get_step_value(steps, 'message_email')
phone = get_step_value(steps, 'message_phone')
billing_address = get_step_value(steps, 'message_billing_address')
updated_user_data = {
'name': name,
'email': email,
'phone': phone,
'billing_address': billing_address,
}
billing_info = ContactInfo(name=name,
email=email,
phone=phone,
address=billing_address)
same_shipping_info_step = get_step(steps, 'message_choose_shipping_info')
if same_shipping_info_step and same_shipping_info_step.answer_id == u"button_yes":
shipping_info = billing_info
else:
shipping_name = get_step_value(steps, 'message_shipping_name')
shipping_email = get_step_value(steps, 'message_shipping_email')
shipping_phone = get_step_value(steps, 'message_shipping_phone')
shipping_address = get_step_value(steps, 'message_shipping_address')
updated_user_data.update({
'shipping_name': shipping_name,
'shipping_email': shipping_email,
'shipping_phone': shipping_phone,
'shipping_address': shipping_address,
})
shipping_info = ContactInfo(name=shipping_name,
email=shipping_email,
phone=shipping_phone,
address=shipping_address)
socket_step = get_step(steps, 'message_socket')
socket = socket_step and socket_step.answer_id.replace('button_', '')
# Only one node is allowed per user, and one per location
username = get_username(app_user)
if NodeOrder.has_order_for_user_or_location(username, billing_info.address) and not DEBUG:
logging.info('User already has a node order, sending abort message')
msg = u'Dear ThreeFold Member, we sadly cannot grant your request to host an additional ThreeFold Node:' \
u' We are currently only allowing one Node to be hosted per ThreeFold Member and location.' \
u' This will allow us to build a bigger base and a more diverse Grid.'
subject = u'Your ThreeFold Node request'
send_message_and_email(app_user, msg, subject, get_grid_api_key())
return
# Check if user has invested >= 120 tokens
paid_orders = InvestmentAgreement.list_by_status_and_user(username, InvestmentAgreement.STATUS_PAID)
total_tokens = sum([o.token_count_float for o in paid_orders])
can_host = total_tokens >= REQUIRED_TOKEN_COUNT_TO_HOST
def trans():
logging.debug('Storing order in the database')
order = NodeOrder(key=order_key,
username=username,
tos_iyo_see_id=None,
billing_info=billing_info,
shipping_info=shipping_info,
order_time=now(),
status=NodeOrderStatus.APPROVED if can_host else NodeOrderStatus.WAITING_APPROVAL,
socket=socket)
order.put()
if can_host:
logging.info('User has invested more than %s tokens, immediately creating node order PDF.',
REQUIRED_TOKEN_COUNT_TO_HOST)
deferred.defer(_create_node_order_pdf, order_key.id(), app_user, _transactional=True)
else:
logging.info('User has not invested more than %s tokens, an admin needs to approve this order manually.',
REQUIRED_TOKEN_COUNT_TO_HOST)
deferred.defer(_inform_support_of_new_node_order, order_key.id(), _transactional=True)
deferred.defer(set_hoster_status_in_user_data, app_user, False, _transactional=True)
if updated_user_data:
deferred.defer(put_user_data, get_tf_token_api_key(), user_email, app_id, updated_user_data,
_transactional=True)
ndb.transaction(trans)
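# Editor's note: deferred.defer(..., _transactional=True) enqueues each task as
# part of the datastore transaction above, so the follow-up work (PDF creation,
# support notification, user-data update) only runs if the order commits.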
def _create_node_order_pdf(node_order_id, app_user):
node_order = get_node_order(node_order_id)
user_email, app_id = get_app_user_tuple(app_user)
logging.debug('Creating Hosting agreement')
pdf_name = NodeOrder.filename(node_order_id)
pdf_contents = create_hosting_agreement_pdf(node_order.billing_info.name, node_order.billing_info.address)
pdf_size = len(pdf_contents)
pdf_url = upload_to_gcs(pdf_name, pdf_contents, 'application/pdf')
deferred.defer(_order_node_iyo_see, app_user, node_order_id, pdf_url, pdf_size)
deferred.defer(update_hoster_progress, user_email.email(), app_id, HosterSteps.FLOW_ADDRESS)
def _order_node_iyo_see(app_user, node_order_id, pdf_url, pdf_size, create_quotation=True):
order_id = NodeOrder.create_human_readable_id(node_order_id)
    attachment_name = u'Zero-Node order %s - Terms and conditions' % order_id
if create_quotation:
_create_quotation(app_user, node_order_id, pdf_url, attachment_name, pdf_size)
@returns()
@arguments(app_user=users.User, order_id=(int, long), pdf_url=unicode, attachment_name=unicode, pdf_size=(int, long))
def _create_quotation(app_user, order_id, pdf_url, attachment_name, pdf_size):
order = get_node_order(order_id)
config = get_config(NAMESPACE)
assert isinstance(config, TffConfiguration)
product_id = config.odoo.product_ids.get(order.socket)
if not product_id:
logging.warn('Could not find appropriate product for socket %s. Falling back to EU socket.', order.socket)
product_id = config.odoo.product_ids['EU']
odoo_sale_order_id, odoo_sale_order_name = create_odoo_quotation(order.billing_info, order.shipping_info,
product_id)
order.odoo_sale_order_id = odoo_sale_order_id
order.put()
deferred.defer(_send_order_node_sign_message, app_user, order_id, pdf_url, attachment_name,
odoo_sale_order_name, pdf_size)
@returns()
@arguments(order_id=(int, long))
def _cancel_quotation(order_id):
def trans():
node_order = get_node_order(order_id)
if node_order.odoo_sale_order_id:
update_odoo_quotation(node_order.odoo_sale_order_id, {'state': QuotationState.CANCEL.value})
node_order.populate(status=NodeOrderStatus.CANCELED, cancel_time=now())
node_order.put()
ndb.transaction(trans)
@returns()
@arguments(app_user=users.User, order_id=(int, long), pdf_url=unicode, attachment_name=unicode, order_name=unicode,
pdf_size=(int, long))
def _send_order_node_sign_message(app_user, order_id, pdf_url, attachment_name, order_name, pdf_size):
logging.debug('Sending SIGN widget to app user')
widget = SignTO(algorithm=KEY_ALGORITHM,
key_name=KEY_NAME,
payload=base64.b64encode(pdf_url).decode('utf-8'))
form = SignFormTO(positive_button_ui_flags=Message.UI_FLAG_EXPECT_NEXT_WAIT_5,
widget=widget)
attachment = AttachmentTO(content_type=u'application/pdf',
download_url=pdf_url,
name=attachment_name,
size=pdf_size)
member_user, app_id = get_app_user_tuple(app_user)
members = [MemberTO(member=member_user.email(), app_id=app_id, alert_flags=0)]
tag = json.dumps({
u'__rt__.tag': u'sign_order_node_tos',
u'order_id': order_id
}).decode('utf-8')
flow_params = json.dumps({
'order_name': order_name,
'form': form.to_dict(),
'attachments': [attachment.to_dict()]
})
messaging.start_local_flow(get_tf_token_api_key(), None, members, None, tag=tag, context=None,
flow=FLOW_SIGN_HOSTING_AGREEMENT, flow_params=flow_params)
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=UserDetailsTO,
flow_params=unicode)
def order_node_signed(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag,
result_key, flush_id, flush_message_flow_id, service_identity, user_details, flow_params):
try:
user_detail = user_details
tag_dict = json.loads(tag)
order = get_node_order(tag_dict['order_id'])
last_step = steps[-1]
if last_step.answer_id != FormTO.POSITIVE:
logging.info('Zero-Node order was canceled')
deferred.defer(_cancel_quotation, order.id)
return None
logging.info('Received signature for Zero-Node order')
sign_result = last_step.form_result.result.get_value()
assert isinstance(sign_result, SignWidgetResultTO)
iyo_username = get_username(user_detail)
logging.debug('Storing signature in DB')
order.populate(status=NodeOrderStatus.SIGNED,
signature=sign_result.payload_signature,
sign_time=now())
order.put()
# TODO: send mail to TF support
deferred.defer(update_hoster_progress, user_detail.email, user_detail.app_id, HosterSteps.FLOW_SIGN)
intercom_tags = get_intercom_tags_for_node_order(order)
for intercom_tag in intercom_tags:
deferred.defer(tag_intercom_users, intercom_tag, [iyo_username])
logging.debug('Sending confirmation message')
result = FlowCallbackResultTypeTO(flow=FLOW_HOSTER_SIGNATURE_RECEIVED,
tag=None,
force_language=None,
flow_params=json.dumps({'orderId': order.human_readable_id}))
return FlowMemberResultCallbackResultTO(type=TYPE_FLOW,
value=result)
except:
logging.exception('An unexpected error occurred')
return create_error_message()
@returns(NodeOrderTO)
@arguments(order_id=(int, long))
def get_node_order_details(order_id):
    # type: (long) -> NodeOrderTO
return NodeOrderTO.from_model(get_node_order(order_id))
def _get_allowed_status(current_status):
# type: (long) -> list[long]
next_statuses = {
NodeOrderStatus.CANCELED: [],
NodeOrderStatus.WAITING_APPROVAL: [NodeOrderStatus.CANCELED, NodeOrderStatus.APPROVED],
NodeOrderStatus.APPROVED: [NodeOrderStatus.CANCELED, NodeOrderStatus.SIGNED],
NodeOrderStatus.SIGNED: [NodeOrderStatus.CANCELED, NodeOrderStatus.PAID],
NodeOrderStatus.PAID: [NodeOrderStatus.SENT],
NodeOrderStatus.SENT: [],
NodeOrderStatus.ARRIVED: [],
}
return next_statuses.get(current_status)
def _can_change_status(current_status, new_status):
# type: (long, long) -> bool
return new_status in _get_allowed_status(current_status)
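# Illustrative examples (added commentary, not part of the original module):
# the table in _get_allowed_status acts as a small state machine, e.g.
#   _can_change_status(NodeOrderStatus.WAITING_APPROVAL, NodeOrderStatus.APPROVED)  # -> True
#   _can_change_status(NodeOrderStatus.PAID, NodeOrderStatus.CANCELED)              # -> False
#   _can_change_status(NodeOrderStatus.SENT, NodeOrderStatus.ARRIVED)               # -> False (terminal here)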
@returns(NodeOrder)
@arguments(order_id=(int, long), order=NodeOrderTO)
def put_node_order(order_id, order):
# type: (long, NodeOrderTO) -> NodeOrder
order_model = get_node_order(order_id)
app_user = get_tff_profile(order_model.username).app_user
if order_model.status == NodeOrderStatus.CANCELED:
raise HttpBadRequestException('order_canceled')
if order.status not in (NodeOrderStatus.CANCELED, NodeOrderStatus.SENT, NodeOrderStatus.APPROVED,
NodeOrderStatus.PAID):
raise HttpBadRequestException('invalid_status')
# Only support updating the status for now
if order_model.status != order.status:
if not _can_change_status(order_model.status, order.status):
raise HttpBadRequestException('cannot_change_status',
{'from': order_model.status, 'to': order.status,
'allowed_new_statuses': _get_allowed_status(order_model.status)})
order_model.status = order.status
human_user, app_id = get_app_user_tuple(app_user)
if order_model.status == NodeOrderStatus.CANCELED:
order_model.cancel_time = now()
if order_model.odoo_sale_order_id:
deferred.defer(update_odoo_quotation, order_model.odoo_sale_order_id,
{'state': QuotationState.CANCEL.value})
deferred.defer(update_hoster_progress, human_user.email(), app_id,
HosterSteps.NODE_POWERED) # nuke todo list
deferred.defer(set_hoster_status_in_user_data, app_user, _countdown=2)
elif order_model.status == NodeOrderStatus.SENT:
if not order_model.odoo_sale_order_id or not get_nodes_from_odoo(order_model.odoo_sale_order_id):
raise HttpBadRequestException('cannot_mark_sent_no_serial_number_configured_yet',
{'sale_order': order_model.odoo_sale_order_id})
order_model.send_time = now()
deferred.defer(update_hoster_progress, human_user.email(), app_id, HosterSteps.NODE_SENT)
deferred.defer(_send_node_order_sent_message, order_id)
elif order_model.status == NodeOrderStatus.APPROVED:
deferred.defer(_create_node_order_pdf, order_id, app_user)
elif order_model.status == NodeOrderStatus.PAID:
deferred.defer(confirm_odoo_quotation, order_model.odoo_sale_order_id)
else:
logging.debug('Status was already %s, not doing anything', order_model.status)
order_model.put()
return order_model
def _inform_support_of_new_node_order(node_order_id):
node_order = get_node_order(node_order_id)
subject = 'New Node Order by %s' % node_order.billing_info.name
body = """Hello,
We just received a new Node order from %(name)s (IYO username %(iyo_username)s) with id %(node_order_id)s.
This order needs to be manually approved since this user has not invested more than %(tokens)s tokens yet via the app.
Check the old purchase agreements to verify if this user can sign up as a hoster and if not, contact him.
Please visit %(base_url)s/orders/%(node_order_id)s to approve or cancel this order.
""" % {
'name': node_order.billing_info.name,
'iyo_username': node_order.username,
'base_url': get_base_url(),
'node_order_id': node_order.id,
'tokens': REQUIRED_TOKEN_COUNT_TO_HOST
}
send_emails_to_support(subject, body)
def _send_node_order_sent_message(node_order_id):
node_order = get_node_order(node_order_id)
app_user = get_tff_profile(node_order.username).app_user
subject = u'ThreeFold node ready to ship out'
msg = u'Good news, your ThreeFold node (order id %s) has been prepared for shipment.' \
u' It will be handed over to our shipping partner soon.' \
u'\nThanks again for accepting hosting duties and helping to grow the ThreeFold Grid close to the users.' % \
node_order_id
send_message_and_email(app_user, msg, subject, get_grid_api_key())
def get_intercom_tags_for_node_order(order):
# type: (NodeOrder) -> list[IntercomTags]
if order.status in [NodeOrderStatus.ARRIVED, NodeOrderStatus.SENT, NodeOrderStatus.SIGNED, NodeOrderStatus.PAID]:
return [IntercomTags.HOSTER]
return []
def set_hoster_status_in_user_data(app_user, can_order=None):
# type: (users.User, bool) -> None
username = get_username(app_user)
if not isinstance(can_order, bool):
can_order = all(o.status == NodeOrderStatus.CANCELED for o in NodeOrder.list_by_user(username))
user_data = {
'hoster': {
'can_order': can_order
}
}
api_key = get_grid_api_key()
email, app_id = get_app_user_tuple(app_user)
current_user_data = system.get_user_data(api_key, email.email(), app_id, ['hoster'])
if current_user_data != user_data:
put_user_data(api_key, email.email(), app_id, user_data)
@returns(NodeOrder)
@arguments(data=CreateNodeOrderTO)
def create_node_order(data):
# type: (CreateNodeOrderTO) -> NodeOrder
profile = get_tff_profile(data.username)
if data.status not in (NodeOrderStatus.SIGNED, NodeOrderStatus.SENT, NodeOrderStatus.ARRIVED, NodeOrderStatus.PAID):
data.sign_time = MISSING
if data.status not in (NodeOrderStatus.SENT, NodeOrderStatus.ARRIVED):
data.send_time = MISSING
order_count = NodeOrder.list_by_so(data.odoo_sale_order_id).count()
if order_count > 0:
raise OrderAlreadyExistsException(data.odoo_sale_order_id)
try:
nodes = get_nodes_from_odoo(data.odoo_sale_order_id)
except (IndexError, TypeError):
logging.warn('Could not get nodes from odoo for order id %s' % data.odoo_sale_order_id, exc_info=True)
raise HttpBadRequestException('cannot_find_so_x', {'id': data.odoo_sale_order_id})
if not nodes:
raise HttpBadRequestException('no_serial_number_configured_yet',
{'sale_order': data.odoo_sale_order_id})
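    # Added note: data.document is expected to be a data URL such as
    # 'data:application/pdf;base64,JVBERi0x...'; the split below separates the
    # 'data:<mime>;base64' prefix from the base64 payload.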
prefix, doc_content_base64 = data.document.split(',')
content_type = prefix.split(';')[0].replace('data:', '')
if content_type != 'application/pdf':
raise InvalidContentTypeException(content_type, ['application/pdf'])
doc_content = base64.b64decode(doc_content_base64)
order_key = NodeOrder.create_key()
pdf_name = NodeOrder.filename(order_key.id())
pdf_url = upload_to_gcs(pdf_name, doc_content, content_type)
order = NodeOrder(key=order_key,
**data.to_dict(exclude=['document']))
order.put()
deferred.defer(assign_nodes_to_user, order.username, nodes)
deferred.defer(set_hoster_status_in_user_data, profile.app_user, False)
deferred.defer(tag_intercom_users, IntercomTags.HOSTER, [order.username])
deferred.defer(_order_node_iyo_see, profile.app_user, order.id, pdf_url, len(doc_content), create_quotation=False)
return order
|
threefoldfoundation/app_backend
|
plugins/tff_backend/bizz/nodes/hoster.py
|
Python
|
bsd-3-clause
| 23,717
|
[
"VisIt"
] |
85dec08b240cdfa7684d22b55dfcc9d2b2855469a3d4d3e4644c410af7ebdfcb
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkImageQuantizeRGBToIndex(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkImageQuantizeRGBToIndex(), 'Processing.',
('vtkImageData',), ('vtkImageData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
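# Illustrative sketch (added commentary, assuming plain VTK usage; not part of
# the generated wrapper): vtkImageQuantizeRGBToIndex converts an RGB image to
# an indexed image plus a lookup table, e.g.:
#   quantize = vtk.vtkImageQuantizeRGBToIndex()
#   quantize.SetInputConnection(reader.GetOutputPort())  # 'reader' is a hypothetical image source
#   quantize.SetNumberOfColors(256)
#   quantize.Update()
#   lut = quantize.GetLookupTable()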
|
nagyistoce/devide
|
modules/vtk_basic/vtkImageQuantizeRGBToIndex.py
|
Python
|
bsd-3-clause
| 509
|
[
"VTK"
] |
2df5ad4ce14384350460c2a90706b83fbf620ebcb781605c47a6c54640b41ca9
|
# coding: utf-8
from datetime import datetime
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase
import os.path
from Simplan import settings
from Simplan.account.factories import UserFactory
from Simplan.event.factories import EventGuestFactory, EventUserFactory, \
OptionTimeFactory, OptionFreeFactory, ChoiceFactory
from Simplan.event.models import EventGuest, Event, OptionFree, OptionTime, \
EventUser, Option, Choice
class EventGuestViewsTests(TestCase):
def setUp(self):
self.url = reverse('Simplan.event.views.new_event')
self.data_guest_event = {'title': u'Réunion de département',
'description': u'Réunion mensuelle du département de la chambre de commerce',
'place': 'Paris, France',
'author': 'stoffelen',
'email': 'stoffelen@yahoo.com'}
self.data_user_event = {'title': u'Réunion de département',
'description': u'Réunion mensuelle du département de la chambre de commerce',
'place': 'Paris, France'}
self.data_free_option = {'text': u'Salle des Fleurs',
'image': open(settings.SITE_ROOT + '/fixtures/fleur.jpg', 'r')}
self.data_free_huge_option = {'text': u'Salle des Oiseaux',
'image': open(settings.SITE_ROOT + '/fixtures/birds.jpg', 'r')}
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
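        # Note: the locmem backend keeps sent messages in django.core.mail.outbox,
        # which the invitation tests below inspect.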
def test_create_guest_event(self):
'''
        Check that a guest can create an event
'''
res = self.client.post(self.url, self.data_guest_event,
follow=False)
# check if post has been executed
self.assertEqual(res.status_code, 302)
# check that event is in the EventGuest table
value = EventGuest.objects.get(pk=1)
self.assertEquals(value.title, u'Réunion de département')
self.assertEquals(value.description, u'Réunion mensuelle du département de la chambre de commerce')
self.assertEquals(value.place, 'Paris, France')
self.assertEquals(value.author, 'stoffelen')
self.assertEquals(value.email, 'stoffelen@yahoo.com')
# check that event is in the Event table
value = Event.objects.get(pk=1)
self.assertEquals(value.title, u'Réunion de département')
self.assertEquals(value.description, u'Réunion mensuelle du département de la chambre de commerce')
self.assertEquals(value.place, 'Paris, France')
def test_create_user_event(self):
'''
        Check that a logged-in user can create an event
'''
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
res = self.client.post(self.url, self.data_user_event, follow=False)
# check if post has been executed
self.assertEqual(res.status_code, 302)
        # check that event is in the EventUser table
value = EventUser.objects.get(pk=1)
self.assertEquals(value.title, u'Réunion de département')
self.assertEquals(value.description, u'Réunion mensuelle du département de la chambre de commerce')
self.assertEquals(value.place, 'Paris, France')
self.assertEquals(value.author, user)
# check that event is in the Event table
value = Event.objects.get(pk=1)
self.assertEquals(value.title, u'Réunion de département')
self.assertEquals(value.description, u'Réunion mensuelle du département de la chambre de commerce')
self.assertEquals(value.place, 'Paris, France')
def test_add_free_option_for_guest(self):
'''
        Check that a free option whose attributes satisfy the prerequisites can be added to a guest event
'''
egf = EventGuestFactory()
res = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=free',
self.data_free_option,
follow=False)
# check if post has been executed
self.assertEqual(res.status_code, 302)
res2 = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=free',
{'text': u'Salle des Chiens'},
follow=False)
self.assertEqual(res2.status_code, 302)
# add huge image
res3 = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=free',
self.data_free_huge_option,
follow=False)
self.assertEqual(res3.status_code, 404)
# add time option
otf = OptionTimeFactory(event=egf, position=3)
res4 = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=free',
{'text': u'Salle des Ours'},
follow=False)
self.assertEqual(res4.status_code, 302)
        # check that the rows were inserted
self.assertEquals(OptionFree.objects.count(), 3)
self.assertEquals(Option.objects.count(), 4)
value1 = OptionFree.objects.get(pk=1)
value2 = OptionFree.objects.get(pk=2)
value3 = OptionFree.objects.get(pk=4)
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 4)
        # check that the image has been uploaded
self.assertEqual(os.path.isfile(value1.image.path), True)
# delete temp image
os.remove(value1.image.path)
# check default choice number
self.assertEquals(Choice.objects.filter(option__pk=value1.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value2.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value3.pk).count(), len(settings.SETTING_CHOICES[0]))
def test_add_free_option_for_user(self):
'''
        Check that a free option whose attributes satisfy the prerequisites can be added to a user event
'''
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
res = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=free',
self.data_free_option,
follow=False)
# check if post has been executed
self.assertEqual(res.status_code, 302)
res2 = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=free',
{'text': u'Salle des Chiens'},
follow=False)
self.assertEqual(res2.status_code, 302)
# add huge image
res3 = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=free',
self.data_free_huge_option,
follow=False)
self.assertEqual(res3.status_code, 404)
# add time option
otf = OptionTimeFactory(event=euf, position=3)
res4 = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=free',
{'text': u'Salle des Ours'},
follow=False)
self.assertEqual(res4.status_code, 302)
        # check that the rows were inserted
self.assertEquals(OptionFree.objects.count(), 3)
self.assertEquals(Option.objects.count(), 4)
value1 = OptionFree.objects.get(pk=1)
value2 = OptionFree.objects.get(pk=2)
value3 = OptionFree.objects.get(pk=4)
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 4)
        # check that the image has been uploaded
self.assertEqual(os.path.isfile(value1.image.path), True)
# delete temp image
os.remove(value1.image.path)
self.client.logout()
# check default choice number
self.assertEquals(Choice.objects.filter(option__pk=value1.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value2.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value3.pk).count(), len(settings.SETTING_CHOICES[0]))
def test_add_time_option_for_guest(self):
egf = EventGuestFactory()
res = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
self.assertEqual(res.status_code, 302)
res2 = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
        self.assertEqual(res2.status_code, 302)
# add time option
off = OptionFreeFactory(event=egf, position=3)
res3 = self.client.post(reverse('Simplan.event.views.new_option', args=[egf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
        self.assertEqual(res3.status_code, 302)
        # check that the options were inserted
self.assertEquals(OptionTime.objects.count(), 3)
self.assertEquals(Option.objects.count(), 4)
value1 = OptionTime.objects.get(pk=1)
value2 = OptionTime.objects.get(pk=2)
value3 = OptionTime.objects.get(pk=4)
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 4)
# check default choice number
self.assertEquals(Choice.objects.filter(option__pk=value1.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value2.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value3.pk).count(), len(settings.SETTING_CHOICES[0]))
def test_add_time_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
res = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
self.assertEqual(res.status_code, 302)
        res2 = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
        self.assertEqual(res2.status_code, 302)
# add time option
off = OptionFreeFactory(event=euf, position=3)
res3 = self.client.post(reverse('Simplan.event.views.new_option', args=[euf.slug]) + '?type=time',
{'date': datetime.now()},
follow=False)
        self.assertEqual(res3.status_code, 302)
        # check that the options were inserted
self.assertEquals(OptionTime.objects.count(), 3)
self.assertEquals(Option.objects.count(), 4)
value1 = OptionTime.objects.get(pk=1)
value2 = OptionTime.objects.get(pk=2)
value3 = OptionTime.objects.get(pk=4)
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 4)
# check default choice number
self.assertEquals(Choice.objects.filter(option__pk=value1.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value2.pk).count(), len(settings.SETTING_CHOICES[0]))
self.assertEquals(Choice.objects.filter(option__pk=value3.pk).count(), len(settings.SETTING_CHOICES[0]))
def test_edit_time_option_for_guest(self):
egf = EventGuestFactory()
otf1 = OptionTimeFactory(event=egf, position=1)
otf2 = OptionTimeFactory(event=egf, position=2)
otf3 = OptionTimeFactory(event=egf, position=3)
res = self.client.post(reverse('Simplan.event.views.edit_option', args=[otf2.pk]) + '?type=time',
{'date': datetime(2015, 1, 1, 0, 0)},
follow=False)
self.assertEqual(res.status_code, 302)
        # check that no new option was inserted
self.assertEquals(OptionTime.objects.count(), 3)
value1 = OptionTime.objects.get(pk=1)
value2 = OptionTime.objects.get(pk=2)
value3 = OptionTime.objects.get(pk=3)
        # check that the option has been updated
self.assertEquals(value2.start_date, datetime(2015, 1, 1, 0, 0))
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 3)
def test_edit_time_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
otf1 = OptionTimeFactory(event=euf, position=1)
otf2 = OptionTimeFactory(event=euf, position=2)
otf3 = OptionTimeFactory(event=euf, position=3)
res = self.client.post(reverse('Simplan.event.views.edit_option', args=[otf2.pk]) + '?type=time',
{'date': datetime(2015, 1, 1, 0, 0)},
follow=False)
self.assertEqual(res.status_code, 302)
        # check that no new option was inserted
self.assertEquals(OptionTime.objects.count(), 3)
value1 = OptionTime.objects.get(pk=1)
value2 = OptionTime.objects.get(pk=2)
value3 = OptionTime.objects.get(pk=3)
        # check that the option has been updated
self.assertEquals(value2.start_date, datetime(2015, 1, 1, 0, 0))
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 3)
def test_edit_free_option_for_guest(self):
egf = EventGuestFactory()
off1 = OptionFreeFactory(event=egf, position=1)
off2 = OptionFreeFactory(event=egf, position=2)
off3 = OptionFreeFactory(event=egf, position=3)
res = self.client.post(reverse('Simplan.event.views.edit_option', args=[off2.pk]) + '?type=free',
{'text': u'Toilettes'},
follow=False)
self.assertEqual(res.status_code, 302)
        # check that no new option was inserted
self.assertEquals(OptionFree.objects.count(), 3)
value1 = OptionFree.objects.get(pk=1)
value2 = OptionFree.objects.get(pk=2)
value3 = OptionFree.objects.get(pk=3)
        # check that the option has been updated
self.assertEquals(value2.text, u'Toilettes')
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 3)
def test_edit_free_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
off1 = OptionFreeFactory(event=euf, position=1)
off2 = OptionFreeFactory(event=euf, position=2)
off3 = OptionFreeFactory(event=euf, position=3)
res = self.client.post(reverse('Simplan.event.views.edit_option', args=[off2.pk]) + '?type=free',
{'text': u'Toilettes'},
follow=False)
self.assertEqual(res.status_code, 302)
        # check that no new option was inserted
self.assertEquals(OptionFree.objects.count(), 3)
value1 = OptionFree.objects.get(pk=1)
value2 = OptionFree.objects.get(pk=2)
value3 = OptionFree.objects.get(pk=3)
        # check that the option has been updated
self.assertEquals(value2.text, u'Toilettes')
# check position
self.assertEquals(value1.position, 1)
self.assertEquals(value2.position, 2)
self.assertEquals(value3.position, 3)
def test_delete_time_option_for_guest(self):
egf = EventGuestFactory()
otf1 = OptionTimeFactory(event=egf, position=1)
otf2 = OptionTimeFactory(event=egf, position=2)
off1 = OptionFreeFactory(event=egf, position=3)
otf3 = OptionTimeFactory(event=egf, position=4)
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]) + '?type=time',
follow=False)
self.assertEqual(res.status_code, 302)
# check that option has been deleted
self.assertEquals(OptionTime.objects.count(), 2)
self.assertEquals(OptionFree.objects.count(), 1)
self.assertEquals(Option.objects.count(), 3)
# check position
self.assertEquals(Option.objects.get(pk=1).position, 1)
self.assertEquals(Option.objects.get(pk=3).position, 3)
self.assertEquals(Option.objects.get(pk=4).position, 4)
        # try to remove a nonexistent option
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]) + '?type=time',
follow=False)
self.assertEqual(res.status_code, 404)
def test_delete_time_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
otf1 = OptionTimeFactory(event=euf, position=1)
otf2 = OptionTimeFactory(event=euf, position=2)
off1 = OptionFreeFactory(event=euf, position=3)
otf3 = OptionTimeFactory(event=euf, position=4)
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 302)
# check that option has been deleted
self.assertEquals(OptionTime.objects.count(), 2)
self.assertEquals(OptionFree.objects.count(), 1)
self.assertEquals(Option.objects.count(), 3)
# check position
self.assertEquals(Option.objects.get(pk=1).position, 1)
self.assertEquals(Option.objects.get(pk=3).position, 3)
self.assertEquals(Option.objects.get(pk=4).position, 4)
        # try to remove a nonexistent option
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 404)
def test_delete_free_option_for_guest(self):
egf = EventGuestFactory()
otf1 = OptionFreeFactory(event=egf, position=1)
otf2 = OptionFreeFactory(event=egf, position=2)
off1 = OptionTimeFactory(event=egf, position=3)
otf3 = OptionFreeFactory(event=egf, position=4)
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 302)
# check that option has been deleted
self.assertEquals(OptionTime.objects.count(), 1)
self.assertEquals(OptionFree.objects.count(), 2)
self.assertEquals(Option.objects.count(), 3)
# check position
self.assertEquals(Option.objects.get(pk=1).position, 1)
self.assertEquals(Option.objects.get(pk=3).position, 3)
self.assertEquals(Option.objects.get(pk=4).position, 4)
        # try to remove a nonexistent option
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 404)
def test_delete_free_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
otf1 = OptionFreeFactory(event=euf, position=1)
otf2 = OptionFreeFactory(event=euf, position=2)
off1 = OptionTimeFactory(event=euf, position=3)
otf3 = OptionFreeFactory(event=euf, position=4)
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 302)
# check that option has been deleted
self.assertEquals(OptionTime.objects.count(), 1)
self.assertEquals(OptionFree.objects.count(), 2)
self.assertEquals(Option.objects.count(), 3)
# check position
self.assertEquals(Option.objects.get(pk=1).position, 1)
self.assertEquals(Option.objects.get(pk=3).position, 3)
self.assertEquals(Option.objects.get(pk=4).position, 4)
        # try to remove a nonexistent option
res = self.client.get(reverse('Simplan.event.views.del_option', args=[otf2.pk]),
follow=False)
self.assertEqual(res.status_code, 404)
def test_down_free_option_for_guest(self):
egf = EventGuestFactory()
otf1 = OptionTimeFactory(event=egf, position=1)
otf2 = OptionFreeFactory(event=egf, position=2)
otf3 = OptionTimeFactory(event=egf, position=5)
otf4 = OptionTimeFactory(event=egf, position=7)
old2 = otf2.position
old3 = otf3.position
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf2.pk]), follow=False)
self.assertEqual(res.status_code, 302)
        # check that the option count is unchanged
self.assertEquals(OptionTime.objects.count(), 3)
value2 = OptionFree.objects.get(pk=otf2.pk)
value3 = OptionTime.objects.get(pk=otf3.pk)
self.assertEquals(value2.position, old3)
self.assertEquals(value3.position, old2)
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf1.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf4.pk]), follow=False)
self.assertEqual(res.status_code, 404)
def test_up_option_for_guest(self):
egf = EventGuestFactory()
otf1 = OptionTimeFactory(event=egf, position=1)
otf2 = OptionFreeFactory(event=egf, position=2)
otf3 = OptionTimeFactory(event=egf, position=5)
otf4 = OptionTimeFactory(event=egf, position=7)
old2 = otf2.position
old3 = otf3.position
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf3.pk]), follow=False)
self.assertEqual(res.status_code, 302)
        # check that the option count is unchanged
self.assertEquals(OptionTime.objects.count(), 3)
value2 = OptionFree.objects.get(pk=otf2.pk)
value3 = OptionTime.objects.get(pk=otf3.pk)
self.assertEquals(value2.position, old3)
self.assertEquals(value3.position, old2)
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf1.pk]), follow=False)
self.assertEqual(res.status_code, 404)
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf4.pk]), follow=False)
self.assertEqual(res.status_code, 302)
def test_down_free_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
otf1 = OptionTimeFactory(event=euf, position=1)
otf2 = OptionFreeFactory(event=euf, position=2)
otf3 = OptionTimeFactory(event=euf, position=5)
otf4 = OptionTimeFactory(event=euf, position=7)
old2 = otf2.position
old3 = otf3.position
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf2.pk]), follow=False)
self.assertEqual(res.status_code, 302)
        # check that the option count is unchanged
self.assertEquals(OptionTime.objects.count(), 3)
value2 = OptionFree.objects.get(pk=otf2.pk)
value3 = OptionTime.objects.get(pk=otf3.pk)
self.assertEquals(value2.position, old3)
self.assertEquals(value3.position, old2)
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf1.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.down_option', args=[otf4.pk]), follow=False)
self.assertEqual(res.status_code, 404)
def test_up_option_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
otf1 = OptionTimeFactory(event=euf, position=1)
otf2 = OptionFreeFactory(event=euf, position=2)
otf3 = OptionTimeFactory(event=euf, position=5)
otf4 = OptionTimeFactory(event=euf, position=7)
old2 = otf2.position
old3 = otf3.position
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf3.pk]), follow=False)
self.assertEqual(res.status_code, 302)
        # check that the option count is unchanged
self.assertEquals(OptionTime.objects.count(), 3)
value2 = OptionFree.objects.get(pk=otf2.pk)
value3 = OptionTime.objects.get(pk=otf3.pk)
self.assertEquals(value2.position, old3)
self.assertEquals(value3.position, old2)
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf1.pk]), follow=False)
self.assertEqual(res.status_code, 404)
res = self.client.get(reverse('Simplan.event.views.up_option', args=[otf4.pk]), follow=False)
self.assertEqual(res.status_code, 302)
def test_add_choice_for_guest(self):
egf = EventGuestFactory()
off1 = OptionFreeFactory(event=egf, position=1)
off2 = OptionFreeFactory(event=egf, position=2)
otf1 = OptionTimeFactory(event=egf, position=5)
off3 = OptionFreeFactory(event=egf, position=7)
res = self.client.post(reverse('Simplan.event.views.new_choice', args=[off1.pk]),
{'title': u'Oui', 'description': u'Oh oui', },
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.new_choice', args=[off1.pk]),
{'title': u'Non', 'description': u'Out boy', },
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
def test_add_choice_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
off1 = OptionFreeFactory(event=euf, position=1)
off2 = OptionFreeFactory(event=euf, position=2)
otf1 = OptionTimeFactory(event=euf, position=5)
off3 = OptionFreeFactory(event=euf, position=7)
res = self.client.post(reverse('Simplan.event.views.new_choice', args=[off1.pk]),
{'title': u'Oui', 'description': u'Oh oui', },
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.new_choice', args=[off1.pk]),
{'title': u'Non', 'description': u'Out boy', },
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
def test_modify_choice_for_guest(self):
egf = EventGuestFactory()
off1 = OptionFreeFactory(event=egf, position=1)
off2 = OptionFreeFactory(event=egf, position=2)
otf1 = OptionTimeFactory(event=egf, position=5)
off3 = OptionFreeFactory(event=egf, position=7)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off1.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off1.pk), 'choice_title': u'Non', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off2.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off2.pk), 'choice_title': u'Non', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(otf1.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 1)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'choice_pk': '3', 'choice_title': u'Probablement', 'choice_maj':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been updated
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 1)
# check global values
self.assertEquals(Choice.objects.get(pk=1).title, 'Oui')
self.assertEquals(Choice.objects.get(pk=1).positive, True)
self.assertEquals(Choice.objects.get(pk=2).title, 'Non')
self.assertEquals(Choice.objects.get(pk=2).positive, False)
self.assertEquals(Choice.objects.get(pk=3).title, 'Probablement')
self.assertEquals(Choice.objects.get(pk=3).positive, False)
self.assertEquals(Choice.objects.get(pk=4).title, 'Non')
self.assertEquals(Choice.objects.get(pk=4).positive, False)
self.assertEquals(Choice.objects.get(pk=5).title, 'Oui')
self.assertEquals(Choice.objects.get(pk=5).positive, True)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'event_pk': egf.pk, 'choice_title': u'Peut-être', 'choice_positive':'check', 'answer_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 2)
# check global values
self.assertEquals(Choice.objects.get(pk=6).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=6).positive, True)
self.assertEquals(Choice.objects.get(pk=7).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=7).positive, True)
self.assertEquals(Choice.objects.get(pk=8).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=8).positive, True)
self.assertEquals(Choice.objects.get(pk=9).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=9).positive, True)
def test_modify_choice_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
off1 = OptionFreeFactory(event=euf, position=1)
off2 = OptionFreeFactory(event=euf, position=2)
otf1 = OptionTimeFactory(event=euf, position=5)
off3 = OptionFreeFactory(event=euf, position=7)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off1.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off1.pk), 'choice_title': u'Non', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off2.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(off2.pk), 'choice_title': u'Non', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 0)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'option_pk': str(otf1.pk), 'choice_title': u'Oui', 'choice_positive': 'check', 'choice_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 1)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'choice_pk': '3', 'choice_title': u'Probablement', 'choice_maj':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been updated
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 1)
# check global values
self.assertEquals(Choice.objects.get(pk=1).title, 'Oui')
self.assertEquals(Choice.objects.get(pk=1).positive, True)
self.assertEquals(Choice.objects.get(pk=2).title, 'Non')
self.assertEquals(Choice.objects.get(pk=2).positive, False)
self.assertEquals(Choice.objects.get(pk=3).title, 'Probablement')
self.assertEquals(Choice.objects.get(pk=3).positive, False)
self.assertEquals(Choice.objects.get(pk=4).title, 'Non')
self.assertEquals(Choice.objects.get(pk=4).positive, False)
self.assertEquals(Choice.objects.get(pk=5).title, 'Oui')
self.assertEquals(Choice.objects.get(pk=5).positive, True)
res = self.client.post(reverse('Simplan.event.views.modify_choice'),
{'event_pk': euf.pk, 'choice_title': u'Peut-être', 'choice_positive':'check', 'answer_add':'valider'},
follow=False)
self.assertEqual(res.status_code, 302)
# check choice has been created
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 1)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 2)
# check global values
self.assertEquals(Choice.objects.get(pk=6).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=6).positive, True)
self.assertEquals(Choice.objects.get(pk=7).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=7).positive, True)
self.assertEquals(Choice.objects.get(pk=8).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=8).positive, True)
self.assertEquals(Choice.objects.get(pk=9).title, u'Peut-être')
self.assertEquals(Choice.objects.get(pk=9).positive, True)
def test_delete_choice_for_guest(self):
egf = EventGuestFactory()
off1 = OptionFreeFactory(event=egf, position=1)
off2 = OptionFreeFactory(event=egf, position=2)
otf1 = OptionTimeFactory(event=egf, position=5)
off3 = OptionFreeFactory(event=egf, position=7)
cf1_1 = ChoiceFactory(option=off1, position=1)
cf1_2 = ChoiceFactory(option=off1, position=2)
cf1_3 = ChoiceFactory(option=off1, position=3)
cf2_1 = ChoiceFactory(option=off2, position=1)
cf2_2 = ChoiceFactory(option=off2, position=2)
cf2_3 = ChoiceFactory(option=off2, position=3)
cf3_1 = ChoiceFactory(option=otf1, position=1)
cf3_2 = ChoiceFactory(option=otf1, position=2)
cf3_3 = ChoiceFactory(option=otf1, position=3)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[cf2_3.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[cf3_2.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[999]), follow=False)
self.assertEqual(res.status_code, 404)
        # check that the choices were deleted
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 2)
def test_delete_choice_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
off1 = OptionFreeFactory(event=euf, position=1)
off2 = OptionFreeFactory(event=euf, position=2)
otf1 = OptionTimeFactory(event=euf, position=5)
off3 = OptionFreeFactory(event=euf, position=7)
cf1_1 = ChoiceFactory(option=off1, position=1)
cf1_2 = ChoiceFactory(option=off1, position=2)
cf1_3 = ChoiceFactory(option=off1, position=3)
cf2_1 = ChoiceFactory(option=off2, position=1)
cf2_2 = ChoiceFactory(option=off2, position=2)
cf2_3 = ChoiceFactory(option=off2, position=3)
cf3_1 = ChoiceFactory(option=otf1, position=1)
cf3_2 = ChoiceFactory(option=otf1, position=2)
cf3_3 = ChoiceFactory(option=otf1, position=3)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[cf2_3.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[cf3_2.pk]), follow=False)
self.assertEqual(res.status_code, 302)
res = self.client.get(reverse('Simplan.event.views.del_choice', args=[999]), follow=False)
self.assertEqual(res.status_code, 404)
        # check that the choices were deleted
self.assertEquals(Choice.objects.filter(option__pk=off1.pk).count(), 3)
self.assertEquals(Choice.objects.filter(option__pk=off2.pk).count(), 2)
self.assertEquals(Choice.objects.filter(option__pk=off3.pk).count(), 0)
self.assertEquals(Choice.objects.filter(option__pk=otf1.pk).count(), 2)
def test_invit_end_event_for_guest(self):
egf = EventGuestFactory()
res = self.client.get(reverse('Simplan.event.views.invit_end_event', args=[egf.slug]), follow=False)
self.assertEqual(res.status_code, 302)
self.assertEquals(len(mail.outbox), 3)
self.assertEquals(mail.outbox[0].subject, "Simplan - Lien du sondage : " + egf.title)
self.assertEquals(mail.outbox[0].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[0].to, [egf.email.encode('utf-8')])
self.assertEquals(mail.outbox[1].subject, "Simplan - Participez au Sondage : " + egf.title)
self.assertEquals(mail.outbox[1].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[1].to, [egf.mailing_list.split(',')[0].encode('utf-8')])
self.assertEquals(mail.outbox[2].subject, "Simplan - Participez au Sondage : " + egf.title)
self.assertEquals(mail.outbox[2].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[2].to, [egf.mailing_list.split(',')[1].encode('utf-8')])
def test_invit_end_event_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
res = self.client.get(reverse('Simplan.event.views.invit_end_event', args=[euf.slug]), follow=False)
self.assertEqual(res.status_code, 302)
self.assertEquals(len(mail.outbox), 3)
self.assertEquals(mail.outbox[0].subject, "Simplan - Lien du sondage : " + euf.title)
self.assertEquals(mail.outbox[0].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[0].to, [euf.author.email.encode('utf-8')])
self.assertEquals(mail.outbox[1].subject, "Simplan - Participez au Sondage : " + euf.title)
self.assertEquals(mail.outbox[1].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[1].to, [euf.mailing_list.split(',')[0].encode('utf-8')])
self.assertEquals(mail.outbox[2].subject, "Simplan - Participez au Sondage : " + euf.title)
self.assertEquals(mail.outbox[2].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[2].to, [euf.mailing_list.split(',')[1].encode('utf-8')])
    def test_end_event_for_guest(self):
egf = EventGuestFactory()
res = self.client.get(reverse('Simplan.event.views.end_event', args=[egf.slug]), follow=False)
self.assertEqual(res.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, "Simplan - Lien du sondage : " + egf.title)
self.assertEquals(mail.outbox[0].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[0].to, [egf.email.encode('utf-8')])
def test_end_event_for_user(self):
user = UserFactory()
log = self.client.login(username=user.username, password='1234')
self.assertEqual(log, True)
euf = EventUserFactory(author=user)
res = self.client.get(reverse('Simplan.event.views.end_event', args=[euf.slug]), follow=False)
self.assertEqual(res.status_code, 302)
self.assertEquals(len(mail.outbox), 1)
self.assertEquals(mail.outbox[0].subject, "Simplan - Lien du sondage : " + euf.title)
self.assertEquals(mail.outbox[0].from_email, 'Simplan <noreply@simplann.eu>')
self.assertEquals(mail.outbox[0].to, [euf.author.email.encode('utf-8')])
def test_make_choice_for_guest(self):
egf = EventGuestFactory()
off1 = OptionFreeFactory(event=egf, position=1)
off2 = OptionFreeFactory(event=egf, position=2)
otf1 = OptionTimeFactory(event=egf, position=5)
cf1_1 = ChoiceFactory(option=off1, position=1)
cf1_2 = ChoiceFactory(option=off1, position=2)
cf1_3 = ChoiceFactory(option=off1, position=3)
cf2_1 = ChoiceFactory(option=off2, position=1)
cf2_2 = ChoiceFactory(option=off2, position=2)
cf2_3 = ChoiceFactory(option=off2, position=3)
cf3_1 = ChoiceFactory(option=otf1, position=1)
cf3_2 = ChoiceFactory(option=otf1, position=2)
cf3_3 = ChoiceFactory(option=otf1, position=3)
res = self.client.post(reverse('Simplan.event.views.make_choice', args=[egf.slug]),
{
'pseudo': 'choseen_one',
off1.pk: cf1_1.pk,
off2.pk: cf2_3.pk,
otf1.pk: cf3_1.pk,
}
,
follow=False)
self.assertEqual(res.status_code, 302)
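        # Added note (an inference from the slice widths 8-4-4-4-12 below):
        # slug_public appears to be a UUID whose five dash-separated groups are
        # passed to the view as separate URL arguments.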
res = self.client.get(reverse('Simplan.event.views.view_event', args=[egf.slug_public[0:8], egf.slug_public[9:13], egf.slug_public[14:18], egf.slug_public[19:23], egf.slug_public[24:36]]), follow=True)
self.assertEqual(res.status_code, 200)
def test_make_choice_for_user(self):
# TODO
self.assertEqual(0, 0)
|
meuhia/Simplann
|
Simplan/event/tests.py
|
Python
|
gpl-3.0
| 51,991
|
[
"FLEUR"
] |
c11093b57d63caaba8283f0ba08b813d9015b6d100bddd4c0bbc65c936859aa3
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
# Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta
from abc import abstractmethod
from math import ceil
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..externals import six
from ..utils import check_array
from ..utils import check_random_state
from ..utils import compute_sample_weight
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..exceptions import NotFittedError
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import DepthFirstTreeBuilder
from ._tree import BestFirstTreeBuilder
from ._tree import Tree
from . import _tree, _splitter, _criterion
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _criterion.Gini, "entropy": _criterion.Entropy}
CRITERIA_REG = {"mse": _criterion.MSE, "friedman_mse": _criterion.FriedmanMSE,
"mae": _criterion.MAE}
DENSE_SPLITTERS = {"best": _splitter.BestSplitter,
"random": _splitter.RandomSplitter}
SPARSE_SPLITTERS = {"best": _splitter.BestSparseSplitter,
"random": _splitter.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
min_impurity_split,
class_weight=None,
presort=False):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.min_impurity_split = min_impurity_split
self.class_weight = class_weight
self.presort = presort
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
y = check_array(y, ensure_2d=False, dtype=None)
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is used rather than y[:, np.newaxis] because it
            # preserves the data contiguity of y.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
check_classification_targets(y)
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_encoded = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_encoded[:, k] = np.unique(y[:, k],
return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_encoded
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.min_samples_leaf, (numbers.Integral, np.integer)):
if not 1 <= self.min_samples_leaf:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = self.min_samples_leaf
else: # float
if not 0. < self.min_samples_leaf <= 0.5:
raise ValueError("min_samples_leaf must be at least 1 "
"or in (0, 0.5], got %s"
% self.min_samples_leaf)
min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))
if isinstance(self.min_samples_split, (numbers.Integral, np.integer)):
if not 2 <= self.min_samples_split:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the integer %s"
% self.min_samples_split)
min_samples_split = self.min_samples_split
else: # float
if not 0. < self.min_samples_split <= 1.:
raise ValueError("min_samples_split must be an integer "
"greater than 1 or a float in (0.0, 1.0]; "
"got the float %s"
% self.min_samples_split)
min_samples_split = int(ceil(self.min_samples_split * n_samples))
min_samples_split = max(2, min_samples_split)
min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1,
int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if sample_weight is None:
min_weight_leaf = (self.min_weight_fraction_leaf *
n_samples)
else:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
if self.min_impurity_split < 0.:
raise ValueError("min_impurity_split must be greater than "
"or equal to 0")
presort = self.presort
# Allow presort to be 'auto', which means True if the dataset is dense,
# otherwise it will be False.
if self.presort == 'auto' and issparse(X):
presort = False
elif self.presort == 'auto':
presort = True
if presort is True and issparse(X):
raise ValueError("Presorting is not supported for sparse "
"matrices.")
# If multiple trees are built on the same dataset, we only want to
# presort once. Splitters now can accept presorted indices if desired,
# but do not handle any presorting themselves. Ensemble algorithms
# which desire presorting must do presorting themselves and pass that
# matrix into each tree.
if X_idx_sorted is None and presort:
X_idx_sorted = np.asfortranarray(np.argsort(X, axis=0),
dtype=np.int32)
if presort and X_idx_sorted.shape != X.shape:
raise ValueError("The shape of X (X.shape = {}) doesn't match "
"the shape of X_idx_sorted (X_idx_sorted"
".shape = {})".format(X.shape,
X_idx_sorted.shape))
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_,
n_samples)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
min_samples_leaf,
min_weight_leaf,
random_state,
self.presort)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth, self.min_impurity_split)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes,
self.min_impurity_split)
builder.build(self.tree_, X, y, sample_weight, X_idx_sorted)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predicted values.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
        Return the index of the leaf that each sample ends up in.
.. versionadded:: 0.17
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
def decision_path(self, X, check_input=True):
"""Return the decision path in the tree
.. versionadded:: 0.18
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Returns
-------
indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non-zero elements
            indicate that the sample goes through the corresponding node.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.decision_path(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
check_is_fitted(self, 'tree_')
return self.tree_.compute_feature_importances()
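# A hedged illustration of the importance semantics documented above: with one
# informative column and one uninformative one, the normalized impurity
# reduction is attributed entirely to the former (toy data, assuming the
# sklearn.tree import path):
#
#     >>> from sklearn.tree import DecisionTreeClassifier
#     >>> X = [[0, 7], [1, 7], [0, 8], [1, 8]]   # only column 0 separates y
#     >>> y = [0, 1, 0, 1]
#     >>> DecisionTreeClassifier(random_state=0).fit(X, y).feature_importances_
#     array([ 1.,  0.])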
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
class_weight : dict, list of dicts, "balanced" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None,
presort=False):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree classifier from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels) as integers or strings.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. Splits are also
ignored if they would result in any single class carrying a
negative weight in either child node.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeClassifier, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'tree_')
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
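# A hedged usage sketch of the probability methods above (evaluated in this
# module's namespace; the leaf fractions are exact here because the unpruned
# tree grows pure leaves on this toy data):
#
#     >>> clf = DecisionTreeClassifier(random_state=0)
#     >>> clf.fit([[0], [0], [1]], [0, 0, 1]).predict_proba([[0], [1]])
#     array([[ 1.,  0.],
#            [ 0.,  1.]])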
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. Supported criteria
are "mse" for the mean squared error, which is equal to variance
reduction as feature selection criterion, and "mae" for the mean
absolute error.
.. versionadded:: 0.18
Mean Absolute Error (MAE) criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a percentage and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
.. versionchanged:: 0.18
Added float values for percentages.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node:
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a percentage and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
.. versionchanged:: 0.18
Added float values for percentages.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
min_impurity_split : float, optional (default=1e-7)
Threshold for early stopping in tree growth. If the impurity
of a node is below the threshold, the node is a leaf.
.. versionadded:: 0.18
presort : bool, optional (default=False)
Whether to presort the data to speed up the finding of best splits in
fitting. For the default settings of a decision tree on large
datasets, setting this to true may slow down the training process.
When using either a smaller dataset or a restricted depth, this may
speed up the training.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] https://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.model_selection import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
presort=False):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state,
min_impurity_split=min_impurity_split,
presort=presort)
def fit(self, X, y, sample_weight=None, check_input=True,
X_idx_sorted=None):
"""Build a decision tree regressor from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (real numbers). Use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node.
check_input : boolean, (default=True)
            Allow to bypass several input checks.
            Don't use this parameter unless you know what you are doing.
X_idx_sorted : array-like, shape = [n_samples, n_features], optional
            The indexes of the sorted training input samples. If many trees
            are grown on the same dataset, this allows the ordering to be
            cached between trees. If None, the data will be sorted here.
            Don't use this parameter unless you know what you are doing.
Returns
-------
self : object
Returns self.
"""
super(DecisionTreeRegressor, self).fit(
X, y,
sample_weight=sample_weight,
check_input=check_input,
X_idx_sorted=X_idx_sorted)
return self
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
min_impurity_split=1e-7,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
min_impurity_split=min_impurity_split,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
min_impurity_split=1e-7,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=random_state)
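# A hedged sketch echoing the warnings above: a single extra-tree is normally
# consumed through the ensemble wrappers, assumed importable from
# sklearn.ensemble (trivially separable toy data, so the prediction is stable):
#
#     >>> from sklearn.ensemble import ExtraTreesClassifier
#     >>> X, y = [[0], [1]], [0, 1]
#     >>> ExtraTreesClassifier(n_estimators=10, random_state=0).fit(X, y).predict([[0]])
#     array([0])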
|
jaidevd/scikit-learn
|
sklearn/tree/tree.py
|
Python
|
bsd-3-clause
| 44,488
|
[
"Brian"
] |
1074137e9f5050192145163684d20885c9c03fd9aab4c7eb42d39215e7516785
|
"""Manage IPython.parallel clusters in the notebook.
Authors:
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from tornado import web
from zmq.eventloop import ioloop
from IPython.config.configurable import LoggingConfigurable
from IPython.utils.traitlets import Dict, Instance, CFloat
from IPython.parallel.apps.ipclusterapp import IPClusterStart
from IPython.core.profileapp import list_profiles_in
from IPython.core.profiledir import ProfileDir
from IPython.utils import py3compat
from IPython.utils.path import get_ipython_dir
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class DummyIPClusterStart(IPClusterStart):
"""Dummy subclass to skip init steps that conflict with global app.
Instantiating and initializing this class should result in fully configured
launchers, but no other side effects or state.
"""
def init_signal(self):
pass
def reinit_logging(self):
pass
class ClusterManager(LoggingConfigurable):
profiles = Dict()
delay = CFloat(1., config=True,
help="delay (in s) between starting the controller and the engines")
loop = Instance('zmq.eventloop.ioloop.IOLoop')
def _loop_default(self):
from zmq.eventloop.ioloop import IOLoop
return IOLoop.instance()
def build_launchers(self, profile_dir):
starter = DummyIPClusterStart(log=self.log)
starter.initialize(['--profile-dir', profile_dir])
cl = starter.controller_launcher
esl = starter.engine_launcher
n = starter.n
return cl, esl, n
def get_profile_dir(self, name, path):
p = ProfileDir.find_profile_dir_by_name(path,name=name)
return p.location
def update_profiles(self):
"""List all profiles in the ipython_dir and cwd.
"""
for path in [get_ipython_dir(), py3compat.getcwd()]:
for profile in list_profiles_in(path):
pd = self.get_profile_dir(profile, path)
if profile not in self.profiles:
self.log.debug("Adding cluster profile '%s'" % profile)
self.profiles[profile] = {
'profile': profile,
'profile_dir': pd,
'status': 'stopped'
}
def list_profiles(self):
self.update_profiles()
# sorted list, but ensure that 'default' always comes first
default_first = lambda name: name if name != 'default' else ''
result = [self.profile_info(p) for p in sorted(self.profiles, key=default_first)]
return result
def check_profile(self, profile):
if profile not in self.profiles:
raise web.HTTPError(404, u'profile not found')
def profile_info(self, profile):
self.check_profile(profile)
result = {}
data = self.profiles.get(profile)
result['profile'] = profile
result['profile_dir'] = data['profile_dir']
result['status'] = data['status']
if 'n' in data:
result['n'] = data['n']
return result
def start_cluster(self, profile, n=None):
"""Start a cluster for a given profile."""
self.check_profile(profile)
data = self.profiles[profile]
if data['status'] == 'running':
raise web.HTTPError(409, u'cluster already running')
cl, esl, default_n = self.build_launchers(data['profile_dir'])
n = n if n is not None else default_n
def clean_data():
data.pop('controller_launcher',None)
data.pop('engine_set_launcher',None)
data.pop('n',None)
data['status'] = 'stopped'
def engines_stopped(r):
self.log.debug('Engines stopped')
if cl.running:
cl.stop()
clean_data()
esl.on_stop(engines_stopped)
def controller_stopped(r):
self.log.debug('Controller stopped')
if esl.running:
esl.stop()
clean_data()
cl.on_stop(controller_stopped)
dc = ioloop.DelayedCallback(lambda: cl.start(), 0, self.loop)
dc.start()
dc = ioloop.DelayedCallback(lambda: esl.start(n), 1000*self.delay, self.loop)
dc.start()
self.log.debug('Cluster started')
data['controller_launcher'] = cl
data['engine_set_launcher'] = esl
data['n'] = n
data['status'] = 'running'
return self.profile_info(profile)
def stop_cluster(self, profile):
"""Stop a cluster for a given profile."""
self.check_profile(profile)
data = self.profiles[profile]
if data['status'] == 'stopped':
raise web.HTTPError(409, u'cluster not running')
data = self.profiles[profile]
cl = data['controller_launcher']
esl = data['engine_set_launcher']
if cl.running:
cl.stop()
if esl.running:
esl.stop()
# Return a temp info dict, the real one is updated in the on_stop
# logic above.
result = {
'profile': data['profile'],
'profile_dir': data['profile_dir'],
'status': 'stopped'
}
return result
def stop_all_clusters(self):
for p in self.profiles.keys():
self.stop_cluster(p)
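# A hedged usage sketch of the manager above (hypothetical interactive
# session; assumes a 'default' IPython profile exists on disk, hence the
# skipped outputs):
#
#     >>> cm = ClusterManager()
#     >>> [p['profile'] for p in cm.list_profiles()]   # doctest: +SKIP
#     [u'default', ...]
#     >>> info = cm.start_cluster('default', n=4)      # controller, then engines
#     >>> info['status'], info['n']                    # doctest: +SKIP
#     ('running', 4)
#     >>> cm.stop_cluster('default')['status']         # doctest: +SKIP
#     'stopped'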
|
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/IPython/html/services/clusters/clustermanager.py
|
Python
|
bsd-3-clause
| 5,952
|
[
"Brian"
] |
e17083802710640351d0dfb3687a7b9df102a2cf268505f0ee287f0945b86f36
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
# vim:ai:sta:et:ts=4:sw=4:sts=4
"""kernelng 0.x
Tool for maintaining customized overlays of kernel-ng.eclass-based ebuilds
Copyright 2005-2014 Gentoo Foundation
Copyright (C) 2005 Colin Kingsley <tercel@gentoo.org>
Copyright (C) 2008 Zac Medico <zmedico@gentoo.org>
Copyright (C) 2009 Sebastian Pipping <sebastian@pipping.org>
Copyright (C) 2009 Christian Ruppert <idl0r@gentoo.org>
Copyright (C) 2012 Brian Dolbec <dolsen@gentoo.org>
Copyright (C) 2014 by Armin Ronacher
Copyright (C) 2014 Gregory M. Turner <gmt@be-evil.net>
FIXME: Due to interface limitations, portions of this code are
cut-pasted from Armin Ronacher's click framework, which has
a more liberal license than that of kernel-ng-util. Armin's code
is BSD licensed: see https://github.com/mitsuhiko/click/blob/master/LICENSE
for the gory details.
Feel free to treat everything in this file under the terms of that
license. Note: when and if he can get all of the mirror-select-isms out of
kernel-ng-util, Greg may attempt to relicense everything along similar
lines, so as to clear up any license-soup problems this creates.
"""
from __future__ import print_function
from contextlib import contextmanager
from click.core import Context, Command, Group
from click.termui import style, get_terminal_size
from click.formatting import HelpFormatter
from click.decorators import command, option, version_option
import click
from .kngclicktextwrapper import KNGClickTextWrapper
from .kngtextwrapper import kngterm_len, kngexpandtabs
from .version import version
from .output import set_verbose_level, trace
KNG_OPTIONS_METAVAR = ''.join((
style('[', fg='blue'),
style('OPTIONS', fg='cyan', bold=True),
style(']', fg='blue')))
SUBCOMMAND_METAVAR = ''.join((
style('SUBCOMMAND', fg='cyan', bold=True),
' ',
style('[', fg='blue'),
style('ARGS', fg='cyan', bold=True),
style(']...', fg='blue')))
SUBCOMMANDS_METAVAR = ''.join((
style('SUBCOMMAND1', fg='cyan', bold=True),
' ',
style('[', fg='blue'),
style('ARGS', fg='cyan', bold=True),
style(']...', fg='blue'),
' ',
style('[', fg='blue'),
style('SUBCOMMAND2', fg='cyan', bold=True),
' ',
style('[', fg='blue'),
style('ARGS', fg='cyan', bold=True),
style(']...', fg='blue'),
style(']...', fg='blue')))
def kngwrap_text(text, width=78, initial_indent='', subsequent_indent='',
preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
text = kngexpandtabs(text)
wrapper = KNGClickTextWrapper(width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False)
if not preserve_paragraphs:
return wrapper.fill(text)
p = []
buf = []
indent = None
def _flush_par():
if not buf:
return
if buf[0].strip() == '\b':
p.append((indent or 0, True, '\n'.join(buf[1:])))
else:
p.append((indent or 0, False, ' '.join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None
else:
if indent is None:
orig_len = kngterm_len(line)
line = line.lstrip()
indent = orig_len - kngterm_len(line)
buf.append(line)
_flush_par()
rv = []
for indent, raw, text in p:
with wrapper.extra_indent(' ' * indent):
if raw:
rv.append(wrapper.indent_only(text))
else:
rv.append(wrapper.fill(text))
return '\n\n'.join(rv)
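# A hedged sketch of the paragraph mode described in the docstring (exact
# output depends on KNGClickTextWrapper, so it is skipped here): the paragraph
# opened by a '\b' (\x08) line passes through unwrapped while the surrounding
# paragraphs are refilled to the given width.
#
#     >>> text = 'one two three four\n\n\x08\nkeep\n  these lines\n\nlast one'
#     >>> print(kngwrap_text(text, width=10, preserve_paragraphs=True))  # doctest: +SKIP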
click.formatting.__dict__['wrap_text'] = kngwrap_text
class KNGHelpFormatter(HelpFormatter):
# allow a maximum default width of 120 vs. HelpFormatter's 80
@trace
def __init__(self, *args, **kwargs):
if 'width' in kwargs:
width = kwargs.pop('width')
else:
width = None
width = max(min(get_terminal_size()[0], 120) - 2, 50) if width is None else width
kwargs['width'] = width
self._kngsection = None
super(KNGHelpFormatter, self).__init__(*args, **kwargs)
@trace
def write_heading(self, heading):
"""
Writes a heading into the buffer, applying some styling if the heading
matches the current section name.
"""
if self._kngsection is not None and heading == self._kngsection:
if heading == 'Commands':
heading = 'Subcommand'
self.write('%*s%s%s\n' % (self.current_indent, '',
style(heading, fg='cyan', bold=True), style(':', fg='white', bold=True)))
else:
super(KNGHelpFormatter, self).write_heading(heading)
@contextmanager
@trace
def section(self, name):
"""Wrap click.HelpFormatter.section() so as to track the
most recently added section name.
:param name: the section name to pass to click.HelpFormatter.section()
"""
oldkngsection = self._kngsection
try:
self._kngsection = name
with super(KNGHelpFormatter, self).section(name):
yield
finally:
self._kngsection = oldkngsection
@trace
def write_usage(self, prog, args='', prefix='Usage: '):
prog = style(prog, fg='white', bold=True)
super(KNGHelpFormatter, self).write_usage(prog, args=args, prefix=prefix)
@trace
def dl_style_word(self, word):
if len(word) == 0:
return word
elif word[:1] == '-':
return style(word, fg='white', bold=True)
elif self._kngsection == 'Options':
            # for the options definition list, we make non-hyphenated
# words yellow; otherwise, we stick to white
return style(word, fg='yellow', bold=True)
else:
return style(word, fg='white', bold=True)
@trace
def write_dl(self, rows, *args, **kwargs):
newrows = []
for row in rows:
if len(row) != 2:
raise TypeError('Expected two columns for definition list')
newrows.append((
','.join((
' '.join((
self.dl_style_word(spacesepstr) for spacesepstr in commasepstr.split(' ')
)) for commasepstr in row[0].split(',')
)),
row[1]
))
super(KNGHelpFormatter, self).write_dl(newrows, *args, **kwargs)
class KNGContext(Context):
@trace
def make_formatter(self):
return KNGHelpFormatter(width=self.terminal_width)
no_color_mode = False
# generate a new echo function suitable for monkey patching an old one
# nb: definitely not a good idea to trace the inner function!!!
def nocolorecho(oldecho):
def newecho(*args, **kwargs):
if no_color_mode:
if len(args) > 0:
args=(click._compat.strip_ansi(args[0]),) + args[1:]
else:
message = kwargs.pop('message', None)
if message is not None:
kwargs['message'] = click._compat.strip_ansi(message)
oldecho(*args, **kwargs)
return newecho
# monkey patch click's echo functions to always ignore color, regardless
# of the output's isatty-ness when no_color_mode is True -- but only
# bother if no_color_mode is, indeed, true, and we haven't already
# monkey patched it.
def no_color(ctx, command, value):
global no_color_mode
oldval = no_color_mode
no_color_mode = no_color_mode or value
if (not oldval) and no_color_mode:
click.utils.__dict__['echo'] = nocolorecho(click.utils.echo)
click.core.__dict__['echo'] = nocolorecho(click.core.echo)
click.__dict__['echo'] = nocolorecho(click.echo)
class KNGGroup(Group):
@trace
def __init__(self, *args, **kwargs):
options_metavar = kwargs.pop('options_metavar', KNG_OPTIONS_METAVAR)
kwargs['options_metavar'] = options_metavar
chain=kwargs.pop('chain', False)
kwargs['chain'] = chain
subcommand_metavar = kwargs.pop('subcommand_metavar',
SUBCOMMANDS_METAVAR if chain else SUBCOMMAND_METAVAR)
kwargs['subcommand_metavar'] = subcommand_metavar
super(KNGGroup, self).__init__(*args, **kwargs)
@trace
def make_context(self, info_name, args, parent=None, **extra):
for key, value in iter((self.context_settings or {}).items()):
if key not in extra:
extra[key] = value
ctx = KNGContext(self, info_name=info_name, parent=parent, **extra)
self.parse_args(ctx, args)
return ctx
def kngcommand(self, *args, **kwargs):
def decorator(f):
cmd = kngcommand(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def knggroup(self, *args, **kwargs):
def decorator(f):
cmd = knggroup(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
class KNGCommand(Command):
@trace
def __init__(self, *args, **kwargs):
options_metavar = kwargs.pop('options_metavar', KNG_OPTIONS_METAVAR)
kwargs['options_metavar'] = options_metavar
super(KNGCommand, self).__init__(*args, **kwargs)
@trace
def make_context(self, info_name, args, parent=None, **extra):
for key, value in iter((self.context_settings or {}).items()):
if key not in extra:
extra[key] = value
ctx = KNGContext(self, info_name=info_name, parent=parent, **extra)
self.parse_args(ctx, args)
return ctx
def kngcommand(self, *args, **kwargs):
def decorator(f):
cmd = kngcommand(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def knggroup(self, *args, **kwargs):
def decorator(f):
cmd = knggroup(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
NOCOLORIZEHELP = "Do not colorize output or use advanced terminal features."
QUIETHELP = "Skip non-essential outputs and error messages (mostly for robots)."
VERBOSEHELP = "Include optional progress and informational output suitable for humans."
DEBUGHELP = "Provide counterproductively detailed output (mostly for developers)."
def kngcommandcommon(name=None, cls=None, **kwargs):
'''
Invoke the click.command decorator with additional decorations common
to all commands and groups in the kernel-ng-utils command-line framework.
Routing through this instead of click.command allows users to place the
options provided by the additional decorations freely within the kernelng
command-line, so, for example, kernelng -C foo bar, kernelng foo -C bar,
and kernelng foo bar -C all do the expected thing. The additional
decorations hard-code (along with their implementations) the following
options::
-v, --verbose: report progress in detail (verboseness=2)
        -q, --quiet: avoid nonessential output (verboseness=0)
--debug: dump silly amounts of information (verboseness=3)
-C, --no-color: suppresses fancy terminal behavior
-V, --version: dump version info & terminate
'''
def decorator(f):
return command(name, cls, **kwargs)(
option('-v', '--verbose', 'verbosity', expose_value=False, flag_value=2,
help=VERBOSEHELP, callback=set_verbose_level, is_eager=True)(
option('-q', '--quiet', 'verbosity', expose_value=False, flag_value=0,
help=QUIETHELP, callback=set_verbose_level, is_eager=True)(
option('--debug', 'verbosity', expose_value=False, flag_value=3,
help=DEBUGHELP, callback=set_verbose_level, is_eager=True)(
option('-C', '--no-color', is_flag=True, default=False, is_eager=True,
help=NOCOLORIZEHELP, expose_value=False, callback=no_color)(
version_option(version, '-V', '--version')(f))))))
return decorator
def kngcommand(name=None, cls=None, **kwargs):
cls = KNGCommand if cls is None else cls
return kngcommandcommon(name, cls, **kwargs)
def knggroup(name=None, cls=None, **kwargs):
cls = KNGGroup if cls is None else cls
return kngcommandcommon(name, cls, **kwargs)
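# A hedged sketch (hypothetical command names) of how the decorators above
# compose; the resulting CLI accepts -v/-q/--debug/-C/-V anywhere on its
# command line, per kngcommandcommon:
#
#     >>> @knggroup('kng-demo')
#     ... def cli():
#     ...     """Toy entry point."""
#     >>> @cli.kngcommand('hello')
#     ... def hello():
#     ...     click.echo('hello')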
class Octal_3ParamType(click.ParamType):
name = 'octal_3'
@trace
def convert(self, value, param, ctx):
origvalue = value
try:
if not isinstance(value, int):
value = value.strip()
while value[:1] == '0':
value = value[1:]
if len(value) > 3:
self.fail('"%s" is not a valid 3-digit octal value' % origvalue, param, ctx)
value = int('0%s' % value, 8)
if not isinstance(value, int):
self.fail('"%s" is not a valid 3-digit octal value' % origvalue, param, ctx)
if 0 <= value and value <= 0o777:
return value
else:
self.fail('0%o is outside the allowed range 0-0%o' % (value, 0o777), param, ctx)
except ValueError:
self.fail('%s is not a valid 3-digit octal value' % value, param, ctx)
OCTAL_3 = Octal_3ParamType()
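# A minimal sketch of the param type above via direct convert() calls: values
# arrive as strings and come back as ints, with or without a leading zero.
#
#     >>> OCTAL_3.convert('755', None, None)
#     493
#     >>> OCTAL_3.convert('0644', None, None) == 0o644
#     True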
|
gmt/kernel-ng-util
|
kernelng/kngclick.py
|
Python
|
gpl-2.0
| 14,411
|
[
"Brian"
] |
aa9fa42fa8611243ac564068f47a10981cee18dd0fdabdb122e9fd1e10bbe93a
|
# pylint: disable=C0111
# pylint: disable=W0621
from lettuce import world, step
from common import *
from terrain.steps import reload_the_page
from selenium.common.exceptions import InvalidElementStateException
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from contentstore.utils import reverse_course_url
from nose.tools import assert_in, assert_not_in, assert_equal, assert_not_equal # pylint: disable=E0611
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
world.click_course_settings()
link_css = 'li.nav-course-settings-grading a'
world.css_click(link_css)
@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
grade_css = '.new-grade-button'
for i in range(int(many)):
world.css_click(grade_css)
@step(u'I delete a grade')
def delete_grade(step):
#grade_css = 'li.grade-specific-bar > a.remove-button'
#range_css = '.grade-specific-bar'
#world.css_find(range_css)[1].mouseover()
#world.css_click(grade_css)
world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
grade_slider_css = '.grade-specific-bar'
all_grades = world.css_find(grade_slider_css)
assert_equal(len(all_grades), int(how_many))
@step(u'I move a grading section')
def move_grade_slider(step):
moveable_css = '.ui-resizable-e'
f = world.css_find(moveable_css).first
f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()
@step(u'I see that the grade range has changed')
def confirm_change(step):
range_css = '.range'
all_ranges = world.css_find(range_css)
for i in range(len(all_ranges)):
assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
name_id = '#course-grading-assignment-name'
index = get_type_index(old_name)
f = world.css_find(name_id)[index]
assert_not_equal(index, -1)
for count in range(len(old_name)):
f._element.send_keys(Keys.END, Keys.BACK_SPACE)
f._element.send_keys(new_name)
@step(u'I go back to the main course page')
def main_course_page(step):
course_name = world.scenario_dict['COURSE'].display_name.replace(' ', '_')
course_key = SlashSeparatedCourseKey(
world.scenario_dict['COURSE'].org,
world.scenario_dict['COURSE'].number,
course_name
)
main_page_link = reverse_course_url('course_handler', course_key)
world.visit(main_page_link)
assert_in('Course Outline', world.css_text('h1.page-header'))
@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
assignment_menu_css = 'ul.menu > li > a'
    # First assert that it is there; it may take a bit to redraw
assert_true(
world.css_find(assignment_menu_css),
msg="Could not find assignment menu"
)
assignment_menu = world.css_find(assignment_menu_css)
allnames = [item.html for item in assignment_menu]
if do_not:
assert_not_in(name, allnames)
else:
assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
delete_css = '.remove-grading-data'
world.css_click(delete_css, index=get_type_index(to_delete))
@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
add_button_css = '.add-grading-data'
world.css_click(add_button_css)
name_id = '#course-grading-assignment-name'
new_assignment = world.css_find(name_id)[-1]
new_assignment._element.send_keys(new_name)
@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
weight_field = world.css_find(weight_id)[-1]
old_weight = world.css_value(weight_id, -1)
for count in range(len(old_weight)):
weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
weight_field._element.send_keys(weight)
@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
weight_id = '#course-grading-assignment-gradeweight'
assert_equal(world.css_value(weight_id, -1), weight)
@step(u'I have populated the course')
def populate_course(step):
step.given('I have added a new section')
step.given('I have added a new subsection')
@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
reload_the_page(step)
name_id = '#course-grading-assignment-name'
assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
assignment_css = '#course-grading-assignment-name'
assignments = world.css_find(assignment_css)
types = [ele['value'] for ele in assignments]
assert_in(name, types)
@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
grade.value = range_name
@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
range_css = 'span.letter-grade'
grade = world.css_find(range_css).first
assert_equal(grade.value, range_name)
@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
range_css = 'span.letter-grade'
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
# try to change the grade range -- this should throw an exception
try:
ranges.last.value = 'Failure'
except (InvalidElementStateException):
pass # We should get this exception on failing to edit the element
# check to be sure that nothing has changed
ranges = world.css_find(range_css)
assert_equal(len(ranges), 2)
assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
ele = world.css_find(grace_period_css).first
# Sometimes it takes a moment for the JavaScript
# to populate the field. If we don't wait for
# this to happen, then we can end up with
# an invalid value (e.g. "00:0048:00")
# which prevents us from saving.
assert_true(world.css_has_value(grace_period_css, "00:00"))
# Set the new grace period
ele.value = grace_period
@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
grace_period_css = '#course-grading-graceperiod'
# The default value is 00:00
# so we need to wait for it to change
world.wait_for(
lambda _: world.css_has_value(grace_period_css, grace_period)
)
def get_type_index(name):
name_id = '#course-grading-assignment-name'
all_types = world.css_find(name_id)
for index in range(len(all_types)):
if world.css_value(name_id, index=index) == name:
return index
return -1
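# A hedged note on the helper above: it queries a live browser session via
# world.css_find, so the illustrative calls are skipped.
#
#     >>> # given assignment-name inputs currently valued ['Homework', 'Lab']:
#     >>> get_type_index('Lab')     # doctest: +SKIP
#     1
#     >>> get_type_index('Exam')    # doctest: +SKIP
#     -1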
|
carsongee/edx-platform
|
cms/djangoapps/contentstore/features/grading.py
|
Python
|
agpl-3.0
| 7,202
|
[
"VisIt"
] |
c2fd0311fb2ecf0aa00f49bf15527440d8650922da5ba7784696eaed2a0edc65
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains methods for generation of resonance structures of molecules.
The main function to generate all relevant resonance structures for a given
Molecule object is ``generate_resonance_structures``. It calls the necessary
functions for generating each type of resonance structure.
Currently supported resonance types:
- All species:
- ``generate_adjacent_resonance_structures``: single radical shift with double or triple bond
- ``generate_lone_pair_radical_resonance_structures``: single radical shift with lone pair
- ``generate_N5dd_N5ts_resonance_structures``: shift between nitrogen with two double bonds and single + triple bond
- Aromatic species only:
- ``generate_aromatic_resonance_structures``: fully delocalized structure, where all aromatic rings have benzene bonds
- ``generate_kekule_structure``: generate a single Kekule structure for an aromatic compound (single/double bond form)
- ``generate_opposite_kekule_structure``: for monocyclic aromatic species, rotate the double bond assignment
- ``generate_clar_structures``: generate all structures with the maximum number of pi-sextet assignments
"""
import cython
import logging
import itertools
from .graph import Vertex, Edge, Graph, getVertexConnectivityValue
from .molecule import Atom, Bond, Molecule
from .kekulize import kekulize
import rmgpy.molecule.pathfinder as pathfinder
from rmgpy.exceptions import ILPSolutionError, KekulizationError, AtomTypeError
def populate_resonance_algorithms(features=None):
"""
Generate list of resonance structure algorithms relevant to the current molecule.
Takes a dictionary of features generated by analyze_molecule().
Returns a list of resonance algorithms.
"""
cython.declare(methodList=list)
methodList = []
if features is None:
methodList = [
generate_adjacent_resonance_structures,
generate_lone_pair_radical_resonance_structures,
generate_N5dd_N5ts_resonance_structures,
generate_aromatic_resonance_structures,
generate_kekule_structure,
generate_opposite_kekule_structure,
generate_clar_structures,
]
else:
# If the molecule is aromatic, then radical resonance has already been considered
# If the molecule was falsely identified as aromatic, then isArylRadical will still accurately capture
# cases where the radical is in an orbital that is orthogonal to the pi orbitals.
if features['isRadical'] and not features['isAromatic'] and not features['isArylRadical']:
methodList.append(generate_adjacent_resonance_structures)
if features['hasNitrogen']:
methodList.append(generate_N5dd_N5ts_resonance_structures)
if features['hasLonePairs']:
methodList.append(generate_lone_pair_radical_resonance_structures)
return methodList
def analyze_molecule(mol):
"""
Identify key features of molecule important for resonance structure generation.
Returns a dictionary of features.
"""
cython.declare(features=dict)
features = {'isRadical': mol.isRadical(),
'isCyclic': mol.isCyclic(),
'isAromatic': False,
'isPolycyclicAromatic': False,
'isArylRadical': False,
'hasNitrogen': False,
'hasOxygen': False,
'hasLonePairs': False,
}
if features['isCyclic']:
aromaticRings = mol.getAromaticRings()[0]
if len(aromaticRings) > 0:
features['isAromatic'] = True
if len(aromaticRings) > 1:
features['isPolycyclicAromatic'] = True
if features['isRadical'] and features['isAromatic']:
features['isArylRadical'] = mol.isArylRadical(aromaticRings)
for atom in mol.vertices:
if atom.isNitrogen():
features['hasNitrogen'] = True
if atom.isOxygen():
features['hasOxygen'] = True
if atom.lonePairs > 0:
features['hasLonePairs'] = True
return features
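# As a (hypothetical) illustration, an acyclic allyl radical would yield:
#
#     {'isRadical': True, 'isCyclic': False, 'isAromatic': False,
#      'isPolycyclicAromatic': False, 'isArylRadical': False,
#      'hasNitrogen': False, 'hasOxygen': False, 'hasLonePairs': False}
#
# which populate_resonance_algorithms() above maps to
# [generate_adjacent_resonance_structures] alone.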
def generate_resonance_structures(mol, clarStructures=True, keepIsomorphic=False):
"""
Generate and return all of the resonance structures for the input molecule.
Most of the complexity of this method goes into handling aromatic species, particularly to generate an accurate
set of resonance structures that is consistent regardless of the input structure. The following considerations
are made:
1. False positives from RDKit aromaticity detection can occur if a molecule has exocyclic double bonds
2. False negatives from RDKit aromaticity detection can occur if a radical is delocalized into an aromatic ring
3. sp2 hybridized radicals in the plane of an aromatic ring do not participate in hyperconjugation
4. Non-aromatic resonance structures of PAHs are not important resonance contributors (assumption)
Aromatic species are broken into the following categories for resonance treatment:
- Radical polycyclic aromatic species: Kekule structures are generated in order to generate adjacent resonance
structures. The resulting structures are then used for Clar structure generation. After all three steps, any
non-aromatic structures are removed, under the assumption that they are not important resonance contributors.
- Radical monocyclic aromatic species: Kekule structures are generated along with adjacent resonance structures.
All are kept regardless of aromaticity because the radical is more likely to delocalize into the ring.
- Stable polycyclic aromatic species: Clar structures are generated
- Stable monocyclic aromatic species: Kekule structures are generated
"""
cython.declare(molList=list, newMolList=list, features=dict, methodList=list)
molList = [mol]
# Analyze molecule
features = analyze_molecule(mol)
# Use generate_aromatic_resonance_structures to check for false positives and negatives
if features['isAromatic'] or (features['isCyclic'] and features['isRadical'] and not features['isArylRadical']):
newMolList = generate_aromatic_resonance_structures(mol, features)
if len(newMolList) == 0:
# Encountered false positive, ie. the molecule is not actually aromatic
features['isAromatic'] = False
features['isPolycyclicAromatic'] = False
else:
features['isAromatic'] = True
if len(newMolList[0].getAromaticRings()[0]) > 1:
features['isPolycyclicAromatic'] = True
else:
newMolList = []
# Special handling for aromatic species
if len(newMolList) > 0:
if features['isRadical'] and not features['isArylRadical']:
if features['isPolycyclicAromatic']:
if clarStructures:
_generate_resonance_structures(newMolList, [generate_kekule_structure], keepIsomorphic)
_generate_resonance_structures(newMolList, [generate_adjacent_resonance_structures], keepIsomorphic)
_generate_resonance_structures(newMolList, [generate_clar_structures], keepIsomorphic)
# Remove non-aromatic structures under the assumption that they aren't important resonance contributors
newMolList = [m for m in newMolList if m.isAromatic()]
else:
pass
else:
_generate_resonance_structures(newMolList, [generate_kekule_structure,
generate_opposite_kekule_structure], keepIsomorphic)
_generate_resonance_structures(newMolList, [generate_adjacent_resonance_structures], keepIsomorphic)
elif features['isPolycyclicAromatic']:
if clarStructures:
_generate_resonance_structures(newMolList, [generate_clar_structures], keepIsomorphic)
else:
pass
else:
# The molecule is an aryl radical or stable mono-ring aromatic
# In this case, generate the kekulized form
_generate_resonance_structures(newMolList, [generate_kekule_structure,
generate_opposite_kekule_structure], keepIsomorphic)
# Check for isomorphism against the original molecule
for i, newMol in enumerate(newMolList):
if not keepIsomorphic and mol.isIsomorphic(newMol):
# There will be at most one isomorphic molecule, since the new molecules have
# already been checked against each other, so we can break after removing it
del newMolList[i]
break
elif keepIsomorphic and mol.isIdentical(newMol):
del newMolList[i]
break
# Add the newly generated structures to the original list
    # This is not optimal, but is a temporary measure to ensure compatibility until other issues are fixed
molList.extend(newMolList)
# Generate remaining resonance structures
methodList = populate_resonance_algorithms(features)
_generate_resonance_structures(molList, methodList, keepIsomorphic)
return molList
def _generate_resonance_structures(molList, methodList, keepIsomorphic=False, copy=False):
"""
Iteratively generate all resonance structures for a list of starting molecules using the specified methods.
Args:
molList starting list of molecules
methodList list of resonance structure algorithms
keepIsomorphic if False, removes any structures that give isIsomorphic=True (default)
if True, only remove structures that give isIdentical=True
copy if False, append new resonance structures to input list (default)
if True, make a new list with all of the resonance structures
"""
cython.declare(index=cython.int, molecule=Molecule, newMolList=list, newMol=Molecule, mol=Molecule)
if copy:
# Make a copy of the list so we don't modify the input list
molList = molList[:]
# Iterate over resonance isomers
index = 0
while index < len(molList):
molecule = molList[index]
newMolList = []
for method in methodList:
newMolList.extend(method(molecule))
for newMol in newMolList:
# Append to isomer list if unique
for mol in molList:
if not keepIsomorphic and mol.isIsomorphic(newMol):
break
elif keepIsomorphic and mol.isIdentical(newMol):
break
else:
molList.append(newMol)
# Move to next resonance isomer
index += 1
return molList
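# Usage sketch (hypothetical): starting from one molecule, the function
# extends the list in place, revisiting newly appended structures as well:
#
#     molList = [mol]
#     _generate_resonance_structures(molList,
#                                    [generate_adjacent_resonance_structures])
#     # molList now also contains every unique allylic-shift isomer
#     # reachable from mol, directly or via intermediate structures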
def generate_adjacent_resonance_structures(mol):
"""
Generate all of the resonance structures formed by one allyl radical shift.
Biradicals on a single atom are not supported.
"""
cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule)
cython.declare(atom=Atom, atom1=Atom, atom2=Atom, atom3=Atom, bond12=Bond, bond23=Bond)
cython.declare(v1=Vertex, v2=Vertex)
isomers = []
# Radicals
if mol.isRadical():
# Iterate over radicals in structure
for atom in mol.vertices:
paths = pathfinder.findAllDelocalizationPaths(atom)
for atom1, atom2, atom3, bond12, bond23 in paths:
# Adjust to (potentially) new resonance isomer
atom1.decrementRadical()
atom3.incrementRadical()
bond12.incrementOrder()
bond23.decrementOrder()
# Make a copy of isomer
isomer = mol.copy(deep=True)
# Also copy the connectivity values, since they are the same
# for all resonance forms
for index in range(len(mol.vertices)):
v1 = mol.vertices[index]
v2 = isomer.vertices[index]
v2.connectivity1 = v1.connectivity1
v2.connectivity2 = v1.connectivity2
v2.connectivity3 = v1.connectivity3
v2.sortingLabel = v1.sortingLabel
# Restore current isomer
atom1.incrementRadical()
atom3.decrementRadical()
bond12.decrementOrder()
bond23.incrementOrder()
# Append to isomer list if unique
isomer.updateAtomTypes(logSpecies=False)
isomers.append(isomer)
return isomers
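# The shift this implements, sketched on the allyl radical (the radical moves
# from atom1 to atom3 while the double bond moves the other way):
#
#     *CH2-CH=CH2  <-->  CH2=CH-CH2*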
def generate_lone_pair_radical_resonance_structures(mol):
"""
Generate all of the resonance structures formed by lone electron pair - radical shifts.
"""
cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule)
cython.declare(atom=Atom, atom1=Atom, atom2=Atom)
cython.declare(v1=Vertex, v2=Vertex)
isomers = []
# Radicals
if mol.isRadical():
# Iterate over radicals in structure
for atom in mol.vertices:
paths = pathfinder.findAllDelocalizationPathsLonePairRadical(atom)
for atom1, atom2 in paths:
# Adjust to (potentially) new resonance isomer
atom1.decrementRadical()
atom1.incrementLonePairs()
atom1.updateCharge()
atom2.incrementRadical()
atom2.decrementLonePairs()
atom2.updateCharge()
# Make a copy of isomer
isomer = mol.copy(deep=True)
# Also copy the connectivity values, since they are the same
# for all resonance forms
for index in range(len(mol.vertices)):
v1 = mol.vertices[index]
v2 = isomer.vertices[index]
v2.connectivity1 = v1.connectivity1
v2.connectivity2 = v1.connectivity2
v2.connectivity3 = v1.connectivity3
v2.sortingLabel = v1.sortingLabel
# Restore current isomer
atom1.incrementRadical()
atom1.decrementLonePairs()
atom1.updateCharge()
atom2.decrementRadical()
atom2.incrementLonePairs()
atom2.updateCharge()
# Append to isomer list if unique
isomer.updateAtomTypes(logSpecies=False)
isomers.append(isomer)
return isomers
def generate_N5dd_N5ts_resonance_structures(mol):
"""
Generate all of the resonance structures formed by shifts between N5dd and N5ts.
"""
cython.declare(isomers=list, paths=list, index=cython.int, isomer=Molecule)
cython.declare(atom=Atom, atom1=Atom, atom2=Atom, atom3=Atom)
cython.declare(bond12=Bond, bond13=Bond)
cython.declare(v1=Vertex, v2=Vertex)
isomers = []
# Iterate over nitrogen atoms in structure
for atom in mol.vertices:
paths = pathfinder.findAllDelocalizationPathsN5dd_N5ts(atom)
for atom1, atom2, atom3, bond12, bond13, direction in paths:
# from N5dd to N5ts
if direction == 1:
# Adjust to (potentially) new resonance isomer
bond12.decrementOrder()
bond13.incrementOrder()
atom2.incrementLonePairs()
atom3.decrementLonePairs()
atom1.updateCharge()
atom2.updateCharge()
atom3.updateCharge()
# Make a copy of isomer
isomer = mol.copy(deep=True)
# Also copy the connectivity values, since they are the same
# for all resonance forms
for index in range(len(mol.vertices)):
v1 = mol.vertices[index]
v2 = isomer.vertices[index]
v2.connectivity1 = v1.connectivity1
v2.connectivity2 = v1.connectivity2
v2.connectivity3 = v1.connectivity3
v2.sortingLabel = v1.sortingLabel
# Restore current isomer
bond12.incrementOrder()
bond13.decrementOrder()
atom2.decrementLonePairs()
atom3.incrementLonePairs()
atom1.updateCharge()
atom2.updateCharge()
atom3.updateCharge()
# Append to isomer list if unique
isomer.updateAtomTypes(logSpecies=False)
isomers.append(isomer)
# from N5ts to N5dd
if direction == 2:
# Adjust to (potentially) new resonance isomer
bond12.decrementOrder()
bond13.incrementOrder()
atom2.incrementLonePairs()
atom3.decrementLonePairs()
atom1.updateCharge()
atom2.updateCharge()
atom3.updateCharge()
# Make a copy of isomer
isomer = mol.copy(deep=True)
# Also copy the connectivity values, since they are the same
# for all resonance forms
for index in range(len(mol.vertices)):
v1 = mol.vertices[index]
v2 = isomer.vertices[index]
v2.connectivity1 = v1.connectivity1
v2.connectivity2 = v1.connectivity2
v2.connectivity3 = v1.connectivity3
v2.sortingLabel = v1.sortingLabel
# Restore current isomer
bond12.incrementOrder()
bond13.decrementOrder()
atom2.decrementLonePairs()
atom3.incrementLonePairs()
atom1.updateCharge()
atom2.updateCharge()
atom3.updateCharge()
# Append to isomer list if unique
isomer.updateAtomTypes(logSpecies=False)
isomers.append(isomer)
return isomers
def generate_aromatic_resonance_structures(mol, features=None):
"""
Generate the aromatic form of the molecule. For radicals, generates the form with the most aromatic rings.
Returns result as a list.
In most cases, only one structure will be returned.
In certain cases where multiple forms have the same number of aromatic rings, multiple structures will be returned.
    If there's an error (e.g. in RDKit) it just returns an empty list.
"""
cython.declare(molecule=Molecule, rings=list, aromaticBonds=list, kekuleList=list, maxNum=cython.int, molList=list,
newMolList=list, ring=list, bond=Bond, order=float, originalBonds=list, originalOrder=list,
i=cython.int, counter=cython.int)
if features is None:
features = analyze_molecule(mol)
if not features['isCyclic']:
return []
molecule = mol.copy(deep=True)
# First get all rings in the molecule
rings = molecule.getAllSimpleCyclesOfSize(6)
# Then determine which ones are aromatic
aromaticBonds = molecule.getAromaticRings(rings)[1]
# If the species is a radical, then there is a chance that the radical can be shifted
# to a location that increases the number of perceived aromatic rings.
if features['isRadical'] and not features['isArylRadical']:
if molecule.isAromatic():
kekuleList = generate_kekule_structure(molecule)
else:
kekuleList = [molecule]
_generate_resonance_structures(kekuleList, [generate_adjacent_resonance_structures])
maxNum = 0
molList = []
# Iterate through the adjacent resonance structures and keep the structures with the most aromatic rings
for mol0 in kekuleList:
aromaticBonds = mol0.getAromaticRings()[1]
if len(aromaticBonds) > maxNum:
maxNum = len(aromaticBonds)
molList = [(mol0, aromaticBonds)]
elif len(aromaticBonds) == maxNum:
molList.append((mol0, aromaticBonds))
else:
        # Otherwise, it is not possible to increase the number of aromatic rings by moving electrons,
        # so proceed with the input form of the molecule
molList = [(molecule, aromaticBonds)]
newMolList = []
# Generate the aromatic resonance structure(s)
for mol0, aromaticBonds in molList:
if not aromaticBonds:
continue
# Save original bond orders in case this doesn't work out
originalBonds = []
for ring in aromaticBonds:
originalOrder = []
for bond in ring:
originalOrder.append(bond.order)
originalBonds.append(originalOrder)
# Change bond types to benzene bonds for all aromatic rings
for ring in aromaticBonds:
for bond in ring:
bond.order = 1.5
try:
mol0.updateAtomTypes(logSpecies=False)
except AtomTypeError:
# If this didn't work the first time, then there might be a ring that is not actually aromatic
# Reset our changes
for ring, originalOrder in itertools.izip(aromaticBonds, originalBonds):
for bond, order in itertools.izip(ring, originalOrder):
bond.order = order
# Try to make each ring aromatic, one by one
i = 0
counter = 0
while i < len(aromaticBonds) and counter < 2*len(aromaticBonds):
counter += 1
originalOrder = []
for bond in aromaticBonds[i]:
originalOrder.append(bond.order)
bond.order = 1.5
try:
mol0.updateAtomTypes(logSpecies=False)
except AtomTypeError:
# This ring could not be made aromatic, possibly because it depends on other rings
# Undo changes
for bond, order in itertools.izip(aromaticBonds[i], originalOrder):
bond.order = order
# Move it to the end of the list, and go on to the next ring
aromaticBonds.append(aromaticBonds.pop(i))
mol0.updateAtomTypes(logSpecies=False)
continue
else:
# We're done with this ring, so go on to the next ring
i += 1
# If we didn't end up making any of the rings aromatic, then this molecule is not actually aromatic
if i == 0:
# Move onto next molecule in the list
continue
for mol1 in newMolList:
if mol1.isIsomorphic(mol0):
break
else:
newMolList.append(mol0)
return newMolList
def generate_kekule_structure(mol):
"""
Generate a kekulized (single-double bond) form of the molecule.
The specific arrangement of double bonds is non-deterministic, and depends on RDKit.
Returns a single Kekule structure as an element of a list of length 1.
    If there's an error (e.g. in RDKit) then it just returns an empty list.
"""
cython.declare(atom=Atom, molecule=Molecule)
for atom in mol.atoms:
if atom.atomType.label == 'Cb' or atom.atomType.label == 'Cbf':
break
else:
return []
molecule = mol.copy(deep=True)
try:
kekulize(molecule)
except KekulizationError:
return []
return [molecule]
def generate_opposite_kekule_structure(mol):
"""
Generate the Kekule structure with opposite single/double bond arrangement
for single ring aromatics.
Returns a single Kekule structure as an element of a list of length 1.
"""
# This won't work with the aromatic form of the molecule
if mol.isAromatic():
return []
molecule = mol.copy(deep=True)
aromaticBonds = molecule.getAromaticRings()[1]
# We can only do this for single ring aromatics for now
if len(aromaticBonds) != 1:
return []
numS = 0
numD = 0
for bond in aromaticBonds[0]:
if bond.isSingle():
numS += 1
bond.order = 2
elif bond.isDouble():
numD += 1
bond.order = 1
else:
# Something is wrong: there is a bond that is not single or double
return []
if numS != 3 or numD != 3:
return []
try:
molecule.updateAtomTypes()
except AtomTypeError:
return []
else:
return [molecule]
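# Sketch for benzene (hypothetical): a kekulized ring with alternating bond
# orders 1,2,1,2,1,2 has numS == numD == 3, so flipping every bond yields the
# opposite Kekule form 2,1,2,1,2,1. Any other single/double count means the
# ring is not a plain alternating six-ring, and [] is returned.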
def generate_isomorphic_resonance_structures(mol):
"""
Select the resonance isomer that is isomorphic to the parameter isomer, with the lowest unpaired
electrons descriptor.
    We generate all resonance isomers (non-isomorphic as well as isomorphic) and retain only the
    isomorphic ones.
WIP: do not generate aromatic resonance isomers.
"""
    cython.declare(isomorphic_isomers=list, isomers=list)
    cython.declare(isomer=Molecule, newIsomer=Molecule, isom=Molecule)
    cython.declare(index=int)
    isomorphic_isomers = [mol]  # resonance isomers that are isomorphic to the parameter isomer.
isomers = [mol]
# Iterate over resonance isomers
index = 0
while index < len(isomers):
isomer = isomers[index]
newIsomers = []
for algo in populate_resonance_algorithms():
newIsomers.extend(algo(isomer))
for newIsomer in newIsomers:
# Append to isomer list if unique
for isom in isomers:
if isom.copy(deep=True).isIsomorphic(newIsomer.copy(deep=True)):
isomorphic_isomers.append(newIsomer)
break
else:
isomers.append(newIsomer)
# Move to next resonance isomer
index += 1
return isomorphic_isomers
def generate_clar_structures(mol):
"""
Generate Clar structures for a given molecule.
Returns a list of :class:`Molecule` objects corresponding to the Clar structures.
"""
cython.declare(output=list, molList=list, newmol=Molecule, aromaticRings=list, bonds=list, solution=list,
y=list, x=list, index=cython.int, bond=Bond, ring=list)
if not mol.isCyclic():
return []
try:
output = _clar_optimization(mol)
except ILPSolutionError:
# The optimization algorithm did not work on the first iteration
return []
molList = []
for newmol, aromaticRings, bonds, solution in output:
# The solution includes a part corresponding to rings, y, and a part corresponding to bonds, x, using
        # nomenclature from the paper. In y, 1 means the ring has a sextet, 0 means it does not.
# In x, 1 corresponds to a double bond, 0 either means a single bond or the bond is part of a sextet.
y = solution[0:len(aromaticRings)]
x = solution[len(aromaticRings):]
# Apply results to molecule - double bond locations first
for index, bond in enumerate(bonds):
if x[index] == 0:
bond.order = 1 # single
elif x[index] == 1:
bond.order = 2 # double
else:
raise ValueError('Unaccepted bond value {0} obtained from optimization.'.format(x[index]))
# Then apply locations of aromatic sextets by converting to benzene bonds
for index, ring in enumerate(aromaticRings):
if y[index] == 1:
_clar_transformation(newmol, ring)
try:
newmol.updateAtomTypes()
except AtomTypeError:
pass
else:
molList.append(newmol)
return molList
def _clar_optimization(mol, constraints=None, maxNum=None):
"""
    Implements a linear programming algorithm for finding Clar structures. This algorithm maximizes the number
of Clar sextets within the constraints of molecular geometry and atom valency.
Returns a list of valid Clar solutions in the form of a tuple, with the following entries:
[0] Molecule object
[1] List of aromatic rings
[2] List of bonds
[3] Optimization solution
The optimization solution is a list of boolean values with sextet assignments followed by double bond assignments,
with indices corresponding to the list of aromatic rings and list of bonds, respectively.
Method adapted from:
Hansen, P.; Zheng, M. The Clar Number of a Benzenoid Hydrocarbon and Linear Programming.
J. Math. Chem. 1994, 15 (1), 93–107.
"""
cython.declare(molecule=Molecule, aromaticRings=list, exo=list, l=cython.int, m=cython.int, n=cython.int,
a=list, objective=list, status=cython.int, solution=list, innerSolutions=list)
from lpsolve55 import lpsolve
import signal
# Save the current signal handler
sig = signal.getsignal(signal.SIGINT)
# Make a copy of the molecule so we don't destroy the original
molecule = mol.copy(deep=True)
aromaticRings = molecule.getAromaticRings()[0]
if not aromaticRings:
return []
# Get list of atoms that are in rings
atoms = set()
for ring in aromaticRings:
atoms.update(ring)
atoms = list(atoms)
# Get list of bonds involving the ring atoms, ignoring bonds to hydrogen
bonds = set()
for atom in atoms:
bonds.update([atom.bonds[key] for key in atom.bonds.keys() if key.isNonHydrogen()])
bonds = list(bonds)
# Identify exocyclic bonds, and save their bond orders
exo = []
for bond in bonds:
if bond.atom1 not in atoms or bond.atom2 not in atoms:
if bond.isDouble():
exo.append(1)
else:
exo.append(0)
else:
exo.append(None)
# Dimensions
l = len(aromaticRings)
m = len(atoms)
n = l + len(bonds)
# Connectivity matrix which indicates which rings and bonds each atom is in
# Part of equality constraint Ax=b
a = []
for atom in atoms:
inRing = [1 if atom in ring else 0 for ring in aromaticRings]
inBond = [1 if atom in [bond.atom1, bond.atom2] else 0 for bond in bonds]
a.append(inRing + inBond)
# Objective vector for optimization: sextets have a weight of 1, double bonds have a weight of 0
objective = [1] * l + [0] * len(bonds)
# Solve LP problem using lpsolve
lp = lpsolve('make_lp', m, n) # initialize lp with constraint matrix with m rows and n columns
lpsolve('set_verbose', lp, 2) # reduce messages from lpsolve
lpsolve('set_obj_fn', lp, objective) # set objective function
lpsolve('set_maxim', lp) # set solver to maximize objective
lpsolve('set_mat', lp, a) # set left hand side to constraint matrix
lpsolve('set_rh_vec', lp, [1] * m) # set right hand side to 1 for all constraints
lpsolve('set_constr_type', lp, ['='] * m) # set all constraints as equality constraints
lpsolve('set_binary', lp, [True] * n) # set all variables to be binary
# Constrain values of exocyclic bonds, since we don't want to modify them
for i in range(l, n):
if exo[i - l] is not None:
# NOTE: lpsolve indexes from 1, so the variable we're changing should be i + 1
lpsolve('set_bounds', lp, i + 1, exo[i - l], exo[i - l])
# Add constraints to problem if provided
if constraints is not None:
for constraint in constraints:
try:
lpsolve('add_constraint', lp, constraint[0], '<=', constraint[1])
except:
logging.error('Unable to add constraint: {0} <= {1}'.format(constraint[0], constraint[1]))
logging.error('Cannot complete Clar optimization for {0}.'.format(str(mol)))
logging.error(mol.toAdjacencyList())
raise
status = lpsolve('solve', lp)
objVal, solution = lpsolve('get_solution', lp)[0:2]
lpsolve('delete_lp', lp) # Delete the LP problem to clear up memory
# Reset signal handling since lpsolve changed it
try:
signal.signal(signal.SIGINT, sig)
except ValueError:
# This is not being run in the main thread, so we cannot reset signal
pass
# Check that optimization was successful
if status != 0:
raise ILPSolutionError('Optimization could not find a valid solution.')
    # Check that the result contains at least one aromatic sextet
if objVal == 0:
return []
# Check that the solution contains the maximum number of sextets possible
if maxNum is None:
maxNum = objVal # This is the first solution, so the result should be an upper limit
elif objVal < maxNum:
raise ILPSolutionError('Optimization obtained a sub-optimal solution.')
if any([x != 1 and x != 0 for x in solution]):
raise ILPSolutionError('Optimization obtained a non-integer solution.')
# Generate constraints based on the solution obtained
y = solution[0:l]
new_a = y + [0] * len(bonds)
new_b = sum(y) - 1
if constraints is not None:
constraints.append((new_a, new_b))
else:
constraints = [(new_a, new_b)]
# Run optimization with additional constraints
try:
innerSolutions = _clar_optimization(mol, constraints=constraints, maxNum=maxNum)
except ILPSolutionError:
innerSolutions = []
return innerSolutions + [(molecule, aromaticRings, bonds, solution)]
def _clar_transformation(mol, aromaticRing):
"""
    Performs the Clar transformation for the given ring in a molecule, i.e. conversion to an aromatic sextet.
Args:
mol a :class:`Molecule` object
aromaticRing a list of :class:`Atom` objects corresponding to an aromatic ring in mol
This function directly modifies the input molecule and does not return anything.
"""
cython.declare(bondList=list, i=cython.int, atom1=Atom, atom2=Atom, bond=Bond)
bondList = []
for i, atom1 in enumerate(aromaticRing):
for atom2 in aromaticRing[i + 1:]:
if mol.hasBond(atom1, atom2):
bondList.append(mol.getBond(atom1, atom2))
for bond in bondList:
bond.order = 1.5
|
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/molecule/resonance.py
|
Python
|
mit
| 36,175
|
[
"RDKit"
] |
0cd9f41ff23d6324bc7cb18bb393390293a4a0d5a19724fdb8cf06423cb6860c
|
"""
Functions for explaining classifiers that use tabular data (matrices).
"""
import collections
import copy
from functools import partial
import json
import warnings
import numpy as np
import scipy as sp
import sklearn
import sklearn.preprocessing
from sklearn.utils import check_random_state
from pyDOE2 import lhs
from scipy.stats.distributions import norm
from lime.discretize import QuartileDiscretizer
from lime.discretize import DecileDiscretizer
from lime.discretize import EntropyDiscretizer
from lime.discretize import BaseDiscretizer
from lime.discretize import StatsDiscretizer
from . import explanation
from . import lime_base
class TableDomainMapper(explanation.DomainMapper):
"""Maps feature ids to names, generates table views, etc"""
def __init__(self, feature_names, feature_values, scaled_row,
categorical_features, discretized_feature_names=None,
feature_indexes=None):
"""Init.
Args:
feature_names: list of feature names, in order
feature_values: list of strings with the values of the original row
scaled_row: scaled row
categorical_features: list of categorical features ids (ints)
feature_indexes: optional feature indexes used in the sparse case
"""
self.exp_feature_names = feature_names
self.discretized_feature_names = discretized_feature_names
self.feature_names = feature_names
self.feature_values = feature_values
self.feature_indexes = feature_indexes
self.scaled_row = scaled_row
if sp.sparse.issparse(scaled_row):
self.all_categorical = False
else:
self.all_categorical = len(categorical_features) == len(scaled_row)
self.categorical_features = categorical_features
def map_exp_ids(self, exp):
"""Maps ids to feature names.
Args:
exp: list of tuples [(id, weight), (id,weight)]
Returns:
list of tuples (feature_name, weight)
"""
names = self.exp_feature_names
if self.discretized_feature_names is not None:
names = self.discretized_feature_names
return [(names[x[0]], x[1]) for x in exp]
def visualize_instance_html(self,
exp,
label,
div_name,
exp_object_name,
show_table=True,
show_all=False):
"""Shows the current example in a table format.
Args:
exp: list of tuples [(id, weight), (id,weight)]
label: label id (integer)
div_name: name of div object to be used for rendering(in js)
exp_object_name: name of js explanation object
show_table: if False, don't show table visualization.
show_all: if True, show zero-weighted features in the table.
"""
if not show_table:
return ''
weights = [0] * len(self.feature_names)
for x in exp:
weights[x[0]] = x[1]
if self.feature_indexes is not None:
# Sparse case: only display the non-zero values and importances
fnames = [self.exp_feature_names[i] for i in self.feature_indexes]
fweights = [weights[i] for i in self.feature_indexes]
if show_all:
out_list = list(zip(fnames,
self.feature_values,
fweights))
else:
out_dict = dict(map(lambda x: (x[0], (x[1], x[2], x[3])),
zip(self.feature_indexes,
fnames,
self.feature_values,
fweights)))
out_list = [out_dict.get(x[0], (str(x[0]), 0.0, 0.0)) for x in exp]
else:
out_list = list(zip(self.exp_feature_names,
self.feature_values,
weights))
if not show_all:
out_list = [out_list[x[0]] for x in exp]
ret = u'''
%s.show_raw_tabular(%s, %d, %s);
''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)
return ret
class LimeTabularExplainer(object):
"""Explains predictions on tabular (i.e. matrix) data.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to the
means and stds in the training data. For categorical features, perturb by
sampling according to the training distribution, and making a binary
feature that is 1 when the value is the same as the instance being
explained."""
def __init__(self,
training_data,
mode="classification",
training_labels=None,
feature_names=None,
categorical_features=None,
categorical_names=None,
kernel_width=None,
kernel=None,
verbose=False,
class_names=None,
feature_selection='auto',
discretize_continuous=True,
discretizer='quartile',
sample_around_instance=False,
random_state=None,
training_data_stats=None):
"""Init function.
Args:
training_data: numpy 2d array
mode: "classification" or "regression"
training_labels: labels for training data. Not required, but may be
used by discretizer.
feature_names: list of names (strings) corresponding to the columns
in the training data.
categorical_features: list of indices (ints) corresponding to the
categorical columns. Everything else will be considered
continuous. Values in these columns MUST be integers.
categorical_names: map from int to list of names, where
categorical_names[x][y] represents the name of the yth value of
column x.
kernel_width: kernel width for the exponential kernel.
                If None, defaults to sqrt(number of columns) * 0.75
kernel: similarity kernel that takes euclidean distances and kernel
width as input and outputs weights in (0,1). If None, defaults to
an exponential kernel.
verbose: if true, print local prediction values from linear model
class_names: list of class names, ordered according to whatever the
classifier is using. If not present, class names will be '0',
'1', ...
feature_selection: feature selection method. can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'explain_instance_with_data' in lime_base.py for
details on what each of the options does.
discretize_continuous: if True, all non-categorical features will
be discretized into quartiles.
discretizer: only matters if discretize_continuous is True
and data is not sparse. Options are 'quartile', 'decile',
'entropy' or a BaseDiscretizer instance.
sample_around_instance: if True, will sample continuous features
in perturbed samples from a normal centered at the instance
being explained. Otherwise, the normal is centered on the mean
of the feature data.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
training_data_stats: a dict object having the details of training data
statistics. If None, training data information will be used, only matters
if discretize_continuous is True. Must have the following keys:
means", "mins", "maxs", "stds", "feature_values",
"feature_frequencies"
"""
self.random_state = check_random_state(random_state)
self.mode = mode
self.categorical_names = categorical_names or {}
self.sample_around_instance = sample_around_instance
self.training_data_stats = training_data_stats
        # Check and raise a proper error if stats are supplied in the non-discretized path
if self.training_data_stats:
self.validate_training_data_stats(self.training_data_stats)
if categorical_features is None:
categorical_features = []
if feature_names is None:
feature_names = [str(i) for i in range(training_data.shape[1])]
self.categorical_features = list(categorical_features)
self.feature_names = list(feature_names)
self.discretizer = None
if discretize_continuous and not sp.sparse.issparse(training_data):
# Set the discretizer if training data stats are provided
if self.training_data_stats:
discretizer = StatsDiscretizer(
training_data, self.categorical_features,
self.feature_names, labels=training_labels,
data_stats=self.training_data_stats,
random_state=self.random_state)
if discretizer == 'quartile':
self.discretizer = QuartileDiscretizer(
training_data, self.categorical_features,
self.feature_names, labels=training_labels,
random_state=self.random_state)
elif discretizer == 'decile':
self.discretizer = DecileDiscretizer(
training_data, self.categorical_features,
self.feature_names, labels=training_labels,
random_state=self.random_state)
elif discretizer == 'entropy':
self.discretizer = EntropyDiscretizer(
training_data, self.categorical_features,
self.feature_names, labels=training_labels,
random_state=self.random_state)
elif isinstance(discretizer, BaseDiscretizer):
self.discretizer = discretizer
else:
raise ValueError('''Discretizer must be 'quartile',''' +
''' 'decile', 'entropy' or a''' +
''' BaseDiscretizer instance''')
self.categorical_features = list(range(training_data.shape[1]))
# Get the discretized_training_data when the stats are not provided
            if self.training_data_stats is None:
discretized_training_data = self.discretizer.discretize(
training_data)
if kernel_width is None:
kernel_width = np.sqrt(training_data.shape[1]) * .75
kernel_width = float(kernel_width)
if kernel is None:
def kernel(d, kernel_width):
return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
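        # With this default, weights decay smoothly with distance: d = 0
        # gives a weight of 1.0, and d = kernel_width gives
        # sqrt(exp(-1)) ~= 0.61 (an illustrative check, not executed).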
kernel_fn = partial(kernel, kernel_width=kernel_width)
self.feature_selection = feature_selection
self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)
self.class_names = class_names
        # The scaler is still fit here, though it plays no role if training data stats are provided
self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)
self.scaler.fit(training_data)
self.feature_values = {}
self.feature_frequencies = {}
for feature in self.categorical_features:
if training_data_stats is None:
if self.discretizer is not None:
column = discretized_training_data[:, feature]
else:
column = training_data[:, feature]
feature_count = collections.Counter(column)
values, frequencies = map(list, zip(*(sorted(feature_count.items()))))
else:
values = training_data_stats["feature_values"][feature]
frequencies = training_data_stats["feature_frequencies"][feature]
self.feature_values[feature] = values
self.feature_frequencies[feature] = (np.array(frequencies) /
float(sum(frequencies)))
self.scaler.mean_[feature] = 0
self.scaler.scale_[feature] = 1
@staticmethod
def convert_and_round(values):
return ['%.2f' % v for v in values]
@staticmethod
def validate_training_data_stats(training_data_stats):
"""
Method to validate the structure of training data stats
"""
stat_keys = list(training_data_stats.keys())
valid_stat_keys = ["means", "mins", "maxs", "stds", "feature_values", "feature_frequencies"]
missing_keys = list(set(valid_stat_keys) - set(stat_keys))
if len(missing_keys) > 0:
raise Exception("Missing keys in training_data_stats. Details: %s" % (missing_keys))
def explain_instance(self,
data_row,
predict_fn,
labels=(1,),
top_labels=None,
num_features=10,
num_samples=5000,
distance_metric='euclidean',
model_regressor=None,
sampling_method='gaussian'):
"""Generates explanations for a prediction.
First, we generate neighborhood data by randomly perturbing features
from the instance (see __data_inverse). We then learn locally weighted
linear models on this neighborhood data to explain each of the classes
in an interpretable way (see lime_base.py).
Args:
data_row: 1d numpy array or scipy.sparse matrix, corresponding to a row
predict_fn: prediction function. For classifiers, this should be a
function that takes a numpy array and outputs prediction
probabilities. For regressors, this takes a numpy array and
returns the predictions. For ScikitClassifiers, this is
`classifier.predict_proba()`. For ScikitRegressors, this
is `regressor.predict()`. The prediction function needs to work
on multiple feature vectors (the vectors randomly perturbed
from the data_row).
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
distance_metric: the distance metric to use for weights.
model_regressor: sklearn regressor to use in explanation. Defaults
to Ridge regression in LimeBase. Must have model_regressor.coef_
and 'sample_weight' as a parameter to model_regressor.fit()
sampling_method: Method to sample synthetic data. Defaults to Gaussian
sampling. Can also use Latin Hypercube Sampling.
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations.
"""
if sp.sparse.issparse(data_row) and not sp.sparse.isspmatrix_csr(data_row):
# Preventative code: if sparse, convert to csr format if not in csr format already
data_row = data_row.tocsr()
data, inverse = self.__data_inverse(data_row, num_samples, sampling_method)
if sp.sparse.issparse(data):
# Note in sparse case we don't subtract mean since data would become dense
scaled_data = data.multiply(self.scaler.scale_)
# Multiplying with csr matrix can return a coo sparse matrix
if not sp.sparse.isspmatrix_csr(scaled_data):
scaled_data = scaled_data.tocsr()
else:
scaled_data = (data - self.scaler.mean_) / self.scaler.scale_
distances = sklearn.metrics.pairwise_distances(
scaled_data,
scaled_data[0].reshape(1, -1),
metric=distance_metric
).ravel()
yss = predict_fn(inverse)
# for classification, the model needs to provide a list of tuples - classes
# along with prediction probabilities
if self.mode == "classification":
if len(yss.shape) == 1:
raise NotImplementedError("LIME does not currently support "
"classifier models without probability "
"scores. If this conflicts with your "
"use case, please let us know: "
"https://github.com/datascienceinc/lime/issues/16")
elif len(yss.shape) == 2:
if self.class_names is None:
self.class_names = [str(x) for x in range(yss[0].shape[0])]
else:
self.class_names = list(self.class_names)
if not np.allclose(yss.sum(axis=1), 1.0):
warnings.warn("""
Prediction probabilties do not sum to 1, and
thus does not constitute a probability space.
Check that you classifier outputs probabilities
(Not log probabilities, or actual class predictions).
""")
else:
raise ValueError("Your model outputs "
"arrays with {} dimensions".format(len(yss.shape)))
# for regression, the output should be a one-dimensional array of predictions
else:
try:
if len(yss.shape) != 1 and len(yss[0].shape) == 1:
yss = np.array([v[0] for v in yss])
assert isinstance(yss, np.ndarray) and len(yss.shape) == 1
except AssertionError:
raise ValueError("Your model needs to output single-dimensional \
numpyarrays, not arrays of {} dimensions".format(yss.shape))
predicted_value = yss[0]
min_y = min(yss)
max_y = max(yss)
# add a dimension to be compatible with downstream machinery
yss = yss[:, np.newaxis]
feature_names = copy.deepcopy(self.feature_names)
if feature_names is None:
feature_names = [str(x) for x in range(data_row.shape[0])]
if sp.sparse.issparse(data_row):
values = self.convert_and_round(data_row.data)
feature_indexes = data_row.indices
else:
values = self.convert_and_round(data_row)
feature_indexes = None
for i in self.categorical_features:
if self.discretizer is not None and i in self.discretizer.lambdas:
continue
name = int(data_row[i])
if i in self.categorical_names:
name = self.categorical_names[i][name]
feature_names[i] = '%s=%s' % (feature_names[i], name)
values[i] = 'True'
categorical_features = self.categorical_features
discretized_feature_names = None
if self.discretizer is not None:
categorical_features = range(data.shape[1])
discretized_instance = self.discretizer.discretize(data_row)
discretized_feature_names = copy.deepcopy(feature_names)
for f in self.discretizer.names:
discretized_feature_names[f] = self.discretizer.names[f][int(
discretized_instance[f])]
domain_mapper = TableDomainMapper(feature_names,
values,
scaled_data[0],
categorical_features=categorical_features,
discretized_feature_names=discretized_feature_names,
feature_indexes=feature_indexes)
ret_exp = explanation.Explanation(domain_mapper,
mode=self.mode,
class_names=self.class_names)
if self.mode == "classification":
ret_exp.predict_proba = yss[0]
if top_labels:
labels = np.argsort(yss[0])[-top_labels:]
ret_exp.top_labels = list(labels)
ret_exp.top_labels.reverse()
else:
ret_exp.predicted_value = predicted_value
ret_exp.min_value = min_y
ret_exp.max_value = max_y
labels = [0]
for label in labels:
(ret_exp.intercept[label],
ret_exp.local_exp[label],
ret_exp.score[label],
ret_exp.local_pred[label]) = self.base.explain_instance_with_data(
scaled_data,
yss,
distances,
label,
num_features,
model_regressor=model_regressor,
feature_selection=self.feature_selection)
if self.mode == "regression":
ret_exp.intercept[1] = ret_exp.intercept[0]
ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]
ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]
return ret_exp
def __data_inverse(self,
data_row,
num_samples,
sampling_method):
"""Generates a neighborhood around a prediction.
For numerical features, perturb them by sampling from a Normal(0,1) and
doing the inverse operation of mean-centering and scaling, according to
the means and stds in the training data. For categorical features,
perturb by sampling according to the training distribution, and making
a binary feature that is 1 when the value is the same as the instance
being explained.
Args:
data_row: 1d numpy array, corresponding to a row
num_samples: size of the neighborhood to learn the linear model
sampling_method: 'gaussian' or 'lhs'
Returns:
A tuple (data, inverse), where:
data: dense num_samples * K matrix, where categorical features
are encoded with either 0 (not equal to the corresponding value
in data_row) or 1. The first row is the original instance.
inverse: same as data, except the categorical features are not
binary, but categorical (as the original data)
"""
is_sparse = sp.sparse.issparse(data_row)
if is_sparse:
num_cols = data_row.shape[1]
data = sp.sparse.csr_matrix((num_samples, num_cols), dtype=data_row.dtype)
else:
num_cols = data_row.shape[0]
data = np.zeros((num_samples, num_cols))
categorical_features = range(num_cols)
if self.discretizer is None:
instance_sample = data_row
scale = self.scaler.scale_
mean = self.scaler.mean_
if is_sparse:
# Perturb only the non-zero values
non_zero_indexes = data_row.nonzero()[1]
num_cols = len(non_zero_indexes)
instance_sample = data_row[:, non_zero_indexes]
scale = scale[non_zero_indexes]
mean = mean[non_zero_indexes]
if sampling_method == 'gaussian':
data = self.random_state.normal(0, 1, num_samples * num_cols
).reshape(num_samples, num_cols)
data = np.array(data)
elif sampling_method == 'lhs':
data = lhs(num_cols, samples=num_samples
).reshape(num_samples, num_cols)
means = np.zeros(num_cols)
stdvs = np.array([1]*num_cols)
for i in range(num_cols):
data[:, i] = norm(loc=means[i], scale=stdvs[i]).ppf(data[:, i])
data = np.array(data)
else:
warnings.warn('''Invalid input for sampling_method.
Defaulting to Gaussian sampling.''', UserWarning)
data = self.random_state.normal(0, 1, num_samples * num_cols
).reshape(num_samples, num_cols)
data = np.array(data)
if self.sample_around_instance:
data = data * scale + instance_sample
else:
data = data * scale + mean
if is_sparse:
if num_cols == 0:
data = sp.sparse.csr_matrix((num_samples,
data_row.shape[1]),
dtype=data_row.dtype)
else:
indexes = np.tile(non_zero_indexes, num_samples)
indptr = np.array(
range(0, len(non_zero_indexes) * (num_samples + 1),
len(non_zero_indexes)))
data_1d_shape = data.shape[0] * data.shape[1]
data_1d = data.reshape(data_1d_shape)
data = sp.sparse.csr_matrix(
(data_1d, indexes, indptr),
shape=(num_samples, data_row.shape[1]))
categorical_features = self.categorical_features
first_row = data_row
else:
first_row = self.discretizer.discretize(data_row)
data[0] = data_row.copy()
inverse = data.copy()
for column in categorical_features:
values = self.feature_values[column]
freqs = self.feature_frequencies[column]
inverse_column = self.random_state.choice(values, size=num_samples,
replace=True, p=freqs)
binary_column = (inverse_column == first_row[column]).astype(int)
binary_column[0] = 1
inverse_column[0] = data[0, column]
data[:, column] = binary_column
inverse[:, column] = inverse_column
if self.discretizer is not None:
inverse[1:] = self.discretizer.undiscretize(inverse[1:])
inverse[0] = data_row
return data, inverse
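# Illustrative (hypothetical) return values for a purely categorical row
# with two features and num_samples = 3:
#
#     data    = [[1, 1],    # first row is the instance itself, so all 1s
#                [0, 1],    # 0 = sampled value differs from the instance
#                [1, 0]]
#     inverse = [[5, 2],    # first row keeps the original feature values
#                [3, 2],
#                [5, 7]]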
class RecurrentTabularExplainer(LimeTabularExplainer):
"""
An explainer for keras-style recurrent neural networks, where the
input shape is (n_samples, n_timesteps, n_features). This class
just extends the LimeTabularExplainer class and reshapes the training
data and feature names such that they become something like
(val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)
Each of the methods that take data reshape it appropriately,
so you can pass in the training/testing data exactly as you
would to the recurrent neural network.
"""
def __init__(self, training_data, mode="classification",
training_labels=None, feature_names=None,
categorical_features=None, categorical_names=None,
kernel_width=None, kernel=None, verbose=False, class_names=None,
feature_selection='auto', discretize_continuous=True,
discretizer='quartile', random_state=None):
"""
Args:
training_data: numpy 3d array with shape
(n_samples, n_timesteps, n_features)
mode: "classification" or "regression"
training_labels: labels for training data. Not required, but may be
used by discretizer.
feature_names: list of names (strings) corresponding to the columns
in the training data.
categorical_features: list of indices (ints) corresponding to the
categorical columns. Everything else will be considered
continuous. Values in these columns MUST be integers.
categorical_names: map from int to list of names, where
categorical_names[x][y] represents the name of the yth value of
column x.
kernel_width: kernel width for the exponential kernel.
If None, defaults to sqrt(number of columns) * 0.75
kernel: similarity kernel that takes euclidean distances and kernel
width as input and outputs weights in (0,1). If None, defaults to
an exponential kernel.
verbose: if true, print local prediction values from linear model
class_names: list of class names, ordered according to whatever the
classifier is using. If not present, class names will be '0',
'1', ...
feature_selection: feature selection method. can be
'forward_selection', 'lasso_path', 'none' or 'auto'.
See function 'explain_instance_with_data' in lime_base.py for
details on what each of the options does.
discretize_continuous: if True, all non-categorical features will
be discretized into quartiles.
discretizer: only matters if discretize_continuous is True. Options
are 'quartile', 'decile', 'entropy' or a BaseDiscretizer
instance.
random_state: an integer or numpy.RandomState that will be used to
generate random numbers. If None, the random state will be
initialized using the internal numpy seed.
"""
# Reshape X
n_samples, n_timesteps, n_features = training_data.shape
training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(
n_samples, n_timesteps * n_features)
self.n_timesteps = n_timesteps
self.n_features = n_features
if feature_names is None:
feature_names = ['feature%d' % i for i in range(n_features)]
# Update the feature names
feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))
for n in feature_names for i in range(n_timesteps)]
        # Send off to the super class to do its magic.
super(RecurrentTabularExplainer, self).__init__(
training_data,
mode=mode,
training_labels=training_labels,
feature_names=feature_names,
categorical_features=categorical_features,
categorical_names=categorical_names,
kernel_width=kernel_width,
kernel=kernel,
verbose=verbose,
class_names=class_names,
feature_selection=feature_selection,
discretize_continuous=discretize_continuous,
discretizer=discretizer,
random_state=random_state)
def _make_predict_proba(self, func):
"""
The predict_proba method will expect 3d arrays, but we are reshaping
them to 2D so that LIME works correctly. This wraps the function
you give in explain_instance to first reshape the data to have
        the shape that the keras-style network expects.
"""
def predict_proba(X):
n_samples = X.shape[0]
new_shape = (n_samples, self.n_features, self.n_timesteps)
X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))
return func(X)
return predict_proba
def explain_instance(self, data_row, classifier_fn, labels=(1,),
top_labels=None, num_features=10, num_samples=5000,
distance_metric='euclidean', model_regressor=None):
"""Generates explanations for a prediction.
First, we generate neighborhood data by randomly perturbing features
from the instance (see __data_inverse). We then learn locally weighted
linear models on this neighborhood data to explain each of the classes
in an interpretable way (see lime_base.py).
Args:
data_row: 2d numpy array, corresponding to a row
classifier_fn: classifier prediction probability function, which
takes a numpy array and outputs prediction probabilities. For
                ScikitClassifiers, this is classifier.predict_proba.
labels: iterable with labels to be explained.
top_labels: if not None, ignore labels and produce explanations for
the K labels with highest prediction probabilities, where K is
this parameter.
num_features: maximum number of features present in explanation
num_samples: size of the neighborhood to learn the linear model
distance_metric: the distance metric to use for weights.
model_regressor: sklearn regressor to use in explanation. Defaults
to Ridge regression in LimeBase. Must have
model_regressor.coef_ and 'sample_weight' as a parameter
to model_regressor.fit()
Returns:
An Explanation object (see explanation.py) with the corresponding
explanations.
"""
# Flatten input so that the normal explainer can handle it
data_row = data_row.T.reshape(self.n_timesteps * self.n_features)
# Wrap the classifier to reshape input
classifier_fn = self._make_predict_proba(classifier_fn)
return super(RecurrentTabularExplainer, self).explain_instance(
data_row, classifier_fn,
labels=labels,
top_labels=top_labels,
num_features=num_features,
num_samples=num_samples,
distance_metric=distance_metric,
model_regressor=model_regressor)
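# A brief usage sketch for the recurrent variant (hypothetical names; X_train
# has shape (n_samples, n_timesteps, n_features) and `model` is a keras-style
# network whose predict method returns class probabilities):
#
#     explainer = RecurrentTabularExplainer(X_train, feature_names=names,
#                                           class_names=['neg', 'pos'])
#     exp = explainer.explain_instance(X_train[0], model.predict,
#                                      num_features=5)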
|
marcotcr/lime
|
lime/lime_tabular.py
|
Python
|
bsd-2-clause
| 34,542
|
[
"Gaussian"
] |
60dc96c70fa448df9a38f0bb857b7f9624527474d05f16beefb56d35c605afa8
|
import lb_loader
import simtk.openmm.app as app
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems, integrators
from sys import stdout
platform_name = "CUDA"
platform = mm.Platform.getPlatformByName(platform_name)
properties = {'CudaPrecision': "mixed"}
precision = "mixed"
sysname = "switchedaccurateflexiblewater"
#sysname = "switchedaccuratewater"
system, positions, groups, temperature, timestep, langevin_timestep, testsystem, equil_steps, steps_per_hmc = lb_loader.load(sysname)
positions, boxes, state = lb_loader.equilibrate(testsystem, temperature, langevin_timestep, steps=equil_steps, minimize=True, use_hmc=False, precision=precision, platform_name=platform_name)
n_steps = 100000
temperature = 300. * u.kelvin
timestep = 1.75 * u.femtoseconds
step_groups = [
(0, 0),
(1, 1),
(1, 2),
(1, 3),
(2, 1),
(3, 1),
]
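# For mm.MTSIntegrator, `groups` is a list of (force group, evaluations per
# outer timestep) pairs; larger counts integrate that group on a finer inner
# timestep. The (0, 0) entry above is a sentinel handled below by falling
# back to a plain VerletIntegrator.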
for i, (step0, step1) in enumerate(step_groups):
groups = ((0, step0), (1, step1))
if step0 == 0 and step1 == 0:
integrator = mm.VerletIntegrator(timestep)
else:
integrator = mm.MTSIntegrator(timestep, groups)
print("*" * 80)
print(i, sysname, step0, step1)
csv_filename = "./conservation/%s_%f_%d_%d.csv" % (sysname, timestep / u.femtoseconds, step0, step1)
simulation = app.Simulation(testsystem.topology, testsystem.system, integrator, platform=platform, platformProperties=properties)
simulation.context.setPositions(testsystem.positions)
simulation.context.setVelocitiesToTemperature(temperature)
simulation.step(1000)
simulation.reporters.append(app.StateDataReporter(csv_filename, 5, step=True, time=True, potentialEnergy=True, kineticEnergy=False, totalEnergy=True, elapsedTime=True))
simulation.step(n_steps)
del simulation, integrator
|
kyleabeauchamp/HMCNotes
|
test_mts_conservation2.py
|
Python
|
gpl-2.0
| 1,797
|
[
"OpenMM"
] |
30d21343e9f1be52db218fab5461711bbc2eadad5538f95e550b81a844ad7a34
|
""" Define allowed quantities for FRB galaxies
Uncertainty is valid for any quantity with '_err' add-on, eg. W1_err
Am also likely to add _flg for each as well
"""
##############################################################
# Redshift
valid_z = [
'z', # Preferred redshift, may derived from one of several ways
'z_phot', # Photometric redshift
'z_spec', # Spectroscopic redshift
'z_FRB', # FRB redshift
]
##############################################################
# Error Ellipse
valid_e = [
'a', # Major axis
'b', # Minor axis
'theta', # Rotation of the major axis E from N (deg)
'cl', # Confidence level of the ellipse
]
##############################################################
# Photometry
# Filters
valid_filters = []
# SDSS
SDSS_bands = ['u', 'g', 'r', 'i', 'z']
for band in SDSS_bands:
valid_filters.append('SDSS_{:s}'.format(band))
# DES
DES_bands = ['g', 'r', 'i', 'z', 'Y']
for band in DES_bands:
valid_filters.append('DES_{:s}'.format(band))
# DECaLS
DECaL_bands = ['g', 'r', 'z']
for band in DECaL_bands:
valid_filters.append('DECaL_{:s}'.format(band))
#PanSTARRS
PanSTARRS_bands = ['g','r','i','z','y']
for band in PanSTARRS_bands:
valid_filters.append('Pan-STARRS_{:s}'.format(band))
# VLT
VLT_bands = ['u', 'g', 'I', 'z']
for band in VLT_bands:
valid_filters.append('VLT_FORS2_{:s}'.format(band))
# GMOS
#south
GMOS_bands = ['u', 'g', 'r', 'i', 'z']
for band in GMOS_bands:
valid_filters.append('GMOS_S_{:s}'.format(band))
#north
for band in GMOS_bands:
valid_filters.append('GMOS_N_{:s}'.format(band))
#NOT
NOT_bands = ['u', 'g','r','i','z']
for band in NOT_bands:
valid_filters.append('NOT_{:s}'.format(band))
#NIRI
NIRI_bands = ['J']
for band in NIRI_bands:
valid_filters.append('NIRI_{:s}'.format(band))
#LRIS
LRISb_bands = ['U', 'G', 'V', 'B']
for band in LRISb_bands:
valid_filters.append('LRISb_{:s}'.format(band))
LRISr_bands = ['V', 'R', 'I']
for band in LRISr_bands:
valid_filters.append('LRISr_{:s}'.format(band))
# VISTA (VIRCAM)
VISTA_bands = ['Y','J','H','Ks']
for band in VISTA_bands:
valid_filters.append('VISTA_{:s}'.format(band))
#MMT
MMIRS_bands = ['J','H','K']
for band in MMIRS_bands:
valid_filters.append('MMIRS_{:s}'.format(band))
#2MASS
MASS_bands = ['J','H','K']
for band in MASS_bands:
valid_filters.append('2MASS_{:s}'.format(band))
# HST instruments
# WFC3
WFC3_bands = ['F300X', 'F110W', 'F160W', 'F763M']
for band in WFC3_bands:
valid_filters.append('WFC3_{:s}'.format(band))
# WISE
WISE_bands = ['W1', 'W2', 'W3', 'W4']
for band in WISE_bands:
valid_filters.append('WISE_{:s}'.format(band))
# Spitzer
Spitzer_bands = ['3.6', '4.5']
for band in Spitzer_bands:
valid_filters.append('Spitzer_{:s}'.format(band))
NSC_bands = ['u','g', 'r', 'i', 'z', 'Y', 'VR']
for band in NSC_bands:
valid_filters.append('NSC_{:s}'.format(band))
# For upper limits, the flux is the 3-sigma value and the error is set to -99.0
valid_flux = [entry+'_flux' for entry in valid_filters]
valid_ref = [entry+'_ref' for entry in valid_filters]
valid_photom = valid_filters + ['EBV'] # Galactic
##############################################################
# Line measurements -- Use linetools naming only!!!
valid_neb_lines = [
'Halpha', # Halpha flux erg/s/cm^2; pPXF
'Hbeta', # Hbeta flux erg/s/cm^2; pPXF
'Hgamma', # Hgamma flux erg/s/cm^2; pPXF
    '[NII] 6548',  # [NII] 6548 flux erg/s/cm^2;
'[NII] 6584', # [NII] 6584 flux erg/s/cm^2; pPXF
'[OII] 3726', # [OII] flux erg/s/cm^2; pPXF
'[OII] 3729', # [OII] flux erg/s/cm^2; pPXF
    '[OIII] 4959',  # [OIII] 4959 flux erg/s/cm^2;
    '[OIII] 5007',  # [OIII] 5007 flux erg/s/cm^2; pPXF
'[SII] 6716', # [SII] 6716 flux erg/s/cm^2; pPXF
'[SII] 6731', # [SII] 6731 flux erg/s/cm^2; pPXF
]
valid_neb_ref = [entry+'_ref' for entry in valid_neb_lines]
##############################################################
# Morphology
valid_morphology = [
    'reff_ang',   # Effective radius in arcsec; Galfit
    'reff_kpc',   # Effective radius in kpc; Galfit
    'n',          # Sersic index; Galfit
    'PA',         # Position angle (deg); Galfit
    'b/a',        # Ellipticity; Galfit
    'ra',         # RA centroid inferred from Galfit
    'dec',        # DEC centroid inferred from Galfit
]
##############################################################
# Offsets
valid_offsets = [
'ang_best', # Angular offset in arcsec from localization centroid to galaxy
'ang_avg', # Angular offset in arcsec averaging over localization
'physical', # Physical offset in kpc; Uses ang_best
]
##############################################################
# Positional (Astrometric and Source) Errors
valid_positional_error = [
'ra_astrometric', # error for astrometric tie in RA; arcsec
'dec_astrometric', # error for astrometric tie in Dec; arcsec
'ra_source', # RA error for source position (e.g. from source extractor); arcsec
'dec_source', # Dec error for source position; arcsec
]
##############################################################
# Derived quantities
valid_derived_photom = [
'Mstar', # Stellar mass; linear in Msun CIGALE
'Mstar_spec', # Stellar mass from pPXF; linear in Msun
'f_AGN', # Fraction of AGN contribution to light; CIGALE
'u-r', # Rest-frame; CIGALE
'Lnu_r', # Specific luminosity (J/s/Hz); CIGALE; cosmology dependent
'M_r', # Absolute magnitude, r-band rest-frame; CIGALE+
    'age_mass',    # Mass-weighted age from CIGALE
'SFR_photom', # SFR in Msun/yr from photometry; CIGALE
'SFR_radio', # SFR in Msun/yr from radio photometry
'EBV_photom', # E(B-V) from photometry; CIGALE
'EBV_spec', # E(B-V) from spectral SED; pPXF
'Z_photom', # Metallicity from photometry; CIGALE
'Z_spec', # Metallicity from spectra; pPXF
]
valid_derived_nebular = [
'AV_nebular', # AV from nebular line analysis (e.g. Ha/Hb)
'SFR_nebular', # SFR in Msun/yr from nebular emission (e.g. Halpha); pPXF+
]
valid_derived = valid_derived_photom + valid_derived_nebular
valid_derived_ref = [entry+'_ref' for entry in valid_derived]
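# Illustrative sketch (not part of the original schema): one way to check a
# measurement key against the lists above, honouring the '_err' convention
# described in the module docstring. The helper name is hypothetical.
def _is_valid_quantity(key):
    """Return True if `key`, or its base once a trailing '_err' is stripped,
    appears in one of the allowed-quantity lists."""
    base = key[:-4] if key.endswith('_err') else key
    allowed = (valid_z + valid_e + valid_photom + valid_flux + valid_ref +
               valid_neb_lines + valid_neb_ref + valid_morphology +
               valid_offsets + valid_positional_error + valid_derived +
               valid_derived_ref)
    return base in allowed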
|
FRBs/FRB
|
frb/galaxies/defs.py
|
Python
|
bsd-3-clause
| 6,396
|
[
"Galaxy"
] |
dd9024d48cd57cd8477cd2f38a7a06ac8cac2da98b3e017ed81b852868f5166f
|
# Copyright (C) 2012 Olaf Lenz
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This script generates the file doxyconfigure.h (used by doxygen)
#
import sys, featuredefs, time
if len(sys.argv) != 3:
    print("Usage: %s DEFFILE HFILE" % sys.argv[0], file=sys.stderr)
    exit(2)
deffilename, hfilename = sys.argv[1:3]
print("Reading definitions from " + deffilename + "...")
defs = featuredefs.defs(deffilename)
print("Done.")
print("Writing " + hfilename + "...")
hfile = open(hfilename, 'w')
hfile.write("""/*
WARNING: This file was autogenerated by
%s on %s
Do not modify it or your changes will be overwritten!
Modify features.def instead.
This file is needed so that doxygen will generate documentation for
all functions of all features.
*/
#ifndef _DOXYCONFIG_H
#define _DOXYCONFIG_H
""" % (sys.argv[0], time.asctime()))
for feature in defs.features:
hfile.write('#define ' + feature + '\n')
hfile.write("""
#endif /* of _DOXYCONFIG_H */""")
hfile.close()
print "Done."
|
icimrak/espresso
|
config/gen_doxyconfig.py
|
Python
|
gpl-3.0
| 1,620
|
[
"ESPResSo"
] |
b3ec8725192c422e5953ba075c11ac10e5638ae3ecf06dac532cd0ea9e3aa186
|
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import os
import platform
import re
import ast
from setuptools import find_packages, setup
from setuptools.extension import Extension
from setuptools.command.build_ext import build_ext as _build_ext
# Bootstrap setup.py with numpy
# Huge thanks to coldfix's solution
# http://stackoverflow.com/a/21621689/579416
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
# version parsing from __init__ pulled from Flask's setup.py
# https://github.com/mitsuhiko/flask/blob/master/setup.py
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('skbio/__init__.py', 'rb') as f:
hit = _version_re.search(f.read().decode('utf-8')).group(1)
version = str(ast.literal_eval(hit))
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Software Development :: Libraries
Topic :: Scientific/Engineering
Topic :: Scientific/Engineering :: Bio-Informatics
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Operating System :: Unix
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
description = ('Data structures, algorithms and educational '
'resources for bioinformatics.')
with open('README.rst') as f:
long_description = f.read()
# Dealing with Cython
USE_CYTHON = os.environ.get('USE_CYTHON', False)
ext = '.pyx' if USE_CYTHON else '.c'
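# (illustrative invocation, assuming Cython is installed:
#  `USE_CYTHON=1 python setup.py build_ext --inplace` regenerates the C
#  sources from the .pyx files before compiling)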
# There's a bug in some versions of Python 3.4 that propagates
# -Werror=declaration-after-statement to extensions, instead of just affecting
# the compilation of the interpreter. See http://bugs.python.org/issue21121 for
# details. This acts as a workaround until the next Python 3 release -- thanks
# Wolfgang Maier (wolma) for the workaround!
ssw_extra_compile_args = ['-Wno-error=declaration-after-statement']
# Users with i686 architectures have reported that adding this flag allows
# SSW to be compiled. See https://github.com/biocore/scikit-bio/issues/409 and
# http://stackoverflow.com/q/26211814/3776794 for details.
if platform.machine() == 'i686':
ssw_extra_compile_args.append('-msse2')
extensions = [
Extension("skbio.stats.__subsample",
["skbio/stats/__subsample" + ext]),
Extension("skbio.alignment._ssw_wrapper",
["skbio/alignment/_ssw_wrapper" + ext,
"skbio/alignment/_lib/ssw.c"],
extra_compile_args=ssw_extra_compile_args)
]
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions)
setup(name='scikit-bio',
version=version,
license='BSD',
description=description,
long_description=long_description,
author="scikit-bio development team",
author_email="gregcaporaso@gmail.com",
maintainer="scikit-bio development team",
maintainer_email="gregcaporaso@gmail.com",
url='http://scikit-bio.org',
test_suite='nose.collector',
packages=find_packages(),
ext_modules=extensions,
cmdclass={'build_ext': build_ext},
setup_requires=['numpy >= 1.9.2'],
install_requires=[
'bz2file >= 0.98',
'CacheControl[FileCache] >= 0.11.5',
'contextlib2 >= 0.4.0',
'decorator >= 3.4.2',
'future >= 0.14.3',
'IPython >= 3.2.0',
'matplotlib >= 1.4.3',
'natsort >= 4.0.3',
'numpy >= 1.9.2',
'pandas >= 0.16.2',
'scipy >= 0.15.1',
'six >= 1.9.0'
],
extras_require={'test': ["HTTPretty", "nose", "pep8", "flake8",
"python-dateutil"],
'doc': ["Sphinx == 1.2.2", "sphinx-bootstrap-theme"]},
classifiers=classifiers,
package_data={
'skbio.io.tests': ['data/*'],
'skbio.io.format.tests': ['data/*'],
'skbio.stats.tests': ['data/*'],
'skbio.stats.distance.tests': ['data/*'],
'skbio.stats.ordination.tests': ['data/*']
}
)
|
demis001/scikit-bio
|
setup.py
|
Python
|
bsd-3-clause
| 4,795
|
[
"scikit-bio"
] |
f6d1aee4fc025ae45d2251747eeaf4ed47ed4643720d81865813de7a7d18c87f
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import py2to3, setter
from .tags import TagPatterns
from .namepatterns import SuiteNamePatterns, TestNamePatterns
from .visitor import SuiteVisitor
class EmptySuiteRemover(SuiteVisitor):
def end_suite(self, suite):
suite.suites = [s for s in suite.suites if s.test_count]
def visit_test(self, test):
pass
def visit_keyword(self, kw):
pass
@py2to3
class Filter(EmptySuiteRemover):
def __init__(self, include_suites=None, include_tests=None,
include_tags=None, exclude_tags=None):
self.include_suites = include_suites
self.include_tests = include_tests
self.include_tags = include_tags
self.exclude_tags = exclude_tags
@setter
def include_suites(self, suites):
return SuiteNamePatterns(suites) \
if not isinstance(suites, SuiteNamePatterns) else suites
@setter
def include_tests(self, tests):
return TestNamePatterns(tests) \
if not isinstance(tests, TestNamePatterns) else tests
@setter
def include_tags(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
@setter
def exclude_tags(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
def start_suite(self, suite):
if not self:
return False
if hasattr(suite, 'starttime'):
suite.starttime = suite.endtime = None
if self.include_suites:
return self._filter_by_suite_name(suite)
if self.include_tests:
suite.tests = self._filter(suite, self._included_by_test_name)
if self.include_tags:
suite.tests = self._filter(suite, self._included_by_tags)
if self.exclude_tags:
suite.tests = self._filter(suite, self._not_excluded_by_tags)
return bool(suite.suites)
def _filter_by_suite_name(self, suite):
if self.include_suites.match(suite.name, suite.longname):
suite.visit(Filter(include_suites=[],
include_tests=self.include_tests,
include_tags=self.include_tags,
exclude_tags=self.exclude_tags))
return False
suite.tests = []
return True
def _filter(self, suite, filter):
return [t for t in suite.tests if filter(t)]
def _included_by_test_name(self, test):
return self.include_tests.match(test.name, test.longname)
def _included_by_tags(self, test):
return self.include_tags.match(test.tags)
def _not_excluded_by_tags(self, test):
return not self.exclude_tags.match(test.tags)
def __nonzero__(self):
return bool(self.include_suites or self.include_tests or
self.include_tags or self.exclude_tags)
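# Illustrative usage sketch (assumes the public robot.api entry points; names
# below are not part of this module):
#
#     from robot.api import ExecutionResult
#     result = ExecutionResult('output.xml')
#     result.suite.visit(Filter(include_tags=['smoke'], exclude_tags=['wip']))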
|
moto-timo/robotframework
|
src/robot/model/filter.py
|
Python
|
apache-2.0
| 3,481
|
[
"VisIt"
] |
32815fc7c038f8df832735443c4ac61917892c6b865fab76d0dec096553a7184
|
#!/usr/bin/env python
"""packetradio, module for use with the RFM69HCW packet radio
created Dec 19, 2016 OM
work in progress - Mar 21, 2018
work in progress - Jan 18, 2020"""
"""
Copyright 2017, 2018, 2019, 2020 Owain Martin
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import time, spidev, sys, threading
import RPi.GPIO as IO
class Radio:
def __init__(self, spi_port, spi_cs):
"""__init__, initialize radio object, setting GPIO pins"""
# spi setup
self.spi=spidev.SpiDev()
self.spi.open(spi_port,spi_cs)
self.spi.max_speed_hz = 4000000
# set register list dictionary
self.registerList=[]
# need to add in any registers I didn't deal with.....
self.registerList.append({'name' : 'RegOpMode', 'address' : 0x01, 'value' : 0x04})
self.registerList.append({'name' : 'RegDataModul', 'address' : 0x02, 'value' : 0x00})
self.registerList.append({'name' : 'RegBitRateMsb', 'address' : 0x03, 'value' : 0x1A})
self.registerList.append({'name' : 'RegBitRateLsb', 'address' : 0x04, 'value' : 0x0B})
self.registerList.append({'name' : 'RegFdevMsb', 'address' : 0x05, 'value' : 0x00})
self.registerList.append({'name' : 'RegFdevLsb', 'address' : 0x06, 'value' : 0x62})
self.registerList.append({'name' : 'RegFrfMsb', 'address' : 0x07, 'value' : 0x6E})
self.registerList.append({'name' : 'RegFrfMid', 'address' : 0x08, 'value' : 0x10})
self.registerList.append({'name' : 'RegFrfLsb', 'address' : 0x09, 'value' : 0x00})
self.registerList.append({'name' : 'RegAfcCtrl', 'address' : 0x0B, 'value' : 0x00})
self.registerList.append({'name' : 'RegPaLevel', 'address' : 0x11, 'value' : 0x52})
self.registerList.append({'name' : 'RegPaRamp', 'address' : 0x12, 'value' : 0x09})
self.registerList.append({'name' : 'RegOcp', 'address' : 0x13, 'value' : 0x1A})
self.registerList.append({'name' : 'RegLna', 'address' : 0x18, 'value' : 0x08})
self.registerList.append({'name' : 'RegRxBw', 'address' : 0x19, 'value' : 0x55})
self.registerList.append({'name' : 'RegAfcBw', 'address' : 0x1A, 'value' : 0x55})
self.registerList.append({'name' : 'RegAfcFei', 'address' : 0x1E, 'value' : 0x14})
self.registerList.append({'name' : 'RegDioMapping1', 'address' : 0x25, 'value' : 0x40})
self.registerList.append({'name' : 'RegDioMapping2', 'address' : 0x26, 'value' : 0x07})
self.registerList.append({'name' : 'RegRssiThresh', 'address' : 0x29, 'value' : 0x8C})
self.registerList.append({'name' : 'RegRxTimeout1', 'address' : 0x2A, 'value' : 0x00})
self.registerList.append({'name' : 'RegRxTimeout2', 'address' : 0x2B, 'value' : 0x00})
self.registerList.append({'name' : 'RegPreambleMsb', 'address' : 0x2C, 'value' : 0x00})
self.registerList.append({'name' : 'RegPreambleLsb', 'address' : 0x2D, 'value' : 0x02})
self.registerList.append({'name' : 'RegSyncConfig', 'address' : 0x2E, 'value' : 0x92})
self.registerList.append({'name' : 'RegSyncValue1', 'address' : 0x2F, 'value' : 0xE4})
self.registerList.append({'name' : 'RegSyncValue2', 'address' : 0x30, 'value' : 0x7C})
self.registerList.append({'name' : 'RegSyncValue3', 'address' : 0x31, 'value' : 0xB2})
self.registerList.append({'name' : 'RegSyncValue4', 'address' : 0x32, 'value' : 0x00})
self.registerList.append({'name' : 'RegSyncValue5', 'address' : 0x33, 'value' : 0x00})
self.registerList.append({'name' : 'RegSyncValue6', 'address' : 0x34, 'value' : 0x00})
self.registerList.append({'name' : 'RegSyncValue7', 'address' : 0x35, 'value' : 0x00})
self.registerList.append({'name' : 'RegSyncValue8', 'address' : 0x36, 'value' : 0x00})
self.registerList.append({'name' : 'RegPacketConfig1', 'address' : 0x37, 'value' : 0x14})
self.registerList.append({'name' : 'RegPayloadLength', 'address' : 0x38, 'value' : 0x40})
self.registerList.append({'name' : 'RegFifoThresh', 'address' : 0x3C, 'value' : 0x80})
self.registerList.append({'name' : 'RegPacketConfig2', 'address' : 0x3D, 'value' : 0x03})
self.registerList.append({'name' : 'RegTestPa1', 'address' : 0x5A, 'value' : 0x55})
self.registerList.append({'name' : 'RegTestPa2', 'address' : 0x5C, 'value' : 0x70})
self.registerList.append({'name' : 'RegTestDagc', 'address' : 0x6F, 'value' : 0x30})
self.registerList.append({'name' : 'RegTestAfc', 'address' : 0x71, 'value' : 0x00})
# Node and Broadcast Address registers
self.registerList.append({'name' : 'RegNodeAdrs', 'address' : 0x39, 'value' : 0x05})
self.registerList.append({'name' : 'RegBroadcastAdrs', 'address' : 0x3A, 'value' : 0x07})
# AES Encryption registers
self.registerList.append({'name' : 'RegAESKey1', 'address' : 0x3E, 'value' : 0x51})
self.registerList.append({'name' : 'RegAESKey2', 'address' : 0x3F, 'value' : 0x2C})
self.registerList.append({'name' : 'RegAESKey3', 'address' : 0x40, 'value' : 0xA4})
self.registerList.append({'name' : 'RegAESKey4', 'address' : 0x41, 'value' : 0xA7})
self.registerList.append({'name' : 'RegAESKey5', 'address' : 0x42, 'value' : 0xB4})
self.registerList.append({'name' : 'RegAESKey6', 'address' : 0x43, 'value' : 0xD4})
self.registerList.append({'name' : 'RegAESKey7', 'address' : 0x44, 'value' : 0xA4})
self.registerList.append({'name' : 'RegAESKey8', 'address' : 0x45, 'value' : 0xAE})
self.registerList.append({'name' : 'RegAESKey9', 'address' : 0x46, 'value' : 0xA4})
self.registerList.append({'name' : 'RegAESKey10', 'address' : 0x47, 'value' : 0xF4})
self.registerList.append({'name' : 'RegAESKey11', 'address' : 0x48, 'value' : 0xA3})
self.registerList.append({'name' : 'RegAESKey12', 'address' : 0x49, 'value' : 0xD4})
self.registerList.append({'name' : 'RegAESKey13', 'address' : 0x4A, 'value' : 0xA6})
self.registerList.append({'name' : 'RegAESKey14', 'address' : 0x4B, 'value' : 0x44})
self.registerList.append({'name' : 'RegAESKey15', 'address' : 0x4C, 'value' : 0x72})
self.registerList.append({'name' : 'RegAESKey16', 'address' : 0x4D, 'value' : 0x29})
for reg in self.registerList:
self.single_access_write(reg['address'], reg['value'])
time.sleep(0.01) # time delay required or freq doesn't set correctly
# for testing print register contents
"""for reg in self.registerList:
print(hex(reg['address']),' ',hex(self.single_access_read(reg['address'])))"""
self.temperatureOffset = 0
self.receiveData=[] # list to place received data
self.receiveTimeout = 0 # timeout value for the receiver
self.intPin = 0 # interrupt pin used
self.sendAck = 0 # ack pattern for when returning an acknowledgment
self.receiveAck = 0 # ack pattern for when asking for an acknowledgment back
self.packetFormat = 'fixed' # packet format, fixed, variable or unlimited
return
    #----------------- module internal use functions ---------------------
def single_access_write(self, reg=0x01, regValue=0x0):
"""single_access_write, function to write to a single data register
of the RFM69HCW
Default register to write to is RegOpMode"""
dataTransfer=self.spi.xfer2([(1<<7)+reg,regValue])
return
def single_access_read(self, reg=0x00):
"""single_access_read, function to read a single data register
of the RFM69HCW
Default register to read is fifo"""
dataTransfer=self.spi.xfer2([(0<<7)+reg,0])
return dataTransfer[1]
def read_all_registers(self):
"""read_all_registers, function to read all the registers"""
for register in range(0,80):
print(hex(register),hex(self.single_access_read(register)))
for reg in self.registerList:
print(reg['name'],hex(reg['value']))
return
def variable_length_write(self, reg=0x0, length=1, regValues=[]):
"""variable_length_write, function to write multiple consecutive
bytes of data to the radio on a single spi transaction"""
        # note: the length parameter is currently unused
writeData=[(1<<7)+reg]
writeData.extend(regValues)
dataTransfer=self.spi.xfer2(writeData)
return
def fifo_write(self, fifoData, addressOn=False, address=0, fromAddress=0, ack=False, ackPattern=0, packetLength=64):
"""fifo_write, function to write data to the radio fifo register,
this will be the data that gets transmitted by the radio
fifoData contains a string that will be converted to a bytearray for transmitting
packetLength is the packet length to be transmitted"""
if type(fifoData) == type('a string'):
fifoData=bytearray(fifoData, encoding = "utf-8")
        if ack == True:
            try:
                ackPattern = int(ackPattern)
                fifoData.insert(0,ackPattern) # acknowledge Pattern
            except:
                # prepend the whole ack pattern once, keeping fifoData a
                # bytearray so the address bytes can still be inserted below
                fifoData = bytearray(ackPattern.encode('UTF-8')) + fifoData # acknowledge Pattern
if fromAddress != 0:
fifoData.insert(0,fromAddress) # from address byte
if addressOn==True:
fifoData.insert(0,address) # address byte
if self.packetFormat == 'variable':
lengthByte = len(fifoData)
fifoData.insert(0,lengthByte) # length byte
# check fifoData length, if less than packetLength, add spaces for padding
# until fifoData length is equal to packetLength
if self.packetFormat == 'fixed':
if len(fifoData)<packetLength:
for i in range(len(fifoData),packetLength):
#fifoData.append(' ') # does not work with Python3
fifoData = fifoData + ' '.encode('UTF-8')
# check fifoData length, if greater than packetLength, remove excess data
# until fifoData length is equal to packetLength
if len(fifoData)>packetLength:
fifoData=fifoData[0:packetLength]
# write data to fifo register
self.variable_length_write(0x0,len(fifoData),fifoData) #(0x0,packetLength,fifoData)
return
def fifo_read(self):
"""fifo_read, function to read data from the radio fifo register,
this will be the data that was received by the radio"""
fifoData=bytearray()
# check FifoNotEmpty flag in RegIrqFlags2 0x28
# read data until fifo is empty
while (self.single_access_read(0x28) & 0b01000000)==64:
fifoData.append(self.single_access_read())
return fifoData
def transmit_packet(self):
"""transmit_packet, function to transmit a single packet of data that
has been preloaded into fifo and put the radio into standby mode upon
completion"""
# put radio into transmit mode
self.set_operating_mode('transmit')
#self.single_access_write(0x01,0x0C) # 0b00001100 - transmit mode
# check that the packet has been sent
count=0
while (self.single_access_read(0x28) & 0b00001000)!=8:
count+=1
if count == 100:
break
time.sleep(0.01) # was 0.025
# put radio back into standby mode
self.set_operating_mode('standby')
#self.single_access_write(0x01,0x04) # 0b0000100 - standby mode
return
def send_ack(self,addr):
"""send_ack, function to send back an acknowledgement"""
# put radio into standby mode
self.set_operating_mode('standby')
# get fromAddress (i.e. radio nodeAddress) from register list
for reg in self.registerList:
if reg['name'] == 'RegNodeAdrs':
fromAddr =reg['value']
break
# transmit ACK
self.transmit('ACK ACK ACK ACK ACK',addressOn=True, toAddress=addr,fromAddress=fromAddr,
ack=True, ackPattern=self.sendAck, packetLength=64)
# put radio back into receive mode
self.set_operating_mode('receive')
return
def receive_ack(self,toAddress,intType = 'hw'):
"""receive_ack, function to check for an acknowledgement
back, requires hw interrupt"""
success = False
intPin = self.intPin
def check_ack(data):
"""check_address, function to check the ack is from the correct
address and check the ack is the correct value"""
if self.packetFormat == 'fixed':
if len(data)<3:
return False
sendAckByte = data[2]
toAddressByte = data[1]
else: # packetFormat = 'variable'
if len(data)<4:
return False
sendAckByte = data[3]
toAddressByte = data[2]
if sendAckByte == self.sendAck and toAddressByte == toAddress:
return True
else:
return False
# put radio into receive mode
self.set_operating_mode('receive')
if self.packetFormat == 'fixed':
hwTimeout = 500
swTimeout = 0.5
else:
hwTimeout = 2000
swTimeout = 2
if intType == 'hw': # hardware interrupt section
channel = IO.wait_for_edge(intPin,IO.RISING, timeout=hwTimeout) # have got 200 working as well
if channel is not None:
ack = self.fifo_read()
if check_ack(ack):
success = True
else: # software interrupt section
timerStart = time.time()
while time.time()<=(timerStart+swTimeout) and success == False:
# check for received data via payload ready flag
while (self.single_access_read(0x28) & 0b00000100)==4:
ack = self.fifo_read()
if check_ack(ack):
success = True
break
time.sleep(0.01)
# put radio back into standby mode
self.set_operating_mode('standby')
return success
    #----------- user transmit/receive and auxiliary functions ----------------
def transmit_with_ack(self, txData,toAddress=0, intType = 'hw',retry=0, packetLength=64):
"""transmit_with_ack, function to transmit data and retry if no acknowledgement
is receive in return for each packet sent"""
success = 0 # variable to keep track of the number of packets successfully sent
failed = 0 # variable to keep track of the number of packets that failed to send
addressOn=True
# look up fromAddress
for reg in self.registerList:
if reg['name'] == 'RegNodeAdrs':
fromAddress =reg['value']
break
# send message, wait for acknowledgement and resend 'retry'
# number of times if required
for message in txData:
trys = 0
messageLength = len(message)
while trys <=retry:
self.transmit(message,addressOn, toAddress, fromAddress,ack=True,
ackPattern=self.receiveAck, packetLength=packetLength)
ackSuccess = self.receive_ack(toAddress, intType)
if ackSuccess == True:
success+=1
break
else:
trys+=1
                    if len(message) > messageLength:
                        message = message[(len(message)-messageLength):] # fifo_write mutates bytearray messages in place, so strip any prepended from/to address bytes before retrying
else:
failed+=1
return success, failed
def transmit(self, txData1, addressOn=False, toAddress=0,fromAddress=None, ack=False, ackPattern=0, packetLength=64):
"""transmit, user function to transmit data """
if fromAddress == None:
# look up fromAddress
for reg in self.registerList:
if reg['name'] == 'RegNodeAdrs':
fromAddress =reg['value']
break
self.fifo_write(txData1, addressOn, toAddress, fromAddress, ack, ackPattern, packetLength)
self.transmit_packet()
return
def receive(self,timeout=999, background=False):
"""receive, user function to put the radio into receive
mode"""
# set timeout value, a value of -1 will let the receiver run
        # indefinitely
self.receiveTimeout=timeout
def receive_thread():
"""receive_thread, internal function for putting the radio into receive mode, this
can be threaded to move the receive function into the background"""
# put radio into receive mode
self.set_operating_mode('receive')
#self.single_access_write(0x01,0x10) # 0b00010000 - receive mode
timerStart=time.time()
while time.time()<=(timerStart+self.receiveTimeout) or self.receiveTimeout == -1:
# check for received data via payload ready flag
while (self.single_access_read(0x28) & 0b00000100)==4:
self.receiveData.append(self.fifo_read())
time.sleep(0.1)
# put radio back into standby mode
self.set_operating_mode('standby')
#self.single_access_write(0x01,0x04) # 0b0000100 - standby mode
return self.receiveData
# check whether or not the receive function is to be put into the background
# (threaded) or stay in the foreground (inline)
if background == True:
receiveLoop=threading.Thread(name='receive_thread',target=receive_thread)
receiveLoop.start()
return
else:
receive_thread()
return self.receiveData
def receive_hw_int(self):
"""receive_hw_int, function to put the radio into receive mode,
waiting for a hardware interrupt to proceed. Function will exit on
timer expiring - self.receiveTimeout sets timer"""
timeStart=time.time()
# put radio into receive mode
self.set_operating_mode('receive')
# add interrupt event detect and check for event
# should interrupt on payload ready
intPin = self.intPin
IO.add_event_detect(intPin,IO.RISING)
while (timeStart+self.receiveTimeout)>time.time() or self.receiveTimeout == -1:
if IO.event_detected(intPin):
IO.remove_event_detect(intPin)
data = self.fifo_read()
                if len(data)>3: # ignore packets shorter than 4 bytes
self.receiveData.append(data)
# insert call back function here when I get to it
if self.packetFormat == 'fixed':
if int(data[2]) == self.receiveAck: # send ACK back if ACK request received
self.send_ack(data[1])
else: # packet format = 'variable'
if int(data[3]) == self.receiveAck: # send ACK back if ACK request received
self.send_ack(data[2])
IO.add_event_detect(intPin,IO.RISING)
else:
#time.sleep(0.001)
time.sleep(0.01)
# put radio back into standby mode and remove interrupt event detect
self.set_operating_mode('standby')
IO.remove_event_detect(intPin)
return
def receive_sw_int(self):
"""receive_sw_int, function to put the radio into receive mode,
        waiting for a software interrupt to proceed. Function will exit on
timer expiring - self.receiveTimeout sets timer"""
timeStart=time.time()
# put radio into receive mode
self.set_operating_mode('receive')
# check for software interrupt event
# should interrupt on payload ready
intPin = self.intPin
while (timeStart+self.receiveTimeout)>time.time() or self.receiveTimeout == -1:
# check for received data via payload ready flag
while (self.single_access_read(0x28) & 0b00000100)==4:
data = self.fifo_read()
                if len(data)>3: # ignore packets shorter than 4 bytes
self.receiveData.append(data)
# insert call back function here when I get to it
if self.packetFormat == 'fixed':
if int(data[2]) == self.receiveAck: # send ACK back if ACK request received
self.send_ack(data[1])
else: # packet format = 'variable'
if int(data[3]) == self.receiveAck: # send ACK back if ACK request received
self.send_ack(data[2])
#time.sleep(0.001)
time.sleep(0.01)
self.set_operating_mode('standby')
return
def receive_timeout(self, time=0):
"""receive_timeout, function to set the radio objects receiveTimeout variable"""
self.receiveTimeout = int(time)
return
def last_rssi(self):
"""last_rssi, user function to get the last rssi value
        measured by the radio receiver"""
lastRssi=-self.single_access_read(0x24)/2
return lastRssi
def temperature(self):
"""temperature, user function to get the temperature reading from the
radio's temperature sensor
registers 0x4E and 0x4F relate to the temperature sensor"""
        # radio has to be in either standby or freq synth mode to take a temperature reading
writeData=0b1001 # bit sequence required to start taking temp reading
self.single_access_write(reg=0x4E, regValue=writeData)
while self.single_access_read(reg=0x4E)==5: # 5 = 0b101
pass
tempSensorValue=self.single_access_read(reg=0x4F)
# using tempSensorValue of 150 equaling 20 degrees
# and tempSensor range from -40 to 85 degrees
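        # worked example: with zero offset, a raw sensor reading of 150 gives
        # -40 + (210 - 150) = 20 degrees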
temperature=-40+((210+self.temperatureOffset)-tempSensorValue)
return temperature
#------------ user functions to set the radios parameters and registers --------------
def set_acks(self, receiveAck=0, sendAck=0):
"""set_acks, function to set the radio objects sendAck and receive ack variables"""
self.sendAck = sendAck # ack pattern for when returning an acknowledgment
self.receiveAck = receiveAck # ack pattern for when asking for an acknowledgment back
return
def set_address_filtering(self, mode='none'):
"""set_address_filtering, user function to set the addressing
filtering mode; none (none), node address match (node), either node or broadcast
address match (both)"""
if mode == 'node':
addressBits = 0b01
elif mode == 'both':
addressBits = 0b10
else:
addressBits = 0b00
regPacketConfig = self.single_access_read(0x37)
regPacketConfig = regPacketConfig & 0b11111001
regPacketConfig = regPacketConfig | (addressBits<<1)
self.set_register_by_name('RegPacketConfig1', regPacketConfig)
return
def set_afc(self, afcLowBetaOn='Standard', afcOffset=0, dccFreq=4, rxBW=10.4, autoclearOn='off', autoOn='on'):
"""set_afc, user function to set the parameters for automatic
frequency correction, this will set 4 different registers, RegAfcCtrl,
RegAfcBw and RegAfcFei, RegTestAfc"""
#------ RegAfcCtrl and RegTestAfc Section ----------
if afcLowBetaOn == 'Improved':
self.set_register_by_name('RegAfcCtrl', 0x20)
afcOffsetReg = afcOffset/488
self.set_register_by_name('RegTestAfc', afcOffsetReg)
else: # afcLowBetaOn = Standard
self.set_register_by_name('RegAfcCtrl', 0x00)
#-------- RegAfcBw Section ----------
dccFreqOptions = [(16,0b000),(8,0b001),(4,0b010),(2,0b011),
(1,0b100),(0.5,0b101),(0.25,0b110),(0.125,0b111)]
dccBits = 0b010 # default value if a correct match is not found
for dcc in dccFreqOptions:
if dcc[0] == dccFreq:
dccBits = dcc[1]
rxBwOptions = [(2.6,0b10111),(3.1,0b01111),(3.9,0b00111),(5.2,0b10110),
(6.3,0b01110),(7.8,0b00110),(10.4,0b10101),(12.5,0b01101),
(15.6,0b00101),(20.8,0b10100),(25,0b01100),(31.3,0b00100),
(41.7,0b10011),(50.0,0b01011),(62.5,0b00011),(83.3,0b10010),
(100.0,0b01010),(125.0,0b00010),(166.7,0b10001),(200.0,0b01001),
(250.0,0b00001),(333.3,0b10000),(400.0,0b01000),(500.0,0b00000)]
bwBits = 0b10101 # default value if a correct match is not found
for bw in rxBwOptions:
if bw[0] == rxBW:
bwBits = bw[1]
afcBwReg = (dccBits<<5)+bwBits
self.set_register_by_name('RegAfcBw', afcBwReg)
#--------- RegAfcFei Section ------------
if autoclearOn == 'off':
autoclearBits = 0b0
else:
autoclearBits = 0b1
if autoOn == 'on':
autoOnBits = 0b1
else:
autoOnBits = 0b0
afcFeiReg=(autoclearBits<<3) + (autoOnBits<<2)
self.set_register_by_name('RegAfcFei', afcFeiReg)
return
def set_auto_rx_restart(self, autoRestartOn='on', interPacketDelay=0x0):
"""set_auto_rx_restart, user function to enable/disable
the automatic Rx restart (RSSI phase) feature of the radio
and set the InterPacketRxDelay"""
if autoRestartOn == 'off':
autoRestartOnBit = 0b0
else: # autoRestartOn = on
autoRestartOnBit = 0b1
if interPacketDelay < 0 or interPacketDelay > 15:
interPacketDelay = 0
regPacketConfig = self.single_access_read(0x3D)
regPacketConfig = regPacketConfig & 0b00001101
regPacketConfig = regPacketConfig | (interPacketDelay<<4) + (autoRestartOnBit<<1)
self.set_register_by_name('RegPacketConfig2', regPacketConfig)
return
def set_bitrate(self,bitrate):
"""set_bitrate, user function to change the radio
transmission bitrate"""
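        # the register value is FXOSC (32 MHz) divided by the desired bitrate,
        # e.g. 19200 bps -> int(32000000/19200) = 1666 = 0x0682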
bitrate=int(32000000/bitrate)
msb = (bitrate & 0xFF00)>>8
lsb = bitrate & 0xFF
self.variable_length_write(0x03, 2,[msb,lsb])
for reg in self.registerList:
if reg['name'] == 'RegBitRateMsb':
reg['value']= msb
elif reg['name'] == 'RegBitRateLsb':
reg['value']= lsb
else:
pass
return
def set_broadcast_address(self, address=0x07):
"""set_broadcast_address, user function to set the broadcast
address of the current radio"""
address = address & 0xFF # check to ensure address is only 8 bits long
self.set_register_by_name('RegBroadcastAdrs', address)
return
def set_checksum(self, crcOn='on', autoClearFifo='on'):
"""set_checksum, user function to enable a checksum
calculation function."""
if crcOn == 'off':
crcOnBit = 0b0
else:
crcOnBit = 0b1
if autoClearFifo == 'off':
autoClearBit = 0b1
else:
autoClearBit = 0b0
regPacketConfig = self.single_access_read(0x37)
regPacketConfig = regPacketConfig & 0b11100111
regPacketConfig = regPacketConfig | ((crcOnBit<<4)+(autoClearBit<<3))
self.set_register_by_name('RegPacketConfig1', regPacketConfig)
return
def set_data_mode(self, dataMode='packet', modulationType='FSK', modShaping='none', bitSynchOn='on'):
"""set_data_mode, function to set the data mode parameters"""
if dataMode == 'continuous':
if bitSynchOn == 'off':
dataModeBits = 0b11
else: # bitSynchOn = on
dataModeBits = 0b10
else: # dataMode = packet
dataModeBits = 0b00
if modulationType == 'OOK':
modTypeBits = 0b01
if modShaping == 1:
modShapingBits = 0b01 # filtering with fcutoff = BR
elif modShaping == 2:
modShapingBits = 0b10 # filtering with fcutoff = 2*BR
else: # modShaping = none
modShapingBits = 0b00
else: # modulationType = FSK
modTypeBits = 0b00
if modShaping == 1:
                modShapingBits = 0b01 # Gaussian filter, BT = 1
elif modShaping == 0.5:
modShapingBits = 0b10 # Gaussian filter, BT = 0.5
elif modShaping == 0.3:
modShapingBits = 0b11 # Gaussian filter, BT = 0.3
else: # modShaping = none
modShapingBits = 0b00
regDataMode = (dataModeBits<<5) + (modTypeBits<<3) + modShapingBits
self.set_register_by_name('RegDataModul', regDataMode)
return
def set_dc_free_encoding(self, encodingType='none'):
"""set_dc_free_encoding, user function to enable/disable
DC free encoding, 3 choices - none, manchester or whitening"""
if encodingType == 'manchester':
encodingBits = 0b01
elif encodingType == 'whitening':
encodingBits = 0b10
else : # encodingType = none
encodingBits = 0b00
regPacketConfig = self.single_access_read(0x37)
regPacketConfig = regPacketConfig & 0b10011111
regPacketConfig = regPacketConfig | (encodingBits<<5)
self.set_register_by_name('RegPacketConfig1', regPacketConfig)
return
def set_dio(self, dio0=0, dio1=0, dio2=0, dio3=0, dio4=0, dio5=0, clkOut='off'):
"""set_dio, user function to map the DIO (digital I/O) pins of
the radio as well as set the clock output frequency if used"""
dioList = [dio0, dio1, dio2, dio3, dio4, dio5]
# check to ensure dio entered is a valid dio mapping value
# i.e. within 0 (0b00) to 3 (0b11)
for i in range(0,6):
if dioList[i] < 0 or dioList[i] > 3:
dioList[i] = 0
# set clock output bits
if clkOut == 1:
clkOutBits = 0b000 # FXOSC
elif clkOut == 2:
clkOutBits = 0b001 # FXOSC/2
elif clkOut == 4:
clkOutBits = 0b010 # FXOSC/4
elif clkOut == 8:
clkOutBits = 0b011 # FXOSC/8
elif clkOut == 16:
clkOutBits = 0b100 # FXOSC/16
elif clkOut == 32:
clkOutBits = 0b101 # FXOSC/32
elif clkOut == 'auto':
            clkOutBits = 0b110 # RC (automatically enabled)
else: # clkOut = off
clkOutBits = 0b111 # Off
regDio1 = (dioList[0]<<6) + (dioList[1]<<4) + (dioList[2]<<2) + dioList[3]
regDio2 = (dioList[4]<<6) + (dioList[5]<<4) + clkOutBits
self.set_register_by_name('RegDioMapping1', regDio1)
self.set_register_by_name('RegDioMapping2', regDio2)
return
def set_encryption(self, aesOn='on', aesKeyList=[]):
"""set_encryption, user function to enable/disable AES encryption
        and set the 16 AES encryption key registers. If aesKeyList is left empty
the AES key registers will be left as is"""
#---- RegPacketConfig2 section (AES enable/disable -------
if aesOn == 'off':
aesOnBit = 0b0
else: # aesOn = on
aesOnBit = 0b1
regPacketConfig = self.single_access_read(0x3D)
regPacketConfig = regPacketConfig & 0b11111110
regPacketConfig = regPacketConfig | aesOnBit
self.set_register_by_name('RegPacketConfig2', regPacketConfig)
#-----RegAesKey1 through 16 section -------------
if len(aesKeyList)>0: # if aesKeyList is empty, leave values as is
# Check length of aesKeyList and add or delete items as
# required to make the length 16
while len(aesKeyList) !=16:
if len(aesKeyList) < 16:
aesKeyList.append(0xB3)
else:
aesKeyList.pop()
            self.variable_length_write(0x3E, 16, aesKeyList) # 16-byte AES key
regAddrs=0x3E
for i in range(0,16):
for reg in self.registerList:
if reg['address'] == regAddrs:
reg['value']= aesKeyList[i]
regAddrs+=1
return
def set_fifo_threshold(self, txStartCond='FifoLevel', threshold=20):
"""set_fifo_threshold, user function to define the condition
to start packet transmission and set the Fifo threshold used to
trigger the FifoLevel interrupt"""
if txStartCond == 'FifoLevel':
txStartBit = 0b0
else: #TxStartCond = FifoNotEmpty
txStartBit = 0b1
if abs(threshold) > 66: # ensure threshold isn't bigger than the Fifo (66 bytes)
threshold = 66
regFifoThresh = (txStartBit<<7)+threshold
self.set_register_by_name('RegFifoThresh', regFifoThresh)
return
def set_frequency(self,frequency):
"""set_frequency, user function to change the radio
frequency"""
frequency=int(frequency/61.03515625)
msb = (frequency & 0xFF0000)>>16
mid = (frequency & 0xFF00)>>8
lsb = frequency & 0xFF
self.variable_length_write(0x07, 3,[msb,mid,lsb])
for reg in self.registerList:
if reg['name'] == 'RegFrfMsb':
reg['value']= msb
elif reg['name'] == 'RegFrfMid':
reg['value']= mid
elif reg['name'] == 'RegFrfLsb':
reg['value']= lsb
else:
pass
return
def set_frequency_deviation(self,fDev):
"""set_frequency_deviation, user function to change the radio
transmission frequency deviation"""
fDev=int(fDev/61.03515625)
msb = (fDev & 0xFF00)>>8
lsb = fDev & 0xFF
self.variable_length_write(0x05, 2,[msb,lsb])
for reg in self.registerList:
if reg['name'] == 'RegFdevMsb':
reg['value']= msb
elif reg['name'] == 'RegFdevLsb':
reg['value']= lsb
else:
pass
return
def set_interrupt_pin(self, intPin=0):
"""set_interrupt_pin, function to set the radio objects intPin variable"""
self.intPin = intPin # interrupt pin used
return
def set_lna(self, autoOn='on', gainSelect='G1', inputZ=50):
"""set_lna, user function to set the parameters for the
receiver's LNA (low noise amplifier)"""
if inputZ == 50:
zBit = 0b0
else: # inputZ = 200
zBit = 0b1
gainOptions =[('G1',0b001),('G2',0b010),('G3',0b011),('G4',0b100),
('G5',0b101),('G6',0b110)]
gainBits = 0b001 # default value if a correct match is not found
if autoOn == 'off':
for gain in gainOptions:
if gain[0] == gainSelect:
gainBits = gain[1]
lnaReg = (zBit<<7)+ gainBits
else:
lnaReg = zBit<<7
self.set_register_by_name('RegLna', lnaReg)
return
def set_mode_sequencer(self, sequencer='on'):
"""set_mode_sequencer, user function to enable/disable the
radio's automatic sequencer"""
if sequencer == 'off':
sequencerBit= 0b1
else: # sequencer = 'on'
sequencerBit = 0b0
regOpMode = self.single_access_read(0x01)
regOpMode = regOpMode & 0b01111111
regOpMode = regOpMode | (sequencerBit<<7)
self.set_register_by_name('RegOpMode', regOpMode)
return
def set_node_address(self, address=0x05):
"""set_node_address, user function to set the node address
of the current radio"""
address = address & 0xFF # check to ensure address is only 8 bits long
self.set_register_by_name('RegNodeAdrs', address)
return
def set_OCP(self, ocpOn='on', ocpMax=95):
"""set_OCP, user function to enable/disable OCP (Over Current
Protection) and set the max current (mA)"""
if ocpOn =='off':
ocpOnBit = 0b0
else: #ocpOn = on
ocpOnBit = 0b1
if ocpMax > 120:
ocpMax = 120
elif ocpMax < 45:
ocpMax = 45
ocpTrimBits = int((ocpMax-45)/5)
regOCP = (ocpOnBit<<4) + ocpTrimBits
self.set_register_by_name('RegOcp', regOCP)
return
def set_operating_mode(self, mode='standby'):
"""set_operating_mode, user function to set the radio's current
mode; transmit, receive, sleep, standby and freqSynth."""
if mode == 'transmit':
modeBits = 0b011
elif mode == 'receive':
modeBits = 0b100
elif mode == 'sleep':
modeBits = 0b000
elif mode == 'freqSynth':
modeBits = 0b010
else: #mode = standby
modeBits = 0b001
regOpMode = self.single_access_read(0x01)
regOpMode = regOpMode & 0b11100011
regOpMode = regOpMode | (modeBits<<2)
self.set_register_by_name('RegOpMode', regOpMode)
return
def set_packet_format(self, packetFormat='fixed',payloadLength=64):
"""set_packet_format, user function to set the packet format
including fixed vs variable vs unlimited length and payload length.
This sets part of RegPacketConfig1 and all of RegPayloadLength"""
self.packetFormat = packetFormat
#----- RegPacketConfig1 Section ---------
if packetFormat == 'variable':
packetFormatBit = 0b1
else: # packetFormat = fixed or unlimited
packetFormatBit = 0b0
regPacketConfig = self.single_access_read(0x37)
regPacketConfig = regPacketConfig & 0b01111111
regPacketConfig = regPacketConfig | (packetFormatBit<<7)
self.set_register_by_name('RegPacketConfig1', regPacketConfig)
#----- RegPayloadLength Section -------
payloadLength = abs(payloadLength)
if packetFormat != 'unlimited':
if payloadLength > 255:
payloadLength = 255
else:
payloadLength = 0
self.set_register_by_name('RegPayloadLength', payloadLength)
return
def set_power(self,ampSelect, powerLevel, powerRamp=40):
"""set_power, user function to choose the power amplifier(s),
transmit power used and power ramp time, this will set the following
2 registers, RegPaLevel and RegPaRamp"""
#------ RegPaLevel Section ----------
ampSelectBits=0
powerLevelBits=0
if ampSelect == 'Pa0':
""" haven't been able to get Pa0 working"""
ampSelectBits=0b100
if powerLevel <-18 or powerLevel >13:
powerLevel = -18
powerLevelBits = 18 + int(powerLevel)
elif ampSelect == 'Pa2' or ampSelect =='Pa2H' :
ampSelectBits=0b011
if ampSelect == 'Pa2':
if powerLevel <2 or powerLevel >17:
powerLevel = 2
powerLevelBits = 14 + int(powerLevel)
if ampSelect == 'Pa2H':
if powerLevel <5 or powerLevel >20:
powerLevel = 5
powerLevelBits = 11 + int(powerLevel)
# high power output requires different handling and other registers changes
# such as TestPa1, TestPa2 & TestAfc
else:
ampSelectBits=0b010 # default to Pa1
if powerLevel <-2 or powerLevel >13:
powerLevel = -2
powerLevelBits = 18 + int(powerLevel)
powerRegister=(ampSelectBits<<5)+powerLevelBits
self.set_register_by_name('RegPaLevel',powerRegister)
#------ RegPaRamp Section -----------
paRampOptions = [(3400,0b0000),(2000,0b0001),(1000,0b0010),(500,0b0011),
(250,0b0100),(125,0b0101),(100,0b0110),(62,0b0111),
(50,0b1000),(40,0b1001),(31,0b1010),(25,0b1011),
(20,0b1100),(15,0b1101),(12,0b1110),(10,0b1111)]
rampReg = 0b1001 # default value if a correct match is not found
for ramp in paRampOptions:
if ramp[0] == powerRamp:
rampReg = ramp[1]
self.set_register_by_name('RegPaRamp',rampReg)
return
def set_preamble_length(self, length=2):
"""set_preamble_length, user function to set the length
of the preamble (bytes)"""
if length < 1:
length = 1
msb = (length & 0xFF00)>>8
lsb = length & 0xFF
self.variable_length_write(0x2C, 2,[msb,lsb])
for reg in self.registerList:
if reg['name'] == 'RegPreambleMsb':
reg['value']= msb
elif reg['name'] == 'RegPreambleLsb':
reg['value']= lsb
else:
pass
return
def set_register_by_address(self, registerAddr, value):
"""set_register_by_address, user function to set a single
register by passing the register address and the value for it to be
set to. The radio object registerList is also updated"""
for reg in self.registerList:
if reg['address'] == registerAddr:
self.single_access_write(reg['address'], value)
reg['value']=value
break
return
def set_register_by_name(self, registerName, value):
"""set_register_by_name, user function to set a single
register by passing the register name and the value for it to
be set to. The radio object registerList is also updated"""
for reg in self.registerList:
if reg['name'] == registerName:
self.single_access_write(reg['address'], value)
reg['value']=value
break
return
def set_rssi_threshold(self, rssi):
"""set_rssi_threshold, user funstion to change the receive
signal strength indicator (rssi) threshold (dBm) used by the
receiver"""
rssi = int(abs(rssi)*2)
self.set_register_by_name('RegRssiThresh',rssi)
return
def set_rxbw(self, dccFreq=4, rxBW=10.4):
"""set_rxbw, user function to set the receiver bandwidth
parameters when afc (automatic frequency correction) is not
enabled"""
dccFreqOptions = [(16,0b000),(8,0b001),(4,0b010),(2,0b011),
(1,0b100),(0.5,0b101),(0.25,0b110),(0.125,0b111)]
dccBits = 0b010 # default value if a correct match is not found
for dcc in dccFreqOptions:
if dcc[0] == dccFreq:
dccBits = dcc[1]
rxBwOptions = [(2.6,0b10111),(3.1,0b01111),(3.9,0b00111),(5.2,0b10110),
(6.3,0b01110),(7.8,0b00110),(10.4,0b10101),(12.5,0b01101),
(15.6,0b00101),(20.8,0b10100),(25,0b01100),(31.3,0b00100),
(41.7,0b10011),(50.0,0b01011),(62.5,0b00011),(83.3,0b10010),
(100.0,0b01010),(125.0,0b00010),(166.7,0b10001),(200.0,0b01001),
(250.0,0b00001),(333.3,0b10000),(400.0,0b01000),(500.0,0b00000)]
bwBits = 0b10101 # default value if a correct match is not found
for bw in rxBwOptions:
if bw[0] == rxBW:
bwBits = bw[1]
rxBwReg = (dccBits<<5)+bwBits
self.set_register_by_name('RegRxBw', rxBwReg)
return
def set_sync(self, syncOn='on', syncSize=3, syncTol=2, fifoFill=0):
"""set_sync, user function to set the sync word options.
This will set the RegSyncConfig register """
if syncOn == 'off':
syncOnBit = 0b0
else:
syncOnBit = 0b1
if abs(syncSize) > 8:
syncSize = 8
syncSizeBits = syncSize-1
if abs(syncTol) > 7:
syncTol = 7
syncConfigReg = (syncOnBit<<7) + (fifoFill<<6) + (syncSizeBits<<3) + syncTol
self.set_register_by_name('RegSyncConfig', syncConfigReg)
return
def set_sync_word(self, syncValueList=[0xE2,0x4A,0x26]):
"""set_sync_word, user function to set the sync word.
This will set the 8 RegSyncValue registers"""
# Check length of syncValue list and add or delete items as
# required to make the length 8
while len(syncValueList) !=8:
if len(syncValueList) < 8:
syncValueList.append(0x0)
else:
syncValueList.pop()
self.variable_length_write(0x2F, 8, syncValueList)
regAddrs=0x2F
for i in range(0,len(syncValueList)):
for reg in self.registerList:
if reg['address'] == regAddrs:
reg['value']= syncValueList[i]
regAddrs+=1
return
def set_temperature_offset(self, offset=0):
"""set_temperature_offset, user function to change the
temperature offset value, used to calibrate the sensor reading"""
self.temperatureOffset = offset
return
def set_timeout_rssi_threshold(self, timeoutOn='off', multiplier=0x00):
"""set_time_rssi_threshold, user function enable/disable the
TimeoutRssiThresh interrupt and to set it's level if enabled"""
if timeoutOn == 'on':
if multiplier > 0 and multiplier < 256:
timeoutBits = multiplier
else:
timeoutBits = 0b11111111
else: # timeoutOn = off
timeoutBits = 0b0
#print(bin(timeoutBits))
self.set_register_by_name('RegRxTimeout2', timeoutBits)
return
def set_timeout_rx_start(self, timeoutOn='off', multiplier=0x00):
"""set_time_rx_start, user function enable/disable the
TimeoutRxStart interrupt and to set it's level if enabled"""
if timeoutOn == 'on':
if multiplier > 0 and multiplier < 256:
timeoutBits = multiplier
else:
timeoutBits = 0b11111111
else: # timeoutOn = off
timeoutBits = 0b0
self.set_register_by_name('RegRxTimeout1', timeoutBits)
return
if __name__=='__main__':
import RPi.GPIO as IO
import time, sys
mode='Tx'
enablePin = 26
resetPin = 19
# set up GPIO settings
IO.setwarnings(False)
IO.setmode(IO.BCM)
IO.setup(enablePin, IO.OUT)
IO.output(enablePin, False)
IO.setup(resetPin, IO.OUT)
IO.output(resetPin, False)
# testing area
IO.output(enablePin, True)
radio=Radio(0,1)
#radio.set_frequency(440000000)
#radio.set_bitrate(19196)
#radio.set_frequency_deviation(7500)
radio.set_power('Pa1',-2, 40)
#radio.set_rssi_threshold(-65)
#radio.set_afc('Improved', 7320, .125, 10.4, 'on', 'on')
#radio.set_rxbw(0.5, 62.5)
#radio.set_lna('off', 'G5', 200)
#radio.set_preamble_length(300)
#radio.set_sync('on', 5, 6, 0)
#radio.set_sync_word([0xD4,0x27,0x9A])
#radio.set_checksum('on','on')
#radio.set_address_filtering('node')
#radio.set_node_address(0x09)
#radio.set_broadcast_address(0x0A)
#radio.set_fifo_threshold('FifoNotEmpty', 7)
#radio.set_encryption('off' ,[0x3F,0x72,0x48, 0xB1, 0x3F,0x72,0x48, 0xB1,0x3F,0x72,0x48, 0xB1])
#radio.set_dc_free_encoding('manchester')
#radio.set_packet_format('unlimited', 128)
#radio.set_OCP('on', 95)
#radio.set_data_mode('continuous', 'OOK',2, 'off')
#radio.set_dio(dio5=3)
#radio.set_timeout_rx_start('off', 234)
#radio.set_timeout_rssi_threshold('off', 234)
#radio.set_auto_rx_restart('on', 12)
radio.set_temperature_offset(-3)
print(radio.temperature())
def print_radio_data():
"""print_radio_data"""
while len(radio.receiveData) > 0:
data=radio.receiveData.pop(0)
print(data)
return
if mode =='Tx':
for i in range(0,1):
            radio.transmit('2x Houston we have lift off, repeat we have lift off', addressOn=True, toAddress=5,packetLength=64)
            time.sleep(0.125)
            radio.transmit('set_datamode mmm packet vs ook etc m not as important until I ha', addressOn=True, toAddress=5,packetLength=64)
time.sleep(0.125)
#radio.set_register_by_name('RegNodeAdrs',0x0B)
#radio.read_all_registers()
#radio.set_register_by_address(0x39,0x05)
#radio.set_operating_mode('transmit')
#radio.set_mode_sequencer('on')
radio.read_all_registers()
else: # Rx mode test area
# example with receive function put in the background via threading
radio.receive(5, True)
for i in range(0,15):
print(i)
print_radio_data()
time.sleep(1)
# example with receive function in the foreground
print('Going to 2nd receive mode')
radio.receiveData=[]
radioData=radio.receive(10)
for line in radioData:
print(line)
# example with receive function put in the background via threading
# timeout set to -1 for never off
print('Going to 3rd receive mode')
radio.receiveData=[]
radio.receive(-1, True)
for i in range(0,25):
print(i)
print_radio_data()
time.sleep(1)
radio.receiveTimeout = 0 # stop the receiver by forcing the timer to expire
time.sleep(1) # need to add delay to allow thread to catch up
print('Going to 4th receive mode')
radio.receiveData=[]
radioData=radio.receive(10)
for line in radioData:
print(line)
print(radio.last_rssi())
IO.output(enablePin, False)
IO.cleanup()
radio.spi.close()
|
owainm713/RFM69HCW-Python-Module
|
packetradio.py
|
Python
|
gpl-3.0
| 53,281
|
[
"Gaussian"
] |
961674f20f77dce83f459adb267d7179af4f331828e5ec38d06d4ec6e94e291d
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
import errors, traceback, message
import re, datetime, time
import StringIO
import pytz
from smshandler import SmsHandler
class TextSmsHandler(SmsHandler):
SCTS_FMT = "%y/%m/%d,%H:%M:%S"
CMGL_MATCHER=re.compile(r'^\+CMGL: (\d+),"(.+?)","(.+?)",*?,"(.+?)".*?$')
CMGL_STATUS='"REC UNREAD"'
def __init__(self, modem):
SmsHandler.__init__(self, modem)
def get_mode_cmd(self):
return "AT+CMGF=1"
def send_sms(self, recipient, text, max_messages = 255):
"""Sends an SMS to _recipient_ containing _text_. Some networks
will automatically chunk long messages into multiple parts,
        and reassemble them upon delivery, but some will silently
drop them. At the moment, pyGSM does nothing to avoid this,
so try to keep _text_ under 160 characters.
Currently 'max_messages' is ignored
"""
old_mode = None
try:
try:
# cast the text to a string, to check that
# it doesn't contain non-ascii characters
try:
text = str(text)
# uh-oh. unicode ahoy
except UnicodeEncodeError:
# fetch and store the current mode (so we can
# restore it later), and override it with UCS2
csmp = self.modem.query("AT+CSMP?", "+CSMP:")
if csmp is not None:
old_mode = csmp.split(",")
mode = old_mode[:]
mode[3] = "8"
# enable hex mode, and set the encoding
# to UCS2 for the full character set
self.modem.command('AT+CSCS="HEX"')
self.modem.command("AT+CSMP=%s" % ",".join(mode))
text = text.encode("utf-16").encode("hex")
# initiate the sms, and give the device a second
# to raise an error. unfortunately, we can't just
# wait for the "> " prompt, because some modems
# will echo it FOLLOWED BY a CMS error
result = self.modem.command(
'AT+CMGS=\"%s\"' % (recipient),
read_timeout=1)
# if no error is raised within the timeout period,
# and the text-mode prompt WAS received, send the
# sms text, wait until it is accepted or rejected
# (text-mode messages are terminated with ascii char 26
# "SUBSTITUTE" (ctrl+z)), and return True (message sent)
except errors.GsmReadTimeoutError, err:
if err.pending_data[0] == ">":
self.modem.command(text, write_term=chr(26))
return True
# a timeout was raised, but no prompt nor
# error was received. i have no idea what
# is going on, so allow the error to propagate
else:
raise
# for all other errors...
# (likely CMS or CME from device)
except Exception, err:
traceback.print_exc(err)
# whatever went wrong, break out of the
# message prompt. if this is missed, all
# subsequent writes will go into the message!
self.modem.break_out_of_prompt()
# rule of thumb: pyGSM is meant to be embedded,
# so DO NOT EVER allow exceptions to propagate
# (obviously, this sucks. there should be an
# option, at least, but i'm being cautious)
return None
finally:
# if the mode was overridden above, (if this
# message contained unicode), switch it back
if old_mode is not None:
self.modem.command("AT+CSMP=%s" % ",".join(old_mode))
self.modem.command('AT+CSCS="GSM"')
return True
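# A minimal sketch of the UCS2 fallback above (illustrative; assumes a
# little-endian Python 2 build -- the "hex" codec is Python 2 only):
#   u"caf\xe9".encode("utf-16").encode("hex")
#   -> 'fffe630061006600e900'
# the leading 'fffe' is the UTF-16 byte order mark, which is exactly
# what _incoming_to_msg below looks for when guessing the encoding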
# returns a list of messages
def parse_stored_messages(self, lines):
# loop through all the lines attempting to match CMGL lines (the header)
# and then match NOT CMGL lines (the content)
# need to seed the loop first
messages = []
if len(lines)>0:
m=self.CMGL_MATCHER.match(lines[0])
while len(lines)>0:
if m is None:
# couldn't match OR no text data following match
raise(errors.GsmReadError())
# if here, we have a match AND text
# start by popping the header (which we have stored in the 'm'
# matcher object already)
lines.pop(0)
# now put the captures into independent vars
index, status, sender, timestamp = m.groups()
# now loop through, popping content until we get
# the next CMGL or out of lines
msg_buf=StringIO.StringIO()
while len(lines)>0:
m=self.CMGL_MATCHER.match(lines[0])
if m is not None:
# got another header, get out
break
else:
msg_buf.write(lines.pop(0))
# get msg text
msg_text=msg_buf.getvalue().strip()
# now create message
messages.append(self._incoming_to_msg(timestamp,sender,msg_text))
return messages
# returns a single message
def parse_incoming_message(self, header_line, text):
# since this line IS a CMT string (an incoming
# SMS), parse it and store it to deal with later
m = re.match(r'^\+CMT: "(.+?)",.*?,"(.+?)".*?$', header_line)
sender = ""
timestamp = None
if m is not None:
# extract the meta-info from the CMT line,
# and the message from the FOLLOWING line
sender, timestamp = m.groups()
# multi-part messages begin with ASCII 130 followed
# by "@" (ASCII 64). TODO: more docs on this, i wrote
# this via reverse engineering and lost my notes
if (ord(text[0]) == 130) and (text[1] == "@"):
part_text = text[7:]
# ensure we have a place for the incoming
# message part to live as they are delivered
if sender not in self.multipart:
self.multipart[sender] = []
# append THIS PART
self.multipart[sender].append(part_text)
# abort if this is not the last part
if ord(text[5]) != 173:
return None
# last part, so switch out the received
# part with the whole message, to be processed
# below (the sender and timestamp are the same
# for all parts, so no change needed there)
text = "".join(self.multipart[sender])
del self.multipart[sender]
return self._incoming_to_msg(timestamp, sender, text)
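# Illustrative (hypothetical) CMT header, showing what the regex above
# captures -- sender in group 1, SCTS timestamp in group 2:
#   +CMT: "+14155551212",,"11/07/05,12:00:00+32"
#   -> sender    = "+14155551212"
#   -> timestamp = "11/07/05,12:00:00+32"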
def _incoming_to_msg(self, timestamp, sender, text):
# since neither message notifications nor messages
# fetched from storage give any indication of their
# encoding, we're going to have to guess. if the
# text has a multiple-of-four length and starts
# with a UTF-16 Byte Order Mark, try to decode it
# into a unicode string
try:
if (len(text) % 4 == 0) and (len(text) > 0):
bom = text[:4].lower()
if bom == "fffe"\
or bom == "feff":
# decode the text into a unicode string,
# so developers embedding pyGSM need never
# experience this confusion and pain
text = text.decode("hex").decode("utf-16")
# oh dear. it looked like hex-encoded utf-16,
# but wasn't. who sends a message like that?!
except:
pass
# create and store the IncomingMessage object
time_sent = None
if timestamp is not None:
time_sent = self._parse_incoming_timestamp(timestamp)
return message.IncomingMessage(self, sender, time_sent, text)
def _parse_incoming_timestamp(self, timestamp):
"""Parse a Service Center Time Stamp (SCTS) string into a Python datetime
object, or None if the timestamp couldn't be parsed. The SCTS format does
not seem to be standardized, but looks something like: YY/MM/DD,HH:MM:SS."""
# timestamps usually have trailing timezones, measured
# in 15-minute intervals (?!), which is not handled by
# python's datetime lib. if _this_ timezone does, chop
# it off, and note the actual offset in minutes
tz_pattern = r"([-+])(\d+)$"
m = re.search(tz_pattern, timestamp)
if m is not None:
timestamp = re.sub(tz_pattern, "", timestamp)
tz_offset = datetime.timedelta(minutes=int(m.group(2)) * 15)
if m.group(1)=='-':
tz_offset = -tz_offset
# we won't be modifying the output, but
# still need an empty timedelta to subtract
else:
tz_offset = datetime.timedelta()
# attempt to parse the (maybe modified) timestamp into
# a time_struct, and convert it into a datetime object
try:
time_struct = time.strptime(timestamp, self.SCTS_FMT)
dt = datetime.datetime(*time_struct[:6])
dt = dt.replace(tzinfo=pytz.utc)
# patch the time to represent UTC, since the parsed value
# was local to the sender; subtracting the offset converts it
dt -= tz_offset
return dt
# if the timestamp couldn't be parsed, we've encountered
# a format that pyGSM doesn't support. this sucks, but isn't
# important enough to explode like RubyGSM does
except ValueError:
traceback.print_exc()
return None
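# Worked example of the timestamp handling above (illustrative values):
#   "11/07/05,12:00:00+32" -> trailing offset "+32" = 32 * 15 = 480
#   minutes; strptime yields 2011-07-05 12:00:00, and subtracting the
#   offset gives 2011-07-05 04:00:00 UTC.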
| rapidsms/pygsm | lib/pygsm/textsmshandler.py | Python | bsd-3-clause | 10,014 | ["FEFF"] | d0f9c087042ad86d72e0306a439926f5a2c38ab7990b3ba64787021e206817e9 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various learning rate decay functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.optimizers.schedules.LearningRateSchedule")
class LearningRateSchedule(object):
"""A serializable learning rate decay schedule.
`LearningRateSchedule`s can be passed in as the learning rate of optimizers in
`tf.keras.optimizers`. They can be serialized and deserialized using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
"""
@abc.abstractmethod
def __call__(self, step):
raise NotImplementedError("Learning rate schedule must override __call__")
@abc.abstractmethod
def get_config(self):
raise NotImplementedError("Learning rate schedule must override get_config")
@classmethod
def from_config(cls, config):
"""Instantiates a `LearningRateSchedule` from its config.
Args:
config: Output of `get_config()`.
Returns:
A `LearningRateSchedule` instance.
"""
return cls(**config)
@keras_export("keras.optimizers.schedules.ExponentialDecay")
class ExponentialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an exponential decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies an exponential decay function
to an optimizer step, given a provided initial learning rate.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate * decay_rate ^ (step / decay_steps)
```
If the argument `staircase` is `True`, then `step / decay_steps` is
an integer division and the decayed learning rate follows a
staircase function.
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: When fitting a Keras model, decay every 100000 steps with a base
of 0.96:
```python
initial_learning_rate = 0.1
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate,
decay_steps=100000,
decay_rate=0.96,
staircase=True)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_schedule),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies exponential decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
decay_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The decay rate.
staircase: Boolean. If `True`, decay the learning rate at discrete
intervals.
name: String. Optional name of the operation. Defaults to
'ExponentialDecay'.
"""
super(ExponentialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "ExponentialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
return math_ops.multiply(
initial_learning_rate, math_ops.pow(decay_rate, p), name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
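# Worked example of the decay computed in __call__ above, using the
# docstring's numbers (initial_learning_rate=0.1, decay_steps=100000,
# decay_rate=0.96):
#   step 0      -> 0.1 * 0.96 ** 0.0  = 0.1
#   step 50000  -> 0.1 * 0.96 ** 0.5 ~= 0.09798
#   step 100000 -> 0.1 * 0.96 ** 1.0  = 0.096
# With staircase=True the exponent is floored, so every step below
# 100000 stays at 0.1.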
@keras_export("keras.optimizers.schedules.PiecewiseConstantDecay")
class PiecewiseConstantDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a piecewise constant decay schedule.
The function returns a 1-arg callable to compute the piecewise constant
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
Example: use a learning rate that's 1.0 for the first 100001 steps, 0.5
for the next 10000 steps, and 0.1 for any additional steps.
```python
step = tf.Variable(0, trainable=False)
boundaries = [100000, 110000]
values = [1.0, 0.5, 0.1]
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries, values)
# Later, whenever we perform an optimization step, we pass in the step.
learning_rate = learning_rate_fn(step)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as the boundary tensors.
The output of the 1-arg function that takes the `step`
is `values[0]` when `step <= boundaries[0]`,
`values[1]` when `step > boundaries[0]` and `step <= boundaries[1]`, ...,
and `values[-1]` when `step > boundaries[-1]`.
"""
def __init__(
self,
boundaries,
values,
name=None):
"""Piecewise constant from boundaries and interval values.
Args:
boundaries: A list of `Tensor`s or `int`s or `float`s with strictly
increasing entries, and with all elements having the same type as the
optimizer step.
values: A list of `Tensor`s or `float`s or `int`s that specifies the
values for the intervals defined by `boundaries`. It should have one
more element than `boundaries`, and all elements should have the same
type.
name: A string. Optional name of the operation. Defaults to
'PiecewiseConstant'.
Raises:
ValueError: if the number of elements in the lists do not match.
"""
super(PiecewiseConstantDecay, self).__init__()
if len(boundaries) != len(values) - 1:
raise ValueError(
"The length of boundaries should be 1 less than the length of values")
self.boundaries = boundaries
self.values = values
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PiecewiseConstant"):
boundaries = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.boundaries))
values = nest.map_structure(ops.convert_to_tensor_v2_with_dispatch,
nest.flatten(self.values))
x_recomp = ops.convert_to_tensor_v2_with_dispatch(step)
for i, b in enumerate(boundaries):
if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
# We cast the boundaries to have the same type as the step
b = math_ops.cast(b, x_recomp.dtype.base_dtype)
boundaries[i] = b
pred_fn_pairs = []
pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
# Need to bind v here; can do this with lambda v=v: ...
pred = (x_recomp > low) & (x_recomp <= high)
pred_fn_pairs.append((pred, lambda v=v: v))
# The default isn't needed here because our conditions are mutually
# exclusive and exhaustive, but tf.case requires it.
default = lambda: values[0]
return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)
def get_config(self):
return {
"boundaries": self.boundaries,
"values": self.values,
"name": self.name
}
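# Worked example matching the docstring (boundaries=[100000, 110000],
# values=[1.0, 0.5, 0.1]):
#   step 100000 -> 1.0   (step <= boundaries[0])
#   step 100001 -> 0.5   (boundaries[0] < step <= boundaries[1])
#   step 200000 -> 0.1   (step > boundaries[-1])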
@keras_export("keras.optimizers.schedules.PolynomialDecay")
class PolynomialDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a polynomial decay schedule.
It is commonly observed that a monotonically decreasing learning rate, whose
degree of change is carefully chosen, results in a better performing model.
This schedule applies a polynomial decay function to an optimizer step,
given a provided `initial_learning_rate`, to reach an `end_learning_rate`
in the given `decay_steps`.
It requires a `step` value to compute the decayed learning rate. You
can just pass a TensorFlow variable that you increment at each training
step.
The schedule is a 1-arg callable that produces a decayed learning rate
when passed the current optimizer step. This can be useful for changing the
learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
If `cycle` is `True` then a multiple of `decay_steps` is used: the
first one that is bigger than `step`.
```python
def decayed_learning_rate(step):
decay_steps = decay_steps * ceil(step / decay_steps)
return ((initial_learning_rate - end_learning_rate) *
(1 - step / decay_steps) ^ (power)
) + end_learning_rate
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a model while decaying from 0.1 to 0.01 in 10000 steps using
sqrt (i.e. power=0.5):
```python
...
starter_learning_rate = 0.1
end_learning_rate = 0.01
decay_steps = 10000
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
starter_learning_rate,
decay_steps,
end_learning_rate,
power=0.5)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
The learning rate schedule is also serializable and deserializable using
`tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
end_learning_rate=0.0001,
power=1.0,
cycle=False,
name=None):
"""Applies a polynomial decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Must be positive. See the decay computation above.
end_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The minimal end learning rate.
power: A scalar `float32` or `float64` `Tensor` or a
Python number. The power of the polynomial. Defaults to linear, 1.0.
cycle: A boolean, whether or not it should cycle beyond decay_steps.
name: String. Optional name of the operation. Defaults to
'PolynomialDecay'.
"""
super(PolynomialDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.end_learning_rate = end_learning_rate
self.power = power
self.cycle = cycle
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "PolynomialDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
end_learning_rate = math_ops.cast(self.end_learning_rate, dtype)
power = math_ops.cast(self.power, dtype)
global_step_recomp = math_ops.cast(step, dtype)
decay_steps_recomp = math_ops.cast(self.decay_steps, dtype)
if self.cycle:
# Find the first multiple of decay_steps that is bigger than
# global_step. If global_step is zero set the multiplier to 1
multiplier = control_flow_ops.cond(
math_ops.equal(global_step_recomp, 0), lambda: 1.0,
lambda: math_ops.ceil(global_step_recomp / self.decay_steps))
decay_steps_recomp = math_ops.multiply(decay_steps_recomp, multiplier)
else:
# Make sure that the global_step used is not bigger than decay_steps.
global_step_recomp = math_ops.minimum(global_step_recomp,
decay_steps_recomp)
p = math_ops.divide(global_step_recomp, decay_steps_recomp)
return math_ops.add(
math_ops.multiply(initial_learning_rate - end_learning_rate,
math_ops.pow(1 - p, power)),
end_learning_rate,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"end_learning_rate": self.end_learning_rate,
"power": self.power,
"cycle": self.cycle,
"name": self.name
}
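# Worked example using the docstring's sqrt decay (initial 0.1,
# end 0.01, decay_steps=10000, power=0.5, cycle=False):
#   step 2500  -> (0.1 - 0.01) * (1 - 0.25) ** 0.5 + 0.01 ~= 0.0879
#   step 10000 -> (0.1 - 0.01) * 0.0 + 0.01 = 0.01, and it stays
#                 there, since step is clamped to decay_steps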
@keras_export("keras.optimizers.schedules.InverseTimeDecay")
class InverseTimeDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses an inverse time decay schedule.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies the inverse decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * step / decay_step)
```
or, if `staircase` is `True`, as:
```python
def decayed_learning_rate(step):
return initial_learning_rate / (1 + decay_rate * floor(step / decay_step))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate.
Example: Fit a Keras model while decaying 1/t with a rate of 0.5:
```python
...
initial_learning_rate = 0.1
decay_steps = 1.0
decay_rate = 0.5
learning_rate_fn = keras.optimizers.schedules.InverseTimeDecay(
initial_learning_rate, decay_steps, decay_rate)
model.compile(optimizer=tf.keras.optimizers.SGD(
learning_rate=learning_rate_fn),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(data, labels, epochs=5)
```
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
decay_rate,
staircase=False,
name=None):
"""Applies inverse time decay to the initial learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` `Tensor` or a
Python number. The initial learning rate.
decay_steps: How often to apply decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase, as opposed to
continuous, fashion.
name: String. Optional name of the operation. Defaults to
'InverseTimeDecay'.
"""
super(InverseTimeDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.staircase = staircase
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "InverseTimeDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
decay_rate = math_ops.cast(self.decay_rate, dtype)
global_step_recomp = math_ops.cast(step, dtype)
p = global_step_recomp / decay_steps
if self.staircase:
p = math_ops.floor(p)
const = math_ops.cast(constant_op.constant(1), dtype)
denom = math_ops.add(const, math_ops.multiply(decay_rate, p))
return math_ops.divide(initial_learning_rate, denom, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"decay_rate": self.decay_rate,
"staircase": self.staircase,
"name": self.name
}
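# Worked example using the docstring's numbers (initial 0.1,
# decay_steps=1.0, decay_rate=0.5, staircase=False):
#   step 1 -> 0.1 / (1 + 0.5 * 1) ~= 0.0667
#   step 3 -> 0.1 / (1 + 0.5 * 3)  = 0.04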
@keras_export("keras.optimizers.schedules.CosineDecay",
"keras.experimental.CosineDecay")
class CosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function
to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
cosine_decay = 0.5 * (1 + cos(pi * step / decay_steps))
decayed = (1 - alpha) * cosine_decay + alpha
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(
initial_learning_rate, decay_steps)
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
alpha=0.0,
name=None):
"""Applies cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a
Python number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'CosineDecay'.
"""
super(CosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = 0.5 * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"alpha": self.alpha,
"name": self.name
}
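# Worked example of the cosine curve above (decay_steps=1000, alpha=0.0):
#   step 0    -> 0.5 * (1 + cos(0))    = 1.0 * initial_learning_rate
#   step 500  -> 0.5 * (1 + cos(pi/2)) = 0.5 * initial_learning_rate
#   step 1000 -> 0.5 * (1 + cos(pi))   = 0.0  (the floor, since alpha=0)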
@keras_export("keras.optimizers.schedules.CosineDecayRestarts",
"keras.experimental.CosineDecayRestarts")
class CosineDecayRestarts(LearningRateSchedule):
"""A LearningRateSchedule that uses a cosine decay schedule with restarts.
See [Loshchilov & Hutter, ICLR2016](https://arxiv.org/abs/1608.03983),
SGDR: Stochastic Gradient Descent with Warm Restarts.
When training a model, it is often useful to lower the learning rate as
the training progresses. This schedule applies a cosine decay function with
restarts to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
The learning rate multiplier first decays
from 1 to `alpha` for `first_decay_steps` steps. Then, a warm
restart is performed. Each new warm restart runs for `t_mul` times more
steps and with `m_mul` times smaller initial learning rate.
Example usage:
```python
first_decay_steps = 1000
lr_decayed_fn = (
tf.keras.optimizers.schedules.CosineDecayRestarts(
initial_learning_rate,
first_decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
first_decay_steps,
t_mul=2.0,
m_mul=1.0,
alpha=0.0,
name=None):
"""Applies cosine decay with restarts to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
first_decay_steps: A scalar `int32` or `int64` `Tensor` or a Python
number. Number of steps to decay over.
t_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the number of iterations in the i-th period.
m_mul: A scalar `float32` or `float64` `Tensor` or a Python number.
Used to derive the initial learning rate of the i-th period.
alpha: A scalar `float32` or `float64` Tensor or a Python number.
Minimum learning rate value as a fraction of the initial_learning_rate.
name: String. Optional name of the operation. Defaults to 'SGDRDecay'.
"""
super(CosineDecayRestarts, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.first_decay_steps = first_decay_steps
self._t_mul = t_mul
self._m_mul = m_mul
self.alpha = alpha
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "SGDRDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
first_decay_steps = math_ops.cast(self.first_decay_steps, dtype)
alpha = math_ops.cast(self.alpha, dtype)
t_mul = math_ops.cast(self._t_mul, dtype)
m_mul = math_ops.cast(self._m_mul, dtype)
global_step_recomp = math_ops.cast(step, dtype)
completed_fraction = global_step_recomp / first_decay_steps
def compute_step(completed_fraction, geometric=False):
"""Helper for `cond` operation."""
if geometric:
i_restart = math_ops.floor(
math_ops.log(1.0 - completed_fraction * (1.0 - t_mul)) /
math_ops.log(t_mul))
sum_r = (1.0 - t_mul**i_restart) / (1.0 - t_mul)
completed_fraction = (completed_fraction - sum_r) / t_mul**i_restart
else:
i_restart = math_ops.floor(completed_fraction)
completed_fraction -= i_restart
return i_restart, completed_fraction
i_restart, completed_fraction = control_flow_ops.cond(
math_ops.equal(t_mul, 1.0),
lambda: compute_step(completed_fraction, geometric=False),
lambda: compute_step(completed_fraction, geometric=True))
m_fac = m_mul**i_restart
cosine_decayed = 0.5 * m_fac * (1.0 + math_ops.cos(
constant_op.constant(math.pi) * completed_fraction))
decayed = (1 - alpha) * cosine_decayed + alpha
return math_ops.multiply(initial_learning_rate, decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"first_decay_steps": self.first_decay_steps,
"t_mul": self._t_mul,
"m_mul": self._m_mul,
"alpha": self.alpha,
"name": self.name
}
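# Worked example of the restart geometry (first_decay_steps=1000,
# t_mul=2.0, m_mul=1.0): periods last 1000, 2000, 4000, ... steps, so
# warm restarts occur at steps 1000, 3000, 7000, ...; with m_mul=0.5
# each restart would instead peak at half the previous period's rate.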
# Note: this code is still used by V1 APIs.
class LinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
"""
super(LinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
# Note: this code is still used by V1 APIs.
class NoisyLinearCosineDecay(LearningRateSchedule):
"""A LearningRateSchedule that uses a noisy linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a noisy linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule is a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay + eps_t) * cosine_decay + beta
return initial_learning_rate * decayed
```
where eps_t is 0-centered gaussian noise with variance
initial_variance / (1 + global_step) ** variance_decay
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.NoisyLinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
initial_variance=1.0,
variance_decay=0.55,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies noisy linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
initial_variance: initial variance for the noise. See computation above.
variance_decay: decay for the noise's variance. See computation above.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'NoisyLinearCosineDecay'.
"""
super(NoisyLinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.initial_variance = initial_variance
self.variance_decay = variance_decay
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "NoisyLinearCosineDecay") as name:
initial_learning_rate = ops.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
initial_variance = math_ops.cast(self.initial_variance, dtype)
variance_decay = math_ops.cast(self.variance_decay, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
variance = initial_variance / (
math_ops.pow(1.0 + global_step_recomp, variance_decay))
std = math_ops.sqrt(variance)
noisy_linear_decayed = (
linear_decayed + random_ops.random_normal(
linear_decayed.shape, stddev=std))
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
noisy_linear_cosine_decayed = (
(alpha + noisy_linear_decayed) * cosine_decayed + beta)
return math_ops.multiply(
initial_learning_rate, noisy_linear_cosine_decayed, name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"initial_variance": self.initial_variance,
"variance_decay": self.variance_decay,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
@keras_export("keras.optimizers.schedules.serialize")
def serialize(learning_rate_schedule):
return generic_utils.serialize_keras_object(learning_rate_schedule)
@keras_export("keras.optimizers.schedules.deserialize")
def deserialize(config, custom_objects=None):
return generic_utils.deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name="decay")
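# A minimal round-trip sketch for the two helpers above:
#   config = serialize(ExponentialDecay(0.1, 100000, 0.96))
#   schedule = deserialize(config)
# deserialize() resolves the class name against this module's
# globals(), so custom schedules must be supplied via `custom_objects`.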
| annarev/tensorflow | tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py | Python | apache-2.0 | 38,548 | ["Gaussian"] | 21964ad42acaa512efe64ea47b041c239bfa57d53d1e1905617ba595ab4ea79f |
from django.db import models
from shortuuidfield import ShortUUIDField
from datetime import date
import datetime
from utils import parse_date , generate_weekend_list_of_range , generate_holidays_list , generate_day_list_of_range
from random import randint
from datetime import timedelta
LAST_NAME = [
'SMITH',
'JOHNSON',
'WILLIAMS',
'BROWN',
'JONES',
'MILLER',
'DAVIS',
'GARCIA',
'RODRIGUEZ',
'WILSON',
'MARTINEZ',
'ANDERSON',
'TAYLOR',
'THOMAS',
'HERNANDEZ',
'MOORE',
'MARTIN',
'JACKSON',
'THOMPSON',
'WHITE',
'LOPEZ',
'LEE',
'GONZALEZ',
'HARRIS',
'CLARK',
'LEWIS',
'ROBINSON',
'WALKER',
'PEREZ',
'HALL',
'YOUNG',
'ALLEN',
'SANCHEZ',
'WRIGHT',
'KING',
'SCOTT',
'GREEN',
'BAKER',
'ADAMS',
'NELSON',
'HILL',
'RAMIREZ',
'CAMPBELL',
'MITCHELL',
'ROBERTS',
'CARTER',
'PHILLIPS',
'EVANS',
'TURNER',
'TORRES',
]
DOG_NAMES = [
'Gus',
'Trapper',
'Finn',
'Cooper',
'Bailey',
'Boomer',
'Otto',
'Hawkeye',
'Wrigley',
'Ace',
'Butch',
'Lucky',
'Axel',
'Gunner',
'Diesel',
'Delgado',
'Max',
'Evan',
'Buddy',
'Ricky',
'Bentley',
'Czar',
'Chad',
'Coco',
'AJ',
'Rocky',
'Jake',
'Maximus',
'CJ',
'Moose',
'Dodge',
'Charlie',
'Cody',
'Dexter',
'Bear',
'Jack',
'Angus',
'Spencer',
'Otis',
'Brody',
'Tucker',
'Blue',
'Amos',
'Sam',
'Blitzen',
'Biscuit',
'Fritz',
'Grommit',
'Emmet',
'Shamus',
]
WEEKEND_DAYS = generate_weekend_list_of_range(start_date = '1/1/2016' , end_date = '12/31/2016')
HOLIDAY_DAYS = generate_holidays_list()
NORMAL_DAYS = generate_day_list_of_range(start_date = '1/1/2016' , end_date = '12/31/2016')
class Dog(models.Model):
uuid = ShortUUIDField(max_length=255, db_index=False)
first_name = models.TextField(null=False, blank=False)
last_name = models.TextField(null=True, blank=True, default ="")
full_name = models.TextField(null=False, blank=False, unique=True)
@classmethod
def clear(cls):
cls.objects.all().delete()
@classmethod
def generate_random_dog(cls):
number_of_dogs = randint(20, 100)
for i in range(1,number_of_dogs):
first_name = DOG_NAMES[randint(0, len(DOG_NAMES)-1 )]
last_name = LAST_NAME[randint(0, len(LAST_NAME)-1 )]
try:
dog,created = cls.add(first_name=first_name, last_name=last_name)
except:
pass
@property
def generate_random_visit(self):
visit_number = randint(1, 5)
for v in range(1, visit_number):
option = randint(1, 10)
if option in [1 , 4 , 7 , 9 , 10]:#####WEEKEND
start_date = WEEKEND_DAYS[randint(0, len(WEEKEND_DAYS)-1)]
elif option in [2 , 5, 8 ]:
start_date = HOLIDAY_DAYS[randint(0, len(HOLIDAY_DAYS) - 1)]
elif option in [3 , 6]:
start_date = NORMAL_DAYS[randint(0, len(NORMAL_DAYS) - 1)]
else:
start_date = None
if start_date is not None:
days = randint(3, 10)
delta = timedelta(days=days)
end_date = start_date + delta
try:
self.add_visit(start_date=start_date, end_date=end_date)
except Exception,e:
pass
class Meta:
ordering = ['full_name']
def __str__(self):
return self.full_name or "{} {}".format(self.first_name or "", self.last_name or "").strip().upper()
@property
def url(self):
return ""
def update(self, first_name=None, last_name=None):
if first_name is not None and first_name.strip():
self.first_name = first_name
self.last_name = last_name
self.save()
return self
else:
raise Exception("First Name is a Required Field")
@classmethod
def add(cls, first_name=None, last_name=None, fetching=False):
created = False
instance = None
if first_name is not None and first_name.strip():
full_name = "{} {}".format(first_name , last_name or "").strip().upper()
instance, created = cls.objects.get_or_create(full_name = full_name)
if instance is not None and created:
instance.first_name = first_name
instance.last_name = last_name
instance.save()
elif instance is not None and not created and not fetching:
raise Exception('The Dog already Exists on the System')
elif instance is None and not fetching:
raise Exception('The Dog has not been registered.')
elif not fetching:
raise Exception('The Dog Should Have at least a First Name')
return instance, created
@classmethod
def is_dog_name_registred(cls, first_name=None, last_name=None):
if first_name is not None and first_name.__class__ is str and first_name.strip():
full_name = "{} {}".format(first_name , last_name or "").strip().upper()
return cls.objects.filter(full_name__iexact = full_name).exists()
return False
def is_the_house(self, date_obj=None):
visits = self.boarding_visits
if visits is not None:
if date_obj is None:
date_obj = date.today()
filtered_visits = visits.filter(start_date__lte = date_obj)
if filtered_visits.exists():
filtered_visits = filtered_visits.filter(end_date__gt = date_obj)
return filtered_visits.exists()
return False
def is_the_house_label(self, date_obj=None):
if self.is_the_house(date_obj=date_obj):
return 'Yes'
return 'No'
@property
def visits_detail(self):
visits_detail = []
visits = self.boarding_visits.all()
for visit in visits:
visits_detail.append({
'start_date' : visit.start_date.strftime('%m/%d/%Y'),
'end_date' : visit.end_date.strftime('%m/%d/%Y'),
})
return visits_detail
@property
def visits(self):
return self.boarding_visits.count()
def add_visit(self, start_date=None, end_date=None):
visit = None
created = False
if start_date is None:
raise Exception('Start Date is a Required Field')
elif start_date.__class__ is not date and start_date.__class__ is str:
start_date = parse_date(input_date=start_date)
if start_date is None:
raise Exception('Start Date Field is Wrong Format')
elif start_date.__class__ is not date:
raise Exception('Start Date Field is Wrong Format')
if end_date is None:
raise Exception('End Date is a Required Field')
elif end_date.__class__ is not date and end_date.__class__ is str:
end_date = parse_date(input_date=end_date)
if end_date is None:
raise Exception('End Date Field is Wrong Format')
elif end_date.__class__ is not date:
raise Exception('End Date Field is Wrong Format')
visit, created = BoardingVisit.add(dog=self, start_date=start_date, end_date=end_date)
return visit, created
@classmethod
def add_dog_visit(cls, dog_first_name=None, dog_last_name=None, start_date=None, end_date=None):
dog, dog_created = cls.add(first_name=dog_first_name, last_name=dog_last_name, fetching=True)
visit = None
if dog is not None:
visit, created = dog.add_visit(start_date=start_date, end_date=end_date)
return dog, visit
def save(self, *args, **kwargs):
full_name = "{} {}".format(self.first_name, self.last_name or "").strip().upper()
if self.__class__.objects.all().exclude(uuid = self.uuid).filter(full_name__iexact = full_name).exists():
raise Exception('There is another dog with the same name')
self.full_name = full_name
super(Dog, self).save(*args, **kwargs)
class BoardingVisit(models.Model):
uuid = ShortUUIDField(max_length=255, db_index=False)
dog = models.ForeignKey('Dog', blank=False, null=False, related_name='boarding_visits')
start_date = models.DateField(blank=False, null=False)
end_date = models.DateField(blank=False, null=False)
class Meta:
ordering = ['start_date', 'end_date', 'dog']
def __str__(self):
return "Dog: {} : Start Date: {} to End Date: {}".format(self.dog, self.start_date, self.end_date)
@classmethod
def visits(cls, start_date=None, end_date=None):
visits = cls.objects.all()
if visits.exists():
if start_date is not None:
if start_date.__class__ is str:
start_date = parse_date(input_date=start_date)
if start_date is not None and start_date.__class__ is date:
visits = visits.filter(start_date__gte = start_date)
if visits.exists():
if end_date is not None:
if end_date.__class__ is str:
end_date = parse_date(input_date=end_date)
if end_date is not None and end_date.__class__ is date:
visits = visits.filter(end_date__lte=end_date)
return visits
@classmethod
def dog_has_visit(cls, dog=None):
if dog is not None and dog.__class__ is Dog:
return cls.objects.filter(dog=dog).exists()
return False
@classmethod
def dog_visits(cls, dog=None):
data = cls.objects.none()
if dog is not None and dog.__class__ is Dog:
data = cls.objects.filter(dog=dog)
return data
@classmethod
def dog_has_overlaping_dates(cls, dog=None, start_date=None, end_date=None):
if dog is not None and dog.__class__ is Dog and start_date is not None and start_date.__class__ is date and end_date is not None and end_date.__class__ is date:
if cls.dog_has_visit(dog=dog) and start_date < end_date:
visits = cls.dog_visits(dog=dog)
if visits is not None:
#######HERE COMES THE DATE RANGE FILTERING
#####TWO DATE RANGES OVERLAP EXACTLY WHEN EACH ONE
#####STARTS (STRICTLY) BEFORE THE OTHER ONE ENDS; THIS
#####ALSO CATCHES PARTIAL OVERLAPS THE OLD TWO-FILTER
#####CHECK COULD MISS
if visits.filter(start_date__lt=end_date, end_date__gt=start_date).exists():
return True
return False
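#####WORKED EXAMPLE (ILLUSTRATIVE DATES): AN EXISTING VISIT
#####2016-01-10..2016-01-15 OVERLAPS A REQUEST FOR
#####2016-01-12..2016-01-20 (EACH STARTS BEFORE THE OTHER ENDS),
#####WHILE A BACK-TO-BACK REQUEST STARTING 2016-01-15 DOES NOT.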
@classmethod
def add(cls, dog=None, start_date=None, end_date=None):
instance = None
created = False
if start_date is not None and end_date is not None:
if start_date >= end_date:
raise Exception('The Start Date has to be before the End Date')
if not cls.dog_has_overlaping_dates(dog=dog, start_date=start_date, end_date=end_date):
instance, created = cls.objects.get_or_create(dog=dog, start_date=start_date, end_date=end_date)
if instance is not None and not created:
raise Exception('This Dog has an overlapping Visit')
else:
raise Exception('This Dog has an overlapping Visit')
return instance, created
@classmethod
def clear(cls):
cls.objects.all().delete()
@classmethod
def randomized_boarding(cls):
cls.clear()
@classmethod
def dogs_in_house(cls, date_obj=None):
dogs_in_house = 0
if date_obj is not None and date_obj.__class__ is date:
filter = cls.objects.filter(start_date__lte=date_obj , end_date__gte=date_obj)
if filter.exists():
dogs_in_house = filter.count()
return dogs_in_house
@classmethod
def date_visits(cls, date_obj=None):
data = cls.objects.none()
if date_obj is not None:
data = cls.objects.filter(start_date__lte=date_obj, end_date__gte=date_obj)
return data
@property
def dog_name(self):
return self.dog.full_name or ""
@property
def dog_url(self):
return self.dog.url
| R3SWebDevelopment/HappyDogs | HappyDogs/HappyDogs/apps/HappyDogs/models.py | Python | mit | 12,617 | ["MOOSE", "VisIt"] | 24e504f547de7f8819ec6ef3497d4d1168e77fb4d852f881c712a7e2791f0d72 |
from __future__ import absolute_import, division, print_function
import sys
from py._code.code import FormattedExcinfo
import py
import warnings
import inspect
import _pytest
from _pytest._code.code import TerminalRepr
from _pytest.compat import (
NOTSET, exc_clear, _format_args,
getfslineno, get_real_func,
is_generator, isclass, getimfunc,
getlocation, getfuncargnames,
safe_getattr,
)
from _pytest.outcomes import fail, TEST_OUTCOME
from _pytest.compat import FuncargnamesCompatAttr
if sys.version_info[:2] == (2, 6):
from ordereddict import OrderedDict
else:
from collections import OrderedDict
def pytest_sessionstart(session):
import _pytest.python
scopename2class.update({
'class': _pytest.python.Class,
'module': _pytest.python.Module,
'function': _pytest.main.Item,
})
session._fixturemanager = FixtureManager(session)
scopename2class = {}
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
# this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
# to make sure we only ever create an according fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, _pytest.python.Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except TEST_OUTCOME:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
# cs.indices.items() is random order of argnames. Need to
# sort this so that different calls to
# get_parametrized_fixture_keys will be deterministic.
for argname, param_index in sorted(cs.indices.items()):
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
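# Illustrative keys for a parametrized argname "db" with param_index 0
# (hypothetical names): session scope -> ("db", 0); module scope ->
# ("db", 0, item.fspath); class scope -> ("db", 0, item.fspath, item.cls)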
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
# items list into a list of items_nomatch, items_same and
# items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore)
if newargkeys: # found a slicing key
slicing_argkey, _ = newargkeys.popitem()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
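# Tiny worked example of slice_items (hypothetical items): given
# [A, B, C, D] where only A and C carry the not-yet-ignored key k,
# it returns items_before=[], items_same=[A, C], items_other=[B, D],
# and newignore = ignore | {k}.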
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "class", "module", "session"
self.scope = "function"
self._fixture_values = {} # argname -> fixture value
self._fixture_defs = {} # argname -> FixtureDef
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def fixturenames(self):
# backward incompatible note: now a readonly property
return list(self._pyfuncitem._fixtureinfo.names_closure)
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfixturevalue(argname) which was naturally
            # not known at parsing/collection time
parentid = self._pyfuncitem.parent.nodeid
fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(_pytest.python.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
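# Editor's sketch of the modern replacement for cached_setup() mentioned
# in its docstring (make_resource() is a hypothetical helper):
#
#   @pytest.fixture(scope="module")
#   def resource(request):
#       res = make_resource()             # setup, cached for the module
#       request.addfinalizer(res.close)   # teardown after the last test
#       return res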
def getfixturevalue(self, argname):
""" Dynamically run a named fixture function.
Declaring fixtures via function argument is recommended where possible.
But if you can only decide whether to use another fixture at test
setup time, you may use this function to retrieve it inside a fixture
or test function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
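# Example use of getfixturevalue() (editor's sketch; the fixture and
# option names are hypothetical): choose between fixtures at setup time.
#
#   @pytest.fixture
#   def backend(request):
#       name = request.config.getoption("--backend", default="sqlite")
#       return request.getfixturevalue(name + "_backend")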
def getfuncargvalue(self, argname):
""" Deprecated, use getfixturevalue. """
from _pytest import deprecated
warnings.warn(
deprecated.GETFUNCARGVALUE,
DeprecationWarning,
stacklevel=2)
return self.getfixturevalue(argname)
def _get_active_fixturedef(self, argname):
try:
return self._fixture_defs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
        # note: this call is deliberately outside the inner try/except
        # above, to prevent the python3 exception context from leaking
        # into the fixture call
result = self._getfixturevalue(fixturedef)
self._fixture_values[argname] = result
self._fixture_defs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfixturevalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
if fixturedef.params is not None:
frame = inspect.stack()[3]
frameinfo = inspect.getframeinfo(frame[0])
source_path = frameinfo.filename
source_lineno = frameinfo.lineno
source_path = py.path.local(source_path)
if source_path.relto(funcitem.config.rootdir):
source_path = source_path.relto(funcitem.config.rootdir)
msg = (
"The requested fixture has no parameter defined for the "
"current test.\n\nRequested fixture '{0}' defined in:\n{1}"
"\n\nRequested here:\n{2}:{3}".format(
fixturedef.argname,
getlocation(fixturedef.func, funcitem.config.rootdir),
source_path,
source_lineno,
)
)
fail(msg)
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" % (
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = _format_args(factory)
lines.append("%s:%d: def %s%s" % (
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" % (self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._fixture_values = request._fixture_values
self._fixture_defs = request._fixture_defs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
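# Editor's note: scopes are ordered broadest-first above, so e.g.
# scopemismatch("session", "function") is True -- a session-scoped
# fixture may not depend on a function-scoped one.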
def scope2index(scope, descr, where=None):
"""Look up the index of ``scope`` and raise a descriptive value error
if not defined.
"""
try:
return scopes.index(scope)
except ValueError:
raise ValueError(
"{0} {1}has an unsupported scope value '{2}'".format(
descr, 'from {0} '.format(where) if where else '',
scope)
)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            # the last fixture raised an error; present
            # it at the requesting side
stack = stack[:-1]
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except (IOError, IndexError, TypeError):
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno + 1))
else:
addline("file %s, line %s" % (fspath, lineno + 1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
parentid = self.request._pyfuncitem.parent.nodeid
for name, fixturedefs in fm._arg2fixturedefs.items():
faclist = list(fm._matchfactories(fixturedefs, parentid))
if faclist and name not in available:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" % (", ".join(sorted(available)),)
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
# tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
lines = self.errorstring.split("\n")
if lines:
tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
lines[0].strip()), red=True)
for line in lines[1:]:
tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
line.strip()), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno + 1)
source = _pytest._code.Source(fixturefunc)
fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
yieldctx = is_generator(fixturefunc)
if yieldctx:
it = fixturefunc(**kwargs)
res = next(it)
def teardown():
try:
next(it)
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
res = fixturefunc(**kwargs)
return res
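# Editor's illustration of the yield protocol handled above: a generator
# fixture must yield exactly once, and the code after the yield becomes
# the teardown driven via next(it) in the registered finalizer.
#
#   @pytest.fixture
#   def tmp_resource():
#       res = open_resource()   # hypothetical setup helper
#       yield res               # value handed to the test
#       res.close()             # runs during teardown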
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scope2index(
scope or "function",
descr='fixture {0}'.format(func.__name__),
where=baseid
)
self.params = params
        startindex = 1 if unittest else None
self.argnames = getfuncargnames(func, startindex=startindex)
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
exceptions = []
try:
while self._finalizer:
try:
func = self._finalizer.pop()
func()
except:
exceptions.append(sys.exc_info())
if exceptions:
e = exceptions[0]
del exceptions # ensure we don't keep all frames alive because of the traceback
py.builtin._reraise(*e)
finally:
ihook = self._fixturemanager.session.ihook
ihook.pytest_fixture_post_finalizer(fixturedef=self)
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
ihook = self._fixturemanager.session.ihook
return ihook.pytest_fixture_setup(fixturedef=self, request=request)
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
def pytest_fixture_setup(fixturedef, request):
""" Execution of fixture setup. """
kwargs = {}
for argname in fixturedef.argnames:
fixdef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixdef.cached_result
request._check_scope(argname, request.scope, fixdef.scope)
kwargs[argname] = result
fixturefunc = fixturedef.func
if fixturedef.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = fixturedef.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "fixturedef" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(fixturedef.func)
if fixturefunc != fixturedef.func:
fixturefunc = fixturefunc.__get__(request.instance)
my_cache_key = request.param_index
try:
result = call_fixture_func(fixturefunc, request, kwargs)
except TEST_OUTCOME:
fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
raise
fixturedef.cached_result = (result, my_cache_key, None)
return result
class FixtureFunctionMarker:
def __init__(self, scope, params, autouse=False, ids=None, name=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.ids = ids
self.name = name
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a fixture factory function.
This decorator can be used (with or without parameters) to define a
fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module" or "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
:arg name: the name of the fixture. This defaults to the name of the
decorated function. If a fixture is used in the same module in
which it is defined, the function name of the fixture will be
shadowed by the function arg that requests the fixture; one way
to resolve this is to name the decorated function
``fixture_<fixturename>`` and then use
``@pytest.fixture(name='<fixturename>')``.
Fixtures can optionally provide their values to test functions using a ``yield`` statement,
instead of ``return``. In this case, the code block after the ``yield`` statement is executed
as teardown code regardless of the test outcome. A fixture function must yield exactly once.
"""
if callable(scope) and params is None and autouse is False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, name=name)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
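# Usage sketch for the decorator defined above (editor's note; names are
# illustrative): each test runs once per parameter value.
#
#   @pytest.fixture(scope="module", params=[1, 2, 3])
#   def number(request):
#       return request.param
#
#   def test_is_positive(number):
#       assert number > 0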
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
""" (return a) decorator to mark a yield-fixture factory function.
.. deprecated:: 3.0
Use :py:func:`pytest.fixture` directly instead.
"""
if callable(scope) and params is None and not autouse:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, ids=ids, name=name)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
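# Editor's sketch of using the session-scoped fixture above:
#
#   def test_only_when_verbose(pytestconfig):
#       if pytestconfig.getoption("verbose") > 0:
#           ...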
class FixtureManager:
"""
    pytest fixture definitions and information are stored and managed
    by this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
to the fixturenames list.
Upon the test-setup phases all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
argnames = getfuncargnames(func, cls=cls)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i + 1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
        # fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
argnames = func_params[0]
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if argname not in func_params and argname not in argnames:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
# The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getattr() ignores such exceptions.
obj = safe_getattr(holderobj, name, None)
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
if not callable(obj):
continue
marker = defaultfuncargprefixmarker
from _pytest import deprecated
self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
if marker.name:
name = marker.name
msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
'and be decorated with @pytest.fixture:\n%s' % name
assert not name.startswith(self._argprefix), msg
fixture_def = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixture_def.has_location:
faclist.append(fixture_def)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixture_def)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
"""
Gets a list of fixtures which are applicable to the given node id.
:param str argname: name of the fixture to search for
:param str nodeid: full node id of the requesting test.
:return: list[FixtureDef]
"""
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
|
hoehnp/navit_test
|
lib/python2.7/site-packages/_pytest/fixtures.py
|
Python
|
gpl-2.0
| 45,173
|
[
"VisIt"
] |
2edbd9841f7071068a78cab4b6e06a3799706170c8c27077e1c2a91079e670dd
|
# coding: utf-8
from __future__ import unicode_literals, division
from custodian.custodian import Validator
from pymatgen.io.vasp import Vasprun
class VasprunXMLValidator(Validator):
"""
Checks that a valid vasprun.xml was generated
"""
def __init__(self):
pass
    def check(self):
        try:
            Vasprun("vasprun.xml")
        except Exception:
            # a bare "except:" would also swallow KeyboardInterrupt;
            # any parsing failure means no valid vasprun.xml was produced
            return True
        return False
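# Editor's usage sketch (hypothetical job list): validators are passed to
# Custodian alongside error handlers and jobs, e.g.
#   c = Custodian(handlers=[], jobs=[my_vasp_job],
#                 validators=[VasprunXMLValidator()])
#   c.run()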
|
davidwaroquiers/custodian
|
custodian/vasp/validators.py
|
Python
|
mit
| 422
|
[
"VASP",
"pymatgen"
] |
b6e781d524dcf42ac8b9252336380cbb0ad50981a379f9a5e75b000d3da78b30
|
## Parse BLAST XML results: split the original FASTA records into those
## that are kept (good hits) and those that are not.
from Bio.Blast import NCBIXML
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.FastaIO import FastaWriter
from Bio.Seq import Seq
from Bio.Alphabet import IUPAC
import sys
blast_xml=sys.argv[1]
original_fasta=sys.argv[2]
good_records=sys.argv[3]
bad_records=sys.argv[4]
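## assumed invocation (editor's note), e.g.:
##   python parse_blast_results_for_good_and_bad.py blast.xml reads.fasta good.fasta bad.fasta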
## Write the records that have hits in the BLAST XML file to a new file:
## first collect the ids of those records, then compare them to the FASTA records.
blast_records = NCBIXML.parse(open(blast_xml))
good_ids=[]
bad_ids=[]
for record in blast_records:
#print record
#print dir(record)
print "record id", record.query
if record.alignments:
# to print the "best" matches bit-score
print record.alignments[0].hsps[0].expect
# to print the "best" matches bit-score
print record.alignments[0].hsps[0].score
        # collect identities across every HSP of every alignment, then
        # decide once per record so each query id is added exactly once
        identities_list = []
        for alignment in record.alignments:
            for hsp in alignment.hsps:
                #print dir(hsp)
                identities_list.append(hsp.identities)
                print hsp.identities
        # keep the sequence if any high-scoring pair has enough identities;
        # otherwise it is discarded
        if any(i >= 20 for i in identities_list):
            print "yes"
            good_ids.append(record.query)
        else:
            print "identity too low"
            bad_ids.append(record.query)
    else:
        # record has no alignments at all: no hit for this query
        print "no hit"
        bad_ids.append(record.query)
#print "List of good ids", good_ids
#print "List of bad ids", bad_ids
original_fasta_file = SeqIO.parse(open(original_fasta,"rU"), "fasta")
destination_good = open(good_records,'w')
destination_bad = open(bad_records,'w')
for reads in original_fasta_file:
print reads.id
if reads.id in good_ids:
print "hoorah"
SeqIO.write(reads, destination_good, "fasta")
elif reads.id in bad_ids:
print "we did not find it"
SeqIO.write(reads, destination_bad, "fasta")
destination_good.close()
destination_bad.close()
|
jooolia/phylo_temporal_jericho
|
sequence_processing/parse_blast_results_for_good_and_bad.py
|
Python
|
mit
| 2,314
|
[
"BLAST"
] |
8ae948089c488d541bd26c7743fb4b6f01863b50a5039b675a560150a8150e69
|
import numpy as np
import mayavi.mlab as mlab
import moviepy.editor as mpy
duration = 2 # duration of the animation in seconds (it will loop)
# MAKE A FIGURE WITH MAYAVI
fig = mlab.figure(size=(500, 500), bgcolor=(1,1,1))
u = np.linspace(0,2*np.pi,100)
xx,yy,zz = np.cos(u), np.sin(3*u), np.sin(u) # Points
l = mlab.plot3d(xx,yy,zz, representation="wireframe", tube_sides=5,
line_width=.5, tube_radius=0.2, figure=fig)
# ANIMATE THE FIGURE WITH MOVIEPY, WRITE A VIDEO FILE
def make_frame(t):
""" Generates and returns the frame for time t. """
y = np.sin(3*u)*(0.2+0.5*np.cos(2*np.pi*t/duration))
l.mlab_source.set(y = y) # change y-coordinates of the mesh
mlab.view(azimuth= 360*t/duration, distance=9) # camera angle
return mlab.screenshot(antialiased=True) # return a RGB image
animation = mpy.VideoClip(make_frame, duration=duration).resize(0.5)
# Video generation takes 10 seconds, GIF generation takes 25s
animation.write_videofile("wireframe.mp4", fps=20)
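# Editor's note: to also produce the animated GIF mentioned in the timing
# comment above, moviepy's write_gif can be used on the same clip:
#   animation.write_gif("wireframe.gif", fps=20)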
|
solvery/lang-features
|
python/use_lib/video_1/mayavi_2.py
|
Python
|
gpl-2.0
| 1,037
|
[
"Mayavi"
] |
1c9467498229aa354341904d21049f3d4b7587a9dd9da35ea94ace8fbcd35406
|